diff --git "a/1142.jsonl" "b/1142.jsonl" new file mode 100644--- /dev/null +++ "b/1142.jsonl" @@ -0,0 +1,94 @@ +{"seq_id": "20640979959", "text": "import torch\nfrom ..tts_modules.encoder import SpeakerEncoderManager\nfrom ..tts_modules.synthesizer import SynthesizerManager\nfrom ..tts_modules.vocoder import VocoderManager\nfrom .configs import get_default_main_configs\n\n\nclass MultispeakerManager:\n \"\"\"\n A class used to control the three module managers and interactions between them\n\n Attributes\n -----------\n :param main_configs: main configuration (CfgNode object)\n :param encoder: Optional. The custom speaker encoder model. If None, the default DVecModel is used\n :param encoder_test_dataloader: Optional. Test dataset for speaker encoder model\n :param encoder_train_dataloader: Optional. Train dataset for speaker encoder model\n :param synthesizer: Optional. The custom synthesizer model. If None, the default Tacotron model is used\n :param synthesizer_test_dataloader: Optional. Test dataset for synthesizer model\n :param synthesizer_train_dataloader: Optional. Train dataset for synthesizer model\n :param vocoder: Optional. The custom vocoder model. If None, the default WaveRNN model is used\n :param vocoder_test_dataloader: Optional. Test dataset for vocoder model\n :param vocoder_train_dataloader: Optional. Train dataset for vocoder model\n\n Methods\n -----------\n inference()\n Runs the whole sound generation pipeline according to the given configuration.\n In particular, reads the given *.wav file with recorded sample voice and\n creates corresponding embeddings. After that reads the *.txt file with given text\n and produces the output result.wav file,\n where the text is read using the obtained voice embeddings.\n\n process_speaker(...)\n Processes the given *.wav file and produces the corresponding embeddings with the help of SpeakerEncoderManager\n\n synthesize_spectrograms(...)\n Creates spectrograms using voice embeddings and given text in *.txt file with the help of SynthesizerManager\n\n generate_waveform(..)\n Generates a result.wav file using the spectrograms with the help of VocoderManager\n \"\"\"\n\n def __init__(self,\n main_configs,\n encoder=None,\n encoder_test_dataloader=None,\n encoder_train_dataloader=None,\n synthesizer=None,\n synthesizer_test_dataloader=None,\n synthesizer_train_dataloader=None,\n vocoder=None,\n vocoder_test_dataloader=None,\n vocoder_train_dataloader=None\n ):\n self.main_configs = main_configs\n if self.main_configs is None:\n self.main_configs = get_default_main_configs()\n self.encoder_manager = SpeakerEncoderManager(self.main_configs,\n model=encoder,\n test_dataloader=encoder_test_dataloader,\n train_dataloader=encoder_train_dataloader\n )\n\n self.synthesizer_manager = SynthesizerManager(self.main_configs,\n model=synthesizer,\n test_dataloader=synthesizer_test_dataloader,\n train_dataloader=synthesizer_train_dataloader\n )\n self.vocoder_manager = VocoderManager(self.main_configs,\n model=vocoder,\n test_dataloader=vocoder_test_dataloader,\n train_dataloader=vocoder_train_dataloader\n )\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n def inference(self):\n \"\"\"\n Runs the whole sound generation pipeline according to the given configuration.\n In particular, reads the given *.wav file with recorded sample voice and\n creates corresponding embeddings. 
After that reads the *.txt file with given text\n and produces the output result.wav file,\n where the text is read using the obtained voice embeddings.\n The result.wav can be save according to the configs\n\n :return: generated wav file\n \"\"\"\n embeddings = self.process_speaker(speaker_speech_path=self.main_configs.SPEAKER_SPEECH_PATH)\n with open(self.main_configs.INPUT_TEXTS_PATH, \"r\") as file:\n texts = file.readlines()\n specs = self.synthesize_spectrograms(texts=texts, embeddings=embeddings)\n specs = specs[0]\n wav = self.generate_waveform(specs)\n return wav\n\n def process_speaker(self, speaker_speech_path, save_embeddings_path=None,\n save_embeddings_speaker_name=\"test_speaker\"):\n \"\"\"\n Processes the given *.wav file and produces the corresponding embeddings with the help of SpeakerEncoderManager\n\n :param speaker_speech_path: The path to the *.wav file with sample recordings of new speaker\n :param save_embeddings_path: Optional. The path for saving the obtained embeddings\n :param save_embeddings_speaker_name: Optional. The name of the file for saving the obtained embeddings\n\n :return: embeddings\n \"\"\"\n embeddings = self.encoder_manager.process_speaker(speaker_speech_path,\n save_embeddings_path=save_embeddings_path,\n save_embeddings_speaker_name=save_embeddings_speaker_name\n )\n return embeddings\n\n def synthesize_spectrograms(self, texts, embeddings, do_save_spectrograms=True):\n \"\"\"\n Creates spectrograms using voice embeddings and given text in *.txt file with the help of SynthesizerManager\n\n :param texts: The text which is used to create spectrograms\n :param embeddings: The embeddings of a particular speaker used to create spectrograms\n :param do_save_spectrograms: Optional. Flag defines whether to save spectrograms or not\n\n :return: spectrograms\n \"\"\"\n specs = self.synthesizer_manager.synthesize_spectrograms(texts,\n embeddings,\n do_save_spectrograms=do_save_spectrograms\n )\n return specs\n\n def generate_waveform(self, mel, normalize=True, batched=True,\n target=8000, overlap=800, do_save_wav=True):\n \"\"\"\n Generates a result.wav file using the spectrograms with the help of VocoderManager\n\n :param mel: mel-spectrograms used to create the rsulting wav file\n :param normalize: Optional. The flag defines whether to normalize the mel-spectrograms or not\n :param batched: Optional. Flag define whether to fold with overlap and to xfade and unfold or not\n :param target: Optional. Target timesteps for each index of batch\n :param overlap: Optional. Timesteps for both xfade and rnn warmup\n :param do_save_wav: Optional. 
Flag define whether to save the resulting wav to a file or not\n\n :return: The resulting wav\n \"\"\"\n wav = self.vocoder_manager.infer_waveform(mel,\n normalize=normalize,\n batched=batched,\n target=target,\n overlap=overlap,\n do_save_wav=do_save_wav\n )\n return wav\n", "repo_name": "adasegroup/OSM-one-shot-multispeaker", "sub_path": "src/osms/common/multispeaker.py", "file_name": "multispeaker.py", "file_ext": "py", "file_size_in_byte": 8028, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "75", "api": [{"api_name": "configs.get_default_main_configs", "line_number": 58, "usage_type": "call"}, {"api_name": "tts_modules.encoder.SpeakerEncoderManager", "line_number": 59, "usage_type": "call"}, {"api_name": "tts_modules.synthesizer.SynthesizerManager", "line_number": 65, "usage_type": "call"}, {"api_name": "tts_modules.vocoder.VocoderManager", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "40661598094", "text": "from django.http import HttpResponse\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom faservice.data.request.depreciationsettingrequest import DepreciationSettingRequest\nfrom faservice.service.depreciationsettingservice import DepreciationSettingService\n#from inwardservice.data.request.escalationtyperequest import EscalatonTypeRequest\n#from inwardservice.service.escalationtypeservice import EscalationTypeService\n#from userservice.service.employeeservice import EmployeeService\nfrom userservice.service.employeeservice import EmployeeService\nfrom nwisefin.settings import logger\nfrom utilityservice.service.nwisefinauthenticate import NWisefinAuthentication\nfrom utilityservice.service.nwisefinpermission import NWisefinPermission\nimport json\nfrom utilityservice.data.response.nwisefinpage import NWisefinPage\nfrom faservice.util.FaApiService import FaApiService\n@csrf_exempt\n@api_view(['GET', 'POST'])\n@authentication_classes([NWisefinAuthentication])\n@permission_classes([IsAuthenticated, NWisefinPermission])\ndef create_depreciationsetting(request):\n if request.method == 'POST':\n scope = request.scope\n depsetting_serv = DepreciationSettingService(scope)\n depsetting_json = json.loads(request.body)\n logger.info('FAL_ASSET_DEPRICIATIONSETTINGS_DATA:{}'.format(depsetting_json))\n depsetting_obj = DepreciationSettingRequest(depsetting_json)\n user_id = request.user.id\n emp_service = FaApiService(scope)\n emp_id = request.employee_id\n resp_obj = depsetting_serv.create_depsetting( depsetting_obj, emp_id)\n response = HttpResponse(resp_obj.get(), content_type=\"application/json\")\n return response\n elif request.method == 'GET':\n return fetch_depsetting_list(request)\n\n\ndef fetch_depsetting_list(request):\n user_id = request.user.id\n page = request.GET.get('page', 1)\n page = int(page)\n vys_page = NWisefinPage(page, 10)\n query = request.GET.get('query', None)\n scope = request.scope\n depsetting_serv = DepreciationSettingService(scope)\n resp_obj = depsetting_serv.fetch_depsetting_list(query, vys_page)\n response = HttpResponse(resp_obj.get(), 
content_type=\"application/json\")\n return response\n\n@csrf_exempt\n@api_view(['GET', 'DELETE'])\n@authentication_classes([NWisefinAuthentication])\n@permission_classes([IsAuthenticated, NWisefinPermission])\ndef fetch_depsetting(request, depsetting_id):\n if request.method == 'GET':\n user_id = request.user.id\n scope = request.scope\n depsetting_serv = DepreciationSettingService(scope)\n resp_obj = depsetting_serv.fetch_depsetting(depsetting_id)\n response = HttpResponse(resp_obj.get(), content_type=\"application/json\")\n return response\n elif request.method == 'DELETE':\n return delete_depsetting(request, depsetting_id)\n\n\ndef delete_depsetting(request, depsetting_id):\n user_id = request.user.id\n scope = request.scope\n emp_service = FaApiService(scope)\n emp_id = request.employee_id\n depsetting_serv = DepreciationSettingService(scope)\n resp_obj = depsetting_serv.delete_depsetting(depsetting_id,emp_id)\n response = HttpResponse(resp_obj.get(), content_type=\"application/json\")\n return response\n", "repo_name": "Dhivyadharshinin/crm-test", "sub_path": "wisefin/faservice/controller/depreciationsettingcontroller.py", "file_name": "depreciationsettingcontroller.py", "file_ext": "py", "file_size_in_byte": 3394, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "faservice.service.depreciationsettingservice.DepreciationSettingService", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "nwisefin.settings.logger.info", "line_number": 27, "usage_type": "call"}, {"api_name": "nwisefin.settings.logger", "line_number": 27, "usage_type": "name"}, {"api_name": "faservice.data.request.depreciationsettingrequest.DepreciationSettingRequest", "line_number": 28, "usage_type": "call"}, {"api_name": "faservice.util.FaApiService.FaApiService", "line_number": 30, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.decorators.authentication_classes", "line_number": 20, "usage_type": "call"}, {"api_name": "utilityservice.service.nwisefinauthenticate.NWisefinAuthentication", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 21, "usage_type": "name"}, {"api_name": "utilityservice.service.nwisefinpermission.NWisefinPermission", "line_number": 21, "usage_type": "name"}, {"api_name": "utilityservice.data.response.nwisefinpage.NWisefinPage", "line_number": 43, "usage_type": "call"}, {"api_name": "faservice.service.depreciationsettingservice.DepreciationSettingService", "line_number": 46, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "faservice.service.depreciationsettingservice.DepreciationSettingService", "line_number": 59, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 61, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 52, "usage_type": "call"}, {"api_name": 
"rest_framework.decorators.authentication_classes", "line_number": 53, "usage_type": "call"}, {"api_name": "utilityservice.service.nwisefinauthenticate.NWisefinAuthentication", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 54, "usage_type": "name"}, {"api_name": "utilityservice.service.nwisefinpermission.NWisefinPermission", "line_number": 54, "usage_type": "name"}, {"api_name": "faservice.util.FaApiService.FaApiService", "line_number": 70, "usage_type": "call"}, {"api_name": "faservice.service.depreciationsettingservice.DepreciationSettingService", "line_number": 72, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "75112078003", "text": "from nonebot import on_command\nimport random\nimport os\nimport json\nfrom nonebot.adapters import Bot, Event\nfrom nonebot.rule import to_me\n\ndef ran(ilist, repeat):\n pool = list(ilist) * repeat\n while True:\n random.shuffle(pool)\n for item in pool:\n yield item\n\ndef some_famous(famous, before, after):\n xx = next(famous)\n xx = xx.replace(\"a\",random.choice(before))\n xx = xx.replace(\"b\",random.choice(after))\n return xx\n\ndef another():\n xx = \"。\"\n xx += \"\\n\"\n xx += \" \"\n return xx\n\nbullshit = on_command('狗屁不通', rule=to_me(), priority=5, block=True)\n\n@bullshit.handle()\nasync def _(bot: Bot, event: Event):\n \n group = int(event.get_session_id().split(\"_\")[1])\n msg = str(event.get_message()).split(\" \")\n\n if len(msg) == 1:\n \n reply = \"格式错误,应为:狗屁不通 主题\"\n\n else:\n\n assist_path = bot.config.assist_path\n data_path = os.path.join(os.getcwd(), assist_path, 'bullshit.json')\n with open(data_path,'rb') as fp:\n data = json.load(fp)\n \n famous = data['famous']\n before = data['before']\n after = data['after']\n bosh = data['bosh']\n xx = ''\n repeat = 2\n\n xx = msg[1]\n next_bosh = ran(bosh, repeat)\n next_famous = ran(famous, repeat)\n tmp = str()\n while ( len(tmp) < 200 ) :\n k = random.randint(0,100)\n if k < 5:\n tmp += another()\n elif k < 20 :\n tmp += some_famous(next_famous, before, after)\n else:\n tmp += next(next_bosh)\n reply = tmp.replace(\"x\" ,xx)\n \n await bot.send_group_msg(group_id=group, message=reply)", "repo_name": "JackFishxxx/Asoul-bot", "sub_path": "src/plugins/others/bullshit_generator.py", "file_name": "bullshit_generator.py", "file_ext": "py", "file_size_in_byte": 1738, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "75", "api": [{"api_name": "random.shuffle", "line_number": 11, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 17, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 18, "usage_type": "call"}, {"api_name": "nonebot.on_command", "line_number": 27, "usage_type": "call"}, {"api_name": "nonebot.rule.to_me", "line_number": 27, "usage_type": "call"}, {"api_name": "nonebot.adapters.Bot", "line_number": 30, "usage_type": "name"}, {"api_name": "nonebot.adapters.Event", "line_number": 30, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 42, "usage_type": "call"}, {"api_name": "json.load", "line_number": 44, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 58, "usage_type": "call"}]} 
+{"seq_id": "44542571463", "text": "import os\nfrom typing import Optional, Dict, List\nfrom dataclasses import dataclass\nfrom pydantic import BaseModel\nfrom inference import Predictor\nfrom config import Config\nfrom version import Version\nimport argparse\nimport sys\n\n# local imports\nsys.path.append('message_bus')\nfrom message_bus import MessageBus\n\n# Get model path\nMODEL_PATH = os.path.dirname(__file__) + \"./data/sequential_baseline.pt\"\n\n# Create the TDAC instance\nclass Tdac:\n\n def __init__(self, args):\n\n # Create predictor object\n self.predictor = Predictor(model_path=MODEL_PATH, history_len=7)\n\n config_d = Config().get_d()\n version = Version().version\n self.message_bus = MessageBus(self,\n args.host, args.port, args.nochat, config_d, version)\n\n # The model should reset before and after each mission.\n def reset_model(self):\n self.predictor.reset_model()\n\n # classification for ASR and chat messages, utterance is formatted\n # as 'speaker : text'\n def classify_utterance(self, utterance):\n classification = self.predictor.predict(utterance)\n return classification\n\n\n# If run as a script, take command line args\nif __name__ == '__main__':\n\n # ingest command line args\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--host',\n action='store',\n default = 'localhost',\n help = 'The MQTT broker machine name.')\n parser.add_argument('--port',\n action='store',\n default = 1883,\n type=int,\n help = 'The MQTT broker port number.')\n parser.add_argument('--nochat',\n action='store_true',\n help = 'Do not process Minecraft Chat messages.')\n args = parser.parse_args(sys.argv[1:])\n\n # start the application\n tdac = Tdac(args)\n", "repo_name": "tamu-nlp/Dialogue-Act-Classification", "sub_path": "tdac.py", "file_name": "tdac.py", "file_ext": "py", "file_size_in_byte": 1830, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "inference.Predictor", "line_number": 24, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 26, "usage_type": "call"}, {"api_name": "version.Version", "line_number": 27, "usage_type": "call"}, {"api_name": "message_bus.MessageBus", "line_number": 28, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 46, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "3102890121", "text": "from aiohttp import web\nfrom aiosparql.syntax import IRI, Literal\nfrom itertools import groupby\n\nfrom muswarmadmin import pipelines, repositories, services\n\n\nclass Triple:\n \"\"\"\n A triple: subject (s), predicate (p) and object (o)\n \"\"\"\n def __init__(self, data):\n assert isinstance(data, dict)\n assert \"s\" in data\n assert isinstance(data['s'], dict) and data['p']['type'] == \"uri\"\n assert \"p\" in data\n assert isinstance(data['p'], dict) and data['p']['type'] == \"uri\"\n assert \"o\" in data\n assert isinstance(data['o'], dict)\n self.s = IRI(data['s']['value'])\n self.p = IRI(data['p']['value'])\n if 
data['o']['type'] == \"uri\":\n self.o = IRI(data['o']['value'])\n elif data['o']['type'] in (\"literal\", \"typed-literal\"):\n self.o = Literal(data['o']['value'])\n else:\n raise NotImplementedError(\"object type %s\" % data['o']['type'])\n\n def __repr__(self): # pragma: no cover\n return \"<%s s=%s p=%s o=%s>\" % (\n self.__class__.__name__, self.s, self.p, self.o)\n\n def __hash__(self):\n return hash((hash(self.s), hash(self.p), hash(self.o)))\n\n def __eq__(self, other):\n if isinstance(other, Triple):\n return hash(self) == hash(other)\n else:\n return False\n\n\nclass UpdateData:\n \"\"\"\n A Delta service update: all its inserts, all its deletes\n \"\"\"\n def __init__(self, data):\n assert isinstance(data['graph'], str)\n assert isinstance(data['inserts'], list)\n assert isinstance(data['deletes'], list)\n self.graph = data['graph']\n inserts = set(map(Triple, data['inserts']))\n deletes = set(map(Triple, data['deletes']))\n null_operations = inserts & deletes\n self.inserts = list(inserts - null_operations)\n self.deletes = list(deletes - null_operations)\n\n def __repr__(self): # pragma: no cover\n return \"<%s graph=%s inserts=%s deletes=%s>\" % (\n self.__class__.__name__, self.graph, self.inserts, self.deletes)\n\n def filter_inserts(self, func):\n \"\"\"\n Filter inserts that func(x) match where x is a singe triple of the\n update\n \"\"\"\n assert callable(func)\n return (x for x in self.inserts if func(x))\n\n def filter_deletes(self, func):\n \"\"\"\n Filter deletes that func(x) match where x is a singe triple of the\n update\n \"\"\"\n assert callable(func)\n return (x for x in self.deletes if func(x))\n\n\ndef select_to_triples(result):\n \"\"\"\n Transform the query result of a SELECT query to a list of triples.\n \"\"\"\n return [Triple(x) for x in result['results']['bindings']]\n\n\ndef groupby_subject(triples):\n \"\"\"\n Group a list of triples by subject and return a dict where the keys are the\n subjects and the values are lists of triples\n \"\"\"\n return {\n s: list(group)\n for s, group in groupby(triples, lambda x: x.s)\n }\n\n\ndef filter_updates(data, resource_type):\n \"\"\"\n Filter updates for a resource type\n \"\"\"\n inserts = groupby_subject(data.filter_inserts(\n lambda x: x.s.value.startswith(resource_type.value)))\n deletes = groupby_subject(data.filter_deletes(\n lambda x: x.s.value.startswith(resource_type.value)))\n return (inserts, deletes)\n\n\nasync def update(request):\n \"\"\"\n The API entry point for the Delta service callback\n \"\"\"\n graph = request.app.sparql.graph\n try:\n data = await request.json()\n except Exception:\n raise web.HTTPBadRequest(body=\"invalid json\")\n try:\n data = [UpdateData(x) for x in data['delta']]\n except Exception:\n request.app.logger.exception(\"Cannot parse delta payload\")\n raise web.HTTPBadRequest(body=\"cannot parse deltas received\")\n try:\n first_data = next(x for x in data if x.graph == graph)\n except StopIteration:\n raise web.HTTPNoContent()\n\n await repositories.update(\n request.app,\n *filter_updates(\n first_data,\n request.app.base_resource + \"stacks/\"))\n\n await pipelines.update(\n request.app,\n *filter_updates(\n first_data,\n request.app.base_resource + \"pipeline-instances/\"))\n\n await services.update(\n request.app,\n *filter_updates(\n first_data,\n request.app.base_resource + \"services/\"))\n\n raise web.HTTPNoContent()\n\n\nasync def startup(app):\n \"\"\"\n Hook on the startup of the application that will find all the existing\n updates (restartRequested, 
requestedStatus, ...) and run them\n \"\"\"\n result = await repositories.get_existing_updates(app.sparql)\n app.loop.create_task(\n repositories.update(app,\n groupby_subject(select_to_triples(result)), {}))\n result = await pipelines.get_existing_updates(app.sparql)\n app.loop.create_task(\n pipelines.update(app, groupby_subject(select_to_triples(result)), {}))\n result = await services.get_existing_updates(app.sparql)\n app.loop.create_task(\n services.update(app, groupby_subject(select_to_triples(result)), {}))\n", "repo_name": "big-data-europe/mu-swarm-admin-service", "sub_path": "muswarmadmin/delta.py", "file_name": "delta.py", "file_ext": "py", "file_size_in_byte": 5189, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "75", "api": [{"api_name": "aiosparql.syntax.IRI", "line_number": 20, "usage_type": "call"}, {"api_name": "aiosparql.syntax.IRI", "line_number": 21, "usage_type": "call"}, {"api_name": "aiosparql.syntax.IRI", "line_number": 23, "usage_type": "call"}, {"api_name": "aiosparql.syntax.Literal", "line_number": 25, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 93, "usage_type": "call"}, {"api_name": "aiohttp.web.HTTPBadRequest", "line_number": 116, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 116, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPBadRequest", "line_number": 121, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 121, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPNoContent", "line_number": 125, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 125, "usage_type": "name"}, {"api_name": "muswarmadmin.repositories.update", "line_number": 127, "usage_type": "call"}, {"api_name": "muswarmadmin.repositories", "line_number": 127, "usage_type": "name"}, {"api_name": "muswarmadmin.pipelines.update", "line_number": 133, "usage_type": "call"}, {"api_name": "muswarmadmin.pipelines", "line_number": 133, "usage_type": "name"}, {"api_name": "muswarmadmin.services.update", "line_number": 139, "usage_type": "call"}, {"api_name": "muswarmadmin.services", "line_number": 139, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPNoContent", "line_number": 145, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 145, "usage_type": "name"}, {"api_name": "muswarmadmin.repositories.get_existing_updates", "line_number": 153, "usage_type": "call"}, {"api_name": "muswarmadmin.repositories", "line_number": 153, "usage_type": "name"}, {"api_name": "muswarmadmin.repositories.update", "line_number": 155, "usage_type": "call"}, {"api_name": "muswarmadmin.repositories", "line_number": 155, "usage_type": "name"}, {"api_name": "muswarmadmin.pipelines.get_existing_updates", "line_number": 157, "usage_type": "call"}, {"api_name": "muswarmadmin.pipelines", "line_number": 157, "usage_type": "name"}, {"api_name": "muswarmadmin.pipelines.update", "line_number": 159, "usage_type": "call"}, {"api_name": "muswarmadmin.pipelines", "line_number": 159, "usage_type": "name"}, {"api_name": "muswarmadmin.services.get_existing_updates", "line_number": 160, "usage_type": "call"}, {"api_name": "muswarmadmin.services", "line_number": 160, "usage_type": "name"}, {"api_name": "muswarmadmin.services.update", "line_number": 162, "usage_type": "call"}, {"api_name": "muswarmadmin.services", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "25719207614", "text": "from django.shortcuts import render\nfrom .core import classify_image\nimport base64\n\n# Create 
your views here.\ndef home(request):\n return render(request, 'home.html')\n\ndef results(request):\n if request.method == 'POST':\n image_data = request.FILES.get('image_data')\n\n if image_data:\n image_bytes = image_data.read()\n base64_image = base64.b64encode(image_bytes).decode('utf-8')\n result = classify_image(base64_image)\n\n for item in result:\n class_names = list(item['class_dictionary'].keys())\n class_probabilities = item['class_probability']\n\n item['class_data'] = list(zip(class_names, class_probabilities))\n\n return render(request, 'results.html', {'result': result})\n return render(request, 'results.html')\n\n", "repo_name": "mhassaan8979/Image_Classification", "sub_path": "Image_Classifier/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 832, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "django.shortcuts.render", "line_number": 7, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 15, "usage_type": "call"}, {"api_name": "core.classify_image", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "38317386834", "text": "from dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport xtgeo\n\nfrom webviz_subsurface._providers import (\n EnsembleSurfaceProvider,\n QualifiedSurfaceAddress,\n SimulatedSurfaceAddress,\n StatisticalSurfaceAddress,\n SurfaceAddress,\n SurfaceImageMeta,\n SurfaceImageServer,\n)\nfrom webviz_subsurface._providers.ensemble_surface_provider.ensemble_surface_provider import (\n SurfaceStatistic,\n)\nfrom webviz_subsurface.plugins._co2_leakage._utilities.plume_extent import (\n truncate_surfaces,\n)\n\n\n@dataclass\nclass TruncatedSurfaceAddress:\n name: str\n datestr: str\n realizations: List[int]\n basis_attribute: str\n threshold: float\n smoothing: float\n\n @property\n def attribute(self) -> str:\n return f\"Truncated_{self.basis_attribute}_{self.threshold}_{self.smoothing}\"\n\n\ndef publish_and_get_surface_metadata(\n server: SurfaceImageServer,\n provider: EnsembleSurfaceProvider,\n address: Union[SurfaceAddress, TruncatedSurfaceAddress],\n) -> Tuple[Optional[SurfaceImageMeta], str]:\n if isinstance(address, TruncatedSurfaceAddress):\n return _publish_and_get_truncated_surface_metadata(server, provider, address)\n provider_id: str = provider.provider_id()\n qualified_address = QualifiedSurfaceAddress(provider_id, address)\n surf_meta = server.get_surface_metadata(qualified_address)\n if not surf_meta:\n # This means we need to compute the surface\n surface = provider.get_surface(address)\n if not surface:\n raise ValueError(f\"Could not get surface for address: {address}\")\n server.publish_surface(qualified_address, surface)\n surf_meta = server.get_surface_metadata(qualified_address)\n return surf_meta, server.encode_partial_url(qualified_address)\n\n\ndef _publish_and_get_truncated_surface_metadata(\n server: SurfaceImageServer,\n provider: EnsembleSurfaceProvider,\n address: TruncatedSurfaceAddress,\n) -> Tuple[Optional[SurfaceImageMeta], str]:\n qualified_address = QualifiedSurfaceAddress(\n provider.provider_id(),\n # TODO: Should probably use a dedicated address type for this. Statistical surface\n # is the closest, as it allows including a list of realizations. 
However, it is\n # perhaps not very \"statistical\", and the provided SurfaceStatistic is not\n # appropriate here.\n StatisticalSurfaceAddress(\n address.attribute,\n address.name,\n address.datestr,\n SurfaceStatistic.MEAN,\n address.realizations,\n ),\n )\n surf_meta = server.get_surface_metadata(qualified_address)\n if surf_meta is None:\n surface = _generate_surface(provider, address)\n if surface is None:\n raise ValueError(f\"Could not generate surface for address: {address}\")\n server.publish_surface(qualified_address, surface)\n surf_meta = server.get_surface_metadata(qualified_address)\n return surf_meta, server.encode_partial_url(qualified_address)\n\n\ndef _generate_surface(\n provider: EnsembleSurfaceProvider,\n address: TruncatedSurfaceAddress,\n) -> Optional[xtgeo.RegularSurface]:\n surfaces = [\n provider.get_surface(\n SimulatedSurfaceAddress(\n attribute=address.basis_attribute,\n name=address.name,\n datestr=address.datestr,\n realization=r,\n )\n )\n for r in address.realizations\n ]\n surfaces = [s for s in surfaces if s is not None]\n if len(surfaces) == 0:\n return None\n plume_count = truncate_surfaces(surfaces, address.threshold, address.smoothing)\n template: xtgeo.RegularSurface = surfaces[0].copy() # type: ignore\n template.values = plume_count\n template.values.mask = plume_count < 1e-4 # type: ignore\n return template\n", "repo_name": "equinor/webviz-subsurface", "sub_path": "webviz_subsurface/plugins/_co2_leakage/_utilities/surface_publishing.py", "file_name": "surface_publishing.py", "file_ext": "py", "file_size_in_byte": 3857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "75", "api": [{"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 23, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.SurfaceImageServer", "line_number": 38, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.EnsembleSurfaceProvider", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 40, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.SurfaceAddress", "line_number": 40, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.QualifiedSurfaceAddress", "line_number": 45, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.SurfaceImageMeta", "line_number": 41, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.SurfaceImageServer", "line_number": 58, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.EnsembleSurfaceProvider", "line_number": 59, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.QualifiedSurfaceAddress", "line_number": 62, "usage_type": "call"}, {"api_name": "webviz_subsurface._providers.StatisticalSurfaceAddress", "line_number": 68, "usage_type": "call"}, {"api_name": "webviz_subsurface._providers.ensemble_surface_provider.ensemble_surface_provider.SurfaceStatistic.MEAN", "line_number": 72, "usage_type": "attribute"}, {"api_name": "webviz_subsurface._providers.ensemble_surface_provider.ensemble_surface_provider.SurfaceStatistic", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 61, "usage_type": "name"}, {"api_name": 
"webviz_subsurface._providers.SurfaceImageMeta", "line_number": 61, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.EnsembleSurfaceProvider", "line_number": 87, "usage_type": "name"}, {"api_name": "webviz_subsurface._providers.SimulatedSurfaceAddress", "line_number": 92, "usage_type": "call"}, {"api_name": "webviz_subsurface.plugins._co2_leakage._utilities.plume_extent.truncate_surfaces", "line_number": 104, "usage_type": "call"}, {"api_name": "xtgeo.RegularSurface", "line_number": 105, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 89, "usage_type": "name"}, {"api_name": "xtgeo.RegularSurface", "line_number": 89, "usage_type": "attribute"}]} +{"seq_id": "28569292134", "text": "\"\"\"empty message\n\nRevision ID: 6d76ba6bedea\nRevises: 6255224a2846\nCreate Date: 2017-03-07 10:12:57.955000\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6d76ba6bedea'\ndown_revision = '6255224a2846'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('chartContainer', sa.Column('containerSize_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'chartContainer', 'containerSize', ['containerSize_id'], ['id'])\n op.drop_column('chartContainer', 'startsize')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('chartContainer', sa.Column('startsize', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'chartContainer', type_='foreignkey')\n op.drop_column('chartContainer', 'containerSize_id')\n # ### end Alembic commands ###\n", "repo_name": "ThePoulsen/strategy", "sub_path": "migrations/versions/6d76ba6bedea_.py", "file_name": "6d76ba6bedea_.py", "file_ext": "py", "file_size_in_byte": 1012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op.drop_constraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "35104757420", "text": "import argparse\nimport io\nimport json\nimport math\nimport multiprocessing as mp\nimport os\nimport numpy as np\nimport copy\n\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom functools import lru_cache\nfrom multiprocessing import Pool\n\n\n# 
https://github.com/facebookresearch/detectron2/issues/485\ndef process(coco_annoations_file:str, output_file:str):\n\n with io.open(coco_annoations_file, \"r\", encoding=\"utf-8\") as json_file:\n data = json.load(json_file)\n \n print(\"Total annotations = \", len(data['annotations']))\n id_to_remove = []\n id_to_keep = []\n # loop over categories and remove the ones that are not text\n for i in range(len(data['categories'])):\n name = data['categories'][i]['name'] \n marker = name.split('.')[0]\n # \"r\", \"s\", \"d\"\n # if marker not in [\"m\"] or name.endswith('_answer') or name != 'm.claim_data':\n if name != 'r.patient_name':\n id_to_remove.append(data['categories'][i]['id'])\n data['categories'][i] = None\n continue\n \n id_to_keep.append(data['categories'][i]['id'])\n print( name )\n\n # if data['categories'][i]['name'] != 'text':\n # data['categories'].pop(i)\n print(\"Total categories = \", len(data['categories']))\n print(\"Total id_to_remove = \", len(id_to_remove))\n print(\"Total id_to_keep = \", len(id_to_keep))\n\n print(id_to_keep)\n #remove the categories that are not text\n data['categories'] = [x for x in data['categories'] if x is not None]\n \n print(\"Total categories = \", len(data['categories'])) \n \n # loop over the annotations and ensure that the segmentation node is present and the area is set \n for i in range(len(data['annotations'])): \n ann = data['annotations'][i]\n\n # x1, y1, x2, y2 = ann['bbox']\n # x = max(0, min(math.floor(x1), math.floor(x2)))\n # y = max(0, min(math.floor(y1), math.floor(y2)))\n # w, h = math.ceil(abs(x2 - x1)), math.ceil(abs(y2 - y1))\n x, y, w, h = ann['bbox']\n bbox = [x, y, w, h]\n segmentation = [x, y, x + w, y, x + w, y + h, x, y + h]\n \n ann['bbox'] = bbox\n ann['segmentation'] = [segmentation]\n ann['area'] = w * h\n\n if ann['category_id'] not in id_to_keep:\n print(\"Removing annotation with id = \", ann['id'])\n data['annotations'][i] = None\n\n print(\"Total annotations = \", len(data['annotations']))\n #remove the annotations that are not text\n data['annotations'] = [x for x in data['annotations'] if x is not None]\n \n # need to loop over images and check if the image has a corresponding annotation entry if not remove it or it will cause an error in the training\n img2id, id2bbox = {}, {}\n updated_images = []\n\n for i in range(len(data['images'])):\n key = os.path.basename(data['images'][i]['file_name'][:-len('.png')])\n assert key not in img2id.keys()\n img2id[key] = data['images'][i]['id']\n has_annotation = False\n\n for j in range(len(data['annotations'])):\n if data['annotations'][j]['image_id'] == img2id[key]:\n has_annotation = True\n break\n if has_annotation:\n updated_images.append(data['images'][i])\n else:\n print(f\"Annotation not found for image {key}\")\n\n\n data['images'] = updated_images\n\n # validate the annotations data structure\n # This is base don the funds_evaluation.py script\n id2img = {}\n gt = {}\n for img in data['images']:\n id = img['id']\n name = os.path.basename(img['file_name'])[:-len('.jpg')]\n assert id not in id2img.keys()\n id2img[id] = name\n assert len(id2img) == len(data['images']) \n\n img2id, id2bbox = {}, {}\n for i in range(len(data['images'])):\n key = os.path.basename(data['images'][i]['file_name'][:-len('.png')])\n assert key not in img2id.keys()\n img2id[key] = data['images'][i]['id']\n\n for i in range(len(data['annotations'])):\n img_id = data['annotations'][i]['image_id']\n if img_id not in id2bbox.keys():\n id2bbox[img_id] = []\n x0, y0, w, h = 
data['annotations'][i]['bbox']\n x1, y1 = x0 + w, y0 + h\n line = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]\n # print(f\"img_id = {img_id} line = {line}\")\n id2bbox[img_id].append(\n {\n 'points': line,\n 'text': 1234,\n 'ignore': False,\n }\n )\n\n print(f\"len(id2img) = {len(id2img)}\")\n print(f\"len(img2id) = {len(img2id)}\")\n\n for key, val in img2id.items():\n assert key not in gt.keys()\n gt[key] = id2bbox[val]\n\n # save the annotations to a new file\n with open(output_file, 'w', encoding='utf-8') as f: \n json.dump(data, f, indent=2)\n\n\nif __name__ == \"__main__\":\n # ref https://github.com/open-mmlab/mmocr/blob/main/tools/dataset_converters/textdet/funsd_converter.py\n parser = argparse.ArgumentParser(description=\"Converts COCO annotations to DIT format\")\n parser.add_argument(\n \"--coco_annoations_file\",\n type=str,\n help=\"Path to the COCO annotations file\",\n required=True,\n )\n\n parser.add_argument(\n \"--output_file\",\n type=str,\n help=\"Path to the output file\",\n required=True,\n )\n \n args = parser.parse_args()\n\n process(args.coco_annoations_file, args.output_file) \n\n\n", "repo_name": "gregbugaj/unilm", "sub_path": "dit/text_detection/coco_funsd_eob_converter.py", "file_name": "coco_funsd_eob_converter.py", "file_ext": "py", "file_size_in_byte": 5493, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "75", "api": [{"api_name": "io.open", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 138, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "28683167979", "text": "import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset # 데이터로더\n\nfrom kogpt2_transformers import get_kogpt2_tokenizer\n#from kobert_transformers import get_tokenizer\n\nimport nltk\nnltk.download('vader_lexicon')\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n\ndef vader_polarity(text):\n \"\"\" Transform the output to a binary 0/1 result \"\"\"\n analyser = SentimentIntensityAnalyzer()\n score = analyser.polarity_scores(text)\n return 1 if score['pos'] > score['neg'] else 0\n\nclass WellnessAutoRegressiveDataset(Dataset):\n \"\"\"Wellness Auto Regressive Dataset\"\"\"\n\n def __init__(self, n_ctx = 1024):\n self.file_path = \"./TK_data/T1_wellness/T1_wellness_train.txt\"\n self.DATA =[]\n self.tokenizer = get_kogpt2_tokenizer()\n\n bos_token_id = [self.tokenizer.bos_token_id] # BEGIN of string \n eos_token_id = [self.tokenizer.eos_token_id] # END of string \n pad_token_id = [self.tokenizer.pad_token_id] # OTHER tokens \n\n\n file = open(self.file_path, 'r', encoding='utf-8')\n\n while True:\n line = file.readline()\n if not line:\n break\n datas = line.split(\" \")\n\n q = datas[0]\n q_toked = self.tokenizer.encode(q)\n #sentiment = analyser.polarity_scores(text))\n sentiment = vader_polarity(q)\n if sentiment ==1 :\n sentiment = 'g' #good\n else : \n sentiment = 'b' #bad\n sent_toked = 
self.tokenizer.encode(sentiment) \n a = datas[1]\n a_toked = self.tokenizer.encode(a[:-1])\n\n #===========++++ Q token\n q_toked = bos_token_id + q_toked + eos_token_id + \\\n bos_token_id + sent_toked + eos_token_id\n q_len = len(q_toked)\n\n #===========++++ A token\n #a_toked = bos_token_id + sent_toked + eos_token_id + \\\n a_toked = bos_token_id + a_toked + eos_token_id\n a_len = len(a_toked)\n\n #check padding LEN\n pad_token_len = n_ctx - q_len - a_len\n\n #===========++++ Padding\n index_of_words = q_toked + a_toked + pad_token_id * pad_token_len\n\n self.DATA.append(index_of_words)\n\n file.close()\n\n def __len__(self):\n return len(self.DATA)\n\n def __getitem__(self, idx):\n item = self.DATA[idx]\n return item\n'''\nclass WellnessTextClassificationDataset(Dataset):\n \"\"\"Wellness Text Classification Dataset\"\"\"\n def __init__(self,\n file_path = \"./data/wellness_dialog_for_text_classification.txt\",\n num_label = 359,\n device = 'cpu',\n max_seq_len = 512, # KoBERT max_length\n tokenizer = None\n ):\n self.file_path = file_path\n self.device = device\n self.data =[]\n self.tokenizer = tokenizer if tokenizer is not None else get_tokenizer()\n\n\n file = open(self.file_path, 'r', encoding='utf-8')\n\n while True:\n line = file.readline()\n if not line:\n break\n datas = line.split(\" \")\n index_of_words = tokenizer.encode(datas[0])\n token_type_ids = [0] * len(index_of_words)\n attention_mask = [1] * len(index_of_words)\n\n # Padding Length\n padding_length = max_seq_len - len(index_of_words)\n\n # Zero Padding\n index_of_words += [0] * padding_length\n token_type_ids += [0] * padding_length\n attention_mask += [0] * padding_length\n\n # Label\n label = int(datas[1][:-1])\n\n data = {\n 'input_ids': torch.tensor(index_of_words).to(self.device),\n 'token_type_ids': torch.tensor(token_type_ids).to(self.device),\n 'attention_mask': torch.tensor(attention_mask).to(self.device),\n 'labels': torch.tensor(label).to(self.device)\n }\n\n self.data.append(data)\n\n file.close()\n\n def __len__(self):\n return len(self.data)\n def __getitem__(self,index):\n item = self.data[index]\n return item\n'''\nif __name__ == \"__main__\":\n dataset = WellnessAutoRegressiveDataset()\n #dataset2 = WellnessTextClassificationDataset()\n print(dataset)\n #print(dataset2)\n", "repo_name": "tk1star2/tk_kogpt2_wellness", "sub_path": "TK_utils/T1_dataloader.py", "file_name": "T1_dataloader.py", "file_ext": "py", "file_size_in_byte": 3998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "nltk.download", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.sentiment.vader.SentimentIntensityAnalyzer", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 19, "usage_type": "name"}, {"api_name": "kogpt2_transformers.get_kogpt2_tokenizer", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "4562594264", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport scipy.optimize as opt\n\n\ndef epsi(y):\n return y/(np.exp(y)-1)\ndef dQ(Temp0,Tempf,n):\n return 3*n*(Temp0*epsi(Etemp/Temp0)-Tempf*epsi(Etemp/Tempf))\n\ndef plot_from_file(file):\n table=np.loadtxt(file,delimiter=\",\")\n time=table.transpose()[0]\n mass=table.transpose()[1]\n plt.plot(time,mass,\".\")\n plt.show()\n return True\n#plot_from_file(\"time-masstable.txt\")\ndef linregress(file):\n table=np.loadtxt(file,delimiter=\",\")\n return 
stats.linregress(table)\nprint(\"total\",linregress(\"time-masstable.txt\"))\nprint(\"prealu\",linregress(\"time-mass-prealu.txt\"))\nprint(\"postalu\",linregress(\"time-mass-post-alu.txt\"))\ndef plot_from_regress(table1,table2):\n t=np.linspace(0,660,1000)\n slope1=table1[0]\n\n intersect1=table1[1]\n slope2 = table2[0]\n intersect2 = table2[1]-5.91\n # print(slope1,intersect1,slope2,intersect2)\n plt.plot(t,slope1*t+intersect1)\n plt.plot(t,slope2*t+intersect2)\n plt.show()\n return True\n\nplot_from_regress(linregress(\"time-mass-prealu.txt\"),linregress(\"time-mass-post-alu.txt\"))\ndef dist_avg(table1,table2):\n t=np.linspace(306,360,10000)\n avg1=np.average(table1[0]*t+table1[1])\n avg2=np.average(table2[0]*t+table2[1]-5.91)\n return(avg1-avg2)\nm=dist_avg(linregress(\"time-mass-prealu.txt\"),linregress(\"time-mass-post-alu.txt\"))\nprint(m)\nL=2*10**2\nprint(\"dq\",m*L)\ndef dq(Etemp):\n return (3 * 5.91/26.981538 *8.31446* (295 * (Etemp / 295)/(np.exp(Etemp / 295)-1) - 77 * (Etemp /77)/(np.exp(Etemp /77)-1))-m*L)\nprint(opt.fsolve(dq,280))\n\n\n\n\n\n\n\n\n\n", "repo_name": "JosteinGj/School", "sub_path": "Old_Python_projects/termisk/lab_einstein/eksperiment.py", "file_name": "eksperiment.py", "file_ext": "py", "file_size_in_byte": 1604, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": "numpy.exp", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.optimize.fsolve", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "38525221544", "text": "\"\"\"tests for the updater archives and its instructions file\"\"\"\n\nimport pytest\nfrom oresat_linux_updater.instruction import Instruction, InstructionType\nfrom oresat_linux_updater.update_archive import UpdateArchiveError, \\\n read_instructions_file, extract_update_archive, \\\n write_instructions_file, create_update_archive\nfrom .common import TEST_WORK_DIR, TEST_INST_FILE1, TEST_INST_FILE2, \\\n TEST_INST_FILE3, TEST_INST_FILE4, TEST_INST_FILE5, TEST_UPDATE0, 
\\\n TEST_UPDATE1, TEST_UPDATE2, TEST_UPDATE3, TEST_UPDATE4, TEST_UPDATE5, \\\n TEST_UPDATE6, TEST_UPDATE7, TEST_UPDATE8, TEST_UPDATE9, \\\n clear_test_work_dir, TEST_DEB_PKG1, TEST_DEB_PKG2, TEST_DEB_PKG1_NAME,\\\n TEST_DEB_PKG2_NAME, TEST_BASH_SCRIPT\n\n\ndef test_read_instructions_file():\n \"\"\"Test opening instructions file.\"\"\"\n\n # valid instructions file\n\n read_instructions_file(TEST_INST_FILE1, TEST_WORK_DIR)\n\n # invalid instructions files\n\n with pytest.raises(UpdateArchiveError):\n read_instructions_file(TEST_INST_FILE2, TEST_WORK_DIR)\n\n with pytest.raises(UpdateArchiveError):\n read_instructions_file(TEST_INST_FILE3, TEST_WORK_DIR)\n\n with pytest.raises(UpdateArchiveError):\n read_instructions_file(TEST_INST_FILE4, TEST_WORK_DIR)\n\n with pytest.raises(UpdateArchiveError):\n read_instructions_file(TEST_INST_FILE5, TEST_WORK_DIR)\n\n\ndef test_write_instructions_file():\n \"\"\"Test writing a instructions file.\"\"\"\n\n inst_list1 = [\n Instruction(InstructionType.DPKG_INSTALL, [TEST_DEB_PKG1, TEST_DEB_PKG2]),\n Instruction(InstructionType.BASH_SCRIPT, [TEST_BASH_SCRIPT])\n ]\n\n clear_test_work_dir()\n write_instructions_file(inst_list1, TEST_WORK_DIR)\n\n\ndef test_extract_update_archive():\n \"\"\"Test opening updates archives.\"\"\"\n\n # valid updates\n\n clear_test_work_dir()\n extract_update_archive(TEST_UPDATE0, TEST_WORK_DIR)\n\n clear_test_work_dir()\n extract_update_archive(TEST_UPDATE1, TEST_WORK_DIR)\n\n clear_test_work_dir()\n extract_update_archive(TEST_UPDATE2, TEST_WORK_DIR)\n\n # invalid updates\n\n clear_test_work_dir()\n with pytest.raises(FileNotFoundError):\n extract_update_archive(TEST_UPDATE3, TEST_WORK_DIR)\n\n clear_test_work_dir()\n with pytest.raises(UpdateArchiveError):\n extract_update_archive(TEST_UPDATE4, TEST_WORK_DIR)\n\n clear_test_work_dir()\n with pytest.raises(UpdateArchiveError):\n extract_update_archive(TEST_UPDATE5, TEST_WORK_DIR)\n\n clear_test_work_dir()\n with pytest.raises(UpdateArchiveError):\n extract_update_archive(TEST_UPDATE6, TEST_WORK_DIR)\n\n clear_test_work_dir()\n with pytest.raises(UpdateArchiveError):\n extract_update_archive(TEST_UPDATE7, TEST_WORK_DIR)\n\n clear_test_work_dir()\n with pytest.raises(UpdateArchiveError):\n extract_update_archive(TEST_UPDATE8, TEST_WORK_DIR)\n\n clear_test_work_dir()\n with pytest.raises(UpdateArchiveError):\n extract_update_archive(TEST_UPDATE9, TEST_WORK_DIR)\n\n clear_test_work_dir()\n with pytest.raises(UpdateArchiveError):\n extract_update_archive(\"invalid-file\", TEST_WORK_DIR)\n\n\ndef test_create_update_archive():\n \"\"\"Test making a new update archive.\"\"\"\n\n inst_list1 = [\n Instruction(InstructionType.DPKG_INSTALL, [TEST_DEB_PKG1, TEST_DEB_PKG2]),\n Instruction(InstructionType.DPKG_REMOVE, [TEST_DEB_PKG1_NAME, TEST_DEB_PKG2_NAME]),\n Instruction(InstructionType.BASH_SCRIPT, [TEST_BASH_SCRIPT])\n ]\n\n inst_list2 = [\n Instruction(InstructionType.DPKG_INSTALL, [\"invalid1\", \"invalid2\"]),\n ]\n\n create_update_archive(\"test\", inst_list1, TEST_WORK_DIR, False)\n\n with pytest.raises(UpdateArchiveError):\n create_update_archive(\"test\", inst_list2, TEST_WORK_DIR)\n", "repo_name": "oresat/oresat-linux-updater", "sub_path": "tests/test_update_archive.py", "file_name": "test_update_archive.py", "file_ext": "py", "file_size_in_byte": 3854, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "75", "api": [{"api_name": "oresat_linux_updater.update_archive.read_instructions_file", "line_number": 21, "usage_type": 
"call"}, {"api_name": "common.TEST_INST_FILE1", "line_number": 21, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 21, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 25, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 25, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.read_instructions_file", "line_number": 26, "usage_type": "call"}, {"api_name": "common.TEST_INST_FILE2", "line_number": 26, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 26, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 28, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 28, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.read_instructions_file", "line_number": 29, "usage_type": "call"}, {"api_name": "common.TEST_INST_FILE3", "line_number": 29, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 29, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 31, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 31, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.read_instructions_file", "line_number": 32, "usage_type": "call"}, {"api_name": "common.TEST_INST_FILE4", "line_number": 32, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 32, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 34, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 34, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.read_instructions_file", "line_number": 35, "usage_type": "call"}, {"api_name": "common.TEST_INST_FILE5", "line_number": 35, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 35, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.instruction.Instruction", "line_number": 42, "usage_type": "call"}, {"api_name": "oresat_linux_updater.instruction.InstructionType.DPKG_INSTALL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "oresat_linux_updater.instruction.InstructionType", "line_number": 42, "usage_type": "name"}, {"api_name": "common.TEST_DEB_PKG1", "line_number": 42, "usage_type": "name"}, {"api_name": "common.TEST_DEB_PKG2", "line_number": 42, "usage_type": "name"}, {"api_name": "oresat_linux_updater.instruction.Instruction", "line_number": 43, "usage_type": "call"}, {"api_name": "oresat_linux_updater.instruction.InstructionType.BASH_SCRIPT", "line_number": 43, "usage_type": "attribute"}, {"api_name": "oresat_linux_updater.instruction.InstructionType", "line_number": 43, "usage_type": "name"}, {"api_name": "common.TEST_BASH_SCRIPT", "line_number": 43, "usage_type": "name"}, {"api_name": "common.clear_test_work_dir", "line_number": 46, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.write_instructions_file", "line_number": 47, "usage_type": "call"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 47, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 55, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 56, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE0", "line_number": 56, "usage_type": "argument"}, 
{"api_name": "common.TEST_WORK_DIR", "line_number": 56, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 58, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 59, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE1", "line_number": 59, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 59, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 61, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 62, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE2", "line_number": 62, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 62, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 66, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 67, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 68, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE3", "line_number": 68, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 68, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 70, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 71, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 71, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 72, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE4", "line_number": 72, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 72, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 74, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 75, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 75, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 76, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE5", "line_number": 76, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 76, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 79, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 79, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 80, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE6", "line_number": 80, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 80, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 82, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 83, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 83, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 84, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE7", "line_number": 84, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 84, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 86, 
"usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 87, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 87, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 88, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE8", "line_number": 88, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 88, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 90, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 91, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 91, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 92, "usage_type": "call"}, {"api_name": "common.TEST_UPDATE9", "line_number": 92, "usage_type": "argument"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 92, "usage_type": "argument"}, {"api_name": "common.clear_test_work_dir", "line_number": 94, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 95, "usage_type": "call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 95, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.extract_update_archive", "line_number": 96, "usage_type": "call"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 96, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.instruction.Instruction", "line_number": 103, "usage_type": "call"}, {"api_name": "oresat_linux_updater.instruction.InstructionType.DPKG_INSTALL", "line_number": 103, "usage_type": "attribute"}, {"api_name": "oresat_linux_updater.instruction.InstructionType", "line_number": 103, "usage_type": "name"}, {"api_name": "common.TEST_DEB_PKG1", "line_number": 103, "usage_type": "name"}, {"api_name": "common.TEST_DEB_PKG2", "line_number": 103, "usage_type": "name"}, {"api_name": "oresat_linux_updater.instruction.Instruction", "line_number": 104, "usage_type": "call"}, {"api_name": "oresat_linux_updater.instruction.InstructionType.DPKG_REMOVE", "line_number": 104, "usage_type": "attribute"}, {"api_name": "oresat_linux_updater.instruction.InstructionType", "line_number": 104, "usage_type": "name"}, {"api_name": "common.TEST_DEB_PKG1_NAME", "line_number": 104, "usage_type": "name"}, {"api_name": "common.TEST_DEB_PKG2_NAME", "line_number": 104, "usage_type": "name"}, {"api_name": "oresat_linux_updater.instruction.Instruction", "line_number": 105, "usage_type": "call"}, {"api_name": "oresat_linux_updater.instruction.InstructionType.BASH_SCRIPT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "oresat_linux_updater.instruction.InstructionType", "line_number": 105, "usage_type": "name"}, {"api_name": "common.TEST_BASH_SCRIPT", "line_number": 105, "usage_type": "name"}, {"api_name": "oresat_linux_updater.instruction.Instruction", "line_number": 109, "usage_type": "call"}, {"api_name": "oresat_linux_updater.instruction.InstructionType.DPKG_INSTALL", "line_number": 109, "usage_type": "attribute"}, {"api_name": "oresat_linux_updater.instruction.InstructionType", "line_number": 109, "usage_type": "name"}, {"api_name": "oresat_linux_updater.update_archive.create_update_archive", "line_number": 112, "usage_type": "call"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 112, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 114, "usage_type": 
"call"}, {"api_name": "oresat_linux_updater.update_archive.UpdateArchiveError", "line_number": 114, "usage_type": "argument"}, {"api_name": "oresat_linux_updater.update_archive.create_update_archive", "line_number": 115, "usage_type": "call"}, {"api_name": "common.TEST_WORK_DIR", "line_number": 115, "usage_type": "argument"}]} +{"seq_id": "37184878007", "text": "from utils.lexer import lex\nfrom utils.syntax import synt\nfrom utils.gen import find_vars\nfrom utils.const import *\n\nfrom nose.tools import assert_equal, assert_false, assert_true\n\n\nclass TestSyntax(object):\n @classmethod\n def setup_class(klass):\n pass\n\n @classmethod\n def teardown_class(klass):\n pass\n\n def setUp(self):\n pass\n\n def teardown(self):\n pass\n\n def test_smoke(self):\n str = \"i = 10+1;\\n\"\\\n \"j = 5*i;\\n\"\n tree = [(A_BLOCK, list(reversed([\n (A_ASSIGN, [\n 'i', [('+', ['10', '1'])]\n ]\n ),\n (A_ASSIGN, [\n 'j', [('*', ['5', 'i'])]\n ]\n ), ]))\n ), ]\n assert_equal(synt(lex(str)), tree)\n\n stat = find_vars(tree)\n assert_equal(set(stat.vars), set(['i', 'j']))\n assert_false(stat.use_print)\n assert_false(stat.use_read)\n\n def test_print(self):\n str = 'print i;\\n'\n tree = [(A_BLOCK, [(A_PRINT, \"i\")])]\n assert_equal(synt(lex(str)), tree)\n\n stat = find_vars(tree)\n assert_equal(set(stat.vars), set(['i']))\n assert_true(stat.use_print)\n assert_false(stat.use_read)\n\n def test_if(self):\n str = 'if x>0:\\n'\\\n 'print \"1\";\\n'\\\n 'else:\\n'\\\n 'print \"2\";\\n'\\\n 'endif;\\n'\n tree = [(A_BLOCK, [(A_IF, [[\n ('>', ['x', '0'])],\n (A_BLOCK, [(A_PRINT, '\"1\"')]),\n (A_BLOCK, [(A_PRINT, '\"2\"')])\n ],\n ), ]\n ), ]\n\n assert_equal(synt(lex(str)), tree)\n\n def test_use_print(self):\n str = 'while x>0:\\n'\\\n 'print \"1\";\\n'\\\n 'endwhile;\\n'\n stat = find_vars(synt(lex(str)))\n assert_equal(set(stat.vars), set(['x']))\n assert_equal(set(stat.strs), set(['\"1\"']))\n assert_true(stat.use_print)\n assert_false(stat.use_read)\n\n str = 'if x>0:\\n'\\\n 'print \"1\";\\n'\\\n 'else:\\n'\\\n 'print \"2\";\\n'\\\n 'endif;\\n'\n stat = find_vars(synt(lex(str)))\n assert_equal(set(stat.vars), set(['x']))\n assert_equal(set(stat.strs), set(['\"1\"', '\"2\"']))\n assert_true(stat.use_print)\n assert_false(stat.use_read)\n\n str = 'read i;\\nprint i;'\n stat = find_vars(synt(lex(str)))\n assert_equal(set(stat.vars), set(['i']))\n assert_true(stat.use_print)\n assert_true(stat.use_read)\n\n def test_loop(self):\n str = 'while x>0:\\n'\\\n 'print \"1\";\\n'\\\n 'endwhile;\\n'\n tree = [(A_BLOCK, [(A_WHILE, [[\n ('>', ['x', '0'])],\n (A_BLOCK, [(A_PRINT, '\"1\"')]),\n ],\n ), ]\n ), ]\n\n assert_equal(synt(lex(str)), tree)\n", "repo_name": "rrader/pyCompiler", "sub_path": "tests/test_syntax.py", "file_name": "test_syntax.py", "file_ext": "py", "file_size_in_byte": 3044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "nose.tools.assert_equal", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.syntax.synt", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.lexer.lex", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.gen.find_vars", "line_number": 39, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 40, "usage_type": "call"}, {"api_name": "nose.tools.assert_false", "line_number": 41, "usage_type": "call"}, {"api_name": "nose.tools.assert_false", "line_number": 42, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 47, 
"usage_type": "call"}, {"api_name": "utils.syntax.synt", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.lexer.lex", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.gen.find_vars", "line_number": 49, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 50, "usage_type": "call"}, {"api_name": "nose.tools.assert_true", "line_number": 51, "usage_type": "call"}, {"api_name": "nose.tools.assert_false", "line_number": 52, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.syntax.synt", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.lexer.lex", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.gen.find_vars", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.syntax.synt", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.lexer.lex", "line_number": 74, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 75, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 76, "usage_type": "call"}, {"api_name": "nose.tools.assert_true", "line_number": 77, "usage_type": "call"}, {"api_name": "nose.tools.assert_false", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.gen.find_vars", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.syntax.synt", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.lexer.lex", "line_number": 85, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 86, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 87, "usage_type": "call"}, {"api_name": "nose.tools.assert_true", "line_number": 88, "usage_type": "call"}, {"api_name": "nose.tools.assert_false", "line_number": 89, "usage_type": "call"}, {"api_name": "utils.gen.find_vars", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.syntax.synt", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.lexer.lex", "line_number": 92, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 93, "usage_type": "call"}, {"api_name": "nose.tools.assert_true", "line_number": 94, "usage_type": "call"}, {"api_name": "nose.tools.assert_true", "line_number": 95, "usage_type": "call"}, {"api_name": "nose.tools.assert_equal", "line_number": 108, "usage_type": "call"}, {"api_name": "utils.syntax.synt", "line_number": 108, "usage_type": "call"}, {"api_name": "utils.lexer.lex", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "6869264031", "text": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\nimport config\nfrom dto import db_entities\n\n\ntry:\n # Set up connection to firestore\n cred = credentials.Certificate(config.DB_CONFIG_PATH)\n firebase_admin.initialize_app(cred)\n db = firestore.client()\nexcept:\n db = None\n print(\"WARN: Using local website scrapping information\\n\")\n local_db = {\n 'www.cnn.com': '//*[@id=\"body-text\"]/div[1]',\n 'www.foxnews.com': '//*[@id=\"wrapper\"]/div[2]/div[1]/main/article/div/div/div[1]/p',\n 'www.nytimes.com': '//*[@id=\"story\"]/section/div/div/p'\n }\n \n\n# retrieve xpath from database\ndef get_xpath(domain):\n x_path = \"\"\n\n if db:\n doc_ref = db.collection(u'NewsSite').document(domain)\n news_doc = doc_ref.get()\n if news_doc.exists:\n x_path = news_doc.to_dict().get('xpath')\n else:\n x_path = local_db.get(domain)\n\n return x_path\n\ndef is_bias_stored(url, content):\n if not db:\n return 
False\n \n doc_ref = db.collection(u'StoredBiases').document(content)\n doc = doc_ref.get()\n\n return doc.exists\n\ndef get_stored_bias(url, content):\n if not db:\n print(\"Unable to connect to the database\")\n return\n\n doc_ref = db.collection(u'StoredBiases').document(content)\n\ndef store_bias(url, content, bias_value):\n if not db:\n return\n predicted_entry = db_entities.PredictedEntry(content, url, bias_value)\n db.child(u'StoredBiases').push(predicted_entry)\n \n# check if database connection is healthy\ndef db_health():\n try:\n db.collection(u'NewsSite')\n return \"UP\"\n except:\n return \"DOWN\"\n\n", "repo_name": "LostLaser/Daratos", "sub_path": "Daratos_API/handlers/db_handler.py", "file_name": "db_handler.py", "file_ext": "py", "file_size_in_byte": 1691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "75", "api": [{"api_name": "firebase_admin.credentials.Certificate", "line_number": 11, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 11, "usage_type": "name"}, {"api_name": "config.DB_CONFIG_PATH", "line_number": 11, "usage_type": "attribute"}, {"api_name": "firebase_admin.initialize_app", "line_number": 12, "usage_type": "call"}, {"api_name": "firebase_admin.firestore.client", "line_number": 13, "usage_type": "call"}, {"api_name": "firebase_admin.firestore", "line_number": 13, "usage_type": "name"}, {"api_name": "dto.db_entities.PredictedEntry", "line_number": 57, "usage_type": "call"}, {"api_name": "dto.db_entities", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "26758338474", "text": "import os\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\n\nrequires = [\n 'cssselect',\n 'lxml',\n 'requests'\n]\n\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='middlebury-directory',\n version='1.0.0',\n packages=['directory'],\n include_package_data=True,\n license='MIT',\n description='A Python API for the Middlebury directory.',\n long_description=README,\n url='https://github.com/coursereviews/directory',\n install_requires=requires,\n author='Dana Silver',\n author_email='dsilver@middlebury.edu',\n keywords='directory middlebury search people',\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ]\n)\n", "repo_name": "coursereviews/directory", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "os.path.join", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.abspath", 
"line_number": 13, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 13, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "21710316684", "text": "from django.urls import path\nfrom . import views\n\n# 고정 url: http://localhost:8000/polls/\napp_name = 'polls' # namespace\nurlpatterns = [\n path('', views.index, name='index'), # polls:index\n\n # polls/숫자/\n # 변하는 값 명시 <숫자:데이터>\n path('/', views.detail, name='detail'), # detail: 해당 설문에 대한 항목 리스트\n path('/vote/', views.vote, name='vote'), # polls:vote\n\n # http://localhost:8000/polls/1/results/\n path('/results/', views.results, name='results') # polls:results\n\n]\n", "repo_name": "sammitako/poll-django", "sub_path": "polls/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 584, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "14672870639", "text": "from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom business.pokemon.models import Pokemon, PokemonStat\nfrom business.pokemon_species.models import PokemonSpecies\nfrom business.stats.models import Stat\n\n\nclass GetPokemonByIdTests(APITestCase):\n\n def setUp(self):\n self.pokemon_species = PokemonSpecies.objects.create(name=\"test\")\n self.data = {\n 'id': 1,\n 'name': 'test',\n 'height': 12,\n 'weight': 13,\n 'species': self.pokemon_species,\n }\n self.pokemon = Pokemon.objects.create(**self.data)\n self.stat = Stat.objects.create(name=\"speed\")\n self.pokemon_stat = PokemonStat.objects.create(\n base_stat=12,\n effort=12,\n pokemon=self.pokemon,\n stat=self.stat\n )\n\n def test_get_company_by_owner(self):\n url = reverse('pokemon', args=(self.pokemon.name, ))\n response = self.client.get(url)\n self.assertEqual(response.data.get('id'), self.data.get('id'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n", "repo_name": "johan-smc/PokeApi", "sub_path": "poke_api/apps/business/pokemon/tests/views/test_pokemon_view.py", "file_name": "test_pokemon_view.py", "file_ext": "py", "file_size_in_byte": 1152, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "rest_framework.test.APITestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "business.pokemon_species.models.PokemonSpecies.objects.create", "line_number": 13, "usage_type": "call"}, {"api_name": "business.pokemon_species.models.PokemonSpecies.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "business.pokemon_species.models.PokemonSpecies", "line_number": 13, "usage_type": "name"}, {"api_name": "business.pokemon.models.Pokemon.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "business.pokemon.models.Pokemon.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "business.pokemon.models.Pokemon", "line_number": 21, "usage_type": "name"}, {"api_name": "business.stats.models.Stat.objects.create", "line_number": 22, "usage_type": "call"}, {"api_name": "business.stats.models.Stat.objects", "line_number": 22, "usage_type": "attribute"}, 
{"api_name": "business.stats.models.Stat", "line_number": 22, "usage_type": "name"}, {"api_name": "business.pokemon.models.PokemonStat.objects.create", "line_number": 23, "usage_type": "call"}, {"api_name": "business.pokemon.models.PokemonStat.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "business.pokemon.models.PokemonStat", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "39236820840", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('newsletter', '0011_auto_20170127_1807'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='list',\n name='provider',\n field=models.CharField(max_length=32, default='mailerlite', editable=False),\n ),\n ]\n", "repo_name": "rogerhil/flaviabernardes", "sub_path": "flaviabernardes/flaviabernardes/newsletter/migrations/0012_auto_20170402_1535.py", "file_name": "0012_auto_20170402_1535.py", "file_ext": "py", "file_size_in_byte": 443, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "39351336683", "text": "#!/usr/bin/env python\n\"\"\"Command that displays the first n rows of datafile(s).\n\nRequired arguments include path to datafile(s).\nOptional argument include number of rows to display.\"\"\"\nimport argparse\n\nfrom src.utilities import display_head # pylint: disable=import-error\n\n\ndef main(args): # pylint: disable=redefined-outer-name\n \"\"\"Displays first n rows of dataframe(s).\"\"\"\n for fname in args.infile:\n display_head(fname, args.rows)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Displays first n rows of user-inputted datafile(s).\"\n )\n parser.add_argument(\n \"infile\", type=str, nargs=\"*\", default=\"-\", help=\"Input datafile path(s)\"\n )\n parser.add_argument(\n \"-r\", \"--rows\", type=int, help=\"# rows to display for dataframe(s)\"\n )\n args = parser.parse_args()\n main(args)\n", "repo_name": "nanyasrivastav/S22-06682-Project", "sub_path": "src/h2activity/display.py", "file_name": "display.py", "file_ext": "py", "file_size_in_byte": 862, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "src.utilities.display_head", "line_number": 14, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "3088791829", "text": "\nimport pprint\nf = open('baseline.detail','r') \nf.readline()\n\n\ncounter = 1\ndic = {}\nl = []\nfor line in f: \n if counter%7 ==1: \n \tdic[\"question\"] = str(line).rstrip() \n elif counter%7 ==2:\n \tdic[\"predict label\"] = 
str(line).split()[2]\n elif counter%7 ==3:\n \tdic[\"predict prob\"] = float(line.rstrip().split()[2])\n elif counter%7 ==4:\n \tdic[\"true label\"] = str(line).split()[2]\n elif counter%7 ==5:\n \tdic[\"correct\"] = str(line).split()[2]\n elif counter%7 ==6:\n \tdic[\"entropy\"] = float(line.rstrip().split()[2])\n elif counter%7 ==0:\n \tl.append(dic)\n \tdic = {}\n counter=counter+1\nf.close() \n\n\npp = pprint.PrettyPrinter(indent=4)\n\ndef predicting_statstics(threshold, l):\n l = list(filter(lambda d: d[\"entropy\"]>threshold, l))\n l.sort(key=lambda d:d['entropy'], reverse=True)\n print(\"entropy > %s 的資料有 %s 筆\"%(threshold,len(l)))\n\npredicting_statstics(3,l)\npredicting_statstics(2.5,l)\npredicting_statstics(2,l)\npredicting_statstics(1.5,l)\npredicting_statstics(1,l)\npredicting_statstics(0.5,l)\npredicting_statstics(0,l)\n\nl.sort(key=lambda d:d['entropy'], reverse=True)\nl = list(filter(lambda d: d[\"entropy\"]>1.5, l))\n# pp.pprint(l)\n\nfrom itertools import groupby\nl.sort(key=lambda d:d['true label'], reverse=True)\nhistogram = []\nfor k, g in groupby(l, lambda x: x[\"true label\"]):\n histogram.append([k, len(list(g))])\n\nhistogram.sort(key = lambda x:x[1])\npp.pprint(histogram)\n", "repo_name": "voidful/fastText_bagboost", "sub_path": "baseline_result_parser.py", "file_name": "baseline_result_parser.py", "file_ext": "py", "file_size_in_byte": 1440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 30, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "34179868244", "text": "#coding:utf-8\n#import necessary package\nimport socket\nimport time\nimport sys\nimport json\nimport numpy as np\nfrom typing import List, Tuple, Dict\n\n \nclass SocketInterface_Pi:\n \n \"\"\"\n subscriber_cmd()\n publisher_env(msg_state:Dict)\n get_command(self, msg, do_print=False), msg={\"servo_degree\": np.array, \"info\": Dict}\n \"\"\"\n def __init__(self, Socket_config ) -> None:\n \n # parameters\n self.HOST_IP = Socket_config.HOST_IP\n self.HOST_PORT = Socket_config.HOST_PORT\n self.message_rate = Socket_config.message_rate\n \n # # IP address of Rasberry Pi\n # self.HOST_IP = \"192.168.31.52\"\n # self.HOST_PORT = 50000\n # self.message_rate = 50\n\n\n # socket connection\n print(\"Starting socket: TCP...\")\n #1.create socket object:socket=socket.socket(family,type)\n self.socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n print(\"TCP server listen @ %s:%d!\" %(self.HOST_IP, self.HOST_PORT) )\n self.host_addr = (self.HOST_IP, self.HOST_PORT)\n #2.bind socket to addr:socket.bind(address)\n self.socket_tcp.bind(self.host_addr)\n #3.listen connection request:socket.listen(backlog)\n self.socket_tcp.listen(1)\n #4.waite for client:connection,address=socket.accept()\n self.socket_con, (client_ip, client_port) = self.socket_tcp.accept()\n print(\"Connection accepted from %s.\" %client_ip)\n msg = \"Welcome to Pupper Rpi TCP server!\"\n # msg=msg.encode(encoding='utf_8', errors='strict')\n msg = json.dumps(msg)\n self.socket_con.send(bytes(msg.encode('utf-8')))\n\n\n def subscriber_cmd(self):\n while True:\n # print(\"Receiving package...\")\n data = self.socket_con.recv(512)\n # print(\"Receiving package...\")\n\n if len(data)>0:\n msg_recv = json.loads(data)\n return msg_recv\n\n\n def publisher_env(self, msg_state: Dict):\n 
try:\n env_pub = json.dumps(msg_state)\n self.socket_con.send(bytes(env_pub.encode('utf-8')))\n\n except Exception:\n self.socket_con.close()\n sys.exit(1)\n\n \n def get_command(self, msg, do_print=False):\n \"\"\"\n msg: {\"servo_degree\": np.array, \"info\": Dict}\n return (servo_degree, info) \n \"\"\"\n \n # command = {\"servo_degree\": np.array, \"info\": Dict}\n \n set_points = msg[\"servo_degree\"]\n info = msg[\"info\"]\n return set_points, info\n\n\n def __del__(self):\n self.socket_con.close()\n self.socket_tcp.close()\n print(\"Terminate Pi Socket Connection!\")\n\n\nif __name__ == \"__main__\":\n # connection test\n # Establish connection\n socket_Pi = SocketInterface_Pi()\n \n # PC -> Pi\n cmd_recv = socket_Pi.subscriber_cmd()\n print(\"Pi receive:\",cmd_recv)\n\n # Pi -> PC\n state = {\"a\":1, \"b\":2}\n print(\"Pi send:\", state)\n socket_Pi.publisher_env(state)\n\n ", "repo_name": "AlfredMoore/AmphibiousQuadruped", "sub_path": "communication/SocketInterface_Pi_RL.py", "file_name": "SocketInterface_Pi_RL.py", "file_ext": "py", "file_size_in_byte": 3086, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": "socket.socket", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 35, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 35, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 63, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "71737924401", "text": "\nfrom datetime import datetime, date\nfrom time import perf_counter\nfrom dateutil.relativedelta import relativedelta\n\n\nclass APIFetchError(Exception):\n \"\"\"Custom exception for API fetch errors.\"\"\"\n\n pass\n\n\ndef time_delta_YMD(start_date, end_date=None):\n \"\"\"Calculates the time difference between two dates in years, months, and days.\"\"\"\n\n date_format = '%Y-%m-%dT%H:%M:%SZ'\n start_date = datetime.strptime(start_date, date_format)\n end_date = datetime.strptime(end_date, date_format) if end_date else datetime.now()\n\n time_delta = relativedelta(end_date, start_date)\n years, months, days = time_delta.years, time_delta.months, time_delta.days\n\n return f\"{years} yrs, {months} mth , {days} dys\"\n\ndef time_delta_DHM(start_date, end_date=None):\n \"\"\"Calculates the time difference between two dates in days, hours, and minutes.\"\"\"\n\n date_format = '%Y-%m-%dT%H:%M:%SZ'\n start_date = datetime.strptime(start_date, date_format)\n end_date = datetime.strptime(end_date, date_format) if end_date else datetime.now()\n\n time_delta = relativedelta(end_date, start_date)\n days, hours, minutes = time_delta.days, time_delta.hours, time_delta.minutes\n\n return f\"{days} dys, {hours} hrs , {minutes} min\"\n\ndef time_async_operation(func):\n \"\"\"A decorator to measure the elapsed time for an asynchronous function.\"\"\"\n \n async def wrapper(*args, **kwargs):\n start_time = perf_counter()\n result = await func(*args, **kwargs)\n end_time = perf_counter()\n elapsed_time = end_time - start_time\n print(f\"{func.__name__} 
completed in {elapsed_time:.2f} seconds\")\n return result\n return wrapper\n\n", "repo_name": "Yantiomene/Github-stats", "sub_path": "app/github_api/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1668, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 32, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 41, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "714769844", "text": "from django.http import HttpResponse\nfrom django.views.generic import View\n\nfrom django_cradmin.apps.cradmin_email import emailutils\n\n\nclass DemoEmail(emailutils.AbstractEmail):\n subject_template = 'cradmin_email/cradmin_email_send_testmail/subject.django.txt'\n html_message_template = 'cradmin_email/cradmin_email_send_testmail/html_message.django.html'\n\n def get_context_data(self):\n context = super(DemoEmail, self).get_context_data()\n context['name'] = 'Test Name'\n return context\n\n\nclass EmailDesignView(View):\n def get(self, request, format='html'):\n email = DemoEmail()\n if format == 'plaintext':\n return HttpResponse(email.render_plaintext_message(), content_type='text/plain')\n else:\n return HttpResponse(email.render_html_message())\n", "repo_name": "appressoas/django_cradmin", "sub_path": "django_cradmin/apps/cradmin_email/views/email_design.py", "file_name": "email_design.py", "file_ext": "py", "file_size_in_byte": 819, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "75", "api": [{"api_name": "django_cradmin.apps.cradmin_email.emailutils.AbstractEmail", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django_cradmin.apps.cradmin_email.emailutils", "line_number": 7, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 17, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 21, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "16406723591", "text": "import json\nfrom molotov import scenario\n\n\n@scenario(5)\nasync def scenario_one(session):\n async with session.get(\"http://localhost:5000/api\") as res:\n assert res.status == 200\n d = await res.json()\n assert d[\"Hello\"] == \"World!\"\n \n\n@scenario(30)\nasync def scenario_two(session):\n somedata = json.dumps({\"OK\": 1})\n async with session.post(\"http://localhost:5000/api\", data=somedata) as res:\n assert 
res.status_code == 405\n\n\n# molotov .\\chapter03\\molotov_test.py -w 10 -d 30\n", "repo_name": "yooseongc/python-microservice-study", "sub_path": "chapter03/molotov_test.py", "file_name": "molotov_test.py", "file_ext": "py", "file_size_in_byte": 513, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "molotov.scenario", "line_number": 5, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "molotov.scenario", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "30696810511", "text": "from abc import ABC, abstractmethod\nfrom typing import Literal, Optional, Union\n\n\nclass Dictionable(ABC):\n @abstractmethod\n def to_dict(self) -> dict:\n raise NotImplementedError()\n\n @staticmethod\n @abstractmethod\n def from_dict(res: dict) -> 'Dictionable':\n raise NotImplementedError()\n\n\nSandboxStatus = Optional[Union[Literal['APPROVED'], Literal['DECLINED'], Literal['ERROR']]]\n", "repo_name": "GoPreki/WompiSDK", "sub_path": "wompi/models/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 409, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "abc.ABC", "line_number": 5, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 6, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "4805380210", "text": "from requests import get\nfrom bs4 import BeautifulSoup\nfrom queue import Queue\nfrom threading import Thread\nfrom datetime import datetime\n\n#websites and URLs\n\n# Get top from one site\n\nheaders = {\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36\"\n}\n\n\ndef get_top(site, websites, n=10):\n top = {}\n # Get request\n s = get(websites[site][0], headers={\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\"})\n soup = BeautifulSoup(s.content, features=\"html.parser\")\n # parse soup based on site\n if site == \"BuzzFeed\" or site == \"Huffington Post\":\n for i in websites[site][1]:\n for j in soup.find_all(attrs={\"class\": i}, href=True):\n top[j.get_text()] = j['href']\n elif site == \"CNN\":\n for i in websites[site][1]:\n for j in soup.find_all(attrs={\"class\": i}):\n top[j.get_text()] = \"https://cnn.com\" + \\\n j.find_parent(href=True)['href']\n elif site == \"BBC News\":\n for i in websites[site][1]:\n for j in soup.find_all(attrs={\"class\": i}):\n top[j.get_text()] = \"https://www.bbc.com\" + \\\n j.find_parent(href=True)['href']\n elif site == \"New York Times\":\n for j in soup.find_all(\"span\", attrs={\"class\": None}):\n if j.find_parent(href=True) and \"nytimes.com\" not in j.find_parent(href=True)['href']:\n top[j.get_text()] = \"https://www.nytimes.com\" + \\\n j.find_parent(href=True)['href']\n for j in soup.find_all(attrs={\"class\": websites[site][1][0]}):\n if j.find_parent(href=True) and \"nytimes.com\" not in j.find_parent(href=True)['href']:\n top[j.get_text()] = \"https://www.nytimes.com\" + \\\n j.find_parent(href=True)['href']\n elif site == \"NBC 
News\" or site == \"NPR News\":\n for i in websites[site][1]:\n for j in soup.find_all(attrs={\"class\": i}):\n if j.find_parent(href=True):\n top[j.get_text()] = j.find_parent(href=True)['href']\n elif site == \"Washington Post\":\n for j in soup.find_all(attrs={\"data-pb-placeholder\": \"Write headline here\"}, href=True):\n top[j.get_text().strip()] = j['href']\n elif site == \"Wall Street Journal\":\n for i in soup.find_all(\"a\", href=True):\n if \"articles\" in i['href'].split(\"/\") and len(i.get_text().split(\" \")) > 4:\n top[i.get_text()] = i['href']\n elif site == \"The Atlantic\":\n for i in websites[site][1]:\n for j in soup.find_all(attrs={\"class\": i}):\n top[j.get_text().strip()] = \"https://www.theatlantic.com\" + \\\n j.find_parent(href=True)['href']\n elif site == \"ABC News\":\n for j in soup.find_all(attrs={\"data-analytics\": \"cid=clicksource_4380645_3_mobile_web_only_headlines_headlines_hed\"}, href=True):\n top[j.get_text()] = j['href']\n elif site == \"The Onion\":\n for j in soup.find_all(attrs={\"class\": websites[site][1][0]}):\n top[j.get_text()] = j.find_parent(href=True)['href']\n elif site == \"Fox News\" or site == 'Reuters':\n for j in soup.find_all(attrs={\"class\": websites[site][1][0]}):\n if j.find(href=True):\n top[j.get_text()] = j.find(href=True)['href']\n elif site == \"POLITICO\":\n for j in soup.find_all(attrs={\"class\": websites[site][1][0]}, href=True):\n if len(j.get_text().split(\" \")) > 3:\n top[j.get_text()] = j['href']\n elif site == \"Associated Press\":\n for j in soup.find_all(\"a\", attrs={\"data-key\": \"related-story-link\"}, href=True):\n top[j.get_text().split(\"By\")[0]] = \"https://apnews.com\" + j['href']\n elif site == \"CBS News\":\n for j in soup.find_all(attrs={\"class\": websites[site][1][0]}):\n if j.find_parent(href=True):\n if len(j.get_text().strip().split(\" \")) > 3:\n top[j.get_text()] = j.find_parent(href=True)['href']\n ans = {}\n # get top n results\n k = list(top.keys())\n for i in range(n):\n if i < len(k):\n ans[k[i].strip()] = top[k[i]]\n # return results\n print(site)\n return ans\n\n\ndef getTop100():\n bs = BeautifulSoup(get(\"https://www.billboard.com/charts/hot-100\").content,\n features=\"html.parser\")\n print(\"gottem\")\n ans = []\n cur = \"1\"\n for i in bs.find_all(attrs={\"class\": \"chart-element__information\"}):\n tx = [h for h in i.get_text().strip().split(\"\\n\") if h]\n ans.append(cur + \" - \" + tx[0] + \" - \" + tx[1])\n cur = str(int(cur)+1)\n return ans\n\n\ndef get_today():\n map_ = {\"01\": \"January\", \"02\": \"February\", \"03\": \"March\",\n \"04\": \"April\", \"05\": \"May\", \"06\": \"June\", \"07\": \"July\", \"08\": \"August\", \"09\": \"September\", \"10\": \"October\", \"11\": \"November\", \"12\": \"December\"}\n today_ = str(datetime.today()).split(\" \")[0].split(\"-\")\n\n # URL\n URL = \"https://www.historynet.com/today-in-history/\" + \\\n map_[today_[1]] + \"-\" + today_[2]\n ans = []\n # get website\n p = get(URL, headers=headers)\n # parse for dates\n soup = BeautifulSoup(p.content, \"html.parser\")\n for i in soup.find_all(attrs={\"class\": \"war-event\"}):\n s = i.get_text().strip().split(\"\\n\\n\")\n ans.append(s[0].strip() + \": \" + s[1].strip())\n return ans\n\n\ndef get_all_news(websites, n=5):\n ans = {}\n # how many threads\n cc = len(websites.keys())\n # get from one website\n\n def one():\n i = q.get()\n if i == \"Billboard\":\n print(\"starting music\")\n ans[i] = getTop100()\n print(\"done music\")\n elif i == \"Today\":\n print(\"starting 
today\")\n ans[i] = get_today()\n print(\"done today\")\n else:\n try:\n ans[i] = get_top(i, websites, n)\n except:\n pass\n q.task_done()\n # queue of sites\n q = Queue(cc)\n for i in range(cc):\n t = Thread(target=one)\n t.daemon = True\n t.start()\n\n for i in websites.keys():\n q.put(i)\n q.join()\n\n return ans\n", "repo_name": "AndyLi23/HomePage", "sub_path": "HomePage/backend/news/news.py", "file_name": "news.py", "file_ext": "py", "file_size_in_byte": 6298, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 97, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 119, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 121, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 151, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "6922246276", "text": "# filter pointcloud\n\nimport numpy as np\nimport open3d as o3d\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.ndimage import gaussian_filter1d\nfrom scipy.optimize import curve_fit\nfrom sklearn import linear_model\nfrom skimage.measure import LineModel, ransac\nimport utility\nimport time\n\nfor pts_itr in range(1,181,3):\n pts = np.load(str(pts_itr)+'.npy')\n print(\"loaded: \" + str(pts_itr) + \".npy\" )\n N_SCAN = 16\n Horizon_SCAN = 1800\n ang_res_x = 0.2\n ang_res_y = 2.0\n ang_bottom = 15.0+0.1\n\n range_image = np.zeros((N_SCAN,1800,6))\n augmented_range_image = np.zeros(range_image.shape)\n\n start_time = time.time()\n\n poi = []\n\n for pt in pts:\n horizonAngle = math.atan2(pt[1], pt[0]) * 180 / math.pi\n dist = np.sqrt(np.square(pt[0]) + np.square(pt[1]) + np.square(pt[2]) )\n if(horizonAngle < 90 and horizonAngle > -90 and dist > 1):\n columnIdx = round(-1*horizonAngle/ang_res_x) + 450\n range_image[int(pt[4]),int(columnIdx),:5] = pt\n range_image[int(pt[4]),int(columnIdx),5] = np.sqrt(np.square(pt[0]) + np.square(pt[1]))\n if pt[4] < 5:\n poi.append(pt)\n\n for i in range(range_image.shape[0]):\n augmented_range_image[i,:,0] = utility.simple_augment_holes(range_image[i,:,0].copy(),.1)\n augmented_range_image[i,:,1] = utility.simple_augment_holes(range_image[i,:,1].copy(),.1)\n augmented_range_image[i,:,2] = utility.simple_augment_holes(range_image[i,:,2].copy(),.1)\n augmented_range_image[i,:,5] = utility.simple_augment_holes(range_image[i,:,5].copy(),.1)\n\n drivable_region = []\n\n window = 20\n\n for i in range(0,5):\n for j in range(augmented_range_image.shape[1] - window):\n if np.std(augmented_range_image[i,j:j+window,5]) < .5:\n drivable_region.append(augmented_range_image[i,j,:])\n \n # plt.plot(augmented_range_image[i,:,0])\n # plt.plot(np.array(drivable_region)[:,0],np.array(drivable_region)[:,1])\n # plt.show()\n\n print(\"exec time: \" + str(time.time() - start_time))\n\n drivable_region = np.array(drivable_region)\n\n pc = np.array(poi)\n\n geom = []\n\n # visualize pointcloud\n pts_pcd = o3d.geometry.PointCloud()\n pts_pcd.points = o3d.utility.Vector3dVector(pc[:,:3])\n 
pts_pcd.paint_uniform_color(np.array([[0.0],[0.8],[0.9]], dtype=np.float64))\n geom.append(pts_pcd)\n\n # for pt in drivable_region:\n # pcd = o3d.geometry.TriangleMesh.create_sphere(radius=0.1)\n # pcd.paint_uniform_color(np.array([[0.8],[0.8],[0.0]], dtype=np.float64))\n # pcd.translate(pt)\n # geom.append(pcd)\n\n drivable_pcd = o3d.geometry.PointCloud()\n drivable_pcd.points = o3d.utility.Vector3dVector(drivable_region[:,:3])\n drivable_pcd.paint_uniform_color(np.array([[0.0],[1.0],[0.0]], dtype=np.float64))\n geom.append(drivable_pcd)\n\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=3.0, origin=np.array([0., 0., 0.]))\n geom.append(mesh_frame)\n\n o3d.visualization.draw_geometries(geom)\n", "repo_name": "moloydas/road_segmentation_lidar", "sub_path": "filter_pointcloud.py", "file_name": "filter_pointcloud.py", "file_ext": "py", "file_size_in_byte": 3075, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": "numpy.load", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 31, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 36, "usage_type": "call"}, {"api_name": "utility.simple_augment_holes", "line_number": 41, "usage_type": "call"}, {"api_name": "utility.simple_augment_holes", "line_number": 42, "usage_type": "call"}, {"api_name": "utility.simple_augment_holes", "line_number": 43, "usage_type": "call"}, {"api_name": "utility.simple_augment_holes", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "open3d.geometry.PointCloud", "line_number": 68, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 68, "usage_type": "attribute"}, {"api_name": "open3d.utility.Vector3dVector", "line_number": 69, "usage_type": "call"}, {"api_name": "open3d.utility", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 70, "usage_type": "attribute"}, {"api_name": "open3d.geometry.PointCloud", "line_number": 79, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 79, "usage_type": "attribute"}, {"api_name": "open3d.utility.Vector3dVector", "line_number": 80, "usage_type": "call"}, {"api_name": "open3d.utility", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 81, "usage_type": "attribute"}, {"api_name": "open3d.geometry.TriangleMesh.create_coordinate_frame", "line_number": 84, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, 
{"api_name": "open3d.visualization.draw_geometries", "line_number": 87, "usage_type": "call"}, {"api_name": "open3d.visualization", "line_number": 87, "usage_type": "attribute"}]} +{"seq_id": "70762054644", "text": "from tkinter import *\nfrom pytube import YouTube as yt\n\n\nroot = Tk()\nroot.geometry(\"550x250\")\nroot.resizable(0, False)\nroot.title(\"Yt downloader\")\n\nlabel_1 = Label(root, text=\"Downloade videos here \", font=\"san-serif\").pack()\nlink = StringVar()\nlabel_2 = Label(root, text=\"ENTER YOUR URL BELOW\", font='san-serif').pack()\nEntry(root, width=60, textvariable=link).place(x=30, y=80)\n\n\n\n\ndef download():\n save_path = \"/home/sandy/Desktop\"\n url = yt(str(link.get()))\n video = url.streams.first()\n video.download(save_path)\n label_3 = Label(root, text=\"Downlloaded\", font=\"san-serif 15\").pack()\n print(url)\n\n\nbtn = Button(root, text=\"DOWNLOAD\", font=\"san-serif\", bg=\"green\", padx=2, command=\"download\").place(x=230, y=150)\nroot.mainloop()\n\n\n", "repo_name": "Binary-Shade/Youtube-Video_Downloader", "sub_path": "yt_downloader.py", "file_name": "yt_downloader.py", "file_ext": "py", "file_size_in_byte": 753, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "75", "api": [{"api_name": "pytube.YouTube", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "30836593603", "text": "from smbus2 import SMBus\nfrom mlx90614 import MLX90614\n\nbus = SMBus(1)\nsensor = MLX90614(bus, address=0x5A)\n\namb_temp = sensor.get_amb_temp()\namb_temp_two = \"{:.2f}\".format(amb_temp)\n\nobj_temp = sensor.get_obj_temp()\nobj_temp_two = \"{:.2f}\".format(obj_temp)\n\nprint(\"Ambient Temperature :\", amb_temp_two)\nprint(\"Object Temperature :\", obj_temp_two)\n\nbus.close()\n\n\n", "repo_name": "sejalchovatiya21/Capstone-Project", "sub_path": "Temperature_Monitoring/mlxread.py", "file_name": "mlxread.py", "file_ext": "py", "file_size_in_byte": 363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "smbus2.SMBus", "line_number": 4, "usage_type": "call"}, {"api_name": "mlx90614.MLX90614", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "25046820518", "text": "import datetime\nimport mysql.connector\nimport os.path\nimport random\n\nstrUserName = os.getlogin()\n\nmeterReadsPath = 'C:\\\\Users\\\\' + strUserName + '\\\\Documents\\\\Meter-Reads-Website\\\\'\n\ndataBaseName = 'MeterReadsWebsite'\n\nfacilities = {\n 'facilityName': ['Apartment One', 'Penthouse'],\n 'units': [5, 10]\n}\n\nstartDate = datetime.datetime.today().strftime('%Y') + '-01-01'\ntoday = datetime.datetime.today()\n\ndef generateMeterReads(facilityName, units):\n start = datetime.datetime.strptime(startDate, '%Y-%m-%d')\n days = (today - start).days\n\n facilityName = facilityName.replace(\" \", \"\").lower()\n\n #stores all the individual meter reads to be inserted into the database\n #its a multidimensional array with individual reads for ele, nga, and water\n meterReads = [[], [], []]\n\n #used to make sure we continuously build on top of what the meter reads say\n meterValues = [[], [], []]\n\n #loops through the number of units and adds a starter value for each meter\n for array in meterValues:\n print(array)\n for i in range(units):\n array.append(0)\n\n #loops through each day and creates meter reads for that day\n for i in range(days):\n date = str((start+datetime.timedelta(days=i)).strftime('%Y-%m-%d'))\n\n for x in range(len(meterReads)):\n if x == 
0:\n utility = 'ele'\n if x == 1:\n utility = 'nga'\n if x == 2:\n utility = 'water'\n\n #loops through the units to add a meter read for each unit\n for y in range(units):\n read = random.randint(0, 30)\n meterValues[x][y] += read\n print(y + 1)\n meterReads[x].append([facilityName, (y + 1), meterValues[x][y], utility, date])\n\n return meterReads\n\ndef postMeterReads(mydb, mycursor):\n #iterates through the facilities to create meter reads for them\n for i in range(len(facilities['facilityName'])):\n facilityName = facilities['facilityName'][i]\n units = facilities['units'][i]\n\n meterReads = generateMeterReads(facilityName, units)\n print(meterReads)\n #uploads the meter reads for water, nga, and ele\n for array in meterReads:\n sql = \"INSERT INTO meterreads (facility, unit, meter_read, utility, date) VALUES (%s,%s,%s,%s,%s)\"\n print(\"uploading those meter reads now\")\n\n mycursor.executemany(sql, array)\n mydb.commit()\n\n print(\"Meter reads uploaded\")\n\ndef connect():\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\"\n )\n mycursor = mydb.cursor()\n\n return mydb, mycursor\n\ndef connectDB():\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\",\n database=dataBaseName\n )\n mycursor = mydb.cursor()\n return mydb, mycursor\n\ndef createTables(mydb, mycursor):\n mycursor.execute(\"CREATE TABLE meterReads (id INT AUTO_INCREMENT PRIMARY KEY,facility VARCHAR(255), unit VARCHAR(255), meter_read VARCHAR(255), utility VARCHAR(255), date VARCHAR(255))\")\n mycursor.execute(\"CREATE TABLE stats (id INT AUTO_INCREMENT PRIMARY KEY, facility VARCHAR(255), stat VARCHAR(255), utility VARCHAR(255), value VARCHAR(255), date VARCHAR(255))\")\n mydb.commit()\n\ndef checkDB(mydb, mycursor, dataBaseName):\n dataBaseExists = False\n\n mycursor.execute(\"SHOW DATABASES\")\n\n # loops through the databases to check if they exists\n for x in mycursor:\n # print(x)\n if (dataBaseName.lower() in str(x).lower()):\n dataBaseExists = True\n print(\"DB Exists\")\n #connects to the database\n mydb, mycursor = connectDB()\n\n if (dataBaseExists == False):\n # creats a database with the name of the facility\n mycursor.execute(\"CREATE DATABASE \" + dataBaseName)\n mydb.commit()\n\n # connects to the database\n mydb, mycursor = connectDB()\n\n createTables(mydb, mycursor)\n\n print(\"DB Created\")\n return mydb, mycursor\n\ndef clearMeterReads(mydb, mycursor):\n sql = \"TRUNCATE TABLE meterreads\"\n mycursor = mydb.cursor()\n mycursor.execute(sql)\n mydb.commit()\n print(\"Table truncated\")\n\ndef start():\n #gets a connection to the main host that will contain the database\n mydb, mycursor = connect()\n\n #checks to see if the database exists, if not it creates it\n #if it does it returns the connection\n mydb, mycursor = checkDB(mydb, mycursor, dataBaseName)\n\n #clears the meter reads in the database\n clearMeterReads(mydb, mycursor)\n\n #calls a function that will generate and post meter reads to the database\n postMeterReads(mydb, mycursor)\n\nstart()", "repo_name": "trevorhauter/Meter-Reads-Website", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "os.path.getlogin", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 17, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 53, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 79, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 79, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 79, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 89, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "783596183", "text": "\"\"\"\nFlicker routine for visual inspection of the differences between\ntwo images. (following flicker in IDL)\n\nAuthor: Momchil Molnar (momo@nso.edu)\nDate: 31/12/2020\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\n\n\ndef replot():\n \"\"\"\n\n :return:\n \"\"\"\n\n\ndef flicker(im1, im2, wait_time=.25, repetitions=100):\n \"\"\"\n The main flicker function.\n\n : im1: ndarray [nx, ny]\n Image 1 to be plotted\n : im2: ndarray [nx, ny]\n Image 2 to be plotted\n : wait_time: int\n delay between replots [seconds]\n\n :return: success code (0==success!)\n \"\"\"\n\n fig = plt.figure()\n sz_im = im1.shape[0]\n\n ims = []\n im = plt.imshow(im1, animated=True)\n ims.append([im])\n im = plt.imshow(im2, animated=True)\n ims.append([im])\n\n ani = animation.ArtistAnimation(fig, ims, interval=wait_time*1e3, blit=True,\n repeat_delay=wait_time*1e3)\n\n plt.show()\n\n return 0\n\n\ndef test_flicker():\n im1 = np.random.random((1000, 1000))\n im2 = np.random.random((1000, 1000))*.0\n flicker(im1, im2)\n # assert (flicker(im1, im2) == 0)\n\n\nif __name__ == '__main__':\n test_flicker()\n", "repo_name": "momomolnar/Destretching_Algorithms", "sub_path": "visualization/flicker.py", "file_name": "flicker.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.animation.ArtistAnimation", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 
54, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 54, "usage_type": "attribute"}]} +{"seq_id": "19417250466", "text": "import re\nfrom urllib.parse import parse_qs, urlparse\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom userena.compat import SiteProfileNotAvailable\nfrom userena.utils import (\n generate_nonce,\n get_gravatar,\n get_profile_model,\n get_protocol,\n signin_redirect,\n)\n\n\nclass UtilsTests(TestCase):\n \"\"\"Test the extra utils methods\"\"\"\n\n fixtures = [\"users\"]\n\n def test_generate_nonce(self):\n h1 = generate_nonce()\n h2 = generate_nonce()\n h3 = generate_nonce()\n # Check valid activation key\n self.assertTrue(re.match(r\"^[\\w]{40}$\", h1))\n self.assertTrue(re.match(r\"^[\\w]{40}$\", h2))\n self.assertTrue(re.match(r\"^[\\w]{40}$\", h3))\n self.assertNotEqual(h1, h2)\n self.assertNotEqual(h2, h3)\n self.assertNotEqual(h1, h3)\n\n def test_get_gravatar(self):\n template = \"s=%(size)s&d=%(type)s\"\n\n # Check the defaults.\n parsed = urlparse(get_gravatar(\"alice@example.com\"))\n self.assertEqual(\n parse_qs(parsed.query),\n parse_qs(template % {\"size\": 80, \"type\": \"identicon\"}),\n )\n\n # Check different size\n parsed = urlparse(get_gravatar(\"alice@example.com\", size=200))\n self.assertEqual(\n parse_qs(parsed.query),\n parse_qs(template % {\"size\": 200, \"type\": \"identicon\"}),\n )\n\n # Check different default\n parsed = urlparse(get_gravatar(\"alice@example.com\", default=\"404\"))\n self.assertEqual(\n parse_qs(parsed.query),\n parse_qs(template % {\"size\": 80, \"type\": \"404\"}),\n )\n\n def test_signin_redirect(self):\n \"\"\"\n Test redirect function which should redirect the user after a\n succesfull signin.\n\n \"\"\"\n # Test with a requested redirect\n self.assertEqual(signin_redirect(redirect=\"/accounts/\"), \"/accounts/\")\n\n # Test with only the user specified\n user = get_user_model().objects.get(pk=1)\n self.assertEqual(\n signin_redirect(user=user), \"/accounts/%s/\" % user.username\n )\n\n # The ultimate fallback, probably never used\n self.assertEqual(signin_redirect(), settings.LOGIN_REDIRECT_URL)\n\n def test_get_profile_model(self):\n \"\"\"\n Test if the correct profile model is returned when\n ``get_profile_model()`` is called.\n\n \"\"\"\n # A non existent model should also raise ``SiteProfileNotAvailable``\n # error.\n with self.settings(AUTH_PROFILE_MODULE=\"userena.FakeProfile\"):\n self.assertRaises(SiteProfileNotAvailable, get_profile_model)\n\n # An error should be raised when there is no ``AUTH_PROFILE_MODULE``\n # supplied.\n with self.settings(AUTH_PROFILE_MODULE=None):\n self.assertRaises(SiteProfileNotAvailable, get_profile_model)\n\n def test_get_protocol(self):\n \"\"\"Test if the correct protocol is returned\"\"\"\n self.assertEqual(get_protocol(), \"http\")\n\n with self.settings(USERENA_USE_HTTPS=True):\n self.assertEqual(get_protocol(), \"https\")\n", "repo_name": "django-userena-ce/django-userena-ce", "sub_path": "userena/tests/tests_utils.py", "file_name": "tests_utils.py", "file_ext": "py", "file_size_in_byte": 3158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 82, "dataset": "github-code", "pt": "75", "api": [{"api_name": "django.test.TestCase", "line_number": 18, "usage_type": "name"}, {"api_name": "userena.utils.generate_nonce", "line_number": 24, "usage_type": "call"}, {"api_name": "userena.utils.generate_nonce", "line_number": 25, "usage_type": "call"}, {"api_name": 
"userena.utils.generate_nonce", "line_number": 26, "usage_type": "call"}, {"api_name": "re.match", "line_number": 28, "usage_type": "call"}, {"api_name": "re.match", "line_number": 29, "usage_type": "call"}, {"api_name": "re.match", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 39, "usage_type": "call"}, {"api_name": "userena.utils.get_gravatar", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 41, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 46, "usage_type": "call"}, {"api_name": "userena.utils.get_gravatar", "line_number": 46, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 48, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 53, "usage_type": "call"}, {"api_name": "userena.utils.get_gravatar", "line_number": 53, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 55, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 56, "usage_type": "call"}, {"api_name": "userena.utils.signin_redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 69, "usage_type": "call"}, {"api_name": "userena.utils.signin_redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "userena.utils.signin_redirect", "line_number": 75, "usage_type": "call"}, {"api_name": "django.conf.settings.LOGIN_REDIRECT_URL", "line_number": 75, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 75, "usage_type": "name"}, {"api_name": "userena.compat.SiteProfileNotAvailable", "line_number": 86, "usage_type": "argument"}, {"api_name": "userena.utils.get_profile_model", "line_number": 86, "usage_type": "argument"}, {"api_name": "userena.compat.SiteProfileNotAvailable", "line_number": 91, "usage_type": "argument"}, {"api_name": "userena.utils.get_profile_model", "line_number": 91, "usage_type": "argument"}, {"api_name": "userena.utils.get_protocol", "line_number": 95, "usage_type": "call"}, {"api_name": "userena.utils.get_protocol", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "38316854214", "text": "from pathlib import Path\n\nimport numpy as np\nimport pytest\nimport xtgeo\n\nfrom webviz_subsurface._models import WellSetModel\n\n\n@pytest.mark.usefixtures(\"app\")\ndef test_well_set_model(testdata_folder: Path) -> None:\n wellfiles = [\n testdata_folder / \"reek_test_data\" / \"observed_data\" / \"wells\" / well\n for well in [\"OP_1.w\", \"OP_2.w\", \"OP_3.w\", \"OP_4.w\", \"OP_5.w\", \"OP_6.w\"]\n ]\n\n wmodel = WellSetModel(wellfiles=wellfiles)\n assert set(wmodel.well_names) == set(\n [\"OP_1\", \"OP_2\", \"OP_3\", \"OP_4\", \"OP_5\", \"OP_6\"]\n )\n for name, well in wmodel.wells.items():\n assert isinstance(name, str)\n assert isinstance(well, xtgeo.Well)\n op_6 = wmodel.get_well(\"OP_6\")\n assert isinstance(op_6, xtgeo.Well)\n assert op_6.name == \"OP_6\"\n\n\n@pytest.mark.usefixtures(\"app\")\ndef test_logs(testdata_folder: Path) -> None:\n wmodel = WellSetModel(\n wellfiles=[\n testdata_folder / \"reek_test_data\" / \"observed_data\" / \"wells\" / \"OP_6.w\"\n ],\n zonelog=\"Zonelog\",\n )\n well = wmodel.get_well(\"OP_6\")\n assert well.zonelogname == \"Zonelog\"\n\n\n@pytest.mark.usefixtures(\"app\")\ndef 
test_tvd_truncation(testdata_folder: Path) -> None:\n wmodel = WellSetModel(\n wellfiles=[\n testdata_folder / \"reek_test_data\" / \"observed_data\" / \"wells\" / \"OP_6.w\"\n ],\n tvdmin=1000,\n tvdmax=1500,\n )\n well = wmodel.get_well(\"OP_6\")\n assert well.dataframe[\"Z_TVDSS\"].min() >= 1000\n assert well.dataframe[\"Z_TVDSS\"].max() <= 1501\n\n\n@pytest.mark.usefixtures(\"app\")\ndef test_get_fence(testdata_folder: Path) -> None:\n wmodel = WellSetModel(\n wellfiles=[\n testdata_folder / \"reek_test_data\" / \"observed_data\" / \"wells\" / \"OP_6.w\"\n ],\n zonelog=\"Zonelog\",\n )\n fence = wmodel.get_fence(\"OP_6\")\n assert isinstance(fence, np.ndarray)\n # Test horizontal length\n assert int(fence[:, 3].min()) == -40\n assert int(fence[:, 3].max()) == 2713\n # Test tvd\n assert int(fence[:, 2].min()) == 0\n assert int(fence[:, 2].max()) == 1643\n", "repo_name": "equinor/webviz-subsurface", "sub_path": "tests/unit_tests/model_tests/test_well_set_model.py", "file_name": "test_well_set_model.py", "file_ext": "py", "file_size_in_byte": 2081, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "75", "api": [{"api_name": "pathlib.Path", "line_number": 11, "usage_type": "name"}, {"api_name": "webviz_subsurface._models.WellSetModel", "line_number": 17, "usage_type": "call"}, {"api_name": "xtgeo.Well", "line_number": 23, "usage_type": "attribute"}, {"api_name": "xtgeo.Well", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytest.mark.usefixtures", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 30, "usage_type": "name"}, {"api_name": "webviz_subsurface._models.WellSetModel", "line_number": 31, "usage_type": "call"}, {"api_name": "pytest.mark.usefixtures", "line_number": 29, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 42, "usage_type": "name"}, {"api_name": "webviz_subsurface._models.WellSetModel", "line_number": 43, "usage_type": "call"}, {"api_name": "pytest.mark.usefixtures", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 56, "usage_type": "name"}, {"api_name": "webviz_subsurface._models.WellSetModel", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pytest.mark.usefixtures", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 55, "usage_type": "attribute"}]} +{"seq_id": "9218683131", "text": "# <-- task -->\n# take the price of ASUS TUF Gaming NVIDIA GeForce RTX 4080 OC Edition Video Card (PCIe 4.0, 16GB GDDR6X, HDMI 2.1, DisplayPort 1.4a, GPU Tweak) TUF-RTX4080-O16G-GAMING\n# from this url -> https://www.newegg.ca/asus-geforce-rtx-4080-tuf-rtx4080-o16g-gaming/p/N82E16814126598?Item=N82E16814126598\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nurl = \"https://www.newegg.ca/asus-geforce-rtx-4080-tuf-rtx4080-o16g-gaming/p/N82E16814126598?Item=N82E16814126598\"\n\nresult = requests.get(url)\ndoc = BeautifulSoup(result.text, \"html.parser\")\n# print(doc.prettify())/\n\nprices = doc.find_all(text=\"$\")\nparent = prices[0].parent\nstrong = parent.find('strong')\nprint(strong.string)\n\n# completed", "repo_name": "mrdjangodev/Web-scrapping-training", "sub_path": "try1/try2.py", 
"file_name": "try2.py", "file_ext": "py", "file_size_in_byte": 697, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "40546963672", "text": "from collections import deque\n\nclass Graph:\n def __init__(self):\n self.graph = {}\n\n def add_edge(self, node, neighbors):\n self.graph[node] = neighbors\n\n\ndef bfs(graph, start_vertex):\n visited = [False] * len(graph.graph)\n queue = deque()\n\n visited[start_vertex] = True\n queue.append(start_vertex)\n\n while queue:\n node = queue.popleft()\n print(node, end=' ')\n\n for neighbor in graph.graph[node]:\n if not visited[neighbor]:\n visited[neighbor] = True\n queue.append(neighbor)\n\n\nif __name__ == \"__main__\":\n g = Graph()\n\n # Take user input to define the graph\n while True:\n try:\n node = int(input(\"Enter node (or -1 to stop): \"))\n if node == -1:\n break\n neighbors = list(map(int, input(f\"Enter neighbors for node {node}: \").split()))\n g.add_edge(node, neighbors)\n except ValueError:\n print(\"Invalid input. Please enter integers.\")\n\n start_vertex = int(input(\"Enter the starting vertex for BFS: \"))\n\n print(\"BFS starting from vertex\", start_vertex, \":\")\n bfs(g, start_vertex)\n", "repo_name": "SankshipthShetty/Pull-Request-Pioneer", "sub_path": "PYTHON/Breadth_First_Search_Graph.py", "file_name": "Breadth_First_Search_Graph.py", "file_ext": "py", "file_size_in_byte": 1168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "75", "api": [{"api_name": "collections.deque", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "14961270772", "text": "'''\n ControlICE Application\n'''\nfrom art import tprint\nfrom Components.manager import Manager\nfrom time import sleep\nimport requests\n\nclass ControlICE:\n def __init__(self):\n '''Constructor'''\n self.manager = Manager()\n self.__print_app_welcome()\n\n def launch_controlICE(self):\n '''Launches the application'''\n self.manager.launch_app()\n\n @staticmethod\n def __print_app_welcome():\n '''Prints App welcome message'''\n print(\"\\n\\n\")\n tprint(\"ControlICE\",font = \"larry3d\")\n print(\"CP.SL:\\n- Josue Carlos Zenteno Yave\"\n \"\\n- Sergio Silvestre Pavon\"\n \"\\n- Alejandro Riquelme Castaño\"\n \"\\n- Javier Santana Delgado\"\n \"\\n- Julio Sanchez de las Heras Martin Consuegra\\n\")\n\ntry:\n controlICE = ControlICE()\n while True:\n try:\n controlICE.launch_controlICE()\n sleep(10)\n except requests.exceptions.ConnectionError:\n print(\"A connection error ocurred. 
Trying again...\")\nexcept KeyboardInterrupt:\n pass\n", "repo_name": "Josue-Zenteno/ControlICE", "sub_path": "src/ControlICE.py", "file_name": "ControlICE.py", "file_ext": "py", "file_size_in_byte": 1098, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "Components.manager.Manager", "line_number": 12, "usage_type": "call"}, {"api_name": "art.tprint", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "35822282718", "text": "import sys\n\nsys.path.append(\"../\")\n\nimport pytest\nimport pandas as pd\nfrom sqlalchemy.engine.base import Engine\n\nfrom worker import CsvToMssql, ExcelToMssql\nfrom data.constants import SERVER, DATABASE, SCHEMA, TABLE_NAME, USERNAME, PASSWORD\n\n\n@pytest.fixture\ndef csv_to_mssql():\n return CsvToMssql(\n csv_path=\"../data/csv_data_test.csv\",\n server=SERVER,\n database=DATABASE,\n schema=SCHEMA,\n table_name=TABLE_NAME,\n username=USERNAME,\n password=PASSWORD,\n )\n\n\ndef test_read_csv(csv_to_mssql):\n df = csv_to_mssql.read_csv(columns=[\"col1\", \"col2\"], sep=\",\", encoding=\"utf-8\")\n assert isinstance(df, pd.DataFrame)\n assert set(df.columns) == set([\"col1\", \"col2\"])\n\n\ndef test_create_engine(csv_to_mssql):\n engine = csv_to_mssql.create_engine()\n assert isinstance(engine, Engine)\n\n\ndef test_insert_csv_data(csv_to_mssql):\n df = pd.DataFrame({\"col1\": [2, 6, 4, 9], \"col2\": [5, 2, 2, 0]})\n engine = csv_to_mssql.create_engine()\n csv_to_mssql.insert_data(df, engine, action=\"replace\", chunksize=len(df))\n result = pd.read_sql_table(TABLE_NAME, engine)\n print(result)\n print(\"#########\")\n print(df)\n assert result.equals(df)\n\n\ndef test_csv_to_mssql(csv_to_mssql):\n assert csv_to_mssql.csv_to_mssql(\n columns=[\"col1\", \"col2\"], sep=\",\", encoding=\"utf-8\", action=\"replace\"\n )\n engine = csv_to_mssql.create_engine()\n result = pd.read_sql_table(TABLE_NAME, engine)\n assert set(result.columns) == set([\"col1\", \"col2\"])\n\n\n@pytest.fixture\ndef excel_to_mssql():\n return ExcelToMssql(\n excel_path=\"../data/excel_data_test.xlsx\",\n server=SERVER,\n database=DATABASE,\n schema=SCHEMA,\n table_name=TABLE_NAME,\n username=USERNAME,\n password=PASSWORD,\n )\n\n\ndef test_read_excel(excel_to_mssql):\n df = excel_to_mssql.read_excel(sheet=0, columns=[\"col1\", \"col2\"])\n assert isinstance(df, pd.DataFrame)\n assert set(df.columns) == set([\"col1\", \"col2\"])\n\n\ndef test_insert__excel_data(excel_to_mssql):\n df = pd.DataFrame({\"col1\": [2, 6, 4, 9], \"col2\": [5, 2, 2, 0]})\n engine = excel_to_mssql.create_engine()\n excel_to_mssql.insert_data(df, engine, action=\"replace\", chunksize=len(df))\n result = pd.read_sql_table(TABLE_NAME, engine)\n assert result.equals(df)\n\n\ndef test_excel_to_mssql(excel_to_mssql):\n assert excel_to_mssql.excel_to_mssql(\n sheet=0, columns=[\"col1\", \"col2\"], action=\"replace\"\n )\n engine = excel_to_mssql.create_engine()\n result = pd.read_sql_table(TABLE_NAME, engine)\n assert set(result.columns) == set([\"col1\", \"col2\"])\n", "repo_name": "josehenriqueroveda/excel-to-mssql", "sub_path": "excel2mssql/test/test_excel2mssql.py", "file_name": "test_excel2mssql.py", "file_ext": "py", "file_size_in_byte": 2620, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": 
"sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "worker.CsvToMssql", "line_number": 15, "usage_type": "call"}, {"api_name": "data.constants.SERVER", "line_number": 17, "usage_type": "name"}, {"api_name": "data.constants.DATABASE", "line_number": 18, "usage_type": "name"}, {"api_name": "data.constants.SCHEMA", "line_number": 19, "usage_type": "name"}, {"api_name": "data.constants.TABLE_NAME", "line_number": 20, "usage_type": "name"}, {"api_name": "data.constants.USERNAME", "line_number": 21, "usage_type": "name"}, {"api_name": "data.constants.PASSWORD", "line_number": 22, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.base.Engine", "line_number": 34, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_sql_table", "line_number": 41, "usage_type": "call"}, {"api_name": "data.constants.TABLE_NAME", "line_number": 41, "usage_type": "argument"}, {"api_name": "pandas.read_sql_table", "line_number": 53, "usage_type": "call"}, {"api_name": "data.constants.TABLE_NAME", "line_number": 53, "usage_type": "argument"}, {"api_name": "worker.ExcelToMssql", "line_number": 59, "usage_type": "call"}, {"api_name": "data.constants.SERVER", "line_number": 61, "usage_type": "name"}, {"api_name": "data.constants.DATABASE", "line_number": 62, "usage_type": "name"}, {"api_name": "data.constants.SCHEMA", "line_number": 63, "usage_type": "name"}, {"api_name": "data.constants.TABLE_NAME", "line_number": 64, "usage_type": "name"}, {"api_name": "data.constants.USERNAME", "line_number": 65, "usage_type": "name"}, {"api_name": "data.constants.PASSWORD", "line_number": 66, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.read_sql_table", "line_number": 80, "usage_type": "call"}, {"api_name": "data.constants.TABLE_NAME", "line_number": 80, "usage_type": "argument"}, {"api_name": "pandas.read_sql_table", "line_number": 89, "usage_type": "call"}, {"api_name": "data.constants.TABLE_NAME", "line_number": 89, "usage_type": "argument"}]} +{"seq_id": "20431681749", "text": "from typing import List\n\n# This is O(n) time complexity and O(1) space\n# We keep a pointer that increases everytime the elements are different\n# If they are equal only the iterator j increments\n# then when they are different again we change the last element that was a duplicate with a different element present in the array\n# Since we only have to pass the whole array once the time complexity is O(n)\ndef removeDuplicates(A:List[int]) -> List[int]:\n if(not A):\n return []\n \n i = 1\n for j in range(1, len(A)):\n if(A[i -1] != A[j]):\n A[i] = A[j]\n i+=1\n return A[:i]\n\nprint(removeDuplicates([1,1,2,3,4,5,5,6,7,7,7,8,8,9]))\n\n# in python we can just return a set as well\n\ndef oneLinerRemoveDuplicates(A:List[int]) -> List[int]:\n return list(set(A))\n\nprint(oneLinerRemoveDuplicates([1,1,2,3,4,5,5,6,7,7,7,8,8,9]))\n", "repo_name": "Vinicoreia/AlgoRythm", "sub_path": "python/EPI/remove_duplicates.py", "file_name": "remove_duplicates.py", "file_ext": "py", "file_size_in_byte": 865, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "71934759602", "text": "##############################\n# w2j.parser\n# 解析器,解析 html 源码\n##############################\n\nfrom datetime import datetime, timezone, timedelta\nfrom os import link\nfrom pathlib import Path\nimport re\nimport chardet\nfrom inscriptis import get_text\n\n\nRE_A_START = r'([^<]+)'\n\n# 附件内链\n# 早期的链接没有双斜杠\n# wiz:open_attachment?guid=8337764c-f89d-4267-bdf2-2e26ff156098\n# 后期的链接有双斜杠\n# wiz://open_attachment?guid=52935f17-c1bb-45b7-b443-b7ba1b6f854e\nRE_OPEN_ATTACHMENT_HREF = r'wiz:/{0,2}(open_\\w+)\\?guid=([a-z0-9\\-]{36})'\nRE_OPEN_ATTACHMENT_OUTERHTML = RE_A_START + RE_OPEN_ATTACHMENT_HREF + RE_A_END\n\n# 文档内链,只需要提取 guid 后面的部分即可\n# wiz://open_document?guid=c6204f26-f966-4626-ad41-1b5fbdb6829e&kbguid=&private_kbguid=69899a48-dc52-11e0-892c-00237def97cc\nRE_OPEN_DOCUMENT_HREF = r'wiz:/{0,2}(open_\\w+)\\?guid=([a-z0-9\\-]{36})&kbguid=&private_kbguid=([a-z0-9\\-]{36})'\nRE_OPEN_DOCUMENT_OUTERHTML = RE_A_START + RE_OPEN_DOCUMENT_HREF + RE_A_END\n\n\n# 图像文件在 body 中存在的形式,即使是在 .md 文件中,也依然使用这种形式存在\nRE_IMAGE_OUTERHTML = r']*>'\n\n\nclass WizInternalLink(object):\n \"\"\" 嵌入 html 正文中的为知笔记内部链接,可能是笔记,也可能是附件\n \"\"\"\n # 原始链接的整个 HTML 内容,包括 名称\n outerhtml: str = None\n\n # 链接的 title\n title: str = None\n\n # 原始链接中的资源 guid,可能是 attachemnt 或者是 document\n guid: str = None\n\n # 值为 open_attachment 或者 open_document\n link_type: str = 'open_attachment'\n\n def __init__(self, outerhtml: str, guid: str, title: str, link_type: str) -> None:\n self.outerhtml = outerhtml\n self.guid = guid\n self.title = title\n self.link_type = link_type\n\n def __repr__(self) -> str:\n return f''\n\n\nclass WizImage(object):\n \"\"\" 在为知笔记文章中包含的本地图像\n\n 在为知笔记中,本地图像不属于资源,也没有自己的 guid\n \"\"\"\n # 原始图像的整个 HTML 内容,包括 \n outerhtml: str = None\n\n # 仅包含图像的 src 部分\n src: str = None\n\n # 图像文件的 Path 对象,在硬盘上的路径\n file: Path = None\n\n def __init__(self, outerhtml: str, src: str, note_extract_dir: Path) -> None:\n self.outerhtml = outerhtml\n self.src = src\n self.file = note_extract_dir.joinpath(src)\n\n if not self.file.exists():\n raise FileNotFoundError(f'找不到文件 {self.file}!')\n\n def __repr__(self) -> str:\n return f''\n\n\ndef parse_wiz_html(note_extract_dir: Path, title: str) -> tuple[str, list[WizInternalLink], list[WizImage]]:\n \"\"\" 在为知笔记文档的 index.html 中搜索内链的附件和文档链接\n \"\"\"\n index_html = note_extract_dir.joinpath('index.html')\n if not index_html.is_file:\n raise FileNotFoundError(f'主文档文件不存在! 
{index_html} |{title}|')\n html_body_bytes = index_html.read_bytes()\n # 早期版本的 html 文件使用的是 UTF-16 LE(BOM) 编码保存。最新的文件是使用 UTF-8(BOM) 编码保存。要判断编码进行解析\n enc = chardet.detect(html_body_bytes)\n html_body = html_body_bytes.decode(encoding=enc['encoding'])\n\n # 去掉换行符,早期版本的 html 文件使用了 \\r\\n 换行符,而且会切断 html 标记。替换掉换行符方便正则\n html_body = html_body.replace('\\r\\n', '')\n html_body = html_body.replace('\\n', '')\n\n internal_links: list[WizInternalLink] = []\n\n open_attachments = re.finditer(RE_OPEN_ATTACHMENT_OUTERHTML, html_body, re.IGNORECASE)\n for open_attachement in open_attachments:\n link = WizInternalLink(\n open_attachement.group(0),\n open_attachement.group(2),\n open_attachement.group(3),\n open_attachement.group(1))\n internal_links.append(link)\n\n open_documents = re.finditer(RE_OPEN_DOCUMENT_OUTERHTML, html_body, re.IGNORECASE)\n for open_document in open_documents:\n link = WizInternalLink(\n open_document.group(0),\n open_document.group(2),\n open_document.group(4),\n open_document.group(1))\n internal_links.append(link)\n\n images: list[WizImage] = []\n image_match = re.finditer(RE_IMAGE_OUTERHTML, html_body, re.IGNORECASE)\n for image in image_match:\n img = WizImage(image.group(0), image.group(1), note_extract_dir)\n images.append(img)\n return html_body, internal_links, images\n\n\ndef tots(dt: str):\n \"\"\" 转换本地时间到时间戳,数据库中记录的是东八区本地时间\n \"\"\"\n return int(datetime.strptime(dt, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone(timedelta(hours=8))).timestamp()*1000)\n\n\ndef towizid(id: str) -> str:\n \"\"\" 从 joplin 的 id 格式转为 wiz 的 guid 格式\n \"\"\"\n one = id[:8]\n two = id[8:12]\n three = id[12:16]\n four = id[16:20]\n five = id[20:]\n return '-'.join([one, two, three, four, five])\n\n\ndef tojoplinid(guid: str) -> str:\n \"\"\" 从 wiz 的 guid 格式转为 joplin 的 id 格式\n \"\"\"\n return ''.join(guid.split('-'))\n\n\nclass JoplinInternalLink(object):\n \"\"\" 与 Wiz 内链不同,Joplin 内链包括 附件(链接到 resource)、图像(链接到 resource)、文档(链接到 note)\n \"\"\"\n note_id: str\n resource_id: str\n\n # image / open_attachment / open_document\n link_type: str\n\n # 链接的 title\n title: str = None\n\n # 链接的整个文本内容,可能是 markdown 格式也可能是html格式,取决于 note_id 是何种格式\n outertext: str\n\n def __init__(self, note_id: str, resource_id: str, title: str, link_type: int, outertext:str='') -> None:\n self.note_id = note_id\n self.resource_id = resource_id\n self.title = title\n self.link_type = link_type\n self.outertext = outertext\n\n @property\n def id(self) -> str:\n return f'{self.note_id}-{self.resource_id}'\n\n\ndef gen_ilstr(is_markdown: bool, jil: JoplinInternalLink) -> str:\n \"\"\" 返回被替换的内链\n ilstr = internal link str\n \"\"\"\n if is_markdown:\n body = f'[{jil.title}](:/{jil.resource_id})'\n if jil.link_type == 'image':\n return '!' + body\n return body\n if jil.link_type == 'image':\n return f'\"{jil.title}\"'\n return f'{jil.title}'\n\n\ndef gen_end_ilstr(is_markdown: bool, jils: list[JoplinInternalLink]):\n \"\"\" 返回 body 底部要加入的内容\n ilstr = internal link str\n \"\"\"\n if is_markdown:\n return '\\n\\n# 附件链接\\n\\n' + '\\n'.join([ '- ' + gen_ilstr(is_markdown, jil) for jil in jils])\n body = ''.join([ f'
<li>{gen_ilstr(is_markdown, jil)}</li>' for jil in jils])\n    return f'<div><h2>附件链接</h2><ul>{body}</ul></div>
    '\n \n\ndef convert_joplin_body(body: str, is_markdown: bool, internal_links: list[JoplinInternalLink]) -> str:\n \"\"\" 将为知笔记中的 body 转换成 Joplin 内链\n \"\"\"\n insert_to_end: list[JoplinInternalLink] = []\n for jil in internal_links:\n # 替换链接\n if jil.outertext:\n body = body.replace(jil.outertext, gen_ilstr(is_markdown, jil))\n # 所有的附件,需要在body 底部加入链接\n if jil.link_type == 'open_attachment':\n insert_to_end.append(jil)\n # 处理 markdown 转换\n if is_markdown:\n body = get_text(body)\n if insert_to_end:\n body += gen_end_ilstr(is_markdown, insert_to_end)\n return body", "repo_name": "zrong/wiz2joplin", "sub_path": "w2j/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 7836, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 85, "dataset": "github-code", "pt": "75", "api": [{"api_name": "pathlib.Path", "line_number": 72, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 74, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 86, "usage_type": "name"}, {"api_name": "chardet.detect", "line_number": 94, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 103, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.link", "line_number": 105, "usage_type": "name"}, {"api_name": "os.link", "line_number": 110, "usage_type": "argument"}, {"api_name": "re.finditer", "line_number": 112, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.link", "line_number": 114, "usage_type": "name"}, {"api_name": "os.link", "line_number": 119, "usage_type": "argument"}, {"api_name": "re.finditer", "line_number": 122, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 122, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "name"}, {"api_name": "datetime.timezone", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 132, "usage_type": "call"}, {"api_name": "inscriptis.get_text", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "45295322233", "text": "from flask import Flask, request, redirect\r\nfrom twilio.twiml.voice_response import VoiceResponse\r\nfrom twilio.twiml.messaging_response import MessagingResponse\r\nimport random\r\n\r\n\r\ndef getFortune():\r\n fortunes = ['You will become very rich!',\r\n 'You will fall into a big hole, and start a great adventure!',\r\n 'You will find a golden ticket in your next apple!',\r\n 'You will find your umbrella!',\r\n 'You will dig up some treasure at the beach!',\r\n 'You will turn into a unicorn!',\r\n 'You will get no homework tomorrow!',\r\n 'You will get to ride a giant dragon!']\r\n\r\n fortune1 = random.choice(fortunes)\r\n\r\n with open('wisdom.txt') as f:\r\n lines = f.readlines()\r\n advice = random.choice(lines)\r\n # print(random.choice(lines))\r\n\r\n\r\n with open('fortunes.txt', encoding=\"utf8\") as f2:\r\n lines = f2.readlines()\r\n fortune2 = random.choice(lines)\r\n # print(random.choice(lines))\r\n\r\n\r\n sentence1 = \"the robotic fortune teller says..... \" + fortune1\r\n\r\n sentence2 = \", hello human, here is some advice from the robot who knows all ...... 
\" + advice\r\n \r\n sentence3 = \"finally, hear this, \" + fortune2\r\n \r\n choice = random.randrange(0,2)\r\n\r\n q1 =\"may the force be with you\"\r\n q2 = \"the needs of the many outweigh the needs of the few\"\r\n q3 = \"so say we all\"\r\n\r\n if choice == 0:\r\n cquote = q1\r\n\r\n if choice == 1:\r\n cquote = q2\r\n\r\n if choice == 2:\r\n cquote = q3\r\n \r\n\r\n sentence4 = \"I have spoken, human, goodbye, and remember\" + cquote\r\n\r\n return sentence1, sentence2, sentence3, sentence4\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n\r\n@app.route(\"/sms\", methods=['GET', 'POST'])\r\ndef sms_reply():\r\n \"\"\"Respond to incoming calls with a simple text message.\"\"\"\r\n # Start our TwiML response\r\n body = request.values.get('Body', None)\r\n\r\n s1, s2, s3, s4 = getFortune()\r\n\r\n # Start our TwiML response\r\n resp = MessagingResponse()\r\n\r\n # Determine the right reply for this message\r\n if 'fortune' in body.lower():\r\n resp.message(s1 + \" \" + s4)\r\n if 'random' in body.lower():\r\n resp.message(s3 + \" \" + s4)\r\n if 'advice' in body.lower():\r\n resp.message(s2 + \" \" + s4)\r\n\r\n elif body == 'test':\r\n resp.message(\"Goodbye\")\r\n\r\n\r\n return str(resp)\r\n\r\n\r\n@app.route(\"/voice\", methods=['GET', 'POST'])\r\ndef voice():\r\n \"\"\"Respond to incoming phone calls with a fortune message\"\"\"\r\n # Start our TwiML response\r\n resp = VoiceResponse()\r\n\r\n # Read a message aloud to the caller\r\n s1, s2, s3, s4 = getFortune()\r\n resp.say(s1, voice='alice')\r\n resp.say(s2, voice='alice')\r\n resp.say(s3, voice='alice')\r\n resp.say(s4, voice='alice')\r\n\r\n return str(resp)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host = 'localhost', debug=True, port = 8003)", "repo_name": "jemsbhai/robotfortuneteller", "sub_path": "basictwiliocallserver.py", "file_name": "basictwiliocallserver.py", "file_ext": "py", "file_size_in_byte": 2913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "random.choice", "line_number": 17, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 21, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 27, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "twilio.twiml.messaging_response.MessagingResponse", "line_number": 71, "usage_type": "call"}, {"api_name": "twilio.twiml.voice_response.VoiceResponse", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "34243836524", "text": "import os\n\nfrom flask import Flask, session, render_template\nfrom flask_jsglue import JSGlue\nfrom flask_session import Session\nfrom flask_socketio import SocketIO, emit\nfrom models import *\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\") or \\\n 'e5ac358c-f0bf-11e5-9e39-d3b532c10a28'\nsocketio = SocketIO(app)\njsglue = JSGlue(app)\n\n#configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n#initialize a list of empty channels\n# channel_list = []\nchannels_dic = {}\nuid = ''\n\n@app.route(\"/\")\ndef index():\n\t#if user was on a previous channel in 
browser, try to go to that page\n\t# if session.get(\"channel_name\") is not None:\n\t# \treturn redirect()\n\n\treturn render_template(\"index.html\", channels_dic = channels_dic)\n\n@app.route(\"/\", methods = [\"POST\", \"GET\"])\ndef channel(channel_name):\n\tif channel_name not in channels_dic:\n\t\treturn render_template(\"error.html\", message=\"No such channel, please first create it before trying to navigate to it.\")\n\n\t#show existing messages in channel:\n\tmessages = channels_dic[channel_name].messages\n\tfor message in messages:\n\t\tprint(message)\n\n\t#set up so session to remember which channel user is in\n\tprint(channel_name)\n\tsession[\"channel_name\"] = channel_name\n\n\treturn render_template(\"channel.html\", channel_name=channel_name, messages = messages)\n\"\"\"\n-Q: How do we save the list_channels across browser session? When I close the app and reload it, wouldn't it restart it from blank?\n\"\"\"\n@socketio.on(\"create username\")\ndef create_username(data):\n\t#save username of user to session\n\tnew_username = data[\"username\"]\n\tsession[\"username\"] = new_username\n\tprint(session.get(\"username\"))\n\t#update username\n\tglobal uid\n\tuid = new_username\n\t# print(uid)\n\n#user trying to create channel\n@socketio.on(\"create channel\")\ndef create_channel(data):\n\tchannel = data[\"channel\"]\n\t#check if channel already exists with same name:\n\tif channel not in channels_dic:\n\t\t#put channel on everyone's list\n\t\temit(\"new channel created\", {\"channel\": channel}, broadcast=True)\n\t\t#if channel is new, create a Channel object using the name user input - which will be used to save messages after.\n\t\t# channel_name = channel\n\t\t# channel = Channel(name=channel_name)\n\n\t\t#append the channel to the list so if user refreshes all the channels that have already been created shows\n\t\t# channel_list.append(channel)\n\t\tchannels_dic[channel] = Channel(name=channel)\n\n\telse:\n\t\temit(\"channel already exists\", {\"channel\": channel})\n\n#user sending a new message inside channel\n@socketio.on(\"new message\")\ndef new_message(data):\n\tmsg = data[\"msg\"]\n\t# uid = session[\"username\"]\n\tnew_msg = Message(username = uid, text = msg)\n\tt = \"{} \\n Sent by {} at {}\".format(new_msg.text, new_msg.username, new_msg.time)\n\temit(\"new message sent\", {\"msg\": t}, broadcast = True)\n\t# cur_channel = data[\"cur_channel\"]\n\t# #add message to current channel\n\t# channels_dic[cur_channel].add_message(new_msg)\n\tch = session.get(\"channel_name\")\n\tchannels_dic[ch].add_message(new_msg)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "repo_name": "Light52/project2", "sub_path": "application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 3119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_socketio.SocketIO", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_jsglue.JSGlue", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_session.Session", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.render_template", 
"line_number": 47, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 56, "usage_type": "name"}, {"api_name": "flask_socketio.emit", "line_number": 69, "usage_type": "call"}, {"api_name": "flask_socketio.emit", "line_number": 79, "usage_type": "call"}, {"api_name": "flask_socketio.emit", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "6262617774", "text": "from collections import defaultdict\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport pandas as pd\nimport spacy\nfrom src.utils import delete_dir, parse_date\n\nmax_source_length = 512\nmax_target_length = 128\n\n\nclass UnifiedQATrainer:\n def __init__(self,\n run_files,\n model,\n tokenizer,\n optimizer,\n lr_schedular,\n device,\n eval_batch_size,\n no_answer_dataset,\n n_gpu=0):\n self.run_files = run_files\n self.model = model\n self.tokenizer = tokenizer\n self.eval_batch_size = eval_batch_size\n self.optimizer = optimizer\n self.lr_schedular = lr_schedular\n self.device = device\n self.best_score = defaultdict(lambda: 0)\n self.n_gpu = n_gpu\n self.no_answer_dataset = no_answer_dataset\n\n def train(self, dataloader, epoch):\n\n def convert_to_single_gpu(state_dict):\n def _convert(key):\n if key.startswith('module.'):\n return key[7:]\n return key\n\n return {_convert(key): value for key, value in state_dict.items()}\n\n total_loss = 0\n train_losses = []\n print(f'Starting training: epoch {epoch}')\n for batch in tqdm(dataloader):\n encoding = self.tokenizer(batch['input'], padding=\"longest\", max_length=max_source_length, truncation=True,\n return_tensors=\"pt\")\n input_ids, attention_mask = encoding.input_ids, encoding.attention_mask\n\n target_encoding = self.tokenizer(batch['output'], padding=\"longest\", max_length=max_target_length,\n truncation=True)\n labels = target_encoding.input_ids\n # replace padding token id's of the labels by -100, so it's ignored by the loss\n labels = torch.tensor(labels)\n labels[labels == self.tokenizer.pad_token_id] = -100\n\n loss = self.model(input_ids=input_ids.to(self.device), attention_mask=attention_mask.to(self.device),\n labels=labels.to(self.device)).loss\n if self.n_gpu > 1:\n loss = loss.mean()\n\n train_losses.append(loss.detach().cpu())\n loss.backward()\n total_loss += loss.item()\n\n self.optimizer.step()\n self.lr_schedular.step()\n self.optimizer.zero_grad()\n\n print(f'\\nTotal loss: epoch {epoch}: {total_loss}')\n\n def evaluate(self, epoch, dataset_name, dataset, logfile, evaluate_dates=False):\n print(f'Evaluating dataset: {dataset_name}')\n total, correct, tp, tn, fp, fn = len(dataset), 0, 0, 0, 0, 0\n\n dataloader = DataLoader(dataset, shuffle=True, batch_size=self.eval_batch_size)\n predictions_to_save = []\n for batch in tqdm(dataloader):\n encoding = self.tokenizer(batch['input'], padding=\"longest\", max_length=max_source_length, truncation=True,\n return_tensors=\"pt\")\n input_ids, attention_mask = encoding.input_ids, encoding.attention_mask\n\n if self.n_gpu > 1:\n res = self.model.module.generate(input_ids.to(self.device))\n else:\n res = self.model.generate(input_ids.to(self.device))\n predictions = self.tokenizer.batch_decode(res, skip_special_tokens=True)\n\n if evaluate_dates:\n if 
self.no_answer_dataset:\n correct, fn, fp, tn, tp = self.evaluate_helper_no_answer(batch,\n correct,\n fn,\n fp,\n predictions,\n tn,\n tp)\n else:\n correct, fn, fp, tn, tp = self.evaluate_helper_no_no_answer(batch,\n correct,\n fn,\n fp,\n predictions,\n tn,\n tp)\n else:\n correct, fn, fp, tn, tp = self.evaluate_helper(batch, correct, fn, fp, predictions, tn, tp)\n\n if dataset_name != 'train':\n for ip, label, pred in zip(batch['input'], batch['output'], predictions):\n predictions_to_save.append((ip, label, pred, label == pred))\n\n precision = tp / (tp + fp) if (tp + fp) else 0\n recall = tp / (tp + fn) if (tp + fn) else 0\n f1 = (2 * precision * recall) / (precision + recall) if (precision and recall) else 0\n accuracy = correct / total\n print(f'\\ntp: {tp}, fp: {fp}, tn: {tn}, fn: {fn}')\n score_string = f'\\nprecision: {precision}, recall: {recall}, F1: {f1}, accuracy: {accuracy}'\n print(score_string)\n with open(logfile, 'a') as f:\n f.write(f'{dataset_name}\\t{epoch}\\t{precision}\\t{recall}\\t{f1}\\t{accuracy}\\n')\n\n results_df = pd.DataFrame(predictions_to_save,\n columns=['Input', 'Correct', 'Prediction', 'Is Correct prediction'])\n results_df.to_csv(\n f'{self.run_files}/predictions_{dataset_name}_{\"pretrained\" if epoch == -1 else epoch}.csv', index=False)\n if epoch == -1 or 'train' in dataset_name:\n self.best_score[dataset_name] = accuracy\n return\n\n if accuracy > self.best_score[dataset_name] or True:\n save_path = f'{self.run_files}/fine_tuned_model_{dataset_name}_{epoch}'\n self.best_score[dataset_name] = accuracy\n delete_dir(save_path)\n print(f'Saving best model on {dataset_name} at epoch {epoch} with accuracy: {accuracy}')\n if self.n_gpu > 1:\n self.model.module.save_pretrained(save_path)\n else:\n self.model.save_pretrained(save_path)\n with open(f'{save_path}/score.txt', 'w') as f:\n f.write(f'Epoch: {epoch}\\n')\n f.write(score_string)\n\n def evaluate_helper(self, batch, correct, fn, fp, predictions, tn, tp):\n correct += len([1 for actual, pred, in zip(batch['output'], predictions) if actual == pred])\n tp += len([1 for actual, pred, in zip(batch['output'], predictions) if actual == pred == 'yes'])\n tn += len([1 for actual, pred, in zip(batch['output'], predictions) if actual == pred == 'no'])\n fp += len([1 for actual, pred, in zip(batch['output'], predictions) if actual == 'no' and pred == 'yes'])\n fn += len([1 for actual, pred, in zip(batch['output'], predictions) if actual == 'yes' and pred == 'no'])\n return correct, fn, fp, tn, tp\n\n def evaluate_helper_no_answer(self, batch, correct: int, fn: int, fp: int, predictions, tn: int, tp: int):\n for actual, pred in zip(batch['output'], predictions):\n if actual == '':\n if pred in ('', 'no answer>'):\n correct += 1\n tn += 1\n else:\n fp += 1\n else:\n if pred in ('', 'no answer>'):\n fn += 1\n else:\n actual_parsed = parse_date(actual,\n 'year',\n parse_dates_with_spacy=False) # parse with most lenient granularity\n if len(actual_parsed) > 0:\n actual_parsed = actual_parsed[0]\n pred_parsed = parse_date(pred,\n 'year',\n parse_dates_with_spacy=False)\n if len(pred_parsed) > 0:\n pred_parsed = pred_parsed[0]\n if actual_parsed == pred_parsed and actual_parsed is not None and pred_parsed is not None:\n correct += 1\n tp += 1\n\n return correct, fn, fp, tn, tp\n\n def evaluate_helper_no_no_answer(self, batch, correct: int, fn: int, fp: int, predictions, tn: int, tp: int):\n for actual, pred in zip(batch['output'], predictions):\n actual_parsed = parse_date(actual,\n 'year',\n 
parse_dates_with_spacy=False) # parse with most lenient granularity\n if len(actual_parsed) > 0:\n actual_parsed = actual_parsed[0]\n pred_parsed = parse_date(pred,\n 'year',\n parse_dates_with_spacy=False)\n if len(pred_parsed) > 0:\n pred_parsed = pred_parsed[0]\n if actual_parsed == pred_parsed and actual_parsed is not None and pred_parsed is not None:\n correct += 1\n tp += 1\n else:\n fp += 1\n fn += 1\n\n return correct, fn, fp, tn, tp\n", "repo_name": "usc-isi-i2/time-qualifier-prediction", "sub_path": "src/unifiedqa_trainer.py", "file_name": "unifiedqa_trainer.py", "file_ext": "py", "file_size_in_byte": 9710, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "collections.defaultdict", "line_number": 32, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 80, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": "call"}, {"api_name": "src.utils.delete_dir", "line_number": 138, "usage_type": "call"}, {"api_name": "src.utils.parse_date", "line_number": 168, "usage_type": "call"}, {"api_name": "src.utils.parse_date", "line_number": 173, "usage_type": "call"}, {"api_name": "src.utils.parse_date", "line_number": 186, "usage_type": "call"}, {"api_name": "src.utils.parse_date", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "71557594162", "text": "from avi_objects.infra_imports import *\nimport jsonpointer\nimport json\nimport avi_objects.logger_utils as logger_utils\n\n\ndef json_diff(doc1, doc2, name, *args, **kwargs):\n val1 = json_value(doc1, name, *args, **kwargs)\n val2 = json_value(doc2, name, *args, **kwargs)\n return int(val1) - int(val2)\n\ndef json_value(doc, name, *args, **kwargs):\n if (not isinstance(doc, dict) and\n not isinstance(doc, list)):\n doc = json.loads(doc)\n if isinstance(doc, list):\n for obj in doc:\n try:\n val = json_value(obj, name, *args, **kwargs)\n return val\n except:\n pass\n fail('Unable to find the field')\n else:\n pointer = get_json_pointer(doc, name, *args, **kwargs)\n val = jsonpointer.resolve_pointer(doc, pointer)\n return val\n\ndef get_json_pointer(doc, name, *args, **kwargs):\n if (not isinstance(doc, dict) and\n not isinstance(doc, list)):\n doc = json.loads(doc)\n field = kwargs.get('field', None)\n if field is not None:\n del kwargs['field']\n pointer = ''\n try:\n # Find the field in the doc\n jsonpointer.resolve_pointer(doc, field)\n pointer = field\n except:\n # Walk the dictionary and find the field. 
Ignore any list that is\n # found\n path = []\n if get_json_path(doc, name, path, field, 0, 0, *args, **kwargs) is False:\n raise RuntimeError('Unable to find the field')\n pointer = ''\n for p in path:\n pointer = pointer + '/' + p\n return pointer\n\ndef get_json_path(doc1, name, path, field, match_index, match_field,\n *args, **kwargs):\n index = kwargs\n # print name, path, field\n for key, val in doc1.iteritems():\n # print key, val\n if isinstance(val, dict):\n path.append(key)\n # print path\n # print '*****Key = %s val = %s name = %s'%(key, val, name)\n if key == name and field is None and len(index) == 0:\n # Matched the structure that is being looked at\n return True\n if get_json_path(val, name, path, field,\n 1 if key == name and len(index) > 0 else 0,\n 1 if field is not None else 0, *args, **kwargs) is True:\n return True\n path.pop()\n # print path\n elif isinstance(val, list):\n path.append(key)\n if key == name and field is None and len(index) == 0:\n return True\n arr_index = 0\n while arr_index < len(val):\n path.append(str(arr_index))\n if isinstance(val[arr_index], dict) and \\\n get_json_path(val[arr_index], name, path, field,\n 1 if key == name and len(\n index) > 0 else 0,\n 1 if field is not None else 0, *args, **kwargs) is True:\n return True\n arr_index = arr_index + 1\n path.pop()\n path.pop()\n else:\n # print \"match_index %d match_field %d\" % (match_index,\n # match_field)\n if match_index == 1:\n # Is this field part of the index\n index_field = index.get(key, None)\n # print \"***** Index is %s and index_field is %s\"% (index, index_field)\n # print \"***** index_field %s val is %s\" % (index_field,\n # str(val))\n if index_field is not None and index_field == str(val):\n del index[key]\n\n if len(index) != 0:\n # not a full match of index. 
continue searching\n continue\n else:\n # print '*****Index fully matched'\n if field is None:\n return True\n # print doc1\n return get_json_path(doc1, name, path, field,\n 0, 1, *args)\n if match_field == 1 and len(index) == 0:\n # match for the field\n # print field\n if field is None:\n return True\n tokens = field.split('/')\n if len(path) < len(tokens):\n continue\n path.append(key)\n found = True\n for pos in xrange(1, len(tokens) + 1):\n # print \"**tokens compare %s : %s\" %(path[-pos],\n # tokens[-pos])\n if path[-pos] != tokens[-pos]:\n found = False\n break\n if found is True:\n # print \"Field found\"\n # print path\n return True\n else:\n path.pop()\n elif key == name:\n path.append(key)\n # print 'Field found'\n return True\n return False\n\n\ndef compare_json(upgrade_history, status):\n for (key, value), (key2, value2) in zip(upgrade_history.items(), status.items()):\n if isinstance(value, dict):\n return compare_json(value, value2)\n if isinstance(value, list):\n for ele1, ele2 in zip(value, value2):\n if isinstance(ele1, dict):\n compare_json(ele1, ele2)\n elif isinstance(ele1, list) and ele1 != ele2:\n logger_utils.fail('upgrade_history != upgrade_status, failed {} : {}'.format(ele1, ele2))\n\n elif str(value).lower() != str(value2).lower():\n logger_utils.fail('upgrade_history != upgrade_status, failed {} : {'\n '}'.format(key, value), '{} : {}'.format(key2, value2))\n\n", "repo_name": "balajeen/avi-monitoring", "sub_path": "test/avitest/lib/json_utils.py", "file_name": "json_utils.py", "file_ext": "py", "file_size_in_byte": 5907, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "jsonpointer.resolve_pointer", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "jsonpointer.resolve_pointer", "line_number": 39, "usage_type": "call"}, {"api_name": "avi_objects.logger_utils.fail", "line_number": 147, "usage_type": "call"}, {"api_name": "avi_objects.logger_utils", "line_number": 147, "usage_type": "name"}, {"api_name": "avi_objects.logger_utils.fail", "line_number": 150, "usage_type": "call"}, {"api_name": "avi_objects.logger_utils", "line_number": 150, "usage_type": "name"}]} +{"seq_id": "27571891237", "text": "import csv\nimport io\nfrom selenium import webdriver\nfrom selenium.common import exceptions\nimport sys\nimport time\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom requests_html import HTMLSession \nfrom selenium import webdriver \nimport argparse\nfrom bs4 import BeautifulSoup as bs # importing BeautifulSoup\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport sys\nimport io\nimport csv\nimport json\nimport pandas as pd\nfrom os.path import exists\n\ntotal_comments = 0\n\ndef addCounter():\n global total_comments\n total_comments = total_comments + 1\n return total_comments\n\ndef scrape(url, FILENAME):\n\n\n\n print(\"URL IS\" , url)\n driver = webdriver.Chrome(ChromeDriverManager().install())\n # driver = webdriver.Chrome(ChromeDriverManager(version=\"87.0.4280.88\").install())\n driver.get(url)\n driver.maximize_window()\n time.sleep(5)\n\n try:\n title = driver.find_element_by_xpath('//*[@id=\"container\"]/h1/yt-formatted-string').text\n comment_section = driver.find_element_by_xpath('//*[@id=\"comments\"]')\n driver.execute_script(\"arguments[0].scrollIntoView();\", comment_section)\n time.sleep(7)\n except 
exceptions.NoSuchElementException:\n \n error = \"Error: Double check selector OR \"\n error += \"element may not yet be on the screen at the time of the find operation\"\n print(error)\n\n # Scroll into view the comment section, then allow some time\n # for everything to be loaded as necessary.\n \n\n # Scroll all the way down to the bottom in order to get all the\n # elements loaded (since Youtube dynamically loads them).\n last_height = driver.execute_script(\"return document.documentElement.scrollHeight\")\n\n while True:\n # Scroll down 'til \"next load\".\n driver.execute_script(\"window.scrollTo(0, document.documentElement.scrollHeight);\")\n\n # Wait to load everything thus far.\n time.sleep(3)\n\n # Calculate new scroll height and compare with last scroll height.\n new_height = driver.execute_script(\"return document.documentElement.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n # One last scroll just in case.\n driver.execute_script(\"window.scrollTo(0, document.documentElement.scrollHeight);\")\n\n try:\n # Extract the elements storing the usernames and comments.\n username_elems = driver.find_elements_by_xpath('//*[@id=\"author-text\"]')\n comment_elems = driver.find_elements_by_xpath('//*[@id=\"content-text\"]')\n except exceptions.NoSuchElementException:\n error = \"Error: Double check selector OR \"\n error += \"element may not yet be on the screen at the time of the find operation\"\n print(error)\n\n with io.open( FILENAME, 'w', newline='', encoding=\"utf-16\") as file:\n writer = csv.writer(file, delimiter =\",\", quoting=csv.QUOTE_ALL)\n writer.writerow([\"Comment\"])\n for comment in zip(username_elems, comment_elems):\n try:\n writer.writerow([comment.text])\n addCounter()\n except: \n i =0\n for c in comment:\n if i%2 ==1:\n writer.writerow([c.text])\n addCounter()\n i = i +1\n\n driver.close()\n driver.quit()\n\n\ndef getID(data):\n id_list = []\n df = pd.read_csv(data)\n for id in df[\"ID\"]:\n id_list.append(id)\n return id_list\nrejected = 0\nif __name__ == \"__main__\":\n df = pd.read_csv(\"data/youtube/videos_to_scrape.csv\")\n # print(df)\n # driver = webdriver.Chrome(ChromeDriverManager().install())\n i = 0 \n for id in getID(\"data/youtube/videos_to_scrape.csv\"): \n if len(id) > 0:\n if not exists(\"data/youtube/comments/\" + str(id)+ \".csv\"):\n scrape( \"https://www.youtube.com/watch?v=\" + id, \"data/youtube/comments/\" + str(id)+ \".csv\")\n time.sleep(2)\n print( total_comments)\n i+=1\n else:\n rejected +=1\n if (rejected % 100 == 0):\n print(\"rejected: \", rejected)\n\n ", "repo_name": "linusfoo/scrappers", "sub_path": "get_id.py", "file_name": "get_id.py", "file_ext": "py", "file_size_in_byte": 4177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 32, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 32, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 43, "usage_type": "attribute"}, {"api_name": "selenium.common.exceptions", "line_number": 43, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": 
"selenium.common.exceptions.NoSuchElementException", "line_number": 77, "usage_type": "attribute"}, {"api_name": "selenium.common.exceptions", "line_number": 77, "usage_type": "name"}, {"api_name": "io.open", "line_number": 82, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 83, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 115, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "73721604722", "text": "import logging\n\nlog = False\nlog_level = logging.WARNING\nlog_name = 'osm2gmns'\n\nverbose = True\n\nlonlat_coord_precision = 7\nlocal_coord_precision = 2\n\n\ndefault_int_buffer = 20.0 # meter\nsegment_resolution = 5.0 # meter\n\nosm_highway_type_dict = {'motorway': ('motorway', False),\n 'motorway_link': ('motorway', True),\n 'trunk': ('trunk', False),\n 'trunk_link': ('trunk', True),\n 'primary': ('primary', False),\n 'primary_link': ('primary', True),\n 'secondary': ('secondary', False),\n 'secondary_link': ('secondary', True),\n 'tertiary': ('tertiary', False),\n 'tertiary_link': ('tertiary', True),\n 'residential': ('residential', False),\n 'residential_link': ('residential', True),\n 'living_street': ('living_street', False),\n 'service': ('service', False),\n 'services': ('service', False),\n 'cycleway': ('cycleway', False),\n 'footway': ('footway', False),\n 'pedestrian': ('footway', False),\n 'steps': ('footway', False),\n 'track': ('track', False),\n 'unclassified': ('unclassified', False)}\n\nlink_type_no_dict = {'motorway': 1, 'trunk': 2, 'primary': 3, 'secondary': 4,\n 'tertiary': 5, 'residential': 6, 'living_street': 7,\n 'service': 8, 'cycleway': 9, 'footway': 10, 'track': 11,\n 'unclassified': 15, 'connector': 20, 'railway': 30, 'aeroway': 31}\n\ndefault_lanes_dict = {'motorway': 4, 'trunk': 3, 'primary': 3, 'secondary': 2,\n 'tertiary': 2, 'residential': 1, 'living_street': 1, 'service': 1,\n 'cycleway': 1, 'footway': 1, 'track': 1, 'unclassified': 1, 'connector': 2}\n\ndefault_speed_dict = {'motorway': 120, 'trunk': 100, 'primary': 80, 'secondary': 60,\n 'tertiary': 40, 'residential': 30, 'living_street': 30, 'service': 30,\n 'cycleway': 5, 'footway': 5, 'track': 30, 'unclassified': 30, 'connector': 120}\n\ndefault_capacity_dict = {'motorway': 2300, 'trunk': 2200, 'primary': 1800,\n 'secondary': 1600, 'tertiary': 1200, 'residential': 1000,\n 'living_street': 1000, 'service': 800,\n 'cycleway': 800, 'footway': 800, 'track': 800,\n 'unclassified': 800, 'connector': 9999}\n\ndefault_oneway_flag_dict = {'motorway': False, 'trunk': False, 'primary': False,\n 'secondary': False, 'tertiary': False,\n 'residential': False, 'living_street': False,\n 'service': False, 'cycleway': True, 'footway': True,\n 'track': True, 'unclassified': False, 'connector': False,\n 'railway': True, 'aeroway': True}\n\ndefault_bounds = {'minlat': -90.0,\n 'minlon': -180.0,\n 'maxlat': 90.0,\n 'maxlon': 180.0}\n", "repo_name": "jiawlu/OSM2GMNS", "sub_path": "osm2gmns/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 3305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 74, "dataset": "github-code", "pt": "75", "api": [{"api_name": "logging.WARNING", "line_number": 4, "usage_type": "attribute"}]} +{"seq_id": "38781557173", "text": "# 
-*- coding: utf-8 -*-\r\n\"\"\"\r\nClassify test image 2\r\n\r\n\"\"\"\r\n\r\n# -----------------------------------------------------------------------------\r\n# READS AN IMAGE AND CLASSIFIES IT INTO ONE OF THE FOUR CLASSES: BENIGN TISSUE, \r\n# IN SITU CARCINOMA, INVASIVE CARCINOMA AND NORMAL CARCINOMA\r\n# -----------------------------------------------------------------------------\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\nimport numpy as np\r\nnp.random.seed(1337) # for reproducibility\r\n\r\nimport numpy as np\r\nimport normalizeStaining as norm\r\n#import glob, os\r\nimport cPickle\r\nimport sys\r\nimport tifffile as tiff\r\nimport os\r\n#from matplotlib.pyplot import *\r\n\r\n\r\n#print (str(sys.argv[1]))\r\n#image_name=\"images\\\\4_2.tif\";\r\nnr_of_inputs = len(sys.argv)\r\nif nr_of_inputs<2:\r\n sys.exit()\r\nsys.argv.pop(0)\r\nimage_names = str(sys.argv)\r\n#model_name=\"models\\\\model_new_128RV_4classes_13_10_49G.pkl\";\r\n#modelw_name = \"models\\\\weigths.pkl\";\r\n#sec_classif_name=\"models\\\\SVM_classifG.pkl\";\r\nsec_classif_name=os.path.join(\"models\",\"SVM_classif_try2_16_10_25.pkl\");\r\n\r\nmodelCompiled_name = os.path.join(\"models\",\"modelCompiled.pkl\");\r\n\r\n# Prepare test image\r\n# -----------------------------------------------------------------------------\r\ncount=-1;\r\n\r\ndata_all=np.zeros((120,15552,32));\r\ndata2_all=np.zeros((120,15552,1));\r\nsofthist_all=np.zeros((120,32));\r\ncrisphist_all=np.zeros((120,32));\r\naux_labels_all=np.zeros((120,1));\r\n\r\n#f, (ax1,ax2) = plt.subplots(1,2)\r\n\r\nnb_classes=4;\r\nnb_channels=3;\r\npatchSize = 128; # patchSize x patchSize\r\nshapex=2048 # ncols\r\nshapey=1536 # nrows\r\nnum_images=1;\r\nnum_channels=3;\r\nnb_patch=int(shapex/patchSize)*int(shapey/patchSize); # total number of patches\r\n\r\n\r\n# - Load models\r\nf_model= open( modelCompiled_name, \"rb\" )\r\nmodel2 = cPickle.load(f_model); #CNN\r\nsec_classif= cPickle.load( open( sec_classif_name, \"rb\" ) ) #SVM\r\n\r\nimage_name = sys.argv.pop(0)\r\nprint(image_name)\r\n# - Load image\r\na = np.zeros((num_images,num_channels,shapey,shapex),dtype=np.uint8)\r\nimg = tiff.imread(image_name)\r\nimg_temp = np.array(img, dtype=np.uint8)\r\nimg_temp = np.array(img_temp, dtype=np.float64)\r\n\r\n# - Pre-process image (HE normalization)\r\nimg_preproc=norm.normalizeStaining(img_temp)\r\n\r\na[0,0,:,:] = img_preproc[:,:,0]\r\na[0,1,:,:] = img_preproc[:,:,1]\r\na[0,2,:,:] = img_preproc[:,:,2]\r\ntest_set_x=a;\r\n\r\n\r\n# - Split test image in patches\r\npatches_all=np.zeros(shape=(nb_patch,num_channels,patchSize,patchSize),dtype='uint8');\r\n# labels_all=np.zeros(shape=((nb_patch,1)));\r\nfirst_lin_patch=0;\r\nfirst_col_patch=0; \r\nfor p in range(0,nb_patch): #for each patch \r\n patch_prov=test_set_x[0,:,first_lin_patch : first_lin_patch + patchSize,first_col_patch : first_col_patch + patchSize]; # provisory patch (before possible rotation) \r\n\r\n \r\n patches_all[p,:,:,:] = patch_prov\r\n first_col_patch += patchSize\r\n\r\n \r\n if first_col_patch==shapex: #run all collumns in a row, then pass to the next row\r\n first_col_patch=0\r\n first_lin_patch += patchSize\r\n\r\ntest_set_x=patches_all; \r\n\r\n\r\nX_test=test_set_x;\r\nX_test = X_test.astype(\"float32\")\r\nX_test /= 255\r\n\r\n\r\nnb_test = X_test.shape[0] \r\nfor curr_patch in range(0,nb_test):\r\n temp = X_test[curr_patch,:,:,:]\r\n for curr_channel in range(0,3):\r\n temp[curr_channel,:,:] = 
temp[curr_channel,:,:]-np.average(temp[curr_channel,:,:])\r\n X_test[curr_patch,:,:,:] = temp \r\n\r\n# - Test model with image\r\ntest=model2.predict(X_test)\r\n\r\n\r\n# - Prepare test image for classification\r\ntmp2 =np.reshape(test,(12,16,32,9,9))\r\ntmp3 = np.transpose(tmp2, (0,3,1,4,2))\r\ntmp4 = np.reshape(tmp3, (12*9,16*9,32))\r\n#tmp5 = tmp4\r\n#tmp5[:,:,29] = 0\r\n#tmp5 =np.argmax(tmp5,axis=2)\r\n\r\ndata = np.reshape(tmp4,(108*144,32))\r\nsofthist = np.sum(data, axis=0);\r\nsofthist = softhist/np.sum(softhist)\r\n\r\n#data2 = np.reshape(tmp5,(108*144,1))\r\n#crisphist = np.histogram(data2,range=(0,31),bins=32,normed=True)\r\n\r\n\r\n# - Classify test image (softhist)\r\nresult=sec_classif.predict(softhist)\r\n#dec = clf.decision_function([[1]])\r\n#dec.shape[1]\r\n\r\n\r\n# Show classification result\r\n# -----------------------------------------------------------------------------\r\nresult_class=result;\r\nclass_names=['Benign tissue', 'In situ carcinoma', 'Invasive carcinoma', 'Normal tissue'];\r\nresult_class_name=class_names[int(result_class[0])];\r\nprint(image_name + \" - \" + result_class_name)\r\n", "repo_name": "Naeempatel010/Doctify", "sub_path": "models/Cancer/predict_clean_1.py", "file_name": "predict_clean_1.py", "file_ext": "py", "file_size_in_byte": 4402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "numpy.random.seed", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.argv.pop", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 65, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.argv.pop", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tifffile.imread", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 74, "usage_type": "attribute"}, {"api_name": "normalizeStaining.normalizeStaining", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.average", 
"line_number": 114, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "42518405489", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\nimport json\nimport logging\n\nfrom memory_profiler import profile\n\nimport spacy\nimport spacy_sentence_bert\nfrom spacy.tokens import Doc\nfrom spacy.matcher import PhraseMatcher\nfrom spacy.pipeline import EntityRuler\n\nspacy.tokens.token.Token.set_extension('tmp_mask', default='')\n\nlogging.basicConfig(format='[INFO] %(asctime)s - %(message)s', level=logging.INFO)\n\n@profile\ndef load_spacy_pipe(model_path, sbert_model=None):\n nlp_model = model_path\n nlp = spacy.load(nlp_model, disable=[\"tagger\", \"parser\"])\n logging.info(\"loaded spacy model\")\n \n if sbert_model is not None:\n # load one of the models listed at https://github.com/MartinoMensio/spacy-sentence-bert/\n # nlp = spacy_sentence_bert.load_model(cfg.models.sentence_transformers.xlm)\n #this adds in the sentence-bert vectors\n # xx_paraphrase_xlm_r_multilingual_v1\n # sbert_model = cfg.models.sentence_transformers.dist\n logging.info(\"adding the sentence embeddings...\")\n nlp = spacy_sentence_bert.create_from(nlp, sbert_model)\n logging.info(f\"loaded sentence-transformers model from {sbert_model}\")\n\n return nlp\n\nclass WhitespaceTokenizer:\n \"\"\"simple whitespace tokenizer to be used when\n input is pretokenised (e.g. 
output of sentiment\n analysis system)\"\"\"\n \n def __init__(self, vocab):\n self.vocab = vocab\n\n def __call__(self, text):\n words = text.split(\" \")\n return Doc(self.vocab, words=words)\n\ndef add_special_tokens_to_tokenizer(nlp):\n logging.info(\"adding special tokens to spacy model\")\n nlp.tokenizer.add_special_case(\"---SEP---\", [{\"ORTH\": \"---SEP---\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"[sep]\", [{\"ORTH\": \"[sep]\"}])\n nlp.tokenizer.add_special_case(\"[cls]\", [{\"ORTH\": \"[cls]\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n nlp.tokenizer.add_special_case(\"\", [{\"ORTH\": \"\"}])\n \n\ndef add_gazetteer_to_nlp(nlp, terms):\n logging.info(\"creating `phrasematcher` from gazetteer with {} terms\".format(len(terms)))\n matcher = PhraseMatcher(nlp.vocab, attr=\"LOWER\")\n # Only run nlp.make_doc to speed things up\n # patterns = [nlp.make_doc(text) for text in terms]\n patterns = list(nlp.tokenizer.pipe(terms))\n matcher.add(\"Companies\", patterns)\n \n return matcher\n\ndef add_entity_ruler(nlp, terms):\n logging.info(\"creating `entity_ruler` from gazetteer with {} terms\".format(len(terms)))\n ruler = nlp.create_pipe(\"entity_ruler\")\n nlp.add_pipe(ruler, before=\"ner\")\n # ruler = nlp.add_pipe(\"entity_ruler\", before=\"ner\")\n # ruler = nlp.add_pipe(nlp.create_pipe('entity_ruler'))\n patterns = []\n for term in terms:\n term_tokens = re.split(r'[\\s-]', term)\n pattern_list = [{\"LOWER\": token} for token in term_tokens]\n \n term_pattern = {\"label\": \"ORG\", \"pattern\": pattern_list, \"id\": term}\n patterns.append(term_pattern)\n print(term_pattern)\n \n ruler.add_patterns(patterns)\n return nlp\n\nif __name__ == \"__main__\":\n pass", "repo_name": "ovsilya/respondelligent_cpu", "sub_path": "fastapi_app/app/src/spacy_utils.py", "file_name": "spacy_utils.py", "file_ext": "py", "file_size_in_byte": 3582, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "spacy.tokens.token.Token.set_extension", "line_number": 16, "usage_type": "call"}, {"api_name": "spacy.tokens", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 18, "usage_type": "attribute"}, {"api_name": "spacy.load", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "spacy_sentence_bert.create_from", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 34, "usage_type": "call"}, {"api_name": "memory_profiler.profile", "line_number": 20, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 51, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 67, "usage_type": "call"}, {"api_name": "spacy.matcher.PhraseMatcher", "line_number": 68, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 77, 
"usage_type": "call"}, {"api_name": "re.split", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "42668937966", "text": "import glob\n\nfrom Utils import *\nimport torch\nimport torchvision\nfrom FEQEModel import FeqeModel\nfrom DataSet import Div2k\nfrom torch.utils.tensorboard import SummaryWriter\nfrom VGGLoss import VGGPerceptualLoss\nimport time\nfrom piqa import SSIM\nfrom pthflops import count_ops\nfrom torchvision.io import read_image\nimport torchvision.transforms.functional as F\n\nRUN_NAME = \"C32_N5\"\nLOW_RES_SCALE = 2\n# --- Hyper Parameters ---\nBATCH_SIZE = 8\nTRAIN_IM_SIZE = (196, 196)\nNUM_WORKERS = 0\nRESIDUAL_BLOCKS = 20\nCHANNELS = 16\nLEARNING_RATE = 1e-4\nEPOCHS = 50\nCHECKPOINT_EVERY = 5\nVGG_LOSS_WEIGHT = 1e-3\nssim = SSIM().cuda()\n\n\ndef get_device():\n if torch.cuda.is_available():\n torch.cuda.current_device()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"running on device:\", device)\n return device\n\n\ndef train(data_loader_train, data_loader_test, device, model, optimizer, vgg_loss_fn, scheduler, writer):\n\n for epoch in range(EPOCHS + 1):\n start = time.time()\n for i, im_batch in enumerate(data_loader_train):\n im_batch = im_batch.to(device)\n im_batch_lowres = down_sample(im_batch, LOW_RES_SCALE)\n\n im_batch_lowres_recon = model(im_batch_lowres)\n mse_loss = torch.nn.MSELoss()(im_batch_lowres_recon, im_batch)\n vgg_loss = vgg_loss_fn(im_batch_lowres_recon, im_batch)\n loss = (1 - VGG_LOSS_WEIGHT) * mse_loss + VGG_LOSS_WEIGHT * vgg_loss\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n iter = epoch * len(data_loader_train) + i\n writer.add_scalar('Loss/loss', loss.item(), iter)\n writer.add_scalar('Loss/mse_loss', mse_loss.item(), iter)\n writer.add_scalar('Loss/vgg_loss', vgg_loss.item(), iter)\n writer.add_scalar('Metrics/PSNR_Train', psnr(im_batch * 255, im_batch_lowres_recon * 255), iter)\n writer.add_scalar('Metrics/SSIM_Train', ssim(im_batch, im_batch_lowres_recon), iter)\n\n # evaluate on test set\n with torch.no_grad():\n test_avg_psnr = 0\n test_avg_ssim = 0\n for i, im_batch in enumerate(data_loader_test):\n im_batch = im_batch.to(device)\n im_batch_lowres = down_sample(im_batch, LOW_RES_SCALE)\n\n im_batch_lowres_recon = model(im_batch_lowres)\n\n test_avg_psnr += psnr(im_batch * 255, im_batch_lowres_recon * 255)\n test_avg_ssim += ssim(im_batch, im_batch_lowres_recon)\n test_avg_psnr /= len(data_loader_test)\n test_avg_ssim /= len(data_loader_test)\n writer.add_scalar('Metrics/PSNR_Test', test_avg_psnr, epoch * len(data_loader_test) + i)\n writer.add_scalar('Metrics/SSIM_Test', test_avg_ssim, epoch * len(data_loader_test) + i)\n\n if epoch % CHECKPOINT_EVERY == 0:\n torch.save(model, 'checkpoints/' + RUN_NAME + '_' + str(epoch) + '.pth')\n\n scheduler.step()\n\n print(\"Epoch: {}/{}\".format(epoch, EPOCHS), \"Time: {:.2f}\".format(time.time() - start))\n print(\"Test Avg. PSNR: {:.2f}\".format(test_avg_psnr))\n print(\"Test Avg. 
SSIM: {:.2f}\".format(test_avg_ssim))\n\n return model\n\n\ndef banchmark_test(data_loader_test, device, model, writer, name):\n\n # evaluate on test set\n with torch.no_grad():\n test_avg_psnr = 0\n test_avg_ssim = 0\n for i, im_batch in enumerate(data_loader_test):\n im_batch = im_batch.to(device)\n im_batch_lowres = down_sample(im_batch, LOW_RES_SCALE)\n\n im_batch_lowres_recon = model(im_batch_lowres)\n\n test_avg_psnr += psnr(im_batch * 255, im_batch_lowres_recon * 255)\n test_avg_ssim += ssim(im_batch, im_batch_lowres_recon)\n test_avg_psnr /= len(data_loader_test)\n test_avg_ssim /= len(data_loader_test)\n print(\"Test Avg. PSNR: {:.2f}\".format(test_avg_psnr))\n print(\"Test Avg. SSIM: {:.2f}\".format(test_avg_ssim))\n writer.add_scalar(f'{name}/Metrics/PSNR_Test', test_avg_psnr)\n writer.add_scalar(f'{name}/Metrics/SSIM_Test', test_avg_ssim)\n\n\ndef reconstruct_image(model, device, transform):\n\n image_name = '0840'\n image_path = f'Data/Test/{image_name}.png'\n image = read_image(image_path)\n\n fig, ax = plt.subplots(2, 2, figsize=(10, 10))\n ax[0, 0].title.set_text('Original Image')\n ax[0, 0].imshow(F.to_pil_image(image))\n image = transform(image)\n image = (image.unsqueeze(0) / 255).to(device)\n im_l = down_sample(image, LOW_RES_SCALE)\n im_l_recon = model(im_l)\n ax[0, 1].title.set_text('Ground Truth')\n ax[0, 1].imshow(F.to_pil_image(image.squeeze(0)))\n ax[1, 0].title.set_text('Low Resolution')\n ax[1, 0].imshow(F.to_pil_image(im_l.squeeze(0)))\n ax[1, 1].title.set_text('Reconstructed')\n ax[1, 1].imshow(F.to_pil_image(im_l_recon.squeeze(0)))\n fig.show()\n fig.savefig('Results/' + image_name + '_reconstructed.png')\n\n\ndef main():\n device = get_device()\n\n transform = torchvision.transforms.Compose([\n torchvision.transforms.RandomCrop(TRAIN_IM_SIZE),\n ])\n\n # writer = SummaryWriter(comment=RUN_NAME)\n\n \"\"\"Train Model\"\"\"\n # dataset_train = Div2k('Data/Train/', transform=transform)\n # data_loader_train = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE,\n # shuffle=True, num_workers=0)\n #\n # dataset_test = Div2k('Data/Test/')\n # data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1,\n # shuffle=True, drop_last=True, num_workers=0)\n #\n # model = FeqeModel(num_residual_blocks=RESIDUAL_BLOCKS, channels=CHANNELS).to(device)\n # optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE,\n # betas=(0.9, 0.999), eps=1e-08)\n # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2 * EPOCHS // 3, gamma=0.1)\n #\n # vgg_loss_fn = VGGPerceptualLoss(resize=False).to(device)\n\n # train(data_loader_train, data_loader_test, device, model, optimizer, vgg_loss_fn, scheduler, writer)\n\n \"\"\"Load Model\"\"\"\n model = torch.load('checkpoints/' + RUN_NAME + '_' + '50' + '.pth')\n\n \"\"\"Calculate Parameters and Flops\"\"\"\n # print(f\"Parameters: {count_parameters(model)}\")\n #\n # input = torch.randn(1, 3, 196, 196).to(device)\n # print(f\"FLOPS: {count_ops(model, input)}\")\n\n \"\"\"Reconstruct image\"\"\"\n reconstruct_image(model, device, transform)\n\n \"\"\"Get Banchmark Results\"\"\"\n # banchmark_test(data_loader_test, device, model, writer, 'Feqe')\n # for benchmark_folder in glob.glob('Data/Benchmark/*'):\n # print(benchmark_folder)\n # dataset_benchmark = Div2k(benchmark_folder)\n # data_loader_benchmark = torch.utils.data.DataLoader(dataset_benchmark, batch_size=1,\n # shuffle=True, drop_last=True, num_workers=0)\n # banchmark_test(data_loader_benchmark, device, model, writer, 
name=benchmark_folder)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "Technion-Yuval-Goshen-Tuval-Gelvan/Fast-Efficient-Image-Enhancement", "sub_path": "Train.py", "file_name": "Train.py", "file_ext": "py", "file_size_in_byte": 7242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "piqa.SSIM", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.cuda.current_device", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 34, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 95, "usage_type": "call"}, {"api_name": "torchvision.io.read_image", "line_number": 118, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional.to_pil_image", "line_number": 122, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 122, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.to_pil_image", "line_number": 128, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 128, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.to_pil_image", "line_number": 130, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 130, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.to_pil_image", "line_number": 132, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 132, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 140, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 141, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 141, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "21593137663", "text": "import logging\nimport os\n\nimport environ\n\n\ndef str2bool(v: str) -> bool:\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\n@environ.config(prefix=\"\")\nclass AppConfig:\n @environ.config\n class PG:\n name: str = environ.var(\"fias_db\")\n host: str = environ.var(\"127.0.0.1\")\n port: int = environ.var(5432, converter=int)\n user: str = environ.var(\"fias\")\n password: str = environ.var(\"\")\n pool_recycle: float = environ.var(30.0, converter=float)\n\n @environ.config\n class Sphinx:\n listen: str = environ.var(\"127.0.0.1:9312\")\n index_addrobj: str = 'idx_fias_addrobj'\n index_sugg: str = 
'idx_fias_sugg'\n min_length_to_star: int = 3\n delta_len: int = 2\n default_rating_delta: int = 2\n regression_coef: float = 0.08\n max_results_count: int = 10\n search_freq_words: bool = True\n suggestions_count = 6\n sphinx_user_uid = 104\n\n def listen_port(self) -> str:\n if self.listen.startswith('/'):\n return self.listen\n else:\n return self.listen.split(':')[1]\n\n pg: PG = environ.group(PG)\n sphinx: Sphinx = environ.group(Sphinx)\n\n\nLOG_LEVEL = logging.DEBUG if str2bool(os.environ.get('APP_DEBUG', '1')) else logging.INFO\n", "repo_name": "jar3b/py-phias", "sub_path": "settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 1306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "75", "api": [{"api_name": "environ.var", "line_number": 15, "usage_type": "call"}, {"api_name": "environ.var", "line_number": 16, "usage_type": "call"}, {"api_name": "environ.var", "line_number": 17, "usage_type": "call"}, {"api_name": "environ.var", "line_number": 18, "usage_type": "call"}, {"api_name": "environ.var", "line_number": 19, "usage_type": "call"}, {"api_name": "environ.var", "line_number": 20, "usage_type": "call"}, {"api_name": "environ.config", "line_number": 13, "usage_type": "attribute"}, {"api_name": "environ.var", "line_number": 24, "usage_type": "call"}, {"api_name": "environ.config", "line_number": 22, "usage_type": "attribute"}, {"api_name": "environ.group", "line_number": 42, "usage_type": "call"}, {"api_name": "environ.group", "line_number": 43, "usage_type": "call"}, {"api_name": "environ.config", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 46, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "31810639987", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom ebikes_app.models import Usuario, Bicicleta, Insumo\nfrom ebikes_app.forms import UsuarioForm, BicicletaForm, InsumoForm, BicicletaBusqueda\n# Create your views here.\n\n\ndef form_usuario(request):\n\n if request.method == \"POST\":\n \n mi_formulario = UsuarioForm(request.POST)\n\n if mi_formulario.is_valid():\n datos = mi_formulario.cleaned_data \n usuario = Usuario(nombre=datos[\"nombre\"], apellido=datos[\"apellido\"], email=datos[\"email\"])\n usuario.save()\n mi_formulario = UsuarioForm()\n return render(request, \"ebikes_app/formulario_usuario.html\", {\"mensaje\":\"agregado con exito!\", \"mi_formulario\":mi_formulario})\n \n else:\n\n mi_formulario = UsuarioForm()\n \n return render(request, \"ebikes_app/formulario_usuario.html\", {\"mi_formulario\":mi_formulario})\n\n\n\n\ndef form_bicicleta(request):\n\n if request.method == \"POST\":\n \n mi_formulario = BicicletaForm(request.POST)\n\n if mi_formulario.is_valid():\n datos = mi_formulario.cleaned_data \n bicicleta = Bicicleta(marca=datos[\"marca\"], modelo=datos[\"modelo\"], rodado=datos[\"rodado\"], precio=datos[\"precio\"])\n bicicleta.save()\n mi_formulario = BicicletaForm ()\n return render(request, \"ebikes_app/formulario_bicicleta.html\", {\"mensaje\":\"agregado con exito!\", \"mi_formulario\":mi_formulario})\n \n else:\n\n mi_formulario = BicicletaForm()\n \n return render(request, \"ebikes_app/formulario_bicicleta.html\", {\"mi_formulario\":mi_formulario})\n\n\n\n\n \ndef 
form_insumo(request):\n\n if request.method == \"POST\":\n \n mi_formulario = InsumoForm(request.POST)\n\n if mi_formulario.is_valid():\n datos = mi_formulario.cleaned_data \n insumo = Insumo(marca=datos[\"marca\"], descripcion=datos[\"descripcion\"], precio=datos[\"precio\"])\n insumo.save()\n mi_formulario = InsumoForm()\n return render(request, \"ebikes_app/formulario_insumo.html\", {\"mensaje\":\"agregado con exito!\", \"mi_formulario\":mi_formulario})\n \n else:\n\n mi_formulario = InsumoForm()\n \n return render(request, \"ebikes_app/formulario_insumo.html\", {\"mi_formulario\":mi_formulario})\n\n\n\ndef form_busqueda(request):\n\n busqueda_formulario = BicicletaBusqueda()\n\n buscado = False\n\n if request.GET: \n busqueda_formulario = BicicletaBusqueda(request.GET)\n if busqueda_formulario.is_valid():\n bicicletas = Bicicleta.objects.filter(marca=busqueda_formulario.cleaned_data.get(\"criterio\")).all()\n return render(request, \"ebikes_app/bicicleta_busqueda.html\", {\"busqueda_formulario\": busqueda_formulario, \"bicicletas\": bicicletas, \"buscado\" : True})\n \n \n return render(request, \"ebikes_app/bicicleta_busqueda.html\", {\"busqueda_formulario\": busqueda_formulario, \"buscado\":buscado})", "repo_name": "ccosta6136/ebikes", "sub_path": "ebikes_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2961, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "ebikes_app.forms.UsuarioForm", "line_number": 12, "usage_type": "call"}, {"api_name": "ebikes_app.models.Usuario", "line_number": 16, "usage_type": "call"}, {"api_name": "ebikes_app.forms.UsuarioForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "ebikes_app.forms.UsuarioForm", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "ebikes_app.forms.BicicletaForm", "line_number": 34, "usage_type": "call"}, {"api_name": "ebikes_app.models.Bicicleta", "line_number": 38, "usage_type": "call"}, {"api_name": "ebikes_app.forms.BicicletaForm", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "ebikes_app.forms.BicicletaForm", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "ebikes_app.forms.InsumoForm", "line_number": 57, "usage_type": "call"}, {"api_name": "ebikes_app.models.Insumo", "line_number": 61, "usage_type": "call"}, {"api_name": "ebikes_app.forms.InsumoForm", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": "ebikes_app.forms.InsumoForm", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "ebikes_app.forms.BicicletaBusqueda", "line_number": 76, "usage_type": "call"}, {"api_name": "ebikes_app.forms.BicicletaBusqueda", "line_number": 81, "usage_type": "call"}, {"api_name": "ebikes_app.models.Bicicleta.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "ebikes_app.models.Bicicleta.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "ebikes_app.models.Bicicleta", "line_number": 83, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 84, 
"usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "72516628401", "text": "import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlady_img = cv.imread('../Resources/Photos/lady.jpg')\n# cv.imshow('Lady:',lady_img)\ngroup1_img = cv.imread('../Resources/Photos/group 1.jpg')\n# cv.imshow('Group 1:',group1_img)\ngroup2_img = cv.imread('../Resources/Photos/group 2.jpg')\n# cv.imshow('Group 2:',group2_img)\n\ngrey_lady_img = cv.cvtColor(lady_img, cv.COLOR_BGR2GRAY)\ngrey_g1_img = cv.cvtColor(group1_img, cv.COLOR_BGR2GRAY)\ngrey_g2_img = cv.cvtColor(group2_img, cv.COLOR_BGR2GRAY)\n\nhaar_cascade = cv.CascadeClassifier('../Resources/data/haar-face-default.xml')\n\nlady_face_rect = haar_cascade.detectMultiScale(grey_lady_img, scaleFactor=1.1, minNeighbors=2)\ng1_face_rect = haar_cascade.detectMultiScale(grey_g1_img, scaleFactor=1.1, minNeighbors=2)\ng2_face_rect = haar_cascade.detectMultiScale(grey_g2_img, scaleFactor=1.1, minNeighbors=2)\n\n\nfor (x, y, w, h) in lady_face_rect:\n cv.rectangle(lady_img, (x, y), (x+w, y+h), (0, 255, 0), thickness=2)\n\nfor (x, y, w, h) in g1_face_rect:\n cv.rectangle(group1_img, (x, y), (x+w, y+h), (0, 255, 0), thickness=2)\n\nfor (x, y, w, h) in g2_face_rect:\n cv.rectangle(group2_img, (x, y), (x+w, y+h), (0, 255, 0), thickness=2)\n\ncv.imshow(f\"{len(lady_face_rect)} Detected Faces:\", lady_img)\ncv.imshow(f\"{len(g1_face_rect)} Detected Faces:\", group1_img)\ncv.imshow(f\"{len(g2_face_rect)} Detected Faces:\", group2_img)\n\ncv.waitKey(0)", "repo_name": "amdp-chauhan/python-opencv-exp", "sub_path": "Face detection and recognizition/face-detection-using-haar-ascades.py", "file_name": "face-detection-using-haar-ascades.py", "file_ext": "py", "file_size_in_byte": 1400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "18797226913", "text": "from pprint import pprint\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.trainer.supporters import CombinedLoader\n\nfrom models.gnns import LitProcessorSet\nfrom hyperparameters import get_hyperparameters\nfrom models.algorithm_reasoner import LitAlgorithmReasoner\nfrom 
datasets.constants import _DATASET_CLASSES, _DATASET_ROOTS\nfrom train_config import MODULE_CONFIG\n\nclass LitAlgorithmProcessor(pl.LightningModule):\n\n def __init__(self,\n hidden_dim,\n algorithm_names,\n dataset_kwargs,\n algo_classes,\n ensure_permutation,\n processors=['MPNN'],\n bias=get_hyperparameters()['bias'],\n reduce_proc_hid_w_MLP=False,\n update_edges_hidden=False,\n use_gate=False,\n use_LSTM=False,\n use_ln=False,\n use_TF=False,\n transferring=False,\n freeze_proc=False,\n double_process=False,\n xavier_on_scalars=False,\n biased_gate=False,\n test_with_val=True,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n lr=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay']):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.processors = processors\n self.bias = bias\n self.reduce_proc_hid_w_MLP = reduce_proc_hid_w_MLP\n self.use_gate = use_gate\n self.use_LSTM = use_LSTM\n self.use_ln = use_ln\n self.use_TF = use_TF\n self.update_edges_hidden = update_edges_hidden\n self.transferring = transferring\n self.learning_rate = lr\n self.weight_decay = weight_decay\n self.xavier_on_scalars = xavier_on_scalars\n self.biased_gate = biased_gate\n self.freeze_proc = freeze_proc\n self.double_process = double_process\n self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self.val_dataloader = self.val_dataloader_normal\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self.processor_set = LitProcessorSet(\n 2*hidden_dim,\n hidden_dim,\n reduce_with_MLP=reduce_proc_hid_w_MLP,\n update_edges_hidden=update_edges_hidden,\n edge_dim=hidden_dim,\n bias=bias,\n use_gate=use_gate,\n use_LSTM=use_LSTM,\n use_ln=use_ln,\n biased_gate=biased_gate,\n processors=processors)\n self.algorithm_names = algorithm_names\n self.algorithms = nn.ModuleDict()\n for algo in algorithm_names:\n self.algorithms[algo] = algo_classes[algo](\n algorithm=algo,\n hidden_dim=hidden_dim,\n algo_processor=self.processor_set,\n dataset_class=_DATASET_CLASSES[algo],\n dataset_root=_DATASET_ROOTS[algo],\n dataset_kwargs=dataset_kwargs[algo],\n bias=bias,\n use_TF=use_TF,\n transferring=transferring,\n ensure_permutation=ensure_permutation,\n xavier_on_scalars=xavier_on_scalars,\n test_with_val=False, # ALWAYS FALSE\n test_with_val_every_n_epoch=test_with_val_every_n_epoch,\n test_train_every_n_epoch=test_train_every_n_epoch,\n double_process=self.double_process,\n )\n self.save_hyperparameters(ignore=[])\n self.debug_epoch = 1e9\n\n def train_dataloader(self):\n return [self.algorithms[algo].train_dataloader() for algo in self.algorithm_names]\n # return CombinedLoader(dict((name, algo.train_dataloader()) for name, algo in self.algorithms.items()), mode='max_size_cycle')\n\n def val_dataloader_normal(self):\n return CombinedLoader(dict((name, algo.val_dataloader()) for name, algo in self.algorithms.items()), mode='max_size_cycle')\n\n def val_dataloader_alt(self):\n return [self.val_dataloader_normal(), self.test_dataloader()]\n\n def test_dataloader(self, suffix=''):\n return CombinedLoader(dict((name, algo.test_dataloader(suffix=suffix)) for name, algo in self.algorithms.items()), mode='max_size_cycle')\n\n\n def forward(self, batch):\n return self.fwd_step(batch, 0)\n\n def fwd_step(self, batch, batch_idx):\n assert not self.freeze_proc or not any(k.requires_grad for k in 
self.processor_set.processors[0].parameters()), breakpoint()\n outputs = {}\n for name, algorithm in self.algorithms.items():\n outputs[name] = algorithm.fwd_step(batch[name], batch_idx)\n return outputs\n\n def on_train_epoch_start(self):\n for algorithm in self.algorithms.values():\n algorithm.current_epoch = self.current_epoch\n\n\n def training_step(self, batch, batch_idx):\n total_loss = 0\n for name, algo_batch in zip(self.algorithm_names, batch):\n algorithm = self.algorithms[name]\n output = algorithm.training_step(algo_batch, batch_idx)\n if isinstance(algo_batch, list):\n num_graphs = algo_batch[0].num_graphs\n else:\n num_graphs = algo_batch.num_graphs\n self.log_dict(dict((f'train/loss/{name}/{k}', v) for k, v in output['losses_dict'].items()), batch_size=num_graphs)\n self.log(f'train/loss/{name}/average_loss', output['loss'], on_step=True, on_epoch=True, batch_size=num_graphs)\n self.log_dict(dict((f'train/acc/{name}/{k}', v) for k, v in output['accuracies'].items()), batch_size=num_graphs, add_dataloader_idx=False, on_epoch=True, on_step=False)\n total_loss = total_loss + output['loss']\n total_loss = total_loss / len(self.algorithms)\n self.log('train/loss/average_loss', total_loss, on_step=True, on_epoch=True, prog_bar=False, batch_size=num_graphs)\n if self.current_epoch >= self.debug_epoch:\n breakpoint()\n return {'loss': total_loss}\n\n def valtest_step(self, batch, batch_idx, mode):\n output = {}\n total_loss = 0\n for name, algorithm in self.algorithms.items():\n output[name] = algorithm.valtest_step(batch[name], batch_idx, mode)\n self.log_dict(dict((f'{mode}/loss/{name}/{k}', v) for k, v in output[name]['losses'].items()), batch_size=batch[name].num_graphs, add_dataloader_idx=False)\n average_loss = sum(output[name]['losses'].values()) / len(output[name]['losses'])\n self.log(f'{mode}/loss/{name}/average_loss', average_loss, batch_size=batch[name].num_graphs, add_dataloader_idx=False, on_epoch=True)\n self.log_dict(dict((f'{mode}/acc/{name}/{k}', v) for k, v in output[name]['accuracies'].items()), batch_size=batch[name].num_graphs, add_dataloader_idx=False)\n total_loss = total_loss + average_loss\n total_loss = total_loss / len(self.algorithms)\n self.log(f'{mode}/loss/average_loss', total_loss, batch_size=batch[name].num_graphs, add_dataloader_idx=False)\n return output\n\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def configure_optimizers(self):\n term_params = []\n normal_params = []\n for name, param in self.named_parameters():\n if '_term' in name or 'termination' in name or 'predinet' in name:\n term_params.append(param)\n else:\n normal_params.append(param)\n lr = self.learning_rate\n optimizer = optim.Adam([\n {'params': term_params, 'lr': lr},\n {'params': normal_params, 'lr': lr}\n ],\n lr=lr,\n weight_decay=self.weight_decay)\n return optimizer\n\nif __name__ == '__main__':\n ...\n", "repo_name": "danilonumeroso/conar", "sub_path": "models/algorithm_processor.py", "file_name": "algorithm_processor.py", "file_ext": "py", "file_size_in_byte": 8747, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "75", "api": [{"api_name": "pytorch_lightning.LightningModule", "line_number": 15, "usage_type": "attribute"}, {"api_name": "hyperparameters.get_hyperparameters", "line_number": 24, "usage_type": "call"}, {"api_name": "hyperparameters.get_hyperparameters", "line_number": 39, "usage_type": "call"}, {"api_name": "hyperparameters.get_hyperparameters", "line_number": 40, "usage_type": "call"}, {"api_name": "models.gnns.LitProcessorSet", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.ModuleDict", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "datasets.constants._DATASET_CLASSES", "line_number": 84, "usage_type": "name"}, {"api_name": "datasets.constants._DATASET_ROOTS", "line_number": 85, "usage_type": "name"}, {"api_name": "pytorch_lightning.trainer.supporters.CombinedLoader", "line_number": 105, "usage_type": "call"}, {"api_name": "pytorch_lightning.trainer.supporters.CombinedLoader", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 184, "usage_type": "name"}]} +{"seq_id": "21244330668", "text": "\"\"\"Session collector for managing sessions instances\"\"\"\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout\n\nSESSION_LIMIT_SECONDS = getattr(settings, 'SESSION_LIMIT_SECONDS', 0)\nSESSION_TOKEN_LIMIT_SECONDS = getattr(settings, 'SESSION_TOKEN_LIMIT_SECONDS', 300)\n\nclass SessionCollector(object):\n \"\"\"Collector of sessions for user\"\"\"\n def __init__(self):\n self._sessions = {}\n\n def register(self, request):\n \"\"\"Register session by key with creation time\"\"\"\n if not request.session.session_key in self._sessions.keys():\n self._sessions[request.session.session_key] = (datetime.now(), request)\n\n @property\n def opened(self):\n \"\"\"Return the number of sessions after a flush\"\"\"\n self.flush()\n return len(self._sessions)\n\n def flush(self, session_limit=SESSION_LIMIT_SECONDS):\n \"\"\"Flush the cache of opened sessions and\n close expired session\"\"\"\n now = datetime.now()\n\n for session_key, values in self._sessions.items():\n creation_time, request = values\n delta = now - creation_time\n if session_limit and delta.seconds >= session_limit:\n logout(request)\n if not request.session.exists(session_key):\n del self._sessions[session_key]\n\n def set_unique(self):\n \"\"\"Choose the current session, and close the others\"\"\"\n current_session_key = self.get_current_session_key()\n\n for session_key, values in self._sessions.items():\n if session_key != current_session_key:\n creation_time, request = values\n logout(request)\n del self._sessions[session_key]\n\n def get_current_session_key(self, session_token_limit=SESSION_TOKEN_LIMIT_SECONDS):\n \"\"\"Return the current session key, selected by his creation time\n and his limit before destruction, we suppose that we always\n have 2 items in sessions\"\"\"\n sessions = self._sessions.items()[:2]\n\n if sessions[0][1][0] > sessions[1][1][0]:\n most_recent_session = sessions[0]\n most_oldest_session = sessions[1]\n else:\n most_recent_session = sessions[1]\n most_oldest_session = sessions[0]\n\n delta_oldest = datetime.now() - most_oldest_session[1][0]\n if session_token_limit and delta_oldest.seconds < session_token_limit:\n return most_oldest_session[0]\n 
return most_recent_session[0]\n\n", "repo_name": "Fantomas42/django-ut-sessions", "sub_path": "utsessions/sessions.py", "file_name": "sessions.py", "file_ext": "py", "file_size_in_byte": 2529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "django.conf.settings", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 8, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "35887109027", "text": "######## Picamera Object Detection Using Tensorflow Classifier #########\r\n#\r\n# Author: Evan Juras\r\n# Date: 4/15/18\r\n# Description: \r\n# This program uses a TensorFlow classifier to perform object detection.\r\n# It loads the classifier uses it to perform object detection on a Picamera feed.\r\n# It draws boxes and scores around the objects of interest in each frame from\r\n# the Picamera. It also can be used with a webcam by adding \"--usbcam\"\r\n# when executing this script from the terminal.\r\n\r\n## Some of the code is copied from Google's example at\r\n## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\r\n\r\n## and some is copied from Dat Tran's example at\r\n## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py\r\n\r\n## but I changed it to make it more understandable to me.\r\n\r\n\r\n# Import packages\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nfrom picamera.array import PiRGBArray\r\nfrom picamera import PiCamera\r\nimport tensorflow as tf\r\nimport argparse\r\nimport sys\r\n\r\n\r\n#servo control imports and vars\r\nimport RPi.GPIO as GPIO\r\nfrom time import sleep\r\nfrom Object_detection_servo_GPIO import SetPW\r\n\r\n\r\nGPIO.setmode(GPIO.BOARD)\r\nGPIO.setwarnings(False)\r\nGPIO.setup(3, GPIO.OUT)\r\npwm = GPIO.PWM(3, 50)\r\npwm.start(0)\r\nGPIO.setwarnings(False)\r\n#pwm.ChangeDutyCycle(0)\r\n###########################\r\n\r\n# Set up camera constants\r\n#IM_WIDTH = 1280\r\n#IM_HEIGHT = 720\r\nIM_WIDTH = 640 #Use smaller resolution for\r\nIM_HEIGHT = 480 #slightly faster framerate\r\n\r\nk_counter = 0\r\nf_counter = 0\r\ns_counter = 0 #spoon\r\n\r\n# Select camera type (if user enters --usbcam when calling this script,\r\n# a USB webcam will be used)\r\ncamera_type = 'picamera'\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--usbcam', help='Use a USB webcam instead of picamera',\r\n action='store_true')\r\nargs = parser.parse_args()\r\nif args.usbcam:\r\n camera_type = 'usb'\r\n\r\n# This is needed since the working directory is the object_detection folder.\r\nsys.path.append('..')\r\n\r\n# Import utilites\r\nfrom utils import label_map_util\r\nfrom utils import visualization_utils as vis_util\r\n\r\n# Name of the directory containing the object detection module we're using\r\nMODEL_NAME = 
'ssdlite_mobilenet_v2_coco_2018_05_09'\r\n\r\n# Grab path to current working directory\r\nCWD_PATH = os.getcwd()\r\n\r\n# Path to frozen detection graph .pb file, which contains the model that is used\r\n# for object detection.\r\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\r\n\r\n# Path to label map file\r\nPATH_TO_LABELS = os.path.join(CWD_PATH,'data','backup2.pbtx')\r\n\r\n# Number of classes the object detector can identify\r\nNUM_CLASSES = 90\r\n\r\n## Load the label map.\r\n# Label maps map indices to category names, so that when the convolution\r\n# network predicts `5`, we know that this corresponds to `airplane`.\r\n# Here we use internal utility functions, but anything that returns a\r\n# dictionary mapping integers to appropriate string labels would be fine\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n# Load the Tensorflow model into memory.\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n sess = tf.Session(graph=detection_graph)\r\n\r\n\r\n# Define input and output tensors (i.e. data) for the object detection classifier\r\n\r\n# Input tensor is the image\r\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n\r\n# Output tensors are the detection boxes, scores, and classes\r\n# Each box represents a part of the image where a particular object was detected\r\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n\r\n# Each score represents level of confidence for each of the objects.\r\n# The score is shown on the result image, together with the class label.\r\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n\r\n# Number of objects detected\r\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n# Initialize frame rate calculation\r\nframe_rate_calc = 2\r\nfreq = cv2.getTickFrequency()\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\nprevious_pw = 0\r\n\r\n# Initialize camera and perform object detection.\r\n#sleep(1)\r\n\r\nSetPW(2.2)\r\nSetPW(0)# overflow\r\nprint(\"overflow\")\r\nsleep(1)\r\nSetPW(1.8)\r\nSetPW(0)\r\nprint(\"fork\")\r\nsleep(1)\r\nSetPW(1.5)\r\nSetPW(0)\r\nprint(\"knife\")\r\nsleep(1)\r\nSetPW(1.2)\r\nprint(\"spoon\")\r\nsleep(1)\r\n#SetPW(2.2)\r\n#print(\"back to overflow\")\r\n\r\nif camera_type == 'picamera':\r\n \r\n # Initialize Picamera and grab reference to the raw capture\r\n camera = PiCamera()\r\n camera.resolution = (IM_WIDTH,IM_HEIGHT)\r\n camera.framerate = 10\r\n rawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))\r\n rawCapture.truncate(0)\r\n\r\n for frame1 in camera.capture_continuous(rawCapture, format=\"bgr\",use_video_port=True):\r\n\r\n t1 = cv2.getTickCount()\r\n \r\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\r\n # i.e. 
a single-column array, where each item in the column has the pixel RGB value\r\n frame = np.copy(frame1.array)\r\n frame.setflags(write=1)\r\n frame_expanded = np.expand_dims(frame, axis=0)\r\n\r\n # Perform the actual detection by running the model with the image as input\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: frame_expanded})\r\n\r\n # Draw the results of the detection (aka 'visulaize the results')\r\n \r\n \"\"\"\r\n print(\"classes\")\r\n print(classes[0][0])\r\n print(\"boxes\")\r\n print(boxes[0][0])\r\n print(\"scores\")\r\n print(scores[0][0])\r\n print(\"num detections\")\r\n print(num)\r\n \r\n box_index = -1\r\n # get number of items detected - 1\r\n for i in classes[0][0]:\r\n if i == 48 or i == 49 or i == 50:\r\n box_index = box_index + 1\r\n # need to figure out how to deal with misidentified objects\r\n \r\n # get first detected object \r\n first_detected = classes[0][box_index]\r\n \r\n #object with the largest boxes[][][] value gets priority\r\n largest_val = boxes[0][box_index][2]\r\n \r\n for i in boxes[0][box_index]:\r\n if boxes[0][box_index][2] > largest_val:\r\n first_detected = classes[0][box_index]\r\n largest_val = boxes[0][box_index][2]\r\n # decrement box_index\r\n box_index = box_index - 1\r\n #don't need to access array element [-1]\r\n if box_index = -1:\r\n break\r\n \"\"\"\r\n #idea 2\r\n \r\n cnt = 0\r\n for i in classes[0]:\r\n if i != 48 and i != 49 and i != 50 and i != 1:\r\n classes[0][cnt] = 1\r\n cnt += 1\r\n \r\n #following logic not necessary if objects are spaced at proper intervals\r\n \r\n first_detected = classes[0][0] #first detected defaults to first element in array\r\n largest_val = boxes[0][0][2] #largest value defaults to first element in array\r\n \r\n counter = 0\r\n \r\n for i in classes[0]:\r\n if i == 48 or i ==49 or i == 50:\r\n if boxes[0][counter][2] > largest_val:\r\n print(\"reached if statement\")\r\n print(\"largest value: \")\r\n print(boxes[0][counter][2])\r\n print()\r\n largest_val = boxes[0][counter][2]\r\n first_detected = i\r\n counter = counter + 1\r\n \r\n print(\"first detected: \")\r\n print(first_detected)\r\n \r\n wait_flag = 0\r\n \r\n if 48 == classes[0][0] and scores[0][0] > 0.4:\r\n if previous_pw != 1.8:\r\n SetPW(1.8)\r\n #SetPW(0)\r\n previous_pw = 1.8\r\n print(\"found fork\")\r\n print()\r\n wait_flag = 1\r\n f_counter = f_counter + 1\r\n \r\n elif 49 == classes[0][0] and scores[0][0] > 0.4:\r\n if previous_pw != 1.5: \r\n SetPW(1.5)\r\n #SetPW(0)\r\n previous_pw = 1.5\r\n print(\"found knife\")\r\n print()\r\n wait_flag = 1\r\n k_counter = k_counter + 1\r\n \r\n \r\n elif 50 == classes[0][0] and scores[0][0] > 0.4:\r\n if previous_pw != 1.2:\r\n SetPW(1.2)\r\n #etPW(0)\r\n previous_pw = 1.2\r\n print(\"found spoon\")\r\n print()\r\n wait_flag = 1\r\n s_counter = s_counter + 1\r\n \r\n else:\r\n if previous_pw != 2.2:\r\n SetPW(2.2)\r\n #SetPW(0)\r\n previous_pw = 2.2\r\n print(\"overflow bin\")\r\n print()\r\n #wait_flag = 1\r\n\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n frame,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8,\r\n min_score_thresh=0.40)\r\n\r\n #cv2.putText(frame,\"FPS: {0:.2f}\".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)\r\n cv2.putText(frame,f\"fork count: {f_counter}\", (20, 100),font,1,(255, 255, 0),2,cv2.LINE_AA)\r\n 
cv2.putText(frame,f\"knife count: {k_counter}\", (20, 200),font,1,(255, 255, 0),2,cv2.LINE_AA)\r\n cv2.putText(frame,f\"spoon count: {s_counter}\", (20, 300),font,1,(255, 255, 0),2,cv2.LINE_AA)\r\n\r\n \r\n # All the results have been drawn on the frame, so it's time to display it.\r\n cv2.imshow('Object detector', frame)\r\n\r\n t2 = cv2.getTickCount()\r\n time1 = (t2-t1)/freq\r\n frame_rate_calc = 1/time1\r\n \r\n if wait_flag == 1:\r\n sleep(5)\r\n \r\n # Press 'q' to quit\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\n rawCapture.truncate(0)\r\n\r\n camera.close()\r\n\r\ncv2.destroyAllWindows()\r\n\r\n", "repo_name": "Nbrandt45/Visual_Recognition", "sub_path": "Object_detection_picamera.py", "file_name": "Object_detection_picamera.py", "file_ext": "py", "file_size_in_byte": 10692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 38, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 38, "usage_type": "name"}, {"api_name": "RPi.GPIO.BOARD", "line_number": 38, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 39, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 39, "usage_type": "name"}, {"api_name": "RPi.GPIO.setup", "line_number": 40, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 40, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PWM", "line_number": 41, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 41, "usage_type": "name"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 43, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 43, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "utils.label_map_util.load_labelmap", "line_number": 95, "usage_type": "call"}, {"api_name": "utils.label_map_util", "line_number": 95, "usage_type": "name"}, {"api_name": "utils.label_map_util.convert_label_map_to_categories", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.label_map_util", "line_number": 96, "usage_type": "name"}, {"api_name": "utils.label_map_util.create_category_index", "line_number": 97, "usage_type": "call"}, {"api_name": "utils.label_map_util", "line_number": 97, "usage_type": "name"}, {"api_name": "tensorflow.Graph", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.GraphDef", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorflow.import_graph_def", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.getTickFrequency", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 131, "usage_type": 
"attribute"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 137, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 138, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 141, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 142, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 144, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 145, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 146, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 148, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 149, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 151, "usage_type": "call"}, {"api_name": "picamera.PiCamera", "line_number": 158, "usage_type": "call"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 161, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 172, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 247, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 257, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 268, "usage_type": "call"}, {"api_name": "Object_detection_servo_GPIO.SetPW", "line_number": 278, "usage_type": "call"}, {"api_name": "utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 285, "usage_type": "call"}, {"api_name": "utils.visualization_utils", "line_number": 285, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 288, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 289, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 296, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 296, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 297, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 297, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 298, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 298, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 302, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 304, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 309, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 312, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "5632660193", "text": "from django.shortcuts import render\nimport uuid\nimport dialogflow\nimport json\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nimport re\n\n# 요청 url 인 bbs/번호 에 대해서 urls.py 에 정의된 view.bbs_detail 이 호출된다\n@api_view(['GET', 'PUT', 'DELETE'])\ndef df_result(request, text, format=None):\n\n if request.method == 'GET':\n\n project_id = 'restfultest-54056'\n session_id = str(uuid.uuid4())\n result = detect_intent_texts(project_id, session_id, text, 
'en-US')\n\n return Response(result)\n\n\ndef detect_intent_texts(project_id, session_id, text, language_code):\n \"\"\"Returns the result of detect intent with texts as inputs.\n\n Using the same `session_id` between requests allows continuation\n of the conversaion.\"\"\"\n\n session_client = dialogflow.SessionsClient()\n\n session = session_client.session_path(project_id, session_id)\n print('Session path: {}\\n'.format(session))\n\n\n text_input = dialogflow.types.TextInput(\n text=text, language_code=language_code)\n\n query_input = dialogflow.types.QueryInput(text=text_input)\n\n response = session_client.detect_intent(\n session=session, query_input=query_input)\n\n\n resposeDic = {\n \"query_text\" : response.query_result.query_text,\n \"query_text\": response.query_result.query_text,\n \"action\" : response.query_result.action,\n \"intent_detection_confidence\" : response.query_result.intent_detection_confidence,\n \"display_name\" : response.query_result.intent.display_name,\n }\n\n paramDic = {}\n for param in response.query_result.parameters:\n paramDic[param] = response.query_result.parameters[param]\n\n resposeDic[\"parameters\"] = paramDic\n\n jsonString = json.dumps(resposeDic)\n jsonString = jsonString.replace(\"\\\\\", \"\")\n print (jsonString)\n return resposeDic\n\n# Create your views here.", "repo_name": "wooccy/django", "sub_path": "df/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1917, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "uuid.uuid4", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 10, "usage_type": "call"}, {"api_name": "dialogflow.SessionsClient", "line_number": 28, "usage_type": "call"}, {"api_name": "dialogflow.types.TextInput", "line_number": 34, "usage_type": "call"}, {"api_name": "dialogflow.types", "line_number": 34, "usage_type": "attribute"}, {"api_name": "dialogflow.types.QueryInput", "line_number": 37, "usage_type": "call"}, {"api_name": "dialogflow.types", "line_number": 37, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "24794958664", "text": "from pyspark.sql.types import *\nfrom pyspark.sql import SparkSession\nfrom fbprophet import Prophet\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\nfrom pyspark.sql.functions import current_date\nfrom pyspark.sql.types import *\nfrom pyspark import SparkContext\n\nimport pickle\n\n\n# Create Spark session\nspark = SparkSession.builder.getOrCreate()\n\nprint(\"Se declara el esquema\")\n\n# structure of the training data set\ntrain_schema = StructType([\n StructField('date', DateType()),\n StructField('store', IntegerType()),\n StructField('item', IntegerType()),\n StructField('sales', IntegerType())\n ])\n\nprint(train_schema)\nprint(\"Se lee el archivo train.csv pasandole el esquema\")\n\n# read the training file into a dataframe\ntrain = spark.read.csv(\n 'hdfs://hdfs:9000/train.csv', \n header=True, \n schema=train_schema\n)\n\ntrain.show()\n\nprint(\"Se crea la vista temporal train para poder consultarla desde sql\")\ntrain.createOrReplaceTempView('train')\n\nsql_statement = '''\n SELECT\n store,\n item,\n CAST(date as date) as ds,\n SUM(sales) as y\n FROM train\n GROUP BY store, item, ds\n ORDER BY store, item, ds\n 
'''\n\nprint(sql_statement)\n\nstore_item_history = (\n spark\n .sql( sql_statement )\n .repartition(spark.sparkContext.defaultParallelism, ['store', 'item'])\n ).cache()\n\n\nprint(\"Se define el esquema que nos devolvera la UDF, con las predicciones hechas\")\nresult_schema =StructType([\n StructField('ds',DateType()),\n StructField('store',IntegerType()),\n StructField('item',IntegerType()),\n StructField('y',FloatType()),\n StructField('yhat',FloatType()),\n StructField('yhat_upper',FloatType()),\n StructField('yhat_lower',FloatType()),\n StructField('interval_width',FloatType()),\n StructField('growth',StringType()),\n StructField('daily_seasonality',BooleanType()),\n StructField('weekly_seasonality',BooleanType()),\n StructField('seasonality_mode',StringType()),\n StructField('yearly_seasonality',BooleanType()),\n ])\n\n\n@pandas_udf( result_schema, PandasUDFType.GROUPED_MAP )\ndef forecast_store_item( history_pd ):\n \n # TRAIN MODEL AS BEFORE\n # --------------------------------------\n # remove missing values (more likely at day-store-item level)\n history_pd = history_pd.dropna()\n \n # configure the model\n model = Prophet(\n interval_width=0.95,\n growth='linear',\n daily_seasonality=False,\n weekly_seasonality=True,\n yearly_seasonality=True,\n seasonality_mode='multiplicative'\n )\n \n # train the model\n model.fit( history_pd )\n # --------------------------------------\n \n # BUILD FORECAST AS BEFORE\n # --------------------------------------\n # make predictions\n future_pd = model.make_future_dataframe(\n periods=90, \n freq='d', \n include_history=True\n )\n forecast_pd = model.predict( future_pd ) \n\n print(\"hdfs://hdfs:9000/modelo_entrenado_{0}_{1}.pickle\".format(history_pd['store'].iloc[0],history_pd['item'].iloc[0]))\n\n\n #with open(\"/usr/local/spark/resources/modelos/modelo_entrenado_{0}_{1}.pickle\".format(history_pd['store'].iloc[0],history_pd['item'].iloc[0]), \"wb\") as f:\n # pickle.dump(model, f)\n\n\n # --------------------------------------\n \n # ASSEMBLE EXPECTED RESULT SET\n # --------------------------------------\n # get relevant fields from forecast\n f_pd = forecast_pd[ ['ds','yhat', 'yhat_upper', 'yhat_lower'] ].set_index('ds')\n \n # get relevant fields from history\n h_pd = history_pd[['ds','store','item','y']].set_index('ds')\n \n # join history and forecast\n results_pd = f_pd.join( h_pd, how='left' )\n results_pd.reset_index(level=0, inplace=True)\n \n # get store & item from incoming data set\n results_pd['store'] = history_pd['store'].iloc[0]\n results_pd['item'] = history_pd['item'].iloc[0]\n # --------------------------------------\n\n results_pd['interval_width'] = 0.95\n results_pd['growth'] = 'linear'\n results_pd['daily_seasonality'] = False\n results_pd['weekly_seasonality'] = True\n results_pd['yearly_seasonality'] = True\n results_pd['seasonality_mode'] = 'multiplicative'\n\n # return expected dataset\n return results_pd[ ['ds', 'store', 'item', 'y', 'yhat', 'yhat_upper', 'yhat_lower', 'interval_width','growth',\n 'daily_seasonality','weekly_seasonality','yearly_seasonality','seasonality_mode'] ] \n\n\nprint(\"Se ejecuta la UDF por cada combinacion articulo-tienda\")\nresults = (\n store_item_history\n .groupBy('store', 'item')\n .apply(forecast_store_item)\n .withColumn('training_date', current_date() )\n )\n\nprint(\"El resultado de las predicciones se almacenan en una vista temporal\")\nresults.createOrReplaceTempView('new_forecasts')\nresults.show()\nprint(\"SE GUARDA EN HDFS EL NEW_FORECAST\")\n\ndf = spark.sql(\"SELECT * 
FROM new_forecasts\")\ndf.show()\ndf.coalesce(1).write.mode(\"overwrite\").format(\"parquet\").save(\"hdfs://hdfs:9000/new_forecasts\")\n\n\n", "repo_name": "cesarpazguzman/spark-airflow-databricks", "sub_path": "cluster/AIRFLOW/dags/spark/app/construccion_modelo.py", "file_name": "construccion_modelo.py", "file_ext": "py", "file_size_in_byte": 4800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "pyspark.sql.SparkSession.builder.getOrCreate", "line_number": 13, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 13, "usage_type": "name"}, {"api_name": "fbprophet.Prophet", "line_number": 87, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.pandas_udf", "line_number": 78, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.PandasUDFType.GROUPED_MAP", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pyspark.sql.functions.PandasUDFType", "line_number": 78, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.current_date", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "69943084403", "text": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import render, get_object_or_404,redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom .models import User,ufid,logger\nfrom django.utils import timezone\nfrom .slack_post import slack\nimport json\nfrom datetime import date, datetime\n\n# date, datetimeの変換関数\ndef json_serial(obj):\n # 日付型の場合には、文字列に変換します\n if isinstance(obj, (datetime, date)):\n return str(obj.year) +'/'+ str(obj.month) +'/'+ str(obj.day) +'---'+ str(obj.hour) +':'+ str(obj.minute)\n # 上記以外はサポート対象外.\n raise TypeError (\"Type %s not serializable\" % type(obj))\n\n@login_required\ndef index(request):\n users = User.objects.all()\n return render(request, 'logsys/state_list.html', {'users': users})\n\ndef ufid_scan(tag):\n uuuu = get_object_or_404(ufid, tag=tag)\n user = get_object_or_404(User, pk=uuuu.user_id.pk)\n print(uuuu.tag,user.email)\n\ndef stateupdate(request,pk):\n print('test')\n ufid_scan(tag='111,111,111,111')\n print('test end')\n user = get_object_or_404(User, pk=pk)\n \n if user.login_or_out:\n user.login_or_out = False\n slack.notify(text = user.first_name+\"がログアウトしました\")\n user.logout_date = timezone.now()\n log = logger()\n log.user_id = user\n log.login_date = user.login_date\n log.logout_date = user.logout_date\n log.set_minute()\n log.set_hour()\n log.save()\n str = json.dumps(timezone.now() + timezone.timedelta(days=1) - timezone.timedelta(hours=15), default=json_serial)\n slack.notify(text = str)\n else:\n user.login_or_out = True\n slack.notify(text = user.first_name+\"がログインしました\")\n user.login_date = timezone.now()\n str = json.dumps(timezone.now() + timezone.timedelta(days=1) - timezone.timedelta(hours=15), default=json_serial)\n slack.notify(text = str)\n user.save()\n users = User.objects.all()\n return render(request,'logsys/state_list.html', {'users': users})\n\n", "repo_name": "YaCpotato/Django-RFID-register-API", "sub_path": "logsys/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 14, 
"usage_type": "name"}, {"api_name": "models.User.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 25, "usage_type": "call"}, {"api_name": "models.ufid", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 26, "usage_type": "call"}, {"api_name": "models.User", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 33, "usage_type": "call"}, {"api_name": "models.User", "line_number": 33, "usage_type": "argument"}, {"api_name": "slack_post.slack.notify", "line_number": 37, "usage_type": "call"}, {"api_name": "slack_post.slack", "line_number": 37, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 38, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 38, "usage_type": "name"}, {"api_name": "models.logger", "line_number": 39, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 46, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 46, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "slack_post.slack.notify", "line_number": 47, "usage_type": "call"}, {"api_name": "slack_post.slack", "line_number": 47, "usage_type": "name"}, {"api_name": "slack_post.slack.notify", "line_number": 50, "usage_type": "call"}, {"api_name": "slack_post.slack", "line_number": 50, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 51, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 52, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 52, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 52, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 52, "usage_type": "call"}, {"api_name": "slack_post.slack.notify", "line_number": 53, "usage_type": "call"}, {"api_name": "slack_post.slack", "line_number": 53, "usage_type": "name"}, {"api_name": "models.User.objects.all", "line_number": 55, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 55, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "23880051921", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 14 09:25:55 2018\r\n\r\n@author: admin\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 14 20:56:07 2018\r\n\r\n@author: admin\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom matplotlib.colors import LogNorm\r\nimport seaborn as sns\r\nimport xml.dom.minidom as x\r\nfrom xml.dom.minidom import parse,parseString\r\nfrom collections import Counter\r\noctave = [1,2,3,4,5,6,7]\r\nstep 
= [\"C\",\"C1\",\"D-1\",\"D\",\"D1\",\"E-1\",\"E\",\"F\",\"F1\",\"G-1\",\"G\",\"G1\",\"A-1\",\"A\",\"A1\",\"B-1\",\"B\"]\r\npitch = []\r\npitch.append(\"0A\")\r\npitch.append(\"0A1\")\r\npitch.append(\"0B-1\")\r\npitch.append(\"0B\")\r\nfor i in octave:\r\n for j in step:\r\n pitch.append(str(i)+j)\r\n#pitch.remove('A7')\r\n#pitch.remove('B7')\r\npitch.append('8C')\r\npitch.append('rest') ## 110 nodes, according to pinao keyboard\r\n\r\npitch_m = []\r\nfor i in pitch:\r\n if \"-1\" in i:\r\n continue\r\n else:\r\n pitch_m.append(i) ##only 89 nodes in pitch space\r\nnote = [\"32nd\",\"16th\",\"eighth\",\"quarter\",\"half\",\"whole\"]\r\n\r\n#### get score sequence from xml files\r\ndef get_score(content):\r\n Note = content.getElementsByTagName(\"note\")\r\n #get the text from the Text Node within the ,\r\n #and convert it from unicode to ascii\r\n t1 = []\r\n t2 = []\r\n t3 = []\r\n t4 = []\r\n t5 = []\r\n t6 = []\r\n for n in Note:\r\n try:\r\n step = n.getElementsByTagName(\"step\")\r\n t1.append(str(step[0].childNodes[0].nodeValue))\r\n except:\r\n t1.append(str(\"rest\"))\r\n try:\r\n octave = n.getElementsByTagName(\"octave\")\r\n t2.append(str(octave[0].childNodes[0].nodeValue))\r\n except:\r\n t2.append(str(\"\"))\r\n try:\r\n alter = n.getElementsByTagName(\"alter\")\r\n t5.append(str(alter[0].childNodes[0].nodeValue))\r\n except:\r\n t5.append(str(\"\"))\r\n try:\r\n duration = n.getElementsByTagName(\"duration\")\r\n t3.append(str(duration[0].childNodes[0].nodeValue))\r\n except:\r\n t3.append(str(\"\"))\r\n try:\r\n type_ = n. getElementsByTagName(\"type\")\r\n t4.append(str(type_[0].childNodes[0].nodeValue))\r\n except:\r\n t4.append(str(\"rest\")) \r\n \r\n chord = n.getElementsByTagName(\"chord\")\r\n if chord==[]:\r\n t6.append(0)\r\n else:\r\n t6.append(1) \r\n Matri = pd.DataFrame(np.vstack((t1,t2,t3,t4,t5,t6)))\r\n for i in range(0,len(t1)):\r\n Matri.loc[6,i] = str(Matri.iloc[1,i])+str(Matri.iloc[0,i])+str(Matri.iloc[4,i])\r\n for i in range(0,len(t1)):\r\n if \"-1\" in Matri.loc[6][i]:\r\n Matri.loc[6][i] = pitch[pitch.index(Matri.loc[6][i])-1] \r\n for i in range(0,len(t1)): \r\n if Matri.loc[6][i] not in pitch_m:\r\n pitch_exist = Matri.loc[6][i][0:2]\r\n switch = 1 \r\n Matri.loc[6][i] = pitch_m[pitch_m.index(pitch_exist)+switch]\r\n for i in range(0,len(t2)):\r\n if Matri.loc[3][i]=='rest':\r\n del Matri[i] \r\n Matri.columns = range(0,Matri.shape[1])\r\n return Matri\r\n\r\n\r\n\r\n\r\ndef get_frequency(content,musicname):\r\n score = get_score(content)\r\n #num_octave = [] \r\n #num_step = [] \r\n feature = []\r\n for h in pitch_m:\r\n feature.append(list(score.loc[6]).count(h))\r\n length = sum(feature)\r\n for b in note:\r\n feature.append(list(score.loc[3]).count(h))\r\n feature.append(length)\r\n return feature\r\n \r\n## calculate the data we use for SNB method\r\ndef get_matrix(content,musicname):\r\n matrix_pitch = pd.DataFrame(0,columns=pitch_m,index = pitch_m) \r\n matrix_type = pd.DataFrame(0,columns = note,index = note)\r\n pitch_num = []\r\n beat_num = []\r\n score = get_score(content)\r\n \r\n length_effective = 0\r\n first_note = [score.loc[6,0],score.loc[3,0]] \r\n for i in range(0,score.shape[1]-1):\r\n j = i+1\r\n pitch_now = score.loc[6,i]\r\n pitch_follow = score.loc[6,j]\r\n \r\n if pitch_now==\"rest\" and pitch_follow == \"rest\":\r\n continue\r\n else: \r\n length_effective = length_effective+1\r\n matrix_pitch.ix[pitch_now,pitch_follow] = matrix_pitch.ix[pitch_now,pitch_follow]+1\r\n type_now = score.loc[3,i]\r\n type_follow = 
score.loc[3,j]\r\n matrix_type.ix[type_now,type_follow] = matrix_type.ix[type_now,type_follow]+1 \r\n for i in range(0,matrix_pitch.shape[1]):\r\n num = sum(matrix_pitch.iloc[i])\r\n pitch_num.append(num)\r\n \r\n for i in range(0,matrix_type.shape[1]):\r\n num = sum(matrix_type.iloc[i])\r\n beat_num.append(num)\r\n \r\n return matrix_pitch,matrix_type,length_effective,first_note,pitch_num,beat_num\r\n\r\n\r\n\r\nfilepath_1 = \"D:\\\\xml\\\\jazz\"\r\nfilename_1 = os.listdir(filepath_1)\r\nfilepath_2 = \"D:\\\\xml\\\\folk\"\r\nfilename_2 = os.listdir(filepath_2)\r\nfilepath_3 = \"D:\\\\xml\\\\classic\"\r\nfilename_3 = os.listdir(filepath_3)\r\n\r\n\r\n\r\ncol=[]\r\nfor i in range(0,89*89+6*6+1+2+1+1+89+6): # add Length, First pitch, First beat,genre,music name\r\n col.append(str(i))\r\n\r\ndata = pd.DataFrame(columns = col)\r\n\r\nindex = 0\r\n\r\nfor file in filename_1:\r\n try:\r\n content = x.parse(filepath_1+\"\\\\\"+str(file))\r\n music_name = file.split('.xml')[0]\r\n # score_tem = get_score(content)\r\n m_pitch, m_type,length_effective,first_note,pitch_num,beat_num = get_matrix_chord_2(content,music_name)\r\n m1 = m_pitch.values.reshape(89*89)\r\n m2 = m_type.values.reshape(6*6)\r\n m = np.hstack((m1,m2))\r\n m = list(m)\r\n m.append(length_effective)\r\n m.append(first_note[0])\r\n m.append(first_note[1])\r\n m.append(\"jazz\")\r\n m.append(music_name)\r\n m = m + pitch_num\r\n m = m + beat_num\r\n data.loc[index] = m\r\n index = index+1\r\n print(file+str(1))\r\n except:\r\n print(\"failed\"+file)\r\n continue\r\n\r\nfor file in filename_2:\r\n try:\r\n content = x.parse(filepath_2+\"\\\\\"+str(file))\r\n music_name = file.split('.xml')[0]\r\n # score_tem = get_score(content)\r\n m_pitch, m_type,length_effective,first_note,pitch_num,beat_num = get_matrix_chord_2(content,music_name)\r\n m1 = m_pitch.values.reshape(89*89)\r\n m2 = m_type.values.reshape(6*6)\r\n m = np.hstack((m1,m2))\r\n m = list(m)\r\n m.append(length_effective)\r\n m.append(first_note[0])\r\n m.append(first_note[1])\r\n m.append(\"folk\")\r\n m.append(music_name)\r\n m = m + pitch_num\r\n m = m + beat_num\r\n data.loc[index] = m\r\n \r\n index = index+1\r\n print(file+str(2))\r\n except:\r\n print(\"failed\"+file)\r\n continue\r\n\r\nfor file in filename_3:\r\n try: \r\n content = x.parse(filepath_3+\"\\\\\"+str(file))\r\n music_name = file.split('.xml')[0]\r\n # score_tem = get_score(content)\r\n m_pitch, m_type,length_effective,first_note,pitch_num,beat_num = get_matrix_chord_2(content,music_name)\r\n m1 = m_pitch.values.reshape(89*89)\r\n m2 = m_type.values.reshape(6*6)\r\n m = np.hstack((m1,m2))\r\n m = list(m)\r\n m.append(length_effective)\r\n m.append(first_note[0])\r\n m.append(first_note[1])\r\n m.append(\"classic\")\r\n m.append(music_name)\r\n m = m + pitch_num\r\n m = m + beat_num\r\n data.loc[index] = m\r\n index = index+1\r\n print(file+str(3))\r\n except:\r\n print(\"failed\"+file)\r\n continue\r\n \r\ndata.to_csv(\"E:/data_newscore_withnum.csv\",index = False)\r\n### including transitions and marginal frequency\r\n\r\n\r\n\r\n\r\n", "repo_name": "rtngsm/SequenceMusicScore", "sub_path": "get_score.py", "file_name": "get_score.py", "file_ext": "py", "file_size_in_byte": 7685, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": 
"call"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 161, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 163, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 165, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parse", "line_number": 179, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 179, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 185, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parse", "line_number": 203, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 203, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 209, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parse", "line_number": 228, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 234, "usage_type": "call"}]} +{"seq_id": "2620686029", "text": "import logging\nimport urllib\nfrom scrapy import Request\n\nfrom spider.items import ZhiLianItem\nfrom spider.spiders.BaseSpider import BaseSpider\nfrom util import cy_logger as logger\n\n\n# 智联招聘\nclass ZhiLianSpider(BaseSpider):\n name = \"zhi_lian\"\n allowed_domains = [\"zhaopin.com\"]\n company_url = \"https://m.zhaopin.com/all-489/?keyword={}&pageindex={}\"\n\n # 自定义设置\n custom_settings = {\n \"LOG_LEVEL\": logging.WARN,\n # \"DOWNLOAD_DELAY\": 1,\n 'ITEM_PIPELINES': {\n 'spider.pipelines.ZhiLianSpiderPipeline': 100,\n },\n }\n\n def __init__(self, *a, **kw):\n super(ZhiLianSpider, self).__init__(*a, **kw)\n\n def start_requests(self):\n current_page = 1\n result = self.query_company_page(current_page)\n pages = result.get(\"pages\")\n logger.log(\"公司总页数:\" + str(pages))\n while current_page <= pages:\n result = self.query_company_page(current_page)\n for row in result.get(\"rows\"):\n full_name = row[0]\n name = row[1]\n if full_name is not None:\n yield Request(self.company_url.format(full_name, 1), dont_filter=True)\n else:\n yield Request(self.company_url.format(name, 1), dont_filter=True)\n current_page += 1\n\n def parse(self, response):\n # 解析数据\n job_list = response.xpath(\"//section[@class='job-list ']\")\n for job in job_list:\n item = ZhiLianItem()\n item[\"link\"] = job.xpath(\"./a/@data-link\").extract_first()\n item[\"company_name\"] = job.xpath(\"./a//div[@class='comp-name fl']/text()\").extract_first()\n item[\"job_name\"] = job.xpath(\"./a//div[@class='job-name fl ']/text()\").extract_first()\n item[\"location\"] = job.xpath(\"./a//span[@class='ads']/text()\").extract_first()\n item[\"salary\"] = job.xpath(\"./a//div[@class='job-sal fr']/div[@class='fl']/text()\").extract_first()\n item[\"release_time\"] = job.xpath(\"./a//div[@class='time fr']/text()\").extract_first()\n item[\"platform\"] = '智联招聘' # 发布平台\n if item[\"link\"] is not None:\n # 加载详细页\n item[\"link\"] = urllib.parse.urljoin(response.url, item[\"link\"])\n yield Request(item[\"link\"], callback=self.parse_detail, meta={\"item\": item})\n # 加载下一页\n next_page = response.xpath(\"//div[@class='j_page']//a[@class='nextpage']/@href\").extract_first()\n if next_page is not None:\n next_page = urllib.parse.urljoin(response.url, next_page)\n yield Request(next_page, callback=self.parse)\n\n def parse_detail(self, response):\n item = response.meta['item']\n item[\"education\"] = response.xpath(\n 
\"//div[@class='about-position']/div[@class='job-detail']/div[@class='box1 fl']/span[3]/text()\").extract_first().replace(\n \"\\n\", \"\").strip()\n if len(item[\"education\"]) == 0:\n item[\"education\"] = \"学历不限\"\n item[\"years\"] = response.xpath(\n \"//div[@class='about-position']//span[@class='exp']/text()\").extract_first().replace(\"\\n\", \"\").strip()\n if len(item[\"years\"]) == 0:\n item[\"years\"] = \"经验不限\"\n yield item\n\n # 获取公司列表\n def query_company_page(self, page_no=1):\n return self.select_rows_paper(\n sql=\"SELECT `full_name`,`name` FROM `yx_project`\",\n page_no=page_no,\n page_size=20\n )\n", "repo_name": "zuolinlin/spider", "sub_path": "spider/spiders/zhilian/ZhiLianSpider.py", "file_name": "ZhiLianSpider.py", "file_ext": "py", "file_size_in_byte": 3570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "spider.spiders.BaseSpider.BaseSpider", "line_number": 11, "usage_type": "name"}, {"api_name": "logging.WARN", "line_number": 18, "usage_type": "attribute"}, {"api_name": "util.cy_logger.log", "line_number": 32, "usage_type": "call"}, {"api_name": "util.cy_logger", "line_number": 32, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 39, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 41, "usage_type": "call"}, {"api_name": "spider.items.ZhiLianItem", "line_number": 48, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 58, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 58, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 59, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 63, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 63, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "18586602601", "text": "\n# coding: utf-8\n\n# In[1]:\n\ndef return_digits_only(input_str):\n output_str = ''.join([i for i in input_str if i.isdigit()])\n \n return output_str\n\n\n# In[2]:\n\ndef return_date_posted_appl(input_date_str):\n \n date_str = input_date_str.strip('Posted: ')\n \n return date_str\n\ndef get_apple_job_data(html):\n \n from bs4 import BeautifulSoup\n \n job_data_dict = {'job_title':'', 'ref_number':'', 'date_posted':'', 'location':''}\n\n soup = BeautifulSoup(html, 'html.parser')\n \n data_table = soup.find(class_=\"sosumi\").find_all('li')\n \n job_data_dict['job_title'] = soup.h2.get_text()\n job_data_dict['ref_number'] = return_digits_only(data_table[0].get_text())\n job_data_dict['date_posted'] = return_date_posted_appl(data_table[2].get_text())\n job_data_dict['location'] = data_table[1].get_text()\n \n return job_data_dict\n\n\n# In[3]:\n\ndef get_intui_surgic_job_data(html):\n \n from bs4 import BeautifulSoup\n \n job_data_dict = {'job_title':'', 'ref_number':'', 'date_posted':'', 'location':''}\n \n soup = BeautifulSoup(html, 'html.parser')\n \n job_data_dict['job_title'] = soup.find(id='requisitionDescriptionInterface.reqTitleLinkAction.row1').get_text()\n job_data_dict['ref_number'] = soup.find(id='requisitionDescriptionInterface.reqContestNumberValue.row1').get_text()\n job_data_dict['date_posted'] = ''\n job_data_dict['location'] = soup.find(id='requisitionDescriptionInterface.ID1614.row1').get_text()\n \n return job_data_dict\n\n\n# In[4]:\n\ndef get_udacity_job_data(html):\n \n from bs4 import BeautifulSoup\n \n job_data_dict = {'job_title':'', 
'ref_number':'', 'date_posted':'', 'location':''}\n \n soup = BeautifulSoup(html, 'html.parser')\n \n job_data_dict['job_title'] = soup.find(class_=\"posting-headline\").h2.get_text()\n job_data_dict['ref_number'] = ''\n job_data_dict['date_posted'] = ''\n job_data_dict['location'] = soup.find(class_=\"sort-by-time posting-category medium-category-label\").get_text()\n \n return job_data_dict\n\n\n# In[5]:\n\ndef get_tesla_motors_job_data(html):\n \n from bs4 import BeautifulSoup\n \n job_data_dict = {'job_title':'', 'ref_number':'', 'date_posted':'', 'location':''}\n \n soup = BeautifulSoup(html, 'html.parser')\n \n job_data_dict['job_title'] = soup.h1.get_text()\n job_data_dict['ref_number'] = soup.find_all(class_=\"formFieldNormal top\")[0].get_text()\n job_data_dict['date_posted'] = ''\n job_data_dict['location'] = soup.find_all(class_=\"formFieldNormal top\")[1].get_text()\n \n return job_data_dict\n\n\n# In[37]:\n\ndef get_google_job_data(html):\n \n from bs4 import BeautifulSoup\n \n job_data_dict = {'job_title':'', 'ref_number':'', 'date_posted':'', 'location':''}\n \n soup = BeautifulSoup(html, 'html.parser')\n \n job_data_dict['job_title'] = soup.find(itemprop=\"name title\").get_text()\n job_data_dict['ref_number'] = ''\n job_data_dict['date_posted'] = soup.find(itemprop=\"datePosted\").get_text()\n job_data_dict['location'] = soup.find(itemprop=\"name\").get_text()\n \n return job_data_dict\n\n\n# In[38]:\n\ndef get_job_data(jobs_df):\n \"\"\"\n Update jobs_df with the fields job_title, ref_number, date_posted, and location.\n These values will be scraped from the field job_desc_html. How the data is\n scraped will depend on the layout of the html. html layout is dependent on the \n company.\n \"\"\"\n import pandas as pd\n \n job_data_list = []\n \n for i in range(len(jobs_df['job_desc_html'])):\n \n job_data_dict = {'job_title':'', 'ref_number':'', 'date_posted':'', 'location':''}\n \n if(jobs_df.iloc[i]['company'] == 'Apple'):\n job_data_dict = get_apple_job_data(jobs_df.iloc[i]['job_desc_html'])\n \n elif(jobs_df.iloc[i]['company'] == 'Intuitive Surgical'):\n job_data_dict = get_intui_surgic_job_data(jobs_df.iloc[i]['job_desc_html'])\n \n elif(jobs_df.iloc[i]['company'] == 'Udacity'):\n job_data_dict = get_udacity_job_data(jobs_df.iloc[i]['job_desc_html'])\n \n elif(jobs_df.iloc[i]['company'] == 'Tesla Motors'):\n job_data_dict = get_tesla_motors_job_data(jobs_df.iloc[i]['job_desc_html'])\n \n elif(jobs_df.iloc[i]['company'] == 'Google'):\n job_data_dict = get_google_job_data(jobs_df.iloc[i]['job_desc_html'])\n \n job_data_list.append(job_data_dict)\n \n return pd.DataFrame(job_data_list)\n\n\n# In[39]:\n\ndef load_jobs_from_html(job_path):\n \n from bs4 import BeautifulSoup\n import pandas as pd\n\n # Use Beautiful soup constructor to parse through the html and build a more organized data structure. 
\n try:\n html = open(job_path, encoding=\"utf8\")\n except:\n raise\n else:\n html = open(job_path)\n \n soup = BeautifulSoup(html.read(), 'html.parser')\n html_list = []\n \n for child in soup.children:\n if(child.name):\n html_list.append((str(child),child.get_text()))\n \n return pd.DataFrame(html_list, columns=['job_desc_html', 'job_desc_text'])\n\n\n# In[40]:\n\nif __name__ == \"__main__\":\n \n import sql_pandas\n \n file_path = input('Please enter the file path of the file containing'\n ' the html you would like to add to the library.\\n')\n new_jobs_df = load_jobs_from_html(file_path)\n \n company = input('Please enter the company name associated with the file.\\n')\n new_jobs_df['company'] = company\n \n # Execute main content.\n print(get_job_data(new_jobs_df))\n\n\n# In[ ]:\n\n\n\n", "repo_name": "zoleitschuk/NeuralRecruiter", "sub_path": "Job Hunter/python_scripts/job_data.py", "file_name": "job_data.py", "file_ext": "py", "file_size_in_byte": 5622, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "75", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 46, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 64, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 82, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 144, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "41644021066", "text": "# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2023-02-16 15:34\n# @Author : 皆人\n# @File : get_yaml_data_analysis.py\n# @Software: PyCharm\nimport os.path\nfrom typing import Union,Text,Dict,List\n\nfrom utils.read_file_tools.yaml_control import GetYamlData\nfrom utils.others_tool.models import TestCase\nfrom utils.cache_process.cache_control import CacheHandler\nfrom utils.others_tool.exceptions import ValueNotFoundError\nfrom utils import config\n\nclass CaseData:\n\n def __init__(self, file_path):\n self.file_path = file_path\n def __new__(cls, file_path):\n if os.path.exists(file_path) is True:\n return object.__new__(cls)\n else:\n raise FileNotFoundError('用例地址未找到')\n\n def case_process(self, case_id_switch: Union[None, bool] = None):\n data_dict = GetYamlData(self.file_path).get_yaml_data()\n case_lists = []\n for key, values in data_dict.items():\n if key != 'case_common':\n case_date = {\n \"url\": self.get_host(case_id=key, case_data=values),\n \"method\": self.get_case_method(case_id=key, case_data=values),\n \"headers\": self.get_headers(case_id=key, case_data=values),\n \"detail\": self.get_case_detail(case_id=key, case_data=values),\n \"requestType\": self.get_request_type(case_id=key, case_data=values),\n \"data\": self.get_case_datas(case_id=key, case_data=values),\n \"is_run\": self.get_is_run(case_id=key, case_data=values),\n \"sql\": self.get_sql(case_id=key, case_data=values),\n \"teardown_sql\": self.teardown_sql(values),\n \"assert_data\": self.get_assert(case_id=key, case_data=values),\n 'dependence_case': self.get_dependence_case(case_id=key, case_data=values),\n 'dependence_case_data': self.get_dependence_case_data(case_id=key, case_data=values),\n \"current_request_set_cache\": self.get_current_request_set_cache(values),\n \"setup_sql\": 
self.setup_sql(values),\n \"teardown\": self.tear_down(values),\n \"sleep\": self.time_sleep(values),\n }\n if case_id_switch is True:\n case_lists.append({key: TestCase(**case_date).dict()})\n else:\n case_lists.append(TestCase(**case_date).dict())\n return case_lists\n\n def get_host(\n self,case_id: Text,\n case_data: Dict)->Text:\n try:\n\n _url =case_data['url']\n _host=case_data['host']\n if _url is None or _host is None:\n raise ValueNotFoundError(\n f\"用例中的url 或者 host不能为空!\\n\"\n f\"用例ID:{case_id}\\n\"\n f\"用例路径: {self.file_path}\"\n )\n return _host+_url\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(data_name=\"url或host\",case_id=case_id)\n ) from exc\n\n\n def get_case_method(self,\n case_id: Text,\n case_data: Dict)->Text:\n try:\n _case_method=case_data['method']\n _request_method=['GET','POST','PUT','DELETE','PATCH','HEAD','OPTION']\n if _case_method.upper() not in _request_method:\n raise ValueNotFoundError(\n f\" method 目前只支持{_request_method}请求方式,如需新增请联系管理员!\"\n f\"{self.raise_value_error(data_name='请求方式',case_id=case_id,detail=_case_method)}\"\n )\n return _case_method.upper()\n except AttributeError as exc:\n raise ValueNotFoundError(\n f\"method 目前只支持 {['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTION']} 请求方式,\"\n f\"如需新增请联系管理员! \"\n f\"{self.raise_value_error(data_name='请求方式', case_id=case_id, detail=case_data['method'])}\"\n ) from exc\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(data_name=\"method\", case_id=case_id)\n ) from exc\n\n @classmethod\n def get_current_request_set_cache(cls, case_data: Dict) -> Dict:\n \"\"\"将当前请求的用例数据存入缓存\"\"\"\n try:\n return case_data['current_request_set_cache']\n except KeyError:\n ...\n\n\n\n def get_headers(\n self,\n case_id:Text,\n case_data:Dict) ->Dict:\n try:\n _header =case_data['headers']\n return _header\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id,data_name=\"headers\")\n ) from exc\n\n\n def get_request_type(\n self,\n case_id: Text,\n case_data:Dict) ->Text:\n\n _types = ['JSON', 'PARAMS', 'FILE', 'DATA', \"EXPORT\", \"NONE\"]\n try:\n _request_type =str(case_data['requestType'])\n #判断用户填写的requesttype是否符合规范\n if _request_type.upper() not in _types:\n raise ValueNotFoundError(\n self.raise_value_error(\n data_name='requestType',\n case_id=case_id,\n detail=_request_type\n )\n )\n return _request_type.upper()\n except AttributeError as exc:\n raise ValueNotFoundError(\n self.raise_value_error(\n data_name='requestType',\n case_id=case_id,\n detail=case_data['requestType'])\n ) from exc\n\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id, data_name=\"requestType\")\n ) from exc\n\n\n def get_assert(\n self,\n case_id:Text,\n case_data:Dict):\n try:\n _assert =case_data['assert']\n if _assert is None:\n raise self.raise_value_error(data_name='assert',case_id=case_id,detail=_assert)\n return case_data['assert']\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id,data_name=\"assert\")\n ) from exc\n\n\n\n def get_sql(\n self,\n case_id:Text,\n case_data: Dict) ->[list,None]:\n try:\n _sql = case_data['sql']\n if config.mysql_db.switch and _sql is None:\n return None\n return case_data['sql']\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id,data_name='sql')\n )from exc\n\n @classmethod\n def setup_sql(cls, case_data: Dict) -> Union[list, None]:\n 
\"\"\"\n 获取前置sql,比如该条用例中需要从数据库中读取sql作为用例参数,则需填写setup_sql\n :return:\n \"\"\"\n try:\n _setup_sql = case_data['setup_sql']\n return _setup_sql\n except KeyError:\n return None\n\n @classmethod\n def tear_down(cls, case_data: Dict) -> Union[Dict, None]:\n \"\"\"\n 获取后置请求数据\n \"\"\"\n try:\n _teardown = case_data['teardown']\n return _teardown\n except KeyError:\n return None\n\n def get_case_detail(\n self,\n case_id:Text,\n case_data:Dict)->Text:\n try:\n return case_data['detail']\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id, data_name=\"detail\")\n ) from exc\n\n\n def get_case_datas(\n self,\n case_id:Text,\n case_data:Dict) ->Dict:\n try:\n _dates=case_data['data']\n return _dates\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id,data_name=\"data\")\n ) from exc\n\n def get_is_run(\n self,\n case_id:Text,\n case_data:Dict) ->Dict:\n try:\n return case_data['is_run']\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id, data_name='is_run')\n ) from exc\n\n\n def get_dependence_case(\n self,\n case_id: Text,\n case_data: Dict) ->Dict:\n try:\n _dependence_case =case_data['dependence_case']\n return _dependence_case\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id =case_id, data_name ='dependence_case')\n ) from exc\n\n\n # TODO 对 dependence_case_data 中的值进行验证\n def get_dependence_case_data(\n self,\n case_id: Text,\n case_data: Dict) -> Union[Dict, None]:\n \"\"\"\n 获取依赖的用例\n :return:\n \"\"\"\n # 判断如果该用例有依赖,则返回依赖数据,否则返回None\n if self.get_dependence_case(case_id=case_id, case_data=case_data):\n try:\n _dependence_case_data = case_data['dependence_case_data']\n # 判断当用例中设置的需要依赖用例,但是dependence_case_data下方没有填写依赖的数据,异常提示\n if _dependence_case_data is None:\n raise ValueNotFoundError(f\"dependence_case_data 依赖数据中缺少依赖相关数据!\"\n f\"如有填写,请检查缩进是否正确\"\n f\"用例ID: {case_id}\"\n f\"用例路径: {self.file_path}\")\n\n return _dependence_case_data\n except KeyError as exc:\n raise ValueNotFoundError(\n self.raise_value_null_error(case_id=case_id, data_name=\"dependence_case_data\")\n ) from exc\n else:\n return None\n\n\n\n def raise_value_error(\n self,\n data_name: Text,\n case_id: Text,\n detail: [Text, list, Dict]) -> Text:\n \"\"\"\n 所有用例填写不规范的异常提示\n :param data_name: 参数名称\n :param case_id: 用例ID\n :param detail: 参数内容\n :return:\n \"\"\"\n detail = f\"用例中的{data_name}填写不正确!\\n \" \\\n f\"用例ID: {case_id} \\n\" \\\n f\"用例路径: {self.file_path} \\n\" \\\n f\"当前填写的内容: {detail}\"\n\n return detail\n\n @classmethod\n def teardown_sql(\n cls,\n case_data: Union[list,Dict]) ->None:\n try:\n _teardown_sql =case_data['teardown_sql']\n return _teardown_sql\n except KeyError:\n return None\n\n\n def raise_value_null_error(\n self, data_name: Text,\n case_id: Text) -> Text:\n \"\"\"\n 用例中参数名称为空的异常提示\n :param data_name: 参数名称\n :param case_id: 用例ID\n :return:\n \"\"\"\n detail = f\"用例中未找到{data_name}参数,如已填写,请检查配置文件用例缩进,用例字段是否有空格,格式等问题\" \\\n f\" 用例ID: {case_id} \" \\\n f\"用例路径: {self.file_path}\"\n return detail\n\n @classmethod\n def time_sleep(cls, case_data: Dict) -> Union[int, float, None]:\n \"\"\" 设置休眠时间 \"\"\"\n try:\n _sleep_time = case_data['sleep']\n return _sleep_time\n except KeyError:\n return None\n\n\nclass GetTestCase:\n @staticmethod\n def case_data(case_id_lists: List):\n case_lists =[]\n for i in case_id_lists:\n _data = CacheHandler.get_cache(i)\n case_lists.append(_data)\n return case_lists\n\n\n\n\n", "repo_name": 
"guoyuchen0514/Agent_Api", "sub_path": "utils/read_file_tools/get_yaml_data_analysis.py", "file_name": "get_yaml_data_analysis.py", "file_ext": "py", "file_size_in_byte": 12367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "os.path.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 26, "usage_type": "name"}, {"api_name": "utils.read_file_tools.yaml_control.GetYamlData", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.others_tool.models.TestCase", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.others_tool.models.TestCase", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 57, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 70, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 82, "usage_type": "call"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 88, "usage_type": "call"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 94, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 111, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 116, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 124, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 131, "usage_type": "call"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 148, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 156, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 163, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 172, "usage_type": "name"}, {"api_name": "utils.config.mysql_db", "line_number": 175, "usage_type": "attribute"}, {"api_name": "utils.config", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 179, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 184, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 184, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 196, "usage_type": "name"}, 
{"api_name": "typing.Union", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 208, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 209, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 213, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 209, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 220, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 221, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 226, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 233, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 237, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 244, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 245, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 250, "usage_type": "call"}, {"api_name": "typing.Text", "line_number": 258, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 259, "usage_type": "name"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 270, "usage_type": "call"}, {"api_name": "utils.others_tool.exceptions.ValueNotFoundError", "line_number": 277, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 259, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 287, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 288, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 289, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 289, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 307, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 307, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 316, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 317, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 330, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 330, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 341, "usage_type": "name"}, {"api_name": "utils.cache_process.cache_control.CacheHandler.get_cache", "line_number": 344, "usage_type": "call"}, {"api_name": "utils.cache_process.cache_control.CacheHandler", "line_number": 344, "usage_type": "name"}]} +{"seq_id": "23685158734", "text": "\"\"\"\nFunctions for plotting a bootstrapped confidence interval of a KDE.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nfrom kde import kde\n\n\ndef sample_with_replacement(data):\n \"\"\"\n Sample the provided data (with replacement) and return a new dataset of the same size.\n \"\"\"\n\n return data[np.random.randint(0, len(data), len(data))].copy()\n\n\ndef plot_kde_uncertainty(data, n_resamples=1000, x_resolution=1000, significance=0.05, palette=sns.color_palette()):\n \"\"\"\n Bootstrap a confidence interval for the KDE of the provided dataset, and plot along with the KDE.\n \"\"\"\n\n assert n_resamples >= 100\n\n x_grid = np.linspace(min(data), max(data), x_resolution)\n orig_kde = kde(data, x_grid)\n\n resampled_kdes = np.zeros((n_resamples, x_resolution))\n\n for i in range(n_resamples):\n resample = sample_with_replacement(data)\n resampled_kdes[i] = kde(resample, x_grid)\n\n 
# sort to get percentiles\n resampled_kdes.sort(axis=0)\n\n def percentile_index(percentile, N):\n \"\"\"\n Find the index of the x'th percentile in a sorted collection of size N.\n \"\"\"\n\n assert 0 <= percentile <= 1\n\n return int(np.round(percentile * N))\n\n def ci_index(alpha, N):\n \"\"\"\n Find the indices in a sorted collection of size N of the two bounds of a confidence interval of significance level alpha.\n \"\"\"\n\n return percentile_index(alpha/2., N), percentile_index((1. - alpha/2.), N)\n\n i_lower, i_higher = ci_index(significance, n_resamples)\n plt.plot(x_grid, resampled_kdes[i_lower], '--', color=palette[0])\n plt.plot(x_grid, resampled_kdes[i_higher], '--', color=palette[0])\n\n plt.plot(x_grid, orig_kde, '-', color=palette[0])\n sns.despine()\n", "repo_name": "metavee/kde-primer-plots", "sub_path": "bootstrap.py", "file_name": "bootstrap.py", "file_ext": "py", "file_size_in_byte": 1774, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "numpy.random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "seaborn.color_palette", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 28, "usage_type": "call"}, {"api_name": "kde.kde", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "kde.kde", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "seaborn.despine", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "33706681026", "text": "from datetime import time\n\nfrom django.test import TestCase\n\nfrom apps.common import days_of_week\nfrom apps.enrollment.courses.models.course_instance import CourseInstance\nfrom apps.enrollment.courses.models.group import Group, GroupType\nfrom apps.enrollment.courses.models.term import Term\nfrom apps.enrollment.courses.tests import factories as courses_factories\nfrom apps.schedulersync.management.commands.import_schedule import Command, SZTerm, Slack\nfrom apps.users.tests import factories as users_factories\n\n\nclass SchedulerImportTestCase(TestCase):\n \"\"\"Tests the subsequent import operation between Scheduler and Zapisy.\"\"\"\n @classmethod\n def setUpTestData(cls):\n cls.bolek = users_factories.EmployeeFactory()\n cls.lolek = users_factories.EmployeeFactory()\n\n cls.r1 = courses_factories.ClassroomFactory()\n\n # Semester one will have no groups yet. 
It will have CourseInstances\n # created by SchedulerMapper.\n cls.s1 = courses_factories.SemesterFactory()\n cls.c1 = courses_factories.CourseInstanceFactory(semester=cls.s1)\n\n def test_import(self):\n \"\"\"Imports a simple schedule.\n\n We test subsequent operations in subtests, so the failure is traceable\n and we do not need to prepare separate data for each test\n (https://stackoverflow.com/a/50868492).\n \"\"\"\n terms = [\n SZTerm(scheduler_id=1,\n teacher=self.bolek,\n course=self.c1,\n type=GroupType.LECTURE,\n limit=25,\n dayOfWeek=days_of_week.MONDAY,\n start_time=time(hour=12),\n end_time=time(hour=14),\n classrooms=[self.r1]),\n SZTerm(scheduler_id=2,\n teacher=self.bolek,\n course=self.c1,\n type=GroupType.LECTURE,\n limit=25,\n dayOfWeek=days_of_week.THURSDAY,\n start_time=time(hour=8),\n end_time=time(hour=10),\n classrooms=[self.r1]),\n SZTerm(scheduler_id=3,\n teacher=self.bolek,\n course=self.c1,\n type=GroupType.EXERCISES,\n limit=13,\n dayOfWeek=days_of_week.MONDAY,\n start_time=time(hour=10),\n end_time=time(hour=12),\n classrooms=[self.r1]),\n SZTerm(scheduler_id=4,\n teacher=self.bolek,\n course=self.c1,\n type=GroupType.EXERCISES,\n limit=13,\n dayOfWeek=days_of_week.THURSDAY,\n start_time=time(hour=10),\n end_time=time(hour=12),\n classrooms=[self.r1]),\n ]\n\n with self.subTest(msg=\"Fresh import\"):\n \"\"\"No groups are in the database yet.\"\"\"\n ims = Command()\n ims.semester = self.s1\n ims.update_terms(terms, False)\n self.assertEqual(CourseInstance.objects.count(), 1)\n # Two lecture group terms should be merged.\n self.assertEqual(self.c1.groups.count(), 3)\n self.assertEqual(self.c1.groups.get(type=GroupType.LECTURE).term.count(), 2)\n self.assertEqual(Term.objects.count(), 4)\n # All these groups should be taught by Bolek.\n self.assertEqual(Group.objects.filter(teacher=self.bolek).count(), 3)\n\n with self.subTest(msg=\"Add new term\"):\n \"\"\"New term is added. 
It creates a new group for the second course.\"\"\"\n # Second course will be created by the mapper.\n self.c2 = courses_factories.CourseInstanceFactory(semester=self.s1)\n terms.append(\n SZTerm(scheduler_id=5,\n teacher=self.lolek,\n course=self.c2,\n type=GroupType.LAB,\n limit=13,\n dayOfWeek=days_of_week.THURSDAY,\n start_time=time(hour=10),\n end_time=time(hour=12),\n classrooms=[self.r1]))\n ims = Command()\n ims.semester = self.s1\n ims.update_terms(terms, False)\n # Nothing should change for the first course.\n self.assertEqual(self.c1.groups.count(), 3)\n t = Term.objects.filter(group__course=self.c1, group__type=GroupType.EXERCISES).first()\n self.assertCountEqual(t.classrooms.all(), [self.r1])\n # A new group should be created for the second course.\n self.assertEqual(self.c2.groups.count(), 1)\n\n with self.subTest(msg=\"None mappings\"):\n \"\"\"Terms with course=None should be ignored.\"\"\"\n # course=None means that the course could not be identified by the mapper.\n terms.append(\n SZTerm(scheduler_id=7,\n teacher=self.lolek,\n course=None,\n type=GroupType.EXERCISES,\n limit=13,\n dayOfWeek=days_of_week.THURSDAY,\n start_time=time(hour=10),\n end_time=time(hour=12),\n classrooms=[self.r1]))\n ims = Command()\n ims.semester = self.s1\n ims.update_terms(terms, False)\n slack = Slack(\"\")\n slack.prepare_message(ims.summary)\n slack.write_to_screen()\n # No new groups should turn up.\n self.assertEqual(self.c1.groups.count(), 3)\n self.assertEqual(self.c2.groups.count(), 1)\n self.assertEqual(Group.objects.count(), 4)\n self.assertEqual(Term.objects.count(), 5)\n\n with self.subTest(msg=\"Do not delete flag\"):\n \"\"\"Removing terms with and without dont_delete_terms flag.\"\"\"\n # We leave only c1 terms. One is unscheduled and one has unknown teacher.\n terms = terms[:4]\n ims = Command()\n ims.semester = self.s1\n ims.update_terms(terms, True)\n # No terms will be deleted.\n self.assertEqual(CourseInstance.objects.count(), 2)\n self.assertEqual(Group.objects.count(), 4)\n self.assertEqual(Term.objects.count(), 5)\n\n ims.update_terms(terms, False)\n # This should delete entire c2 and only leave 3 groups of c1.\n self.assertEqual(CourseInstance.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 3)\n self.assertEqual(Term.objects.count(), 4)\n", "repo_name": "iiuni/projektzapisy", "sub_path": "zapisy/apps/schedulersync/tests/test_scheduler_import.py", "file_name": "test_scheduler_import.py", "file_ext": "py", "file_size_in_byte": 6657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 29, "dataset": "github-code", "pt": "75", "api": [{"api_name": "django.test.TestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "apps.users.tests.factories.EmployeeFactory", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.users.tests.factories", "line_number": 18, "usage_type": "name"}, {"api_name": "apps.users.tests.factories.EmployeeFactory", "line_number": 19, "usage_type": "call"}, {"api_name": "apps.users.tests.factories", "line_number": 19, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.tests.factories.ClassroomFactory", "line_number": 21, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.tests.factories", "line_number": 21, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.tests.factories.SemesterFactory", "line_number": 25, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.tests.factories", "line_number": 25, "usage_type": "name"}, {"api_name": 
"apps.enrollment.courses.tests.factories.CourseInstanceFactory", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.tests.factories", "line_number": 26, "usage_type": "name"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.SZTerm", "line_number": 36, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.LECTURE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.GroupType", "line_number": 39, "usage_type": "name"}, {"api_name": "apps.common.days_of_week.MONDAY", "line_number": 41, "usage_type": "attribute"}, {"api_name": "apps.common.days_of_week", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 43, "usage_type": "call"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.SZTerm", "line_number": 45, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.LECTURE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.GroupType", "line_number": 48, "usage_type": "name"}, {"api_name": "apps.common.days_of_week.THURSDAY", "line_number": 50, "usage_type": "attribute"}, {"api_name": "apps.common.days_of_week", "line_number": 50, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 52, "usage_type": "call"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.SZTerm", "line_number": 54, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.EXERCISES", "line_number": 57, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.GroupType", "line_number": 57, "usage_type": "name"}, {"api_name": "apps.common.days_of_week.MONDAY", "line_number": 59, "usage_type": "attribute"}, {"api_name": "apps.common.days_of_week", "line_number": 59, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 61, "usage_type": "call"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.SZTerm", "line_number": 63, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.EXERCISES", "line_number": 66, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.GroupType", "line_number": 66, "usage_type": "name"}, {"api_name": "apps.common.days_of_week.THURSDAY", "line_number": 68, "usage_type": "attribute"}, {"api_name": "apps.common.days_of_week", "line_number": 68, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 70, "usage_type": "call"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.Command", "line_number": 76, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance.objects.count", "line_number": 79, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance", "line_number": 79, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.LECTURE", "line_number": 82, "usage_type": "attribute"}, {"api_name": 
"apps.enrollment.courses.models.group.GroupType", "line_number": 82, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects.count", "line_number": 83, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.term.Term", "line_number": 83, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects.filter", "line_number": 85, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.Group", "line_number": 85, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.tests.factories.CourseInstanceFactory", "line_number": 90, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.tests.factories", "line_number": 90, "usage_type": "name"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.SZTerm", "line_number": 92, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.LAB", "line_number": 95, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.GroupType", "line_number": 95, "usage_type": "name"}, {"api_name": "apps.common.days_of_week.THURSDAY", "line_number": 97, "usage_type": "attribute"}, {"api_name": "apps.common.days_of_week", "line_number": 97, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 99, "usage_type": "call"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.Command", "line_number": 101, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.term.Term", "line_number": 106, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.EXERCISES", "line_number": 106, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.GroupType", "line_number": 106, "usage_type": "name"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.SZTerm", "line_number": 115, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.GroupType.EXERCISES", "line_number": 118, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.GroupType", "line_number": 118, "usage_type": "name"}, {"api_name": "apps.common.days_of_week.THURSDAY", "line_number": 120, "usage_type": "attribute"}, {"api_name": "apps.common.days_of_week", "line_number": 120, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 122, "usage_type": "call"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.Command", "line_number": 124, "usage_type": "call"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.Slack", "line_number": 127, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects.count", "line_number": 133, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.Group", "line_number": 133, "usage_type": 
"name"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects.count", "line_number": 134, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.term.Term", "line_number": 134, "usage_type": "name"}, {"api_name": "apps.schedulersync.management.commands.import_schedule.Command", "line_number": 140, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance.objects.count", "line_number": 144, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance.objects", "line_number": 144, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance", "line_number": 144, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects.count", "line_number": 145, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects", "line_number": 145, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.Group", "line_number": 145, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects.count", "line_number": 146, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.term.Term", "line_number": 146, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance.objects.count", "line_number": 150, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.course_instance.CourseInstance", "line_number": 150, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects.count", "line_number": 151, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.group.Group.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.group.Group", "line_number": 151, "usage_type": "name"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects.count", "line_number": 152, "usage_type": "call"}, {"api_name": "apps.enrollment.courses.models.term.Term.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "apps.enrollment.courses.models.term.Term", "line_number": 152, "usage_type": "name"}]} +{"seq_id": "475782320", "text": "import unittest\nfrom datetime import date, time\nfrom uuid import UUID\n\nfrom common.entities.base_entities.customer_delivery import CustomerDelivery\nfrom common.entities.base_entities.delivery_option import DeliveryOption\nfrom common.entities.base_entities.delivery_request import DeliveryRequest\nfrom common.entities.base_entities.entity_id import EntityID\nfrom common.entities.base_entities.package import PackageType\nfrom common.entities.base_entities.package_delivery_plan import PackageDeliveryPlan\nfrom common.entities.base_entities.temporal import TimeWindowExtension, DateTimeExtension\nfrom common.math.angle import Angle, AngleUnit\nfrom geometry.geo_factory import create_point_2d\nfrom grid.azimuth_options import AzimuthOptions\nfrom grid.grid_cell import GridCell, EnvelopeGridCell\nfrom grid.grid_cell_services import GridCellServices\nfrom grid.delivery_request_envelope_cells import PotentialEnvelope, 
DeliveryRequestPotentialEnvelopes\nfrom grid.grid_location import GridLocation, GridLocationServices\nfrom grid.grid_service import GridService\nfrom grid.slides_factory import generate_slides_container\nfrom services.mock_envelope_services import MockEnvelopeServices\n\n\nclass BasicDeliveryRequestEnvelopeCellsTestCase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.envelope_service = MockEnvelopeServices()\n cls.cell_width_resolution = 1\n cls.cell_height_resolution = 2\n cls.cell_ratio_required = 0.5\n cls.drone_azimuth_resolution = 8\n cls.drop_azimuth_resolution = 8\n cls.package_types = [package_type for package_type in PackageType]\n cls.slides_container = generate_slides_container(MockEnvelopeServices(),\n cls.package_types,\n cls.drone_azimuth_resolution,\n cls.drop_azimuth_resolution,\n cls.cell_width_resolution,\n cls.cell_height_resolution,\n cls.cell_ratio_required)\n\n cls.pdp_1 = PackageDeliveryPlan(id_=EntityID(UUID(int=42)),\n drop_point=create_point_2d(10, 20),\n azimuth=Angle(135, AngleUnit.DEGREE),\n pitch=Angle(90, AngleUnit.DEGREE),\n package_type=PackageType.TINY)\n\n cls.pdp_2 = PackageDeliveryPlan(id_=EntityID(UUID(int=43)),\n drop_point=create_point_2d(20, 30),\n azimuth=Angle(135, AngleUnit.DEGREE),\n pitch=Angle(45, AngleUnit.DEGREE),\n package_type=PackageType.TINY)\n\n cls.pdp_3 = PackageDeliveryPlan(id_=EntityID(UUID(int=44)),\n drop_point=create_point_2d(30, 40),\n azimuth=Angle(45, AngleUnit.DEGREE),\n pitch=Angle(45, AngleUnit.DEGREE),\n package_type=PackageType.TINY)\n\n cls.do_1 = DeliveryOption(delivery_options_id=EntityID(UUID(int=14)),\n customer_deliveries=[CustomerDelivery(customer_delivery_id=EntityID(UUID(int=24)),\n package_delivery_plans=[cls.pdp_1, cls.pdp_2,\n cls.pdp_3])])\n\n cls.dr_1 = DeliveryRequest(delivery_options=[cls.do_1],\n time_window=TimeWindowExtension(\n DateTimeExtension(dt_date=date(2021, 1, 1), dt_time=time(6, 0, 0)),\n DateTimeExtension(dt_date=date(2021, 1, 1), dt_time=time(6, 0, 0))),\n priority=1, id_=EntityID.generate_uuid())\n\n cls.delivery_requests_envelope_cells = DeliveryRequestPotentialEnvelopes(cls.slides_container, cls.dr_1)\n cls.delivery_requests_envelope_cells_dict_do_1 = cls.delivery_requests_envelope_cells.delivery_options_cells[0]\n\n def test_drone_azimuth(self):\n self.assertEqual(self.delivery_requests_envelope_cells_dict_do_1.keys(),\n AzimuthOptions(self.slides_container.get_drone_azimuth_resolution).values)\n\n def test_envelope_cells(self):\n pdp_list = [self.pdp_1, self.pdp_2, self.pdp_3]\n for drone_azimuth in AzimuthOptions(self.slides_container.get_drone_azimuth_resolution).values:\n scale_to_grid_list = self._scale_to_grid_list(drone_azimuth, pdp_list)\n\n self._test_envelope_cells_average(drone_azimuth, scale_to_grid_list)\n\n self._test_envelope_cells_pdp_ids(drone_azimuth, scale_to_grid_list)\n\n def _test_envelope_cells_pdp_ids(self, drone_azimuth, scale_to_grid_list):\n empty_indices = GridLocationServices.get_not_empty_indices(scale_to_grid_list)\n package_delivery_plan_list = list(map([self.pdp_1, self.pdp_2, self.pdp_3].__getitem__, empty_indices))\n self.assertEqual(\n self.delivery_requests_envelope_cells_dict_do_1[\n drone_azimuth].package_delivery_plans_ids(),\n [package_delivery_plan.id for package_delivery_plan in package_delivery_plan_list])\n\n def _test_envelope_cells_average(self, drone_azimuth, scale_to_grid_list):\n expected_average_location = GridLocationServices.calc_average(scale_to_grid_list)\n 
self.assertEqual(self.delivery_requests_envelope_cells_dict_do_1[drone_azimuth].location,\n expected_average_location)\n\n def _scale_to_grid_list(self, drone_azimuth, pdp_list):\n scale_to_grid_list = []\n for pdp in pdp_list:\n drop_point_grid_location = \\\n GridService.get_grid_location(pdp.drop_point, self.slides_container.cell_width_resolution,\n self.slides_container.cell_height_resolution)\n drop_azimuth = GridCellServices.get_drop_azimuth(drone_azimuth, pdp.azimuth, pdp.pitch)\n envelope_location = self.slides_container.get_envelope_location(drone_azimuth,\n drop_azimuth,\n pdp.package_type)\n scale_to_grid_list.append(GridService.scale_to_grid(drop_point_grid_location, envelope_location))\n return scale_to_grid_list\n\n\nclass BasicDeliveryRequestEnvelopeCellsDictTestCase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.cell_1 = GridCell(location=GridLocation(10, 15))\n\n cls.pdp_1 = PackageDeliveryPlan(id_=EntityID(UUID(int=42)),\n drop_point=create_point_2d(1, 2),\n azimuth=Angle(30, AngleUnit.DEGREE),\n pitch=Angle(80, AngleUnit.DEGREE),\n package_type=PackageType.TINY)\n\n cls.pdp_2 = PackageDeliveryPlan(id_=EntityID(UUID(int=43)),\n drop_point=create_point_2d(1, 3),\n azimuth=Angle(40, AngleUnit.DEGREE),\n pitch=Angle(90, AngleUnit.DEGREE),\n package_type=PackageType.TINY)\n\n cls.envelope_cell_1 = EnvelopeGridCell(location=GridLocation(10, 15),\n drone_azimuth=Angle(45, AngleUnit.DEGREE),\n package_delivery_plans=[cls.pdp_1, cls.pdp_2])\n\n cls.envelope_cell_2 = EnvelopeGridCell(location=GridLocation(20, 15),\n drone_azimuth=Angle(90, AngleUnit.DEGREE),\n package_delivery_plans=[cls.pdp_1, cls.pdp_2])\n\n cls.delivery_request_envelope_cells = PotentialEnvelope(\n [cls.envelope_cell_1, cls.envelope_cell_2])\n\n def test_delivery_request_envelope_cells_dict(self):\n ec1_expected_grid_location = GridLocation(10, 15)\n ec1_expected_angle = Angle(45, AngleUnit.DEGREE)\n\n ec2_expected_grid_location = GridLocation(20, 15)\n ec2_expected_angle = Angle(90, AngleUnit.DEGREE)\n\n expected_pdp_1 = PackageDeliveryPlan(id_=EntityID(UUID(int=42)),\n drop_point=create_point_2d(1, 2),\n azimuth=Angle(30, AngleUnit.DEGREE),\n pitch=Angle(80, AngleUnit.DEGREE),\n package_type=PackageType.TINY)\n\n expected_pdp_2 = PackageDeliveryPlan(id_=EntityID(UUID(int=43)),\n drop_point=create_point_2d(1, 3),\n azimuth=Angle(40, AngleUnit.DEGREE),\n pitch=Angle(90, AngleUnit.DEGREE),\n package_type=PackageType.TINY)\n\n expected_pdp_list = [expected_pdp_1, expected_pdp_2]\n\n self.assertEqual(self.delivery_request_envelope_cells[ec1_expected_angle].location, ec1_expected_grid_location)\n self.assertEqual(self.delivery_request_envelope_cells[ec1_expected_angle].drone_azimuth, ec1_expected_angle)\n self.assertEqual(self.delivery_request_envelope_cells[ec1_expected_angle].package_delivery_plans,\n expected_pdp_list)\n self.assertEqual(self.delivery_request_envelope_cells[ec1_expected_angle].package_delivery_plans_ids(),\n [pdp.id for pdp in expected_pdp_list])\n\n self.assertEqual(self.delivery_request_envelope_cells[ec2_expected_angle].location, ec2_expected_grid_location)\n self.assertEqual(self.delivery_request_envelope_cells[ec2_expected_angle].drone_azimuth, ec2_expected_angle)\n self.assertEqual(self.delivery_request_envelope_cells[ec2_expected_angle].package_delivery_plans,\n expected_pdp_list)\n self.assertEqual(self.delivery_request_envelope_cells[ec2_expected_angle].package_delivery_plans_ids(),\n [pdp.id for pdp in expected_pdp_list])\n", "repo_name": 
"drorp24/Ice_Ring_Matching_Algo", "sub_path": "grid/tests/test_delivery_request_envelope_cells.py", "file_name": "test_delivery_request_envelope_cells.py", "file_ext": "py", "file_size_in_byte": 10587, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "unittest.TestCase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "services.mock_envelope_services.MockEnvelopeServices", "line_number": 28, "usage_type": "call"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 34, "usage_type": "name"}, {"api_name": "grid.slides_factory.generate_slides_container", "line_number": 35, "usage_type": "call"}, {"api_name": "services.mock_envelope_services.MockEnvelopeServices", "line_number": 35, "usage_type": "call"}, {"api_name": "common.entities.base_entities.package_delivery_plan.PackageDeliveryPlan", "line_number": 43, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 43, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 43, "usage_type": "call"}, {"api_name": "geometry.geo_factory.create_point_2d", "line_number": 44, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 45, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 45, "usage_type": "name"}, {"api_name": "common.math.angle.Angle", "line_number": 46, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 46, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package.PackageType.TINY", "line_number": 47, "usage_type": "attribute"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 47, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package_delivery_plan.PackageDeliveryPlan", "line_number": 49, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 49, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 49, "usage_type": "call"}, {"api_name": "geometry.geo_factory.create_point_2d", "line_number": 50, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 51, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 51, "usage_type": "name"}, {"api_name": "common.math.angle.Angle", "line_number": 52, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 52, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package.PackageType.TINY", "line_number": 53, "usage_type": "attribute"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 53, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package_delivery_plan.PackageDeliveryPlan", "line_number": 55, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 55, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 55, "usage_type": "call"}, {"api_name": "geometry.geo_factory.create_point_2d", "line_number": 56, "usage_type": "call"}, 
{"api_name": "common.math.angle.Angle", "line_number": 57, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 57, "usage_type": "name"}, {"api_name": "common.math.angle.Angle", "line_number": 58, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 58, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 58, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package.PackageType.TINY", "line_number": 59, "usage_type": "attribute"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 59, "usage_type": "name"}, {"api_name": "common.entities.base_entities.delivery_option.DeliveryOption", "line_number": 61, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 61, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 61, "usage_type": "call"}, {"api_name": "common.entities.base_entities.customer_delivery.CustomerDelivery", "line_number": 62, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 62, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 62, "usage_type": "call"}, {"api_name": "common.entities.base_entities.delivery_request.DeliveryRequest", "line_number": 66, "usage_type": "call"}, {"api_name": "common.entities.base_entities.temporal.TimeWindowExtension", "line_number": 67, "usage_type": "call"}, {"api_name": "common.entities.base_entities.temporal.DateTimeExtension", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 68, "usage_type": "call"}, {"api_name": "common.entities.base_entities.temporal.DateTimeExtension", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 69, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID.generate_uuid", "line_number": 70, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 70, "usage_type": "name"}, {"api_name": "grid.delivery_request_envelope_cells.DeliveryRequestPotentialEnvelopes", "line_number": 72, "usage_type": "call"}, {"api_name": "grid.azimuth_options.AzimuthOptions", "line_number": 77, "usage_type": "call"}, {"api_name": "grid.azimuth_options.AzimuthOptions", "line_number": 81, "usage_type": "call"}, {"api_name": "grid.grid_location.GridLocationServices.get_not_empty_indices", "line_number": 89, "usage_type": "call"}, {"api_name": "grid.grid_location.GridLocationServices", "line_number": 89, "usage_type": "name"}, {"api_name": "grid.grid_location.GridLocationServices.calc_average", "line_number": 97, "usage_type": "call"}, {"api_name": "grid.grid_location.GridLocationServices", "line_number": 97, "usage_type": "name"}, {"api_name": "grid.grid_service.GridService.get_grid_location", "line_number": 105, "usage_type": "call"}, {"api_name": "grid.grid_service.GridService", "line_number": 105, "usage_type": "name"}, {"api_name": "grid.grid_cell_services.GridCellServices.get_drop_azimuth", "line_number": 107, "usage_type": "call"}, {"api_name": "grid.grid_cell_services.GridCellServices", "line_number": 107, "usage_type": "name"}, {"api_name": "grid.grid_service.GridService.scale_to_grid", 
"line_number": 111, "usage_type": "call"}, {"api_name": "grid.grid_service.GridService", "line_number": 111, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 115, "usage_type": "attribute"}, {"api_name": "grid.grid_cell.GridCell", "line_number": 119, "usage_type": "call"}, {"api_name": "grid.grid_location.GridLocation", "line_number": 119, "usage_type": "call"}, {"api_name": "common.entities.base_entities.package_delivery_plan.PackageDeliveryPlan", "line_number": 121, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 121, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 121, "usage_type": "call"}, {"api_name": "geometry.geo_factory.create_point_2d", "line_number": 122, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 123, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 123, "usage_type": "name"}, {"api_name": "common.math.angle.Angle", "line_number": 124, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 124, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 124, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package.PackageType.TINY", "line_number": 125, "usage_type": "attribute"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 125, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package_delivery_plan.PackageDeliveryPlan", "line_number": 127, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 127, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 127, "usage_type": "call"}, {"api_name": "geometry.geo_factory.create_point_2d", "line_number": 128, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 129, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 129, "usage_type": "name"}, {"api_name": "common.math.angle.Angle", "line_number": 130, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 130, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 130, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package.PackageType.TINY", "line_number": 131, "usage_type": "attribute"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 131, "usage_type": "name"}, {"api_name": "grid.grid_cell.EnvelopeGridCell", "line_number": 133, "usage_type": "call"}, {"api_name": "grid.grid_location.GridLocation", "line_number": 133, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 134, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 134, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 134, "usage_type": "name"}, {"api_name": "grid.grid_cell.EnvelopeGridCell", "line_number": 137, "usage_type": "call"}, {"api_name": "grid.grid_location.GridLocation", "line_number": 137, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 138, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 138, "usage_type": "attribute"}, 
{"api_name": "common.math.angle.AngleUnit", "line_number": 138, "usage_type": "name"}, {"api_name": "grid.delivery_request_envelope_cells.PotentialEnvelope", "line_number": 141, "usage_type": "call"}, {"api_name": "grid.grid_location.GridLocation", "line_number": 145, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 146, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 146, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 146, "usage_type": "name"}, {"api_name": "grid.grid_location.GridLocation", "line_number": 148, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 149, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 149, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 149, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package_delivery_plan.PackageDeliveryPlan", "line_number": 151, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 151, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 151, "usage_type": "call"}, {"api_name": "geometry.geo_factory.create_point_2d", "line_number": 152, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 153, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 153, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 153, "usage_type": "name"}, {"api_name": "common.math.angle.Angle", "line_number": 154, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 154, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 154, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package.PackageType.TINY", "line_number": 155, "usage_type": "attribute"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 155, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package_delivery_plan.PackageDeliveryPlan", "line_number": 157, "usage_type": "call"}, {"api_name": "common.entities.base_entities.entity_id.EntityID", "line_number": 157, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 157, "usage_type": "call"}, {"api_name": "geometry.geo_factory.create_point_2d", "line_number": 158, "usage_type": "call"}, {"api_name": "common.math.angle.Angle", "line_number": 159, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 159, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 159, "usage_type": "name"}, {"api_name": "common.math.angle.Angle", "line_number": 160, "usage_type": "call"}, {"api_name": "common.math.angle.AngleUnit.DEGREE", "line_number": 160, "usage_type": "attribute"}, {"api_name": "common.math.angle.AngleUnit", "line_number": 160, "usage_type": "name"}, {"api_name": "common.entities.base_entities.package.PackageType.TINY", "line_number": 161, "usage_type": "attribute"}, {"api_name": "common.entities.base_entities.package.PackageType", "line_number": 161, "usage_type": "name"}]} +{"seq_id": "7142557272", "text": "import os\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\ndef get_filename_ext(filepath):\n base_name = os.path.basename(filepath)\n name, ext = os.path.splitext(base_name)\n return name, 
ext\n\n\ndef upload_image_path(instance, filename):\n name, ext = get_filename_ext(filename)\n final_name = f\"{instance.id}-{User}-{os.times}{ext}\"\n return f\"'profileimages/'{final_name}\"\n\nclass Profile(models.Model):\n firstname = models.CharField(max_length=150,default='empty')\n lastname = models.CharField(max_length=150,default='empty')\n email = models.EmailField(max_length=150,default='empty')\n slogan = models.CharField(max_length=150,default='empty')\n text = models.TextField(default='empty')\n image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)\n author = models.ForeignKey(User,default='empty',on_delete=models.CASCADE)\n\n def __str__(self):\n return self.firstname", "repo_name": "Muhammadkhk/Artstory", "sub_path": "story_account/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "os.path.basename", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 15, "usage_type": "name"}, {"api_name": "os.times", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "1666758223", "text": "\"\"\"\nПрограмма позволяет сравнить json-схему и ответ апи по ключам.\n1) вставьте в файл json-schema.json вашу json-схему\n2) вставьте в файл api_response.json ваш ответ апи из постмана\n\"\"\"\n\nimport json\n\n\ndef parser_json_schema():\n \"\"\"Возвращает ключи из json-схемы\"\"\"\n counter = []\n file = 'json-schema.json'\n with open(file) as f:\n loader = json.load(f)\n parse_object = (loader['properties'])\n for i in parse_object.keys():\n counter.append(i)\n\n return counter\n\n\ndef parser_api_response():\n \"\"\"Возвращает ключи из ответа апи\"\"\"\n counter = []\n file = 'api_response.json'\n with 
open(file) as f:\n loader = json.load(f)\n for i in loader.keys():\n counter.append(i)\n\n return counter\n\n\ndef get_keys_absence_in_api_resp():\n \"\"\"Возвращает поля, которые есть в json-схеме, но которых нет в ответе апи\"\"\"\n counter = []\n for item in parser_json_schema():\n if item not in parser_api_response():\n counter.append(item)\n return counter\n\n\ndef get_keys_absence_in_scheme():\n \"\"\"Возвращает поля, которые есть в ответе апи, но которых нет в json-схеме\"\"\"\n counter = []\n for item in parser_api_response():\n if item not in parser_json_schema():\n counter.append(item)\n return counter\n\n\nif __name__ == '__main__':\n print(len(parser_json_schema()))\n print(len(parser_api_response()))\n print(get_keys_absence_in_scheme())\n print(get_keys_absence_in_api_resp())\n", "repo_name": "ervand7/Summary", "sub_path": "JSON-schema/мой парсер json-схем/my_parser.py", "file_name": "my_parser.py", "file_ext": "py", "file_size_in_byte": 1749, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "75", "api": [{"api_name": "json.load", "line_number": 15, "usage_type": "call"}, {"api_name": "json.load", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "29661951301", "text": "import os\nfrom typing import List, Optional, Union\n\nimport accelerate\nimport kornia as K\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nfrom einops import rearrange, reduce, repeat\nfrom engine.base_engine import BaseEngine\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\nfrom utils import mkdir_if_missing\n\nfrom . import ENGINE_REGISTRY\nfrom .evaluate_engine import EvaluateEngine\n\n\nclass RGFAttackEngine(EvaluateEngine):\n def __init__(\n self,\n train_dataloader: DataLoader,\n query_dataloader: DataLoader,\n gallery_dataloader: DataLoader,\n accelerator: accelerate.Accelerator,\n agent_models: nn.Module,\n target_model: nn.Module,\n segment_model: nn.Module,\n algorithm: str,\n ) -> None:\n super().__init__(\n train_dataloader,\n query_dataloader,\n gallery_dataloader,\n accelerator,\n agent_models,\n target_model,\n segment_model,\n algorithm,\n )\n\n def rgf_attack(self, imgs, camids):\n max_queries = 5000\n\n fd_eta = 0.1\n step_size = 0.01\n momentum = 0.0\n decay = 1.0\n\n # adv_imgs = imgs.clone()\n adv_imgs = torch.clamp(\n imgs + torch.rand_like(imgs) * 2 * (2 / 255) - (2 / 255), 0, 1\n )\n feats = self._reid_model_forward(self.target_model, imgs, camids)\n for _ in range(max_queries // 2):\n exp_noise = torch.randn_like(adv_imgs)\n exp_noise = exp_noise / torch.norm(exp_noise)\n\n input1 = adv_imgs + fd_eta * exp_noise\n adv_feats1 = self._reid_model_forward(self.target_model, input1, camids)\n l1 = (F.normalize(adv_feats1) * F.normalize(feats)).sum(dim=1).mean()\n\n input2 = adv_imgs\n adv_feats2 = self._reid_model_forward(self.target_model, input2, camids)\n l2 = (F.normalize(adv_feats2) * F.normalize(feats)).sum(dim=1).mean()\n est_deriv = (l1 - l2) / fd_eta\n grad = est_deriv.view(-1, 1, 1, 1) * exp_noise\n\n grad = momentum * decay + grad / torch.linalg.vector_norm(\n grad, ord=1, dim=(1, 2, 3), keepdim=True\n ).clamp(min=1e-8)\n momentum = grad\n adv_imgs -= step_size * grad.sign()\n\n delta = torch.clamp(adv_imgs - imgs, min=-self.epsilon, max=self.epsilon)\n adv_imgs = torch.clamp(imgs + delta, 0, 1)\n return adv_imgs\n\n def val_step(self, batch, batch_idx, is_query=True):\n imgs, pids, camids, imgs_path, _ = batch.values()\n\n if 
is_query:\n adv_imgs = self.rgf_attack(imgs, camids)\n\n self._make_log_dir_if_missing(imgs_path[0].split(os.sep)[-3])\n cache_path = os.path.join(self.log_dir, f\"{self.target_model.name}\")\n mkdir_if_missing(cache_path)\n\n if batch_idx == 1 and self.accelerator.is_main_process:\n save_image(\n adv_imgs[:16],\n f\"{self.log_dir}/{self.target_model.name}_adv_imgs.png\",\n pad_value=1,\n )\n save_image(\n adv_imgs[:16] - imgs[:16],\n f\"{self.log_dir}/{self.target_model.name}_delta.png\",\n normalize=True,\n pad_value=1,\n )\n imgs = adv_imgs\n # cache adv_imgs\n torch.save(adv_imgs, f\"{cache_path}/batch_{batch_idx}.pth\")\n\n # extract_features\n feats = self._reid_model_forward(self.target_model, imgs, camids)\n\n return feats, pids, camids\n\n def _reid_model_forward(self, model, imgs, camids):\n if \"transreid\" in model.name:\n feats = model(imgs, cam_label=camids)\n else:\n feats = model(imgs)\n return feats\n\n\n@ENGINE_REGISTRY.register()\ndef rgf(**trainer_params):\n return RGFAttackEngine(**trainer_params)\n", "repo_name": "HWliiu/QueryUAPReidAttack", "sub_path": "reidattack/engine/experiment/rgf_attack_engine.py", "file_name": "rgf_attack_engine.py", "file_ext": "py", "file_size_in_byte": 4017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "75", "api": [{"api_name": "evaluate_engine.EvaluateEngine", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 25, "usage_type": "name"}, {"api_name": "accelerate.Accelerator", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.clamp", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.rand_like", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.linalg.vector_norm", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.linalg", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 77, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "utils.mkdir_if_missing", "line_number": 88, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 91, "usage_type": "call"}, {"api_name": 
"torchvision.utils.save_image", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "42098921507", "text": "# Importamos las depenencias.\nimport pickle\nfrom argparse import ArgumentParser\n\nimport cv2\nimport mahotas\n\n# Estas son las dependencias implementadas por nosotros.\nfrom datasmarts.dataset import dataset\nfrom datasmarts.feature.hog import HOG\n\n# Definimos el menú del script:\n# * -m/--model: Ruta al clasificador.\n# * -i/--image: Ruta a la imagen a procesar.\nargument_parser = ArgumentParser()\nargument_parser.add_argument('-m', '--model', required=True, help='Ruta al modelo.')\nargument_parser.add_argument('-i', '--image', required=True, help='Ruta a la imagen de entrada.')\narguments = vars(argument_parser.parse_args())\n\n# Cargamos el modelo entrenado.\nwith open(arguments['model'], 'rb') as f:\n model = pickle.load(f)\n\n# Creamos el extractor HOG con la misma configuración que cuando entrenamos el clasificador.\nhog = HOG(orientations=18, pixels_per_cell=(10, 10), cells_per_block=(1, 1), transform=True)\n\n# Leemos la imagen de entrada y la convertimos en escala de grises.\nimage = cv2.imread(arguments['image'])\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Aplicamos difuminado Gaussiano para reducir el ruido.\nblurred = cv2.GaussianBlur(gray, (5, 5), 0)\n\n# Calculamos los bordes usando el algoritmo de Canny.\nedged = cv2.Canny(blurred, 30, 150)\n\n# Con base a los bordes, encontramos los contornos de los posibles dígitos.\n(contours, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n# Ordenamos los contornos de izquierda a derecha.\ncontours = sorted([(c, cv2.boundingRect(c)[0]) for c in contours], key=lambda p: p[1])\n\n# Iteramos sobre cada contorno.\nfor c, _ in contours:\n # Obtenemos el rectángulo que encierra a cada contorno.\n x, y, width, height = cv2.boundingRect(c)\n\n # Como sabemos que los dígitos son más anchos que largos, nos quedaremos con aquellos que tengan\n # dimensiones mayores o iguales a 20x7\n if width >= 7 and height >= 20:\n # Extraemos la región de interés, utilizando las coordenadas del rectángulo.\n roi = gray[y:y + height, x:x + width]\n thresh = roi.copy()\n\n # Usamos thresholding para convertir la región de interés en una imagen binaria, como\n # las de MNIST.\n T = mahotas.thresholding.otsu(roi)\n thresh[thresh > T] = 255\n thresh = cv2.bitwise_not(thresh)\n\n # Corregimos el sesgo y centramos el dígito.\n thresh = dataset.deskew(thresh, width=20)\n thresh = dataset.center_extent(thresh, size=(20, 20))\n\n # Mostramos la imagen binaria.\n cv2.imshow('No. 
binario', thresh)\n\n # Calculamos el vector característico usando HOG y lo pasamos al clasificador.\n histogram = hog.describe(thresh)\n digit = model.predict([histogram])[0]\n\n # Imprimimos la predicción en la consola.\n print(f'¿Acaso es un {digit}?')\n\n # Encerramos el área de interés en la imagen original, e imprimimos la predicción al lado.\n cv2.rectangle(image, (x, y), (x + width, y + height), (128, 0, 128), 1)\n cv2.putText(image,\n text=str(digit),\n org=(x - 10, y - 10),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1.2,\n color=(128, 0, 128),\n thickness=2)\n cv2.imshow('image', image)\n cv2.waitKey(0)\n", "repo_name": "RaulCalatayud/RobotNavegacion", "sub_path": "Robot_navegacion/datasmarts/classify.py", "file_name": "classify.py", "file_ext": "py", "file_size_in_byte": 3343, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 22, "usage_type": "call"}, {"api_name": "datasmarts.feature.hog.HOG", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 46, "usage_type": "call"}, {"api_name": "mahotas.thresholding.otsu", "line_number": 57, "usage_type": "call"}, {"api_name": "mahotas.thresholding", "line_number": 57, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_not", "line_number": 59, "usage_type": "call"}, {"api_name": "datasmarts.dataset.dataset.deskew", "line_number": 62, "usage_type": "call"}, {"api_name": "datasmarts.dataset.dataset", "line_number": 62, "usage_type": "name"}, {"api_name": "datasmarts.dataset.dataset.center_extent", "line_number": 63, "usage_type": "call"}, {"api_name": "datasmarts.dataset.dataset", "line_number": 63, "usage_type": "name"}, {"api_name": "cv2.imshow", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 80, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "29064642259", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 14 16:06:05 2020\r\n\r\n@author: alvar\r\n\"\"\"\r\n\r\nfrom mpl_toolkits.mplot3d import axes3d\r\nfrom sklearn.utils import shuffle\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import model_selection \r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.linear_model import LogisticRegression, Lasso, Perceptron, SGDClassifier\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.feature_selection 
import VarianceThreshold, SelectFromModel\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom sklearn.decomposition import PCA\r\nimport seaborn as sns\r\nimport pandas as pd\r\n\r\nseed=1\r\nnp.random.seed(seed)\r\n\r\n\"\"\"\r\n########################\r\n# LECTURA DE DATOS #\r\n########################\r\n\"\"\"\r\n\r\n# Funcion para leer los datos\r\ndef read_dataset(name):\r\n data = np.loadtxt(name, delimiter=\",\")\r\n return data[:,:-1], data[:,-1]\r\n\r\n# Lectura de los datos de entrenamiento\r\nX_train , y_train = read_dataset('datos/optdigits.tra')\r\n\r\n# Lectura de los datos de test\r\nX_test , y_test = read_dataset('datos/optdigits.tes')\r\n\r\n\"\"\"\r\n#############################\r\n# DIVISION EN CONJUNTOS #\r\n#############################\r\n\"\"\"\r\n\r\n# Uno todas las instancias para luego dividir en los conjuntos indicados en el guión\r\n# 80% train 20% test\r\n\r\nX=np.concatenate((X_train,X_test),axis=0)\r\ny=np.concatenate((y_train,y_test),axis=0)\r\n\r\n#barajamos las instancias\r\nX, y = shuffle(X, y, random_state=seed)\r\n\r\n#escojo los conjuntos de train y test\r\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.2,random_state=seed)\r\n\r\n\"\"\"\r\n###################################\r\n# REPRESENTACIÓN HEATMAP #\r\n###################################\r\n\"\"\"\r\n\r\nX_aux=np.copy(X)\r\ny_aux=np.copy(y)\r\n\r\npca = PCA(n_components=2)\r\npca.fit(X_aux,y_aux)\r\nX_aux_transformed = pca.transform(X_aux)\r\n\r\nx1=[]\r\ny1=[]\r\nfor i in range(len(X_aux_transformed)):\r\n x1.append(X_aux_transformed[i][0])\r\n y1.append(X_aux_transformed[i][1])\r\n \r\nN_bins = 50\r\n\r\n# Construct 2D histogram from data using the 'plasma' colormap\r\nplt.hist2d(x1, y1, bins=N_bins, density=False, cmap='plasma')\r\n\r\n# Plot a colorbar with label.\r\ncb = plt.colorbar()\r\ncb.set_label('Number of entries')\r\n\r\n# Add title and labels to plot.\r\nplt.title('Heatmap of DataBase')\r\nplt.xlabel('x axis')\r\nplt.ylabel('y axis')\r\n\r\n# Show the plot.\r\nplt.show()\r\n\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\n\"\"\"\r\n######################################################\r\n# PREPROCESAMIENTO Y ELECCIÓN MODELO #\r\n######################################################\r\n\"\"\"\r\n#tamaño validacion cruzada\r\nCV=5\r\n\r\n#creamos un vector preproc donde incluimos el preprocesado que vamos a realizar y la regularización\r\npreproc=[(\"var\", VarianceThreshold(0.1)),\r\n (\"poly\",PolynomialFeatures(1)), \r\n (\"standardize\", StandardScaler())]\r\n\r\n#creamos un pipeline de sklearn donde añadiremos uno de los modelos a estudiar\r\npipe = Pipeline(steps=preproc+[('estimator', LogisticRegression())])\r\n\r\n# Añadimos los estimadores que vamos a utilizar y los parametros que vamos a estudiar:\r\n# si clase de funcion lineal o cuadrática\r\n# la potencia de la penalización l2 \"C\" \r\n# el solver que voy a usar: Regresion Logistica con SGD (SGDClassifier) ,lbfgs,newton \r\nparams_grid = [ {\r\n 'estimator':[LogisticRegression(max_iter=500)],\r\n 'estimator__solver':['lbfgs','newton-cg'],\r\n 'estimator__C': np.logspace(-4, 4, 3),\r\n 'poly__degree': [1,2]\r\n },\r\n {\r\n 'estimator': [Perceptron(random_state = seed)],\r\n 'poly__degree': [1,2]\r\n }\r\n # {'estimator':[Any_other_estimator_you_want],\r\n # 'estimator__valid_param_of_your_estimator':[valid_values]\r\n\r\n ]\r\n\r\nprint(\"CON PREPROCESADO: \\n\")\r\n\r\n# entrenamos con crossvalidation y sacamos el mejor con sus parámetros.\r\n# con 
5 conjuntos, accuracy y con n_jobs conseguimos que el ordenador use paralelamente los nucleos que pueda.\r\nbest_clf = GridSearchCV(pipe, params_grid, scoring = 'accuracy',cv = CV, n_jobs = -1, verbose=1)\r\nbest_clf.fit(X_train, y_train)\r\n\r\n#con esto conseguimos pasar a una tabla los resultados obtenidos\r\nresults=pd.DataFrame(best_clf.cv_results_)\r\nprint(\"\\n He estudiado estos modelos a los que asigno un índice:\\n\")\r\nprint(results[['params','param_estimator__C']])\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\nprint(\"Resultados: \\n\")\r\nprint(results[['rank_test_score', 'mean_fit_time','mean_test_score']])\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\nscores=results[['split0_test_score', 'split1_test_score','split2_test_score','split3_test_score','split4_test_score']][0:15].to_numpy()\r\n\r\nfig,ax1=plt.subplots()\r\nfor i in np.arange(len(scores)):\r\n ax1.plot(np.arange(CV),scores[i],label='modelo '+str(i))\r\n\r\nax1.axis([0.0,CV+1.0,0.85,1.0])\r\nplt.xlabel('Conjuntos CV')\r\nplt.ylabel('Accuracy')\r\nplt.title('Gráfico Accuracy')\r\nax1.legend()\r\nplt.show()\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\nprint(\"Mejor modelo:\\n\",best_clf.best_params_)\r\nprint(\"Precisión en training:\", 100.0 * best_clf.score(X_train, y_train))\r\nprint(\"Precisión en test: \",100.0 * best_clf.score(X_test, y_test))\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\n\"\"\"\r\n###############################\r\n# CON REGULARIZACIÓN LASSO #\r\n###############################\r\n\"\"\"\r\n#Añadimos Lasso\r\npreproc=[(\"var\", VarianceThreshold(0.1)), \r\n (\"lasso\", SelectFromModel(Lasso())),\r\n (\"poly\",PolynomialFeatures(1)), \r\n (\"standardize\", StandardScaler())]\r\n\r\n#creamos un pipeline de sklearn donde añadiremos uno de los modelos a estudiar\r\npipe = Pipeline(steps=preproc+[('estimator', LogisticRegression())])\r\n\r\n# Añadimos los estimadores que vamos a utilizar y los parametros que vamos a estudiar:\r\n# si clase de funcion lineal o cuadrática\r\n# la potencia de la penalización l2 \"C\" \r\nparams_grid = [ {\r\n 'estimator':[LogisticRegression(max_iter=500)],\r\n 'estimator__solver':['lbfgs','newton-cg'],\r\n 'estimator__C': np.logspace(-4, 4, 3),\r\n 'poly__degree': [1,2]\r\n },\r\n {\r\n 'estimator': [Perceptron(random_state = seed)],\r\n 'poly__degree': [1,2]\r\n }\r\n # {'estimator':[Any_other_estimator_you_want],\r\n # 'estimator__valid_param_of_your_estimator':[valid_values]\r\n\r\n ]\r\n\r\nprint(\"CON PREPROCESADO Y REGULARIZACION: \\n\")\r\n\r\n# entrenamos con crossvalidation y sacamos el mejor con sus parámetros.\r\nbest_clf = GridSearchCV(pipe, params_grid, scoring = 'accuracy',cv = 5, n_jobs = -1)\r\nbest_clf.fit(X_train, y_train)\r\n\r\n\r\n#con esto conseguimos pasar a una tabla los resultados obtenidos\r\nresults=pd.DataFrame(best_clf.cv_results_)\r\nprint(\"\\n He estudiado estos modelos a los que asigno un índice:\\n\")\r\nprint(results[['params','param_estimator__C']])\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\nprint(\"Resultados: \\n\")\r\nprint(results[['rank_test_score', 'mean_fit_time','mean_test_score']])\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\nscores=results[['split0_test_score', 'split1_test_score','split2_test_score','split3_test_score','split4_test_score']][0:15].to_numpy()\r\n\r\nfig,ax1=plt.subplots()\r\nfor i in np.arange(len(scores)):\r\n ax1.plot(np.arange(CV),scores[i],label='modelo 
'+str(i))\r\n\r\nax1.axis([0.0,CV+1.0,0.7,1.0])\r\nplt.xlabel('Conjuntos CV')\r\nplt.ylabel('Accuracy')\r\nplt.title('Gráfico Accuracy con LASSO')\r\nax1.legend()\r\nplt.show()\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\nprint(\"Mejor modelo:\\n\",best_clf.best_params_)\r\nprint(\"Precisión en training:\", 100.0 * best_clf.score(X_train, y_train))\r\nprint(\"Precisión en test: \",100.0 * best_clf.score(X_test, y_test))\r\n\r\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\r\n\r\n\r\n\r\n", "repo_name": "drumalv/Practicas-AA", "sub_path": "practica 3/BDoptdigits.py", "file_name": "BDoptdigits.py", "file_ext": "py", "file_size_in_byte": 8061, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "numpy.random.seed", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.copy", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist2d", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "sklearn.feature_selection.VarianceThreshold", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 110, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 111, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Perceptron", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 139, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 143, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "sklearn.feature_selection.VarianceThreshold", "line_number": 181, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 183, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 184, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 187, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 187, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 195, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Perceptron", "line_number": 199, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 210, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}]} +{"seq_id": "37786031134", "text": "import torch\nimport torchtext\nimport re\nfrom collections import Counter, OrderedDict\nimport ctypes\nimport requests\nimport plot\n\n\ndef c_numericalize_str(seq, vocab_itos):\n \"\"\"\n Description:\n Converts a sequence of string-tokens to a sequence of vocabulary-index-tokens, while maintaining their order. It replicates the sequence, whilst replacing the string tokens with index tokens with respect to a vocabulary.\n - Advantages:\n 1. ~287 times faster than .\n 2. 
Verified to work with UTF-8 symbols.\n\n Inputs:\n : Type: ]>. The sequence of words to be transformed/numericalized.\n : Type: ]>. The list containing all the vocabularies string-tokens at the position with index equal to vocabulary-index-token. Must include the character in the beginning.\n\n Returns:\n : Type: ]>. Contains all the members of where each string replaced with the vocabularies index.\n \"\"\"\n\n ## Load the shared library\n lib = ctypes.CDLL('../lib/str2num.so')\n\n ## Define the prototype\n lib.str_mod.argtypes = [ctypes.POINTER(ctypes.c_char_p), ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int, ctypes.POINTER(ctypes.c_int)]\n\n enc_seq = [0 for i in range(len(seq))]\n\n seq_array = (ctypes.c_char_p * len(seq))()\n enc_seq_array = (ctypes.c_int * len(enc_seq))()\n vocab_itos_array = (ctypes.c_char_p * len(vocab_itos))()\n\n for i, s in enumerate(seq):\n seq_array[i] = s.encode()\n\n for i, s in enumerate(vocab_itos):\n vocab_itos_array[i] = s.encode()\n\n lib.str_mod(seq_array, len(seq_array), vocab_itos_array, len(vocab_itos_array), enc_seq_array)\n\n enc_seq = list(enc_seq_array)\n\n return enc_seq\n\ndef p_numericalize_str(seq, vocab_):\n\n print('Conversion Status:')\n seq_int = []\n div = 1000\n for idx, word in enumerate(seq):\n if word not in vocab_.get_itos():\n seq_int.append(vocab_.get_stoi()[''])\n else:\n seq_int.append(vocab_.get_stoi()[word])\n\n if (idx+1) % div == 0:\n print('Progress: %.2f %%'%(100*(idx+1) / len(seq)))\n print('Progress: 100 %')\n print('Conversion successful.')\n\n return seq_int\n\ndef tokenize(raw_dataset_str):\n\n ## Raw text preprocessing\n dataset_str_prep = re.sub('[^A-Za-z.]+', ' ', raw_dataset_str).lower()\n dataset_str_prep = re.sub('[.]+', ' . ', dataset_str_prep)\n dataset_str_prep = re.sub('[ ]+', ' ', dataset_str_prep)\n dataset_seq_str = dataset_str_prep.split(' ')\n if dataset_seq_str[0] == '':\n dataset_seq_str = dataset_seq_str[1:]\n if dataset_seq_str[-1] == '':\n dataset_seq_str = dataset_seq_str[:-1]\n\n return dataset_seq_str\n\ndef build_vocab(seq_string):\n \"\"\"\n Description:\n Generates a vocabulary object with respect to the decreasing order of token-frequency.\n\n Inputs:\n : Type: ]>. Contains the dataset's tokens.\n\n Outputs:\n : Type: .\n : Type: .\n \"\"\"\n\n seq_string_counter = Counter(seq_string)\n seq_string_sorted_by_freq_tuples = sorted(seq_string_counter.items(), key=lambda x: x[1], reverse=True)\n seq_string_ordered_dict = OrderedDict(seq_string_sorted_by_freq_tuples)\n vocab_ = torchtext.vocab.vocab\\\n (\n ordered_dict = seq_string_ordered_dict,\n min_freq = 3,\n specials = [''],\n special_first = True\n )\n\n freqs = list(iter(seq_string_ordered_dict.values()))\n\n return freqs, vocab_\n\ndef convert_strings(dataset_seq_str_, vocab_):\n \"\"\"\n Description:\n Converts string tokens from a subscriptable sequence of strings to their vocabularies corresponding integers and appends these values to a sequence. Takes into account words that exist in the data sequence but are absent from the vocabulary.\n\n Inputs:\n : Type: ]>. Contains the string tokens.\n : Type: . 
The vocabulary.\n \"\"\"\n\n dataset_seq_int = c_numericalize_str(dataset_seq_str_, vocab_.get_itos())\n\n return dataset_seq_int\n\n\nclass text_dataset:\n \"\"\"\n Description:\n Tokens are defined to be words.\n \"\"\"\n\n def __init__(self):\n\n def parse_local_raw_data(raw_dataset_path):\n\n with open(raw_dataset_path, 'r') as file:\n raw_dataset_str = file.read()\n\n return raw_dataset_str\n\n def parse_web_raw_data(url):\n\n response = requests.get(url)\n raw_dataset_str = response.text\n\n return raw_dataset_str\n\n raw_dataset_path = '../datasets/ENGSTR1.txt'\n self.dataset_name = raw_dataset_path.split('.')[-2].split('/')[-1]\n\n ## [training fraction, validation fraction, test fraction].\n self.split_fractions = [0.9, 0.1, 0.0]\n\n self.raw_dataset_str = parse_local_raw_data(raw_dataset_path)#[:10000]\n\n def partition(self, feature_steps, prediction_steps):\n \"\"\"\n Description:\n Used to build and from dataset for the problem of predicting words after a sequence of feature-words.\n\n Input:\n : Type: .\n : Type: .\n\n Output:\n : Type: . Shape: (number of examples, ).\n : Type: . Shape: (number of examples, ) or (number of examples) if equals to 1.\n \"\"\"\n\n X = []\n y = []\n for idx in range( len(self.dataset_seq_int) ):\n if (idx+feature_steps)+prediction_steps == len(self.dataset_seq_int):\n break\n X.append(self.dataset_seq_int[idx:idx+feature_steps])\n y.append(self.dataset_seq_int[idx+feature_steps:(idx+feature_steps)+prediction_steps])\n\n X = torch.stack(X)\n y = torch.stack(y)\n if y.shape[1] == 1:\n y = y[:,0]\n\n return X, y\n\n def generate_dataset(self, feature_steps, prediction_steps):\n \"\"\"\n Description:\n Loads raw dataset, conducts basic token preprocessing and generates the feature and target tensors.\n \"\"\"\n\n self.dataset_seq_str = tokenize(self.raw_dataset_str)\n ## Vocabulary construction\n freqs, self.vocab = build_vocab(seq_string = self.dataset_seq_str)\n # plot.plot_frequency_curve(freqs, self.dataset_name)\n\n self.dataset_seq_int = torch.tensor(convert_strings(self.dataset_seq_str, self.vocab))\n\n self.feature_steps = feature_steps\n self.prediction_steps = prediction_steps\n self.vocab_size = len( self.vocab.get_itos() )\n\n X, y = self.partition(feature_steps, prediction_steps)\n \n ## Shuffle instance positions\n perm = torch.randperm(X.shape[0])\n X = X[perm,...]\n y = y[perm,...]\n\n self.n_instances = y.shape[0]\n\n self.dataset = torch.utils.data.TensorDataset(X, y)\n if self.split_fractions[-1] != 0: # Included a test set\n self.train_set, self.val_set, self.test_set = torch.utils.data.random_split(self.dataset, self.split_fractions, generator=torch.Generator().manual_seed(42))\n\n self.n_train = len(self.train_set)\n self.n_val = len(self.val_set)\n self.n_test = len(self.test_set)\n\n separateXy = lambda set: ( torch.stack([set[i][_] for i in range(len(set))], axis=0) for _ in range(2) )\n\n self.X_train, self.y_train = separateXy(self.train_set)\n self.X_val, self.y_val = separateXy(self.val_set)\n self.X_test, self.y_test = separateXy(self.test_set)\n\n else: # Excluded the test set\n self.train_set, self.val_set = torch.utils.data.random_split(self.dataset, self.split_fractions[:2], generator=torch.Generator().manual_seed(42))\n\n self.n_train = len(self.train_set)\n self.n_val = len(self.val_set)\n\n separateXy = lambda set: ( torch.stack([set[i][_] for i in range(len(set))], axis=0) for _ in range(2) )\n\n self.X_train, self.y_train = separateXy(self.train_set)\n self.X_val, self.y_val = separateXy(self.val_set)", 
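partition() above turns the numericalized corpus into supervised pairs by sliding a window of feature_steps token ids and taking the following prediction_steps ids as the target. The toy version below (token ids are invented) shows the same windowing; note that it also keeps the final window, which the original loop's break condition stops just before.

import torch

# Toy token-id sequence standing in for dataset_seq_int; the values are invented.
seq = torch.tensor([5, 3, 9, 2, 7, 1, 4])
feature_steps, prediction_steps = 3, 1

X, y = [], []
for idx in range(len(seq) - feature_steps - prediction_steps + 1):
    X.append(seq[idx:idx + feature_steps])                                     # input window
    y.append(seq[idx + feature_steps:idx + feature_steps + prediction_steps])  # next token(s)

X = torch.stack(X)        # shape: (num_windows, feature_steps)
y = torch.stack(y)[:, 0]  # squeeze to (num_windows,) when predicting a single token
print(X)
print(y)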
"repo_name": "fl0wxr/word_predictor", "sub_path": "src/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 8263, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "ctypes.CDLL", "line_number": 27, "usage_type": "call"}, {"api_name": "ctypes.POINTER", "line_number": 30, "usage_type": "call"}, {"api_name": "ctypes.c_char_p", "line_number": 30, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 30, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 34, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 35, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 36, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 71, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 72, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 73, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 95, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 97, "usage_type": "call"}, {"api_name": "torchtext.vocab.vocab", "line_number": 98, "usage_type": "call"}, {"api_name": "torchtext.vocab", "line_number": 98, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 210, "usage_type": "attribute"}, {"api_name": "torch.utils.data.random_split", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 212, "usage_type": "attribute"}, {"api_name": "torch.Generator", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.utils.data.random_split", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 225, "usage_type": "attribute"}, {"api_name": "torch.Generator", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 230, "usage_type": "call"}]} +{"seq_id": "22239899667", "text": "import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport cv2 as cv\nimport argparse\n\n\nap = argparse.ArgumentParser()\n# Image argument\nap.add_argument(\"-i\", \"--image\", required=True, help='path to input image')\nargs = vars(ap.parse_args())\n\nimage_path = args['image']\nmodel = keras.models.load_model('/home/daxter-army/Desktop/ml_report/code_to_run/CURRENCY_MODEL')\nclass_names = ['10', '100', '20', '200']\n\nimg = keras.preprocessing.image.load_img(\n image_path, target_size=(180, 180)\n)\n \nimg_array = keras.preprocessing.image.img_to_array(img)\nimg_array = tf.expand_dims(img_array, 0) # Create a batch\n\npredictions = model.predict(img_array)\n\nscore = tf.nn.softmax(predictions[0])\n\nprint(\n \"This image most likely belongs to {} with a {:.2f} percent confidence.\"\n .format(class_names[np.argmax(score)], 100 * np.max(score))\n)\n\nprint('==================Class Wise Scores================')\nprint('======10======')\nprint('{:.2f}'.format(100 * score[0]))\nprint('======100======')\nprint('{:.2f}'.format(100 
* score[1]))\nprint('======20======')\nprint('{:.2f}'.format(100 * score[2]))\nprint('======200======')\nprint('{:.2f}'.format(100 * score[3]))", "repo_name": "daxter-army/indian_denomination_classifier", "sub_path": "code_to_run/classify_image.py", "file_name": "classify_image.py", "file_ext": "py", "file_size_in_byte": 1176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 14, "usage_type": "name"}, {"api_name": "tensorflow.keras.preprocessing.image.load_img", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 17, "usage_type": "name"}, {"api_name": "tensorflow.keras.preprocessing.image.img_to_array", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.expand_dims", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "72734449521", "text": "import numpy as np\nimport torch\nimport os\nimport sys\nimport importlib\n\nfrom torch.multiprocessing import Process, SimpleQueue, set_start_method\nfrom sklearn.metrics import roc_auc_score\nfrom torch.utils.data import DataLoader, Subset\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n set_start_method('spawn')\n\nfrom fednewsrec.shared import args, logger, device\nfrom fednewsrec.utils import latest_checkpoint, dict2table, calculate_cos_similarity, aggregate_metrics\nfrom fednewsrec.dataset import NewsDataset, UserDataset, EvaluationBehaviorsDataset\n\n\ndef dcg_score(y_true, y_score, k=10):\n order = np.argsort(y_score)[::-1]\n y_true = np.take(y_true, order[:k])\n gains = 2**y_true - 1\n discounts = np.log2(np.arange(len(y_true)) + 2)\n return np.sum(gains / discounts)\n\n\ndef ndcg_score(y_true, y_score, k=10):\n best = dcg_score(y_true, y_true, k)\n actual = dcg_score(y_true, y_score, k)\n return actual / best\n\n\ndef mrr_score(y_true, y_score):\n order = np.argsort(y_score)[::-1]\n y_true = np.take(y_true, order)\n rr_score = y_true / (np.arange(len(y_true)) + 1)\n return np.sum(rr_score) / np.sum(y_true)\n\n\ndef calculate_single_user_metric(pair):\n try:\n auc = roc_auc_score(*pair)\n mrr = mrr_score(*pair)\n ndcg5 = ndcg_score(*pair, 5)\n ndcg10 = ndcg_score(*pair, 10)\n return [auc, mrr, ndcg5, ndcg10]\n except ValueError:\n return [np.nan] * 4\n\n\ndef scoring_worker_fn(index,\n task_queue,\n mode,\n news_vectors,\n user_vectors,\n prediction_fn,\n selected_behaviors=None):\n if args.mute_fedgroup_evaluation:\n # Silence the subprocesses\n sys.stdout = open(os.devnull, 'w')\n sys.stderr = open(os.devnull, 'w')\n\n behaviors_dataset = EvaluationBehaviorsDataset(\n f'data/{args.dataset}/{mode}.tsv', {}, index, args.num_scoring_workers)\n 
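The currency classifier above reports a label by soft-maxing the model's output and taking the arg-max over the four note denominations. With invented logits standing in for a saved model, that post-processing step looks like this:

import numpy as np

class_names = ['10', '100', '20', '200']
logits = np.array([1.2, 0.3, 3.1, -0.5])  # invented model output for one image

# Softmax with the usual max subtraction for numerical stability.
exp = np.exp(logits - logits.max())
score = exp / exp.sum()

best = int(np.argmax(score))
print("Predicted {} with {:.2f} percent confidence".format(class_names[best], 100 * score[best]))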
logger.debug(f'Scoring worker dataset size {len(behaviors_dataset)}')\n if selected_behaviors is not None:\n behaviors_dataset = Subset(behaviors_dataset, selected_behaviors)\n logger.debug(\n f'Scoring worker dataset size (after filtering) {len(behaviors_dataset)}'\n )\n\n for behaviors in behaviors_dataset:\n candidates = behaviors['positive_candidates'] + behaviors[\n 'negative_candidates']\n news_vector = news_vectors[candidates]\n user_vector = user_vectors[behaviors['user_index']]\n click_probability = prediction_fn(news_vector, user_vector)\n\n y_pred = click_probability.tolist()\n y_true = [1] * len(behaviors['positive_candidates']) + [0] * len(\n behaviors['negative_candidates'])\n task_queue.put((y_true, y_pred))\n\n\ndef metrics_worker_fn(task_queue, result_queue):\n for task in iter(task_queue.get, None):\n result_queue.put(calculate_single_user_metric(task))\n\n\n@torch.no_grad()\ndef infer_news_users(model, mode):\n model.eval()\n news_dataset = NewsDataset(f'data/{args.dataset}/news.tsv')\n news_dataloader = DataLoader(news_dataset,\n batch_size=args.batch_size * 16,\n shuffle=False,\n drop_last=False)\n news_vectors = []\n for minibatch in tqdm(news_dataloader,\n desc='Calculating vectors for news'):\n news_vectors.append(\n model.get_news_vector(minibatch.to(device),\n news_dataset.news_pattern))\n news_vectors = torch.cat(news_vectors, dim=0)\n\n user_dataset = UserDataset(f'data/{args.dataset}/{mode}.tsv')\n user_dataloader = DataLoader(user_dataset,\n batch_size=args.batch_size * 16,\n shuffle=False,\n drop_last=False)\n\n user_vectors = []\n for minibatch in tqdm(user_dataloader,\n desc='Calculating vectors for users'):\n user_vectors.append(\n model.get_user_vector(news_vectors[minibatch['history']]))\n\n user_vectors = torch.cat(user_vectors, dim=0)\n model.train()\n return news_vectors, user_dataset, user_vectors\n\n\n@torch.no_grad()\ndef evaluate(model, mode, return_raw=False, selected_users=None):\n \"\"\"\n Args:\n\n Returns:\n AUC\n MRR\n nDCG@5\n nDCG@10\n \"\"\"\n assert mode in ['val', 'test']\n news_vectors, user_dataset, user_vectors = infer_news_users(model, mode)\n\n if args.show_similarity:\n logger.info(\n f\"News cos similarity: {calculate_cos_similarity(news_vectors.cpu().numpy()[1:]):.4f}\"\n )\n logger.info(\n f\"User cos similarity: {calculate_cos_similarity(user_vectors.cpu().numpy()):.4f}\"\n )\n\n behaviors_count = 0\n if selected_users is not None:\n total_selected_behaviors = []\n # If `i` is the index in `user_vectors`\n # then `user_data_index_to_id[i]` is its 'id' attribute value\n user_data_index_to_id = user_dataset.user['user'].tolist()\n\n for i in range(args.num_scoring_workers):\n # The part has an implicit effect: make sure the cache exists,\n # so in `scoring_worker_fn`, the `user2index` parameter\n # for `EvaluationBehaviorsDataset` can be empty.\n # In this way, `user_dataset.user2index` does not to be passed to `scoring_worker_fn`,\n # saving a lot time on pickling/unpickling\n behaviors_dataset = EvaluationBehaviorsDataset(\n f'data/{args.dataset}/{mode}.tsv', user_dataset.user2index, i,\n args.num_scoring_workers)\n if selected_users is None:\n behaviors_count += len(behaviors_dataset)\n else:\n # `i`: data index in a split behavior file\n # `user_index`: the index in `user_vectors`\n selected_behaviors = [\n i for i, user_index in enumerate(behaviors_dataset.user_index)\n if user_data_index_to_id[user_index] in selected_users\n ]\n\n total_selected_behaviors.append(selected_behaviors)\n behaviors_count += 
len(selected_behaviors)\n\n logger.debug(f'Number of behaviors to evaluate: {behaviors_count}')\n \"\"\"\n Evaluation with multiprocessing:\n\n ┌──────────────────┐\n │ Metrics Worker 0 │\n └─���────────────────┘\n\n ┌──────────────────┐\n │ Metrics Worker 1 │\n ┌──────────────────┐ └──────────────────┘\n │ Scoring Worker 0 │\n └──────────────────┘ ┌──────────────────┐\n │ Metrics Worker 2 │\n ┌──────────────────┐ └──────────────────┘\n │ Scoring Worker 1 │ TASK QUEUE RESULT QUEUE\n └──────────────────┘ ───────────────────► ┌──────────────────┐ ───────────────────►\n │ Metrics Worker 3 │\n ┌──────────────────┐ └──────────────────┘\n │ Scoring Worker 2 │\n └──────────────────┘ ┌──────────────────┐\n │ Metrics Worker 4 │\n └──────────────────┘\n\n ┌──────────────────┐\n │ Metrics Worker 5 │\n └──────────────────┘\n \"\"\"\n task_queue = SimpleQueue()\n result_queue = SimpleQueue()\n\n scoring_workers = []\n for i in range(args.num_scoring_workers):\n worker = Process(target=scoring_worker_fn,\n args=(i, task_queue, mode, news_vectors, user_vectors,\n model.get_prediction,\n total_selected_behaviors[i]\n if selected_users is not None else None))\n worker.start()\n scoring_workers.append(worker)\n\n metrics_workers = []\n for _ in range(args.num_metrics_workers):\n worker = Process(target=metrics_worker_fn,\n args=(task_queue, result_queue))\n worker.start()\n metrics_workers.append(worker)\n\n logger.debug('Scoring and metrics workers started')\n results = []\n with tqdm(total=behaviors_count,\n desc='Calculating metrics with multiprocessing') as pbar:\n while len(results) < behaviors_count:\n results.append(result_queue.get())\n pbar.update()\n\n logger.debug('Get all the results')\n\n for worker in scoring_workers:\n worker.join()\n logger.debug('All scoring workers joined')\n for _ in range(args.num_metrics_workers):\n task_queue.put(None)\n for worker in metrics_workers:\n worker.join()\n logger.debug('All metrics workers joined')\n if return_raw:\n return results\n\n return aggregate_metrics(results)\n\n\nif __name__ == '__main__':\n logger.info(args)\n logger.info(f'Using device: {device}')\n logger.info(f'Testing {args.model} on {args.dataset}')\n Model = getattr(importlib.import_module(f\"fednewsrec.model.{args.model}\"),\n args.model)\n model = Model().to(device)\n checkpoint_path = latest_checkpoint(\n os.path.join(args.checkpoint_dir, f'{args.model}-{args.dataset}'))\n if checkpoint_path is None:\n logger.warning(\n 'No checkpoint file found! 
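The diagram above describes a two-stage pipeline: scoring workers feed a task queue and metrics workers drain it into a result queue, with None used as a shutdown sentinel. A stripped-down version of that producer/consumer layout using the standard multiprocessing module (rather than the project's torch.multiprocessing setup) might look like the following; worker counts and payloads are arbitrary.

from multiprocessing import Process, SimpleQueue

def scorer(task_queue, n):
    # Producer: pretend to score n users.
    for i in range(n):
        task_queue.put((i, i * 0.1))

def metrics(task_queue, result_queue):
    # Consumer: keep pulling tasks until the None sentinel arrives.
    for user, score in iter(task_queue.get, None):
        result_queue.put((user, round(score * 2, 2)))

if __name__ == "__main__":
    tasks, results = SimpleQueue(), SimpleQueue()
    producer = Process(target=scorer, args=(tasks, 5))
    consumer = Process(target=metrics, args=(tasks, results))
    producer.start()
    consumer.start()

    collected = [results.get() for _ in range(5)]
    producer.join()
    tasks.put(None)   # sentinel tells the consumer to shut down
    consumer.join()
    print(collected)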
Evaluating with randomly initiated model'\n )\n else:\n logger.info(f\"Load saved parameters in {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path)\n model.load_state_dict(checkpoint)\n metrics = model.evaluate('test')\n logger.info(f'Metrics on test set:\\n{dict2table(metrics)}')\n", "repo_name": "yusanshi/FINDING", "sub_path": "fednewsrec/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 10909, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "torch.multiprocessing.set_start_method", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.take", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.take", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 49, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args.mute_fedgroup_evaluation", "line_number": 59, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 59, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 62, "usage_type": "attribute"}, {"api_name": "fednewsrec.dataset.EvaluationBehaviorsDataset", "line_number": 64, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.dataset", "line_number": 65, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 65, "usage_type": "name"}, {"api_name": "fednewsrec.shared.args.num_scoring_workers", "line_number": 65, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.logger.debug", "line_number": 66, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.utils.data.Subset", "line_number": 68, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger.debug", "line_number": 69, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 69, "usage_type": "name"}, {"api_name": "fednewsrec.dataset.NewsDataset", "line_number": 94, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.dataset", "line_number": 94, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 95, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.batch_size", "line_number": 96, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 96, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 100, "usage_type": "call"}, {"api_name": "fednewsrec.shared.device", "line_number": 103, "usage_type": "argument"}, {"api_name": "torch.cat", "line_number": 105, "usage_type": "call"}, {"api_name": "fednewsrec.dataset.UserDataset", "line_number": 
107, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.dataset", "line_number": 107, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 108, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.batch_size", "line_number": 109, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 109, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 91, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.show_similarity", "line_number": 138, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 138, "usage_type": "name"}, {"api_name": "fednewsrec.shared.logger.info", "line_number": 139, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 139, "usage_type": "name"}, {"api_name": "fednewsrec.utils.calculate_cos_similarity", "line_number": 140, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger.info", "line_number": 142, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 142, "usage_type": "name"}, {"api_name": "fednewsrec.utils.calculate_cos_similarity", "line_number": 143, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.num_scoring_workers", "line_number": 153, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 153, "usage_type": "name"}, {"api_name": "fednewsrec.dataset.EvaluationBehaviorsDataset", "line_number": 159, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.dataset", "line_number": 160, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 160, "usage_type": "name"}, {"api_name": "fednewsrec.shared.args.num_scoring_workers", "line_number": 161, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 161, "usage_type": "name"}, {"api_name": "fednewsrec.shared.logger.debug", "line_number": 175, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.multiprocessing.SimpleQueue", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.multiprocessing.SimpleQueue", "line_number": 204, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.num_scoring_workers", "line_number": 207, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 207, "usage_type": "name"}, {"api_name": "torch.multiprocessing.Process", "line_number": 208, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.num_metrics_workers", "line_number": 217, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 217, "usage_type": "name"}, {"api_name": "torch.multiprocessing.Process", "line_number": 218, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger.debug", "line_number": 223, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 223, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 225, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger.debug", "line_number": 231, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 231, "usage_type": "name"}, {"api_name": "fednewsrec.shared.logger.debug", "line_number": 235, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 
235, "usage_type": "name"}, {"api_name": "fednewsrec.shared.args.num_metrics_workers", "line_number": 236, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 236, "usage_type": "name"}, {"api_name": "fednewsrec.shared.logger.debug", "line_number": 240, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 240, "usage_type": "name"}, {"api_name": "fednewsrec.utils.aggregate_metrics", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 124, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger.info", "line_number": 248, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args", "line_number": 248, "usage_type": "argument"}, {"api_name": "fednewsrec.shared.logger", "line_number": 248, "usage_type": "name"}, {"api_name": "fednewsrec.shared.logger.info", "line_number": 249, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 249, "usage_type": "name"}, {"api_name": "fednewsrec.shared.device", "line_number": 249, "usage_type": "name"}, {"api_name": "fednewsrec.shared.logger.info", "line_number": 250, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 250, "usage_type": "name"}, {"api_name": "fednewsrec.shared.args.model", "line_number": 250, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 250, "usage_type": "name"}, {"api_name": "fednewsrec.shared.args.dataset", "line_number": 250, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 251, "usage_type": "call"}, {"api_name": "fednewsrec.shared.args.model", "line_number": 251, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 251, "usage_type": "name"}, {"api_name": "fednewsrec.shared.args.model", "line_number": 252, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 252, "usage_type": "name"}, {"api_name": "fednewsrec.shared.device", "line_number": 253, "usage_type": "argument"}, {"api_name": "fednewsrec.utils.latest_checkpoint", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args.checkpoint_dir", "line_number": 255, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args", "line_number": 255, "usage_type": "name"}, {"api_name": "fednewsrec.shared.args.model", "line_number": 255, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.args.dataset", "line_number": 255, "usage_type": "attribute"}, {"api_name": "fednewsrec.shared.logger.warning", "line_number": 257, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 257, "usage_type": "name"}, {"api_name": "fednewsrec.shared.logger.info", "line_number": 261, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 261, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 262, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger.info", "line_number": 265, "usage_type": "call"}, {"api_name": "fednewsrec.shared.logger", "line_number": 265, "usage_type": "name"}, {"api_name": "fednewsrec.utils.dict2table", "line_number": 265, "usage_type": "call"}]} +{"seq_id": "73836063923", "text": "class model_creation:\r\n def __init__(self,df):\r\n self.df = df\r\n\r\n def model_builder(self, df, Ycol, cols_to_drop, model_obj):\r\n \"\"\"\r\n Data Science Library\r\n method name: 
model Builder\r\n desc: Here we are created the whole model on the basis of previous library;\r\n In terms of preprocessing, cvtune, datavisualization, anova etc we are come up to\r\n this conclusion and this code is used to create the model directly from the calling\r\n the library only.\r\n author name: TinCharlie\r\n\r\n :param df: Passing DataFrame\r\n :param Ycol: Target COlumns\r\n :param cols_to_drop: Columns whichever we want to delete\r\n :param model_obj: Applying the algorithms for finishing the model.\r\n :return: Return the accuracy of your model.\r\n \"\"\"\r\n import pandas as pd\r\n df = df.drop(labels=cols_to_drop, axis=1)\r\n from replacer.mean_mode_replacer import replace\r\n replace.Mean_mode_replacer(df)\r\n Y = df[Ycol]\r\n X = df.drop(labels=Ycol, axis=1)\r\n from PreprocessData.preprocessing import preprocess\r\n X_new = preprocess.preprocess_data(X)\r\n from sklearn.model_selection import train_test_split\r\n xtrain, xtest, ytrain, ytest = train_test_split(X_new, Y, test_size=0.2, random_state=31)\r\n from metrics.check_overfit import overfit_or_not\r\n if ytrain[Ycol[0]].dtypes == \"object\":\r\n overfit_or_not.find_overfit_cat(model_obj, xtrain, xtest, ytrain, ytest)\r\n else:\r\n overfit_or_not.find_overfit_con(model_obj, xtrain, xtest, ytrain, ytest)", "repo_name": "tincharlie/ScienceNeedData", "sub_path": "source/PreprocessData/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "replacer.mean_mode_replacer.replace.Mean_mode_replacer", "line_number": 24, "usage_type": "call"}, {"api_name": "replacer.mean_mode_replacer.replace", "line_number": 24, "usage_type": "name"}, {"api_name": "PreprocessData.preprocessing.preprocess.preprocess_data", "line_number": 28, "usage_type": "call"}, {"api_name": "PreprocessData.preprocessing.preprocess", "line_number": 28, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 30, "usage_type": "call"}, {"api_name": "metrics.check_overfit.overfit_or_not.find_overfit_cat", "line_number": 33, "usage_type": "call"}, {"api_name": "metrics.check_overfit.overfit_or_not", "line_number": 33, "usage_type": "name"}, {"api_name": "metrics.check_overfit.overfit_or_not.find_overfit_con", "line_number": 35, "usage_type": "call"}, {"api_name": "metrics.check_overfit.overfit_or_not", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "14782523109", "text": "import numpy as np\nimport pathlib\nfrom itertools import product\n\n\ndef read():\n with open(DIR / \"input\") as f:\n s = (f.read() if teststr == \"\" else teststr).splitlines()\n return np.array(lmap(lambda r: int(r.split(\" \")[-1]), s))\n\n\ndef wins(_, dmg, amr):\n dmg = max(1, dmg - t[2])\n enemy_d = max(1, t[1] - amr)\n turns = (t[0] - 1) // dmg + 1\n enemy_t = (100 - 1) // enemy_d + 1\n return enemy_t >= turns\n\n\nW = np.array([[8, 4, 0], [10, 5, 0], [25, 6, 0], [40, 7, 0], [74, 8, 0]])\nA = np.array([[13, 0, 1], [31, 0, 2], [53, 0, 3], [75, 0, 4], [102, 0, 5]])\nR = np.array([[25, 1, 0], [50, 2, 0], [100, 3, 0], [20, 0, 1], [40, 0, 2], [80, 0, 3]])\nA, R, n = np.vstack([A, [[0, 0, 0]]]), np.vstack([R, [[0, 0, 0]]]), len(R)\nfor i in range(n):\n for j in range(i + 1, n):\n R = np.vstack([R, R[i] + R[j]])\n\nteststr = \"\"\"\"\"\"\nDIR = pathlib.Path(__file__).parent.absolute()\nlmap = lambda *a: list(map(*a))\nt = read()\nif __name__ == \"__main__\":\n win = [(wins(*(w + a + r)), 
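model_builder above delegates its final sanity check to an "overfit or not" helper that compares training and test performance. A bare-bones stand-in for that comparison on synthetic data is sketched next; the 5-point accuracy gap is an arbitrary illustrative threshold, not the library's actual rule.

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=500, n_features=10, random_state=31)
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=31)

model = DecisionTreeClassifier(random_state=31).fit(xtrain, ytrain)
train_acc = model.score(xtrain, ytrain)
test_acc = model.score(xtest, ytest)

print(f"train={train_acc:.2f} test={test_acc:.2f}")
if train_acc - test_acc > 0.05:   # illustrative threshold only
    print("Likely overfitting: large gap between train and test accuracy.")
else:
    print("No strong evidence of overfitting.")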
(w + a + r)[0]) for w, a, r in product(W, A, R)]\n print(min([cost for wins, cost in win if wins]))\n print(max([cost for wins, cost in win if not wins]))\n", "repo_name": "yspreen/adventofcode", "sub_path": "2015/21/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1159, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "75", "api": [{"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 26, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "2028576673", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize as sp\nfrom tqdm import tqdm\n\n# -----------------------------------------------------------------\n# Forward Pass\n# -----------------------------------------------------------------\n\n# -----------------------------------------------------------------\ndef compute_z(x, W):\n '''\n Compute the linear logit values of a data instance. z = W x + b\n Input:\n x: the feature vector of a data instance, a float numpy matrix of shape p by 1. Here p is the number of features/dimensions.\n W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.\n b: the bias values of softmax regression, a float numpy vector of shape c by 1.\n Output:\n z: the linear logits, a float numpy vector of shape c by 1.\n Hint: you could solve this problem using 1 line of code.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n z = np.dot(x, W.T)\n\n #########################################\n return z\n\n\n# -----------------------------------------------------------------\ndef compute_a(z):\n '''\n Compute the softmax activations.\n Input:\n z: the logit values of softmax regression, a float numpy vector of shape c by 1. Here c is the number of classes\n Output:\n a: the softmax activations, a float numpy vector of shape c by 1.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n # Normalize to avoid overflow--we can handle underflow by just setting to a very small number\n\n z[z < 0] = 0\n return z\n\n #########################################\n\n\ndef compute_softmax(z):\n y_hat = np.exp(z)\n y_hat_sum = np.sum(y_hat, axis=1)\n a = y_hat.T / y_hat_sum\n return a.T\n\n\n# -----------------------------------------------------------------\ndef compute_L(a, y):\n '''\n Compute multi-class cross entropy, which is the loss function of softmax regression.\n Input:\n a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.\n y: the label of a training instance, an integer scalar value. 
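The RPG solver above decides each loadout with the closed form turns = (hp - 1) // damage + 1 instead of simulating rounds. A short check that this formula agrees with a turn-by-turn simulation (hit-point and damage ranges here are arbitrary) makes that step easier to trust:

def turns_closed_form(hp, dmg):
    # Number of hits needed to bring hp to zero or below, dealing dmg per hit.
    return (hp - 1) // dmg + 1

def turns_simulated(hp, dmg):
    turns = 0
    while hp > 0:
        hp -= dmg
        turns += 1
    return turns

for hp in range(1, 110):
    for dmg in range(1, 12):
        assert turns_closed_form(hp, dmg) == turns_simulated(hp, dmg)
print("closed form agrees with simulation")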
The values can be 0,1,2, ..., or (c-1).\n Output:\n L: the loss value of softmax regression, a float scalar.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n n = y.shape[0]\n a[a == 0] = 1e-100\n a = a[y == 1]\n cross_loss = np.log(a)\n\n L = -(1 / (2 * n)) * cross_loss.sum()\n\n return L\n\n\n\n# -----------------------------------------------------------------\ndef forward(x, y, W):\n '''\n Forward pass: given an instance in the training data, compute the logits z, activations a and multi-class cross entropy L on the instance.\n Input:\n x: the feature vector of a training instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.\n y: the label of a training instance, an integer scalar value. The values can be 0 or 1.\n W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.\n b: the bias values of softmax regression, a float numpy vector of shape c by 1.\n Output:\n z: the logit values of softmax regression, a float numpy vector of shape c by 1. Here c is the number of classes\n a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.\n L: the loss value of softmax regression, a float scalar.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n z = compute_z(x, W)\n a = compute_a(z)\n L = compute_L(a, y)\n\n #########################################\n return z, a, L\n\n\n# -----------------------------------------------------------------\n# Compute Local Gradients\n# -----------------------------------------------------------------\n\n\n# -----------------------------------------------------------------\ndef compute_dL_da(a, y):\n '''\n Compute local gradient of the multi-class cross-entropy loss function w.r.t. the activations.\n Input:\n a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.\n y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).\n Output:\n dL_da: the local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.\n The i-th element dL_da[i] represents the partial gradient of the loss function w.r.t. the i-th activation a[i]: d_L / d_a[i].\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n dL_da = (1 / y.shape[0]) * np.subtract(a, y)\n\n #########################################\n return dL_da\n\n\n# -----------------------------------------------------------------\ndef compute_da_dz(a):\n '''\n Compute local gradient of the softmax activations a w.r.t. the logits z.\n Input:\n a: the activation values of softmax function, a numpy float vector of shape c by 1. Here c is the number of classes.\n Output:\n da_dz: the local gradient of the activations a w.r.t. the logits z, a float numpy matrix of shape (c by c).\n The (i,j)-th element of da_dz represents the partial gradient ( d_a[i] / d_z[j] )\n Hint: you could solve this problem using 4 or 5 lines of code.\n (3 points)\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n a[a > 0] = 1\n a[a < 0] = 0\n da_dz = a\n\n #########################################\n return da_dz\n\n\n# -----------------------------------------------------------------\ndef compute_dz_dW(x, c=0):\n '''\n Compute local gradient of the logits function z w.r.t. 
the weights W.\n Input:\n x: the feature vector of a data instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.\n c: the number of classes, an integer.\n Output:\n dz_dW: the partial gradient of logits z w.r.t. the weight matrix, a numpy float matrix of shape (c by p).\n The (i,j)-th element of dz_dW represents the partial gradient of the i-th logit (z[i]) w.r.t. the weight W[i,j]: d_z[i] / d_W[i,j]\n Hint: the partial gradients only depend on the input x and the number of classes\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n dz_dW = x.T\n\n #########################################\n return dz_dW\n\n\n# -----------------------------------------------------------------\ndef compute_dz_db(c):\n '''\n Compute local gradient of the logits function z w.r.t. the biases b.\n Input:\n c: the number of classes, an integer.\n Output:\n dz_db: the partial gradient of the logits z w.r.t. the biases b, a float vector of shape c by 1.\n Each element dz_db[i] represents the partial gradient of the i-th logit z[i] w.r.t. the i-th bias b[i]: d_z[i] / d_b[i]\n Hint: you could solve this problem using 1 line of code.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n dz_db = np.ones((c, 1)) # If error on this line, add np.asmatrix around np.ones\n\n #########################################\n return dz_db\n\n\n# -----------------------------------------------------------------\n# Back Propagation\n# -----------------------------------------------------------------\n\n# -----------------------------------------------------------------\ndef backward(x, y, a):\n '''\n Back Propagation: given an instance in the training data, compute the local gradients of the logits z, activations a, weights W and biases b on the instance.\n Input:\n x: the feature vector of a training instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.\n y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).\n a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.\n Output:\n dL_da: the local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.\n The i-th element dL_da[i] represents the partial gradient of the loss function L w.r.t. the i-th activation a[i]: d_L / d_a[i].\n da_dz: the local gradient of the activation w.r.t. the logits z, a float numpy matrix of shape (c by c).\n The (i,j)-th element of da_dz represents the partial gradient ( d_a[i] / d_z[j] )\n dz_dW: the partial gradient of logits z w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).\n The i,j -th element of dz_dW represents the partial gradient of the i-th logit (z[i]) w.r.t. the weight W[i,j]: d_z[i] / d_W[i,j]\n dz_db: the partial gradient of the logits z w.r.t. the biases b, a float vector of shape c by 1.\n Each element dz_db[i] represents the partial gradient of the i-th logit z[i] w.r.t. 
the i-th bias: d_z[i] / d_b[i]\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n dL_da = compute_dL_da(a, y)\n da_dz = compute_da_dz(a)\n dz_dW = compute_dz_dW(x)\n dz_db = compute_dz_db(y.shape[0])\n\n #########################################\n return dL_da, da_dz, dz_dW, dz_db\n\n\n# -----------------------------------------------------------------\ndef compute_dL_dz(dL_da, da_dz):\n '''\n Given the local gradients, compute the gradient of the loss function L w.r.t. the logits z using chain rule.\n Input:\n dL_da: the local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.\n The i-th element dL_da[i] represents the partial gradient of the loss function L w.r.t. the i-th activation a[i]: d_L / d_a[i].\n da_dz: the local gradient of the activation w.r.t. the logits z, a float numpy matrix of shape (c by c).\n The (i,j)-th element of da_dz represents the partial gradient ( d_a[i] / d_z[j] )\n Output:\n dL_dz: the gradient of the loss function L w.r.t. the logits z, a numpy float vector of shape c by 1.\n The i-th element dL_dz[i] represents the partial gradient of the loss function L w.r.t. the i-th logit z[i]: d_L / d_z[i].\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n dL_dz = np.multiply(da_dz, dL_da)\n\n #########################################\n return dL_dz\n\n\n# -----------------------------------------------------------------\ndef compute_dL_dW(dL_dz, dz_dW):\n '''\n Given the local gradients, compute the gradient of the loss function L w.r.t. the weights W using chain rule.\n Input:\n dL_dz: the gradient of the loss function L w.r.t. the logits z, a numpy float vector of shape c by 1.\n The i-th element dL_dz[i] represents the partial gradient of the loss function L w.r.t. the i-th logit z[i]: d_L / d_z[i].\n dz_dW: the partial gradient of logits z w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).\n The i,j -th element of dz_dW represents the partial gradient of the i-th logit (z[i]) w.r.t. the weight W[i,j]: d_z[i] / d_W[i,j]\n Output:\n dL_dW: the global gradient of the loss function w.r.t. the weight matrix, a numpy float matrix of shape (c by p).\n Here c is the number of classes.\n The i,j-th element dL_dW[i,j] represents the partial gradient of the loss function L w.r.t. the i,j-th weight W[i,j]: d_L / d_W[i,j]\n Hint: you could solve this problem using 2 lines of code\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n dL_dW = np.dot(dz_dW, dL_dz)\n\n #########################################\n return dL_dW.T\n\n\n# -----------------------------------------------------------------\ndef compute_dL_db(dL_dz, dz_db):\n '''\n Given the local gradients, compute the gradient of the loss function L w.r.t. the biases b using chain rule.\n Input:\n dL_dz: the gradient of the loss function L w.r.t. the logits z, a numpy float vector of shape c by 1.\n The i-th element dL_dz[i] represents the partial gradient of the loss function L w.r.t. the i-th logit z[i]: d_L / d_z[i].\n dz_db: the local gradient of the logits z w.r.t. the biases b, a float numpy vector of shape c by 1.\n The i-th element dz_db[i] represents the partial gradient ( d_z[i] / d_b[i] )\n Output:\n dL_db: the global gradient of the loss function L w.r.t. the biases b, a float numpy vector of shape c by 1.\n The i-th element dL_db[i] represents the partial gradient of the loss function w.r.t. 
the i-th bias: d_L / d_b[i]\n Hint: you could solve this problem using 1 line of code in the block.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n dL_db = np.dot(dL_dz.T, dz_db)\n\n #########################################\n return dL_db\n\n\n# -----------------------------------------------------------------\n# gradient descent\n# -----------------------------------------------------------------\n\n# --------------------------\ndef update_W(W, dL_dW, dL_db, epsilon=0.001):\n '''\n Update the weights W using gradient descent.\n Input:\n W: the current weight matrix, a float numpy matrix of shape (c by p). Here c is the number of classes.\n alpha: the step-size parameter of gradient descent, a float scalar.\n dL_dW: the global gradient of the loss function w.r.t. the weight matrix, a numpy float matrix of shape (c by p).\n The i,j-th element dL_dW[i,j] represents the partial gradient of the loss function L w.r.t. the i,j-th weight W[i,j]: d_L / d_W[i,j]\n Output:\n W: the updated weight matrix, a numpy float matrix of shape (c by p).\n Hint: you could solve this problem using 1 line of code\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n W[:,0:-1] = W[:,0:-1] - epsilon * dL_dW[:,0:-1]\n W[:,-1] = W[:,-1] - epsilon * dL_db[:,-1]\n\n #########################################\n return W\n\n\n# --------------------------\ndef update_b(b, dL_db, alpha=0.001):\n '''\n Update the biases b using gradient descent.\n Input:\n b: the current bias values, a float numpy vector of shape c by 1.\n dL_db: the global gradient of the loss function L w.r.t. the biases b, a float numpy vector of shape c by 1.\n The i-th element dL_db[i] represents the partial gradient of the loss function w.r.t. the i-th bias: d_L / d_b[i]\n alpha: the step-size parameter of gradient descent, a float scalar.\n Output:\n b: the updated of bias vector, a float numpy vector of shape c by 1.\n Hint: you could solve this problem using 1 lines of code\n '''\n\n #########################################\n ## INSERT YOUR CODE HERE\n\n b = b - alpha * dL_db\n\n #########################################\n return b\n\n\n# --------------------------\n# train\ndef train(X, Y, batch_size=75, epsilon=0.005, n_epochs=2, alpha=.005):\n '''\n Given a training dataset, train the softmax regression model by iteratively updating the weights W and biases b using the gradients computed over each data instance.\n Input:\n X: the feature matrix of training instances, a float numpy matrix of shape (n by p). Here n is the number of data instance in the training set, p is the number of features/dimensions.\n Y: the labels of training instance, a numpy integer numpy array of length n. The values can be 0 or 1.\n alpha: the step-size parameter of gradient ascent, a float scalar.\n n_epoch: the number of passes to go through the training set, an integer scalar.\n Output:\n W: the weight matrix trained on the training set, a numpy float matrix of shape (c by p).\n b: the bias, a float numpy vector of shape c by 1.\n '''\n # number of features\n p = X.shape[1]\n # number of classes\n c = Y.shape[1]\n # number of instances\n l = X.shape[0]\n\n # randomly initialize W and b\n # W = np.asmatrix(np.random.rand(c, p))\n W = np.zeros((c, p))\n b = W[:, -1]\n num_batches = l / batch_size\n if not num_batches.is_integer():\n print(\"Not a whole number of batches. 
%.2f batches with batch size %d.\" % (num_batches, batch_size))\n num_batches = round(num_batches)\n\n L_vec = np.array([])\n\n for epoch in range(n_epochs):\n # Create a random order for the samples\n order = np.random.permutation(l)\n pbar = tqdm(range(num_batches), unit=\"batch\")\n for i in pbar:\n elt_indicies = order[i*batch_size:(i*batch_size)+batch_size]\n x = X[elt_indicies]\n y = Y[elt_indicies]\n\n # Forward pass: compute the logits, softmax and cross_entropy\n z, a, _ = forward(x, y, W)\n\n # Back Propagation: compute local gradients of cross_entropy, softmax and logits\n dL_da, da_dz, dz_dW, dz_db = backward(x, y, a)\n\n # compute the global gradients using chain rule\n dL_dz = compute_dL_dz(dL_da, da_dz)\n dL_dW = compute_dL_dW(dL_dz, dz_dW)\n dL_db = compute_dL_db(dL_dz, dz_db)\n\n # update the paramters using gradient descent\n W = update_W(W, dL_dW, dL_db, epsilon)\n # b = update_b(b, dL_db, epsilon)\n\n _, _, L = forward(X, Y, W)\n L_vec = np.append(L_vec, L)\n acc = compute_acc(X, Y, W)\n pbar.set_description(\"Loss: %.2f. Accuracy: %.2f. Epoch progress\" % (L, acc))\n\n acc = compute_acc(X, Y, W)\n print(\"\\nEpoch %d completed. Epoch loss: %.2f. Epoch accuracy: %.2f\\n\" % (epoch+1, L, acc))\n\n return W, L_vec\n\n\ndef compute_acc(X, Y, W):\n y_hat = compute_a(compute_z(X, W))\n y_hat_indicies = np.argmax(y_hat, axis=1)\n y_indicies = np.argmax(Y, axis=1)\n error = np.array([y_indicies == y_hat_indicies])\n acc = error.sum() / X.shape[0]\n return acc\n\n\n\nX = np.load('mnist_train_images.npy')\nX = np.append(X, np.ones((X.shape[0], 1)), axis=1)\nY = np.load('mnist_train_labels.npy')\n\nW, L = train(X, Y, batch_size=75, epsilon=0.001, n_epochs=1, alpha=.005)\n\nplt.plot(L)\nplt.title(\"Loss over each iteration (mini-batch)\")\nplt.show()\n\n\"\"\" Performance Evaluation with the Validation Set \"\"\"\nX_valid = np.load('mnist_validation_images.npy')\nX_valid = np.append(X_valid, np.ones((X_valid.shape[0], 1)), axis=1)\nY_valid = np.load('mnist_validation_labels.npy')\n\nacc = compute_acc(X_valid, Y_valid, W) * 100\nvalid_loss = compute_L(compute_a(compute_z(X_valid, W)), Y)\n\nprint(\"Validation Loss: %.2f\" % valid_loss)\nprint(\"Validation Accuracy: %.2f %%.\" % acc)\n", "repo_name": "dpwivagg/simple-ML", "sub_path": "Multi-layer handwriting recognition/fromscratch_og.py", "file_name": "fromscratch_og.py", "file_ext": "py", "file_size_in_byte": 19359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "numpy.dot", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 380, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 381, "usage_type": "call"}, {"api_name": 
"numpy.append", "line_number": 403, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 425, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 429, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 429, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 430, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 430, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 431, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 431, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 436, "usage_type": "call"}]} +{"seq_id": "30532919575", "text": "# needed for python unit testings\n# https://docs.python.org/3/library/unittest.html\nfrom functools import cache\nimport unittest\n\n# required for type hinting\n# https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html\nfrom typing import List\n\nclass Solution:\n '''\n Given an array of strings arr. A string s is formed by the concatenation of\n a subsequence of arr that has unique characters.\n\n Return the maximum possible length of s.\n\n A subsequence is an array that can be derived from another array by deleting\n some or no elements without changing the order of the remaining elements.\n '''\n # the answer is in the range [1, 26]\n def maxLength(self, arr: List[str]):\n def toBitSet(s) -> int:\n b, l = 0, 0\n for c in s:\n a = 1 << (ord(c) - ord('a'))\n if b & a:\n return 0,0\n b |= a\n l += 1\n return b,l\n a = [toBitSet(s) for s in arr]\n @cache\n def dp(i,b) -> int:\n if i == len(a):\n return 0\n # do not take or cannot take\n if a[i][0] == 0 or b & a[i][0]:\n return dp(i+1, b)\n # possible to take\n else:\n return max(dp(i+1, b), a[i][1] + dp(i+1, b | a[i][0]))\n return dp(0,0)\n\nclass UnitTesting(unittest.TestCase):\n def test_one(self):\n s = Solution()\n i = [\"un\",\"iq\",\"ue\"]\n o = 4\n self.assertEqual(s.maxLength(i), o)\n\n def test_two(self):\n s = Solution()\n i = [\"cha\",\"r\",\"act\",\"ers\"]\n o = 6\n self.assertEqual(s.maxLength(i), o)\n\n def test_three(self):\n s = Solution()\n i = [\"abcdefghijklmnopqrstuvwxyz\"]\n o = 26\n self.assertEqual(s.maxLength(i), o)\n\n def test_four(self):\n s = Solution()\n i = [\"unu\",\"iq\",\"ue\"]\n o = 4\n self.assertEqual(s.maxLength(i), o)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)", "repo_name": "olsenw/LeetCodeExercises", "sub_path": "Python3/maximum_length_of_a_concatenated_string_with_unique_characters.py", "file_name": "maximum_length_of_a_concatenated_string_with_unique_characters.py", "file_ext": "py", "file_size_in_byte": 2006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "functools.cache", "line_number": 32, 
"usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 44, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "22436963840", "text": "import numpy as np\nimport sys\nfrom random import randrange\nfrom random import seed\nfrom csv import reader\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer\n\n\"\"\"\nHousing: This is a regression dataset where the task is to predict the value of houses \nin the suburbs of Boston based on thirteen features that describe different aspects that \nare relevant to determining the value of a house, such as the number of rooms, levels of\npollution in the area, etc.\n\"\"\"\n\nclass Preprocess:\n def __init__(self, filename):\n self.filename = filename\n\n # Load a CSV file\n def load_csv(self):\n file = open(self.filename, \"r\")\n lines = reader(file)\n dataset = list(lines)\n return dataset\n\n # Convert string column to float\n def str_column_to_float(self, dataset, column):\n for row in dataset:\n if len(row) != 0:\n row[column] = float(row[column].strip())\n\n def pre_process(self):\n # convert string attributes to integers\n dataset = self.load_csv()\n for col in range(len(dataset[0])):\n self.str_column_to_float(dataset, col)\n\n x = np.array(dataset)[:, 0:-1]\n y = np.array(dataset)[:, -1]\n\n # normalize features\n x = self.normalize_data(x)\n\n # concatenate\n dataset = np.concatenate((x, np.array([y]).T), axis=1)\n return dataset\n\n def normalize_data(self, dataset, type=\"min_max\"):\n if type == \"std\":\n scaler = StandardScaler().fit(dataset)\n X_scaled = scaler.transform(dataset)\n if type == \"l1\" or type == \"l2\":\n scaler = Normalizer(norm=type)\n X_scaled = scaler.fit_transform(dataset)\n if type == \"min_max\":\n scaler = MinMaxScaler(feature_range=(0, 1))\n X_scaled = scaler.fit_transform(dataset)\n return X_scaled\n\n def get_thresholds(self):\n # shuffle is done in the split-n-fold part\n # generate thresholds\n dataset = self.pre_process()\n num_feature = len(dataset[0]) - 1\n num_threshold = len(dataset[:, 0]) - 1 # number of instances - 1\n thresholds = []\n for index in range(num_feature): # loop all the features (0, 1, 2, 3)\n # sort the dataset by the current index(feature)\n sorted_data = dataset[np.argsort(dataset[:, index])]\n feature = sorted_data[:, index]\n label = sorted_data[:, -1]\n temp = []\n for row in range(num_threshold):\n temp.append((feature[row] + feature[row + 1]) * 1. 
/ 2)\n # add the list of thresholds for the feature\n thresholds.append(set(temp))\n return thresholds\n\n# Split a dataset into k folds\ndef cross_validation_split(dataset, n_folds):\n dataset_split = list()\n dataset_copy = list(dataset)\n fold_size = int(len(dataset) / n_folds)\n for i in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(dataset_copy))\n fold.append(dataset_copy.pop(index))\n dataset_split.append(fold)\n return dataset_split\n\n# Calculate SSE\ndef accuracy_metric(actual, predicted):\n error = 0\n for i in range(len(actual)):\n error += ((actual[i] - predicted[i])**2)\n return error\n\n\n# Make a prediction with a decision tree\ndef predict(node, row):\n if row[node['index']] < node['thres']:\n if isinstance(node['left'], dict):\n return predict(node['left'], row)\n else:\n return node['left']\n else:\n if isinstance(node['right'], dict):\n return predict(node['right'], row)\n else:\n return node['right']\n\n\ndef evaluation(dataset, thresholds, n_folds=10, mean_ratio=[0.05, 0.10, 0.15, 0.20]):\n folds = cross_validation_split(dataset, n_folds)\n acc = {}\n for ratio in mean_ratio:\n min_size = ratio * len(dataset)\n scores = list()\n for i in range(len(folds)):\n fold = folds[i]\n train_set = list(folds)\n train_set.pop(i)\n train_set = sum(train_set, [])\n node = build_tree(train_set, min_size, thresholds)\n predicted = list()\n for row in fold:\n predicted.append(predict(node, row))\n actual = [row[-1] for row in fold]\n accuracy = accuracy_metric(actual, predicted)\n scores.append(accuracy)\n acc[ratio] = {}\n acc[ratio][\"acc\"] = sum(scores) / float(len(scores))\n acc[ratio][\"std\"] = np.std(np.array(scores))\n return acc\n\n\ndef create_node(dataset, thresholds):\n SSE = sys.maxsize\n node = {}\n for feature in range(len(dataset[0]) - 1):\n curr_node = sum_of_errors(dataset, feature, thresholds)\n if curr_node['SSE'] < SSE:\n SSE = curr_node['SSE']\n node = curr_node\n return node\n\n\n# calculate the squared sum of one tree node\ndef squared_error(label):\n # predicted value per tree node\n #print label\n mean = np.mean(label)\n #print mean\n result = 0.0\n for y in label:\n result += ((y - mean) ** 2)\n return result\n\n\n# pick the best thresholds for one feature\ndef sum_of_errors(dataset, feature, thresholds):\n min_errors, threshold, group = sys.maxsize, sys.maxsize, None\n for thres in thresholds[feature]:\n left, right = split(dataset, feature, thres)\n left_label = np.array([row[-1] for row in left])\n right_label = np.array([row[-1] for row in right])\n\n left_err = squared_error(left_label)\n right_err = squared_error(right_label)\n\n sum_of_err = (float(left_label.shape[0]) / len(dataset)) * left_err + (float(right_label.shape[0]) / len(dataset)) * right_err\n if sum_of_err <= min_errors:\n min_errors, threshold, group = sum_of_err, thres, (left, right)\n\n return {'SSE': min_errors, 'index': feature, 'group': group, 'thres': threshold}\n\n\n# for a given threshold number, split the dataset into two group\ndef split(dataset, feature, threshold):\n left, right = list(), list()\n for row in dataset:\n if row[feature] <= threshold:\n left.append(row)\n else:\n right.append(row)\n return np.array(left), np.array(right)\n\n\ndef create_leaf(group):\n outcomes = np.mean([row[-1] for row in group])\n return outcomes\n\n\ndef build_tree_helper(node, min_size, thresholds):\n left, right = node['group']\n del (node['group'])\n # if left is empty or right is empty, no split\n if len(left) == 0:\n node['left'] = node['right'] 
= create_leaf(right)\n return\n\n if len(right) == 0:\n node['right'] = node['left'] = create_leaf(left)\n return\n\n # left child\n if len(left) <= min_size:\n node['left'] = create_leaf(left)\n else:\n node['left'] = create_node(left, thresholds)\n build_tree_helper(node['left'], min_size, thresholds)\n\n # right child\n if len(right) <= min_size:\n node['right'] = create_leaf(right)\n else:\n node['right'] = create_node(right, thresholds)\n build_tree_helper(node['right'], min_size, thresholds)\n\n\ndef build_tree(dataset, min_size, thresholds):\n root = create_node(dataset, thresholds)\n build_tree_helper(root, min_size, thresholds)\n return root\n\n\ndef main():\n seed(1)\n preprocess = Preprocess('housing.csv')\n dataset = preprocess.pre_process()\n thres = preprocess.get_thresholds()\n print(evaluation(dataset, thres, n_folds=10, mean_ratio=[0.05, 0.10, 0.15, 0.20]))\n\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "FanW123/ML-Algorithms-Implementation", "sub_path": "Decision Tree/p6-1.py", "file_name": "p6-1.py", "file_ext": "py", "file_size_in_byte": 7596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "csv.reader", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Normalizer", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 69, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 153, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 191, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "21504265367", "text": "# Create your tests here.\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase, APIClient\nfrom rest_framework.views import status\nfrom .models import Click\nfrom .serializers import ClickSerializer\nimport csv\nimport time\n\n# tests for views\n\n\nclass BaseViewTest(APITestCase):\n client = APIClient()\n\n @staticmethod\n def create_entry(row):\n if row[0] != \"\" and row[1] != \"\":\n Click.objects.create(date=row[0],channel=row[1],country=row[2],os=row[3],impressions=row[4],clicks=row[5],installs=row[6],spend=row[7],revenue=row[8])\n\n def setUp(self):\n # add test data\n with open(\"/home/ubuntu/adjust/admet/data.csv\",\"r\") as dfile:\n reader = csv.reader(dfile)\n for row in reader:\n self.create_entry(row)\n \n\nclass GetAllClickTest(BaseViewTest):\n\n def 
test_get_all_entries(self):\n \"\"\"\n This test ensures that all entries added in the setUp method\n exist when we make a GET request to the metrics/ endpoint\n \"\"\"\n # hit the API endpoint\n response = self.client.get(\n reverse(\"metric-all\", kwargs={\"version\": \"v1\"})\n )\n # fetch the data from db\n expected = Click.objects.all()\n serialized = ClickSerializer(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n #time.sleep(500)\n", "repo_name": "nedfrine/adjust", "sub_path": "admet/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1453, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "rest_framework.test.APITestCase", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Click.objects.create", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Click.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Click", "line_number": 19, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Click.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Click.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Click", "line_number": 41, "usage_type": "name"}, {"api_name": "serializers.ClickSerializer", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.views.status.HTTP_200_OK", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.views.status", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "30489430422", "text": "from PyQt5.QtCore import QT_TRANSLATE_NOOP, QObject\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtSql import QSqlDatabase, QSqlQuery\n\n\nclass FieldTypes():\n String = 1\n ShortString = 2\n Number = 3\n Text = 4\n Money = 5\n Date = 6\n BigInt = 7\n Image = 8\n Value = 9\n Status = 10\n DateTime = 11\n EdgeImage = 12\n PreviewImage = 13\n\n Mask = 0xFF\n Checkable = 0x100\n Disabled = 0x200\n\n ImageTypes = (Image, EdgeImage, PreviewImage)\n\n @staticmethod\n def toSql(type_):\n if type_ == FieldTypes.String:\n sql_type = 'TEXT'\n elif type_ == FieldTypes.ShortString:\n sql_type = 'TEXT'\n elif type_ == FieldTypes.Number:\n sql_type = 'INTEGER'\n elif type_ == FieldTypes.Text:\n sql_type = 'TEXT'\n elif type_ == FieldTypes.Money:\n sql_type = 'NUMERIC'\n elif type_ == FieldTypes.Date:\n sql_type = 'TEXT'\n elif type_ == FieldTypes.BigInt:\n sql_type = 'INTEGER'\n elif type_ == FieldTypes.PreviewImage:\n sql_type = 'INTEGER'\n elif type_ == FieldTypes.Image:\n sql_type = 'INTEGER'\n elif type_ == FieldTypes.Value:\n sql_type = 'NUMERIC'\n elif type_ == FieldTypes.Status:\n sql_type = 'TEXT'\n elif type_ == FieldTypes.DateTime:\n sql_type = 'TEXT'\n elif type_ == FieldTypes.EdgeImage:\n sql_type = 'INTEGER'\n else:\n raise\n\n return sql_type\n\n\nclass Status(dict):\n Keys = ('demo', 'pass', 'owned', 'ordered', 'sold', 'sale', 'wish')\n Titles = (\n QT_TRANSLATE_NOOP(\"Status\", \"Demo\"),\n QT_TRANSLATE_NOOP(\"Status\", \"Pass\"),\n QT_TRANSLATE_NOOP(\"Status\", \"Owned\"),\n QT_TRANSLATE_NOOP(\"Status\", \"Ordered\"),\n QT_TRANSLATE_NOOP(\"Status\", \"Sold\"),\n QT_TRANSLATE_NOOP(\"Status\", \"Sale\"),\n 
QT_TRANSLATE_NOOP(\"Status\", \"Wish\"),\n )\n\n def __init__(self):\n for key, value in zip(self.Keys, self.Titles):\n dict.__setitem__(self, key, value)\n\n def keys(self):\n return self.Keys\n\n def items(self):\n result = []\n for key in self.Keys:\n result.append((key, self.__getitem__(key)))\n return result\n\n def values(self):\n result = []\n for key in self.Keys:\n result.append(self.__getitem__(key))\n return result\n\n def __getitem__(self, key):\n try:\n if isinstance(key, int):\n value = dict.__getitem__(self, self.Keys[key])\n else:\n value = dict.__getitem__(self, key)\n return QApplication.translate(\"Status\", value)\n except KeyError:\n return None\n\nStatuses = Status()\n\n\nclass CollectionField():\n def __init__(self, id_, name, title, type_):\n self.id = id_\n self.name = name\n self.title = title\n self.type = type_\n\n\nclass CollectionFieldsBase(QObject):\n def __init__(self, parent=None):\n from OpenNumismat.Collection.CollectionFields import FieldTypes as Type\n super(CollectionFieldsBase, self).__init__(parent)\n\n fields = [\n ('id', self.tr(\"ID\"), Type.BigInt),\n\n ('title', self.tr(\"Name\"), Type.String),\n ('value', self.tr(\"Value\"), Type.Money),\n ('unit', self.tr(\"Unit\"), Type.String),\n ('country', self.tr(\"Country\"), Type.String),\n ('year', self.tr(\"Year\"), Type.Number),\n ('period', self.tr(\"Period\"), Type.String),\n ('mint', self.tr(\"Mint\"), Type.String),\n ('mintmark', self.tr(\"Mint mark\"), Type.ShortString),\n ('issuedate', self.tr(\"Date of issue\"), Type.Date),\n ('type', self.tr(\"Type\"), Type.String),\n ('series', self.tr(\"Series\"), Type.String),\n ('subjectshort', self.tr(\"Subject\"), Type.String),\n ('status', self.tr(\"Status\"), Type.Status),\n ('material', self.tr(\"Material\"), Type.String),\n ('fineness', self.tr(\"Fineness\"), Type.Number), # 4 digits for Canadian Gold Maple Leaf\n ('shape', self.tr(\"Shape\"), Type.String),\n ('diameter', self.tr(\"Diameter\"), Type.Value),\n ('thickness', self.tr(\"Thickness\"), Type.Value),\n ('weight', self.tr(\"Weight\"), Type.Value),\n ('grade', self.tr(\"Grade\"), Type.String),\n ('edge', self.tr(\"Type\"), Type.String),\n ('edgelabel', self.tr(\"Label\"), Type.String),\n ('obvrev', self.tr(\"ObvRev\"), Type.String),\n ('quality', self.tr(\"Quality\"), Type.String),\n ('mintage', self.tr(\"Mintage\"), Type.BigInt),\n ('dateemis', self.tr(\"Emission period\"), Type.String),\n ('catalognum1', self.tr(\"1#\"), Type.String),\n ('catalognum2', self.tr(\"2#\"), Type.String),\n ('catalognum3', self.tr(\"3#\"), Type.String),\n ('catalognum4', self.tr(\"4#\"), Type.String),\n ('rarity', self.tr(\"Rarity\"), Type.String),\n ('price1', self.tr(\"Fine\"), Type.Money),\n ('price2', self.tr(\"VF\"), Type.Money),\n ('price3', self.tr(\"XF\"), Type.Money),\n ('price4', self.tr(\"Unc\"), Type.Money),\n ('variety', self.tr(\"Variety\"), Type.String),\n ('obversevar', self.tr(\"Obverse\"), Type.String),\n ('reversevar', self.tr(\"Reverse\"), Type.String),\n ('edgevar', self.tr(\"Edge\"), Type.String),\n ('paydate', self.tr(\"Date\"), Type.Date),\n ('payprice', self.tr(\"Price\"), Type.Money),\n ('totalpayprice', self.tr(\"Paid\"), Type.Money),\n ('saller', self.tr(\"Saller\"), Type.String),\n ('payplace', self.tr(\"Place\"), Type.String),\n ('payinfo', self.tr(\"Info\"), Type.Text),\n ('saledate', self.tr(\"Date\"), Type.Date),\n ('saleprice', self.tr(\"Price\"), Type.Money),\n ('totalsaleprice', self.tr(\"Bailed\"), Type.Money),\n ('buyer', self.tr(\"Buyer\"), Type.String),\n ('saleplace', 
self.tr(\"Place\"), Type.String),\n ('saleinfo', self.tr(\"Info\"), Type.Text),\n ('note', self.tr(\"Note\"), Type.Text),\n ('image', self.tr(\"Image\"), Type.PreviewImage),\n ('obverseimg', self.tr(\"Obverse\"), Type.Image),\n ('obversedesign', self.tr(\"Design\"), Type.Text),\n ('obversedesigner', self.tr(\"Designer\"), Type.String),\n ('reverseimg', self.tr(\"Reverse\"), Type.Image),\n ('reversedesign', self.tr(\"Design\"), Type.Text),\n ('reversedesigner', self.tr(\"Designer\"), Type.String),\n ('edgeimg', self.tr(\"Edge\"), Type.EdgeImage),\n ('subject', self.tr(\"Subject\"), Type.Text),\n ('photo1', self.tr(\"Photo 1\"), Type.Image),\n ('photo2', self.tr(\"Photo 2\"), Type.Image),\n ('photo3', self.tr(\"Photo 3\"), Type.Image),\n ('photo4', self.tr(\"Photo 4\"), Type.Image),\n ('defect', self.tr(\"Defect\"), Type.String),\n ('storage', self.tr(\"Storage\"), Type.String),\n ('features', self.tr(\"Features\"), Type.Text),\n ('createdat', self.tr(\"Created at\"), Type.DateTime),\n ('updatedat', self.tr(\"Updated at\"), Type.DateTime),\n ('quantity', self.tr(\"Quantity\"), Type.BigInt),\n ('url', self.tr(\"URL\"), Type.String),\n ('barcode', self.tr(\"Barcode\"), Type.String),\n ]\n\n self.fields = []\n for id_, field in enumerate(fields):\n self.fields.append(\n CollectionField(id_, field[0], field[1], field[2]))\n setattr(self, self.fields[id_].name, self.fields[id_])\n\n self.systemFields = [self.id, self.createdat, self.updatedat, self.image]\n self.userFields = list(self.fields)\n for item in [self.id, self.createdat, self.updatedat]:\n self.userFields.remove(item)\n\n def field(self, id_):\n return self.fields[id_]\n\n def __iter__(self):\n self.index = 0\n return self\n\n def __next__(self):\n if self.index == len(self.fields):\n raise StopIteration\n self.index = self.index + 1\n return self.fields[self.index - 1]\n\n\nclass CollectionFields(CollectionFieldsBase):\n def __init__(self, db, parent=None):\n super(CollectionFields, self).__init__(parent)\n self.db = db\n\n if 'fields' not in self.db.tables():\n self.create(self.db)\n\n query = QSqlQuery(self.db)\n query.prepare(\"SELECT * FROM fields\")\n query.exec_()\n self.userFields = []\n self.disabledFields = []\n while query.next():\n record = query.record()\n fieldId = record.value('id')\n field = self.field(fieldId)\n field.title = record.value('title')\n field.enabled = bool(record.value('enabled'))\n if field.enabled:\n self.userFields.append(field)\n else:\n self.disabledFields.append(field)\n\n def save(self):\n self.db.transaction()\n\n for field in self.fields:\n query = QSqlQuery(self.db)\n query.prepare(\"UPDATE fields SET title=?, enabled=? 
WHERE id=?\")\n query.addBindValue(field.title)\n query.addBindValue(int(field.enabled))\n query.addBindValue(field.id)\n query.exec_()\n\n self.db.commit()\n\n @staticmethod\n def create(db=QSqlDatabase()):\n db.transaction()\n\n sql = \"\"\"CREATE TABLE fields (\n id INTEGER NOT NULL PRIMARY KEY,\n title TEXT,\n enabled INTEGER)\"\"\"\n QSqlQuery(sql, db)\n\n fields = CollectionFieldsBase()\n\n for field in fields:\n query = QSqlQuery(db)\n query.prepare(\"\"\"INSERT INTO fields (id, title, enabled)\n VALUES (?, ?, ?)\"\"\")\n query.addBindValue(field.id)\n query.addBindValue(field.title)\n enabled = field in fields.userFields\n query.addBindValue(int(enabled))\n query.exec_()\n\n db.commit()\n", "repo_name": "xavifc/open-numismat", "sub_path": "OpenNumismat/Collection/CollectionFields.py", "file_name": "CollectionFields.py", "file_ext": "py", "file_size_in_byte": 10434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "75", "api": [{"api_name": "PyQt5.QtCore.QT_TRANSLATE_NOOP", "line_number": 64, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QT_TRANSLATE_NOOP", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QT_TRANSLATE_NOOP", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QT_TRANSLATE_NOOP", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QT_TRANSLATE_NOOP", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QT_TRANSLATE_NOOP", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QT_TRANSLATE_NOOP", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 98, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 98, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QObject", "line_number": 113, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.BigInt", "line_number": 119, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 119, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 121, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 121, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 122, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 122, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 123, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 123, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 124, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 124, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Number", "line_number": 125, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 125, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 126, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 126, "usage_type": "name"}, {"api_name": 
"OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 127, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 127, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.ShortString", "line_number": 128, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 128, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Date", "line_number": 129, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 129, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 130, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 130, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 131, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 131, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 132, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 132, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Status", "line_number": 133, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 133, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 134, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 134, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Number", "line_number": 135, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 135, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 136, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 136, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Value", "line_number": 137, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 137, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Value", "line_number": 138, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 138, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Value", "line_number": 139, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 139, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 140, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 140, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 141, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 141, "usage_type": "name"}, {"api_name": 
"OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 142, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 142, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 143, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 143, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 144, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 144, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.BigInt", "line_number": 145, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 145, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 146, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 146, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 147, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 147, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 148, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 148, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 149, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 149, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 150, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 150, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 151, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 151, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 152, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 152, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 153, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 153, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 154, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 154, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 155, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 155, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 156, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 156, "usage_type": "name"}, {"api_name": 
"OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 157, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 157, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 158, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 158, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 159, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 159, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Date", "line_number": 160, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 160, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 161, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 161, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 162, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 162, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 163, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 163, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 164, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 164, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Text", "line_number": 165, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 165, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Date", "line_number": 166, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 166, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 167, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 167, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Money", "line_number": 168, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 168, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 169, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 169, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 170, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 170, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Text", "line_number": 171, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 171, "usage_type": "name"}, {"api_name": 
"OpenNumismat.Collection.CollectionFields.FieldTypes.Text", "line_number": 172, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 172, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.PreviewImage", "line_number": 173, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 173, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Image", "line_number": 174, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 174, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Text", "line_number": 175, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 175, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 176, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 176, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Image", "line_number": 177, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 177, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Text", "line_number": 178, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 178, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 179, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 179, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.EdgeImage", "line_number": 180, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 180, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Text", "line_number": 181, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 181, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Image", "line_number": 182, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 182, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Image", "line_number": 183, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 183, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Image", "line_number": 184, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 184, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Image", "line_number": 185, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 185, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 186, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 186, "usage_type": "name"}, {"api_name": 
"OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 187, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 187, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.Text", "line_number": 188, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 188, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.DateTime", "line_number": 189, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 189, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.DateTime", "line_number": 190, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 190, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.BigInt", "line_number": 191, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 191, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 192, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 192, "usage_type": "name"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes.String", "line_number": 193, "usage_type": "attribute"}, {"api_name": "OpenNumismat.Collection.CollectionFields.FieldTypes", "line_number": 193, "usage_type": "name"}, {"api_name": "PyQt5.QtSql.QSqlQuery", "line_number": 229, "usage_type": "call"}, {"api_name": "PyQt5.QtSql.QSqlQuery", "line_number": 249, "usage_type": "call"}, {"api_name": "PyQt5.QtSql.QSqlDatabase", "line_number": 259, "usage_type": "call"}, {"api_name": "PyQt5.QtSql.QSqlQuery", "line_number": 266, "usage_type": "call"}, {"api_name": "{'Type': 'OpenNumismat.Collection.CollectionFields.FieldTypes'}", "line_number": 268, "usage_type": "call"}, {"api_name": "PyQt5.QtSql.QSqlQuery", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "31777765418", "text": "import argparse\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom data_generator import *\nfrom combinatorial_algorithms import *\nimport sys\nimport pandas as pd\nimport os\n\n# Declare function to define command-line arguments\ndef readOptions(args=sys.argv[1:]):\n parser = argparse.ArgumentParser(description=\"The parsing commands lists.\")\n # for common setting\n parser.add_argument(\"-N\", \"--simul_n\", help=\"The number of simulation replication\")\n parser.add_argument(\"-T\", \"--timeHorizon\", help=\"The length of time horizon\")\n parser.add_argument(\"-R\", \"--errorVar\", help=\"Variance of error\")\n parser.add_argument(\"-d\", \"--dim\", help=\"Dimension of covariate. (type: Int)\")\n parser.add_argument(\"-A\", \"--arms\", help=\"The number of arms\")\n # time variant term setting\n parser.add_argument(\"-S\", \"--nu\", help=\"The setting of time variant term\")\n # for tuning\n parser.add_argument(\"-th\", \"--tuningHorizon\", help=\"The length of time horizon for tuning\")\n parser.add_argument(\"-it\", \"--istuning\", help=\"Will be this code for hyperparameter tuning? 
0(No), 1(Yes)\")\n # dir\n parser.add_argument(\"-s\", \"--savedir\", help=\"The name of saving directory\")\n # setting for selecting arms algorithms\n parser.add_argument(\"-K\", \"--select_arms\", help = \"The number of selected arms at t\")\n parser.add_argument(\"-l\", \"--lamb\", help = \"Degree to which diversity is considered when selecting a group of items\")\n opts = parser.parse_args(args)\n return opts\n\nopts = readOptions(sys.argv[1:])\n\n# Call the function to read the argment values\ntry:\n opts = readOptions(sys.argv[1:])\nexcept:\n print(\"No inputs detected. Going with default options\")\n class defaultOption(object):\n def __init__(self):\n self.simul_n = 5\n self.timeHorizon = 3000\n self.errorVar = 0.1\n self.dim = 3\n self.arms = 20\n self.nu = \"set1\"\n self.tuningHorizon = 3000\n self.istuning = True\n self.savedir = \"test\"\n self.select_arms = 4\n self.lamb = 0.5\n opts = defaultOption()\n\nopts.istuning = bool(int(opts.istuning))\n\n# base and save dir setting\nbase_dir = os.getcwd()\n\nsrc_dir = base_dir\nsave_dir = base_dir\nif (opts.savedir is not None):\n save_dir = base_dir + \"/\" + opts.savedir\nos.makedirs(save_dir, exist_ok = True)\n\n# common setting\nsimul_n = int(opts.simul_n) # num of simulation\nT = int(opts.timeHorizon) # Time Horizon\nR = float(opts.errorVar) # variance of error\ndim = int(opts.dim) # feature dim\nN = int(opts.arms) # num of arms\n\n# setting for selecting arms algorithm\nlamb = float(opts.lamb)\nK = int(opts.select_arms)\nsigma = 1\n\n# # hyperparameter\n# vstd = 1\n# vprop = 0.8\n\n# time variant term setting\nnu = opts.nu\nistuning = int(opts.istuning)\ntuningT = int(opts.tuningHorizon)\n\n# print setting for debugging\nprint(\"This simulation was run with the following settings: simul_n = %s, T = %s, R = %s, dim = %s, N = %s, nu = %s, tuningT = %s\" % (simul_n, T, R, dim, N, nu, tuningT))\n\n\n# list of hyperparameter for tuning\nVstdList = [1, 0.5, 0.25, 0.125, 0.0625, 0.0125, 0.00625] # C2UCB\nVpropList = [1, 0.5, 0.25, 0.125, 0.0625, 0.0125, 0.00625] # semi-comb: proposed algorithm\n\ntuningNum = len(VstdList)\n\n\ntuningTable = pd.DataFrame(columns=[\"Vstd\", \"C2UCB_wo_real_reward\", \"C2UCB_wi_real_reward\", \"Vprop\", \"prop_real_reward\"], index=range(tuningNum))\ntuningTable[\"Vstd\"] = VstdList\ntuningTable[\"Vprop\"] = VpropList\n\n## tuning\nif istuning == True:\n # savename\n savename = \"simul_n_%s_tuningT_%s_nu_%s_dim_%s_N_%s_R_%s_K_%s_lamb_%s.csv\" % (simul_n, tuningT, nu, dim, N, str(R).replace(\".\", \"\"), K, str(lamb).replace(\".\",\"\"))\n\n C2UCB_wo_real_rw_list = list()\n C2UCB_wi_real_rw_list = list()\n semiComb_real_rw_list = list()\n for tunID in range(tuningNum):\n vstd = VstdList[tunID]\n vprop = VpropList[tunID]\n print(\"Evaluating the %s-th hyperparameter...\\n\" % tunID)\n for simul in range(simul_n):\n theta = make_theta(dim, simul)\n print(\"Tuning simulation, simul num : \", simul)\n opt_cum_exp_rw, opt_cum_real_rw, vs = optimal_strategy(dim, N, R, theta, K, tuningT, simul, simul_n, istuning, tuningT, nu)\n C2UCB_wo_cum_exp_rw, C2UCB_wo_cum_real_rw = C2UCB_without_intercept(dim, N, R, theta, K, tuningT, simul, simul_n, lamb, sigma, vstd, vs, istuning, tuningT)\n C2UCB_wi_cum_exp_rw, C2UCB_wi_cum_real_rw = C2UCB_with_intercept(dim, N, R, theta, K, tuningT, simul, simul_n, lamb, sigma, vstd, vs, istuning, tuningT)\n semiComb_cum_exp_rw, semiComb_cum_real_rw = semiComb(dim, N, R, theta, K, tuningT, simul, simul_n, lamb, sigma, vprop, vs, istuning, tuningT)\n\n # not uning regret, stack reward 
for tuning\n\n C2UCB_wo_real_rw_list.append(C2UCB_wo_cum_real_rw[-1])\n C2UCB_wi_real_rw_list.append(C2UCB_wi_cum_real_rw[-1])\n semiComb_real_rw_list.append(semiComb_cum_real_rw[-1])\n\n tuningTable.iloc[tunID, 1] = np.mean(C2UCB_wo_real_rw_list)\n tuningTable.iloc[tunID, 2] = np.mean(C2UCB_wi_real_rw_list)\n tuningTable.iloc[tunID, 4] = np.mean(semiComb_real_rw_list)\n tuningTable.to_csv(save_dir + \"/Tuning_\" + savename, na_rep = 'NaN', index = False)\n\n\n\nif istuning == False:\n # select optimal hyperparameter\n tun_savename = \"simul_n_%s_tuningT_%s_nu_%s_dim_%s_N_%s_R_%s_K_%s_lamb_%s.csv\" % (simul_n, tuningT, nu, dim, N, str(R).replace(\".\", \"\"), K, str(lamb).replace(\".\",\"\"))\n tuningTable = pd.read_csv(save_dir + \"/Tuning_\" + tun_savename)\n vstd_wo = VstdList[np.argmax(tuningTable[\"C2UCB_wo_real_reward\"])]\n vstd_wi = VstdList[np.argmax(tuningTable[\"C2UCB_wi_real_reward\"])]\n vprop = VpropList[np.argmax(tuningTable[\"prop_real_reward\"])]\n\n print(\"vstd_wo = \", vstd_wo,\"vstd_wi = \", vstd_wi, \", vprop = \", vprop)\n\n # savename\n savename = \"simul_n_%s_T_%s_nu_%s_dim_%s_N_%s_R_%s_K_%s_lamb_%s.csv\" % (simul_n, tuningT, nu, dim, N, str(R).replace(\".\", \"\"), K, str(lamb).replace(\".\",\"\"))\n ## main simulation\n\n cumulated_reward_optimal = list()\n cumulated_reward_C2UCB_wo = list()\n cumulated_reward_C2UCB_wi = list()\n cumulated_reward_semiComb = list()\n\n cumulated_regret_C2UCB_wo = list()\n cumulated_regret_C2UCB_wi = list()\n cumulated_regret_semiComb = list()\n\n for simul in range(simul_n):\n print(\"Main simulation, simul num : \", simul)\n theta = make_theta(dim, simul)\n opt_cum_exp_rw, opt_cum_real_rw, vs = optimal_strategy(dim, N, R, theta, K, T, simul, simul_n, istuning, tuningT, nu)\n C2UCB_wo_cum_exp_rw, C2UCB_wo_cum_real_rw = C2UCB_without_intercept(dim, N, R, theta, K, T, simul, simul_n, lamb, sigma, vstd_wo, vs, istuning, tuningT)\n C2UCB_wi_cum_exp_rw, C2UCB_wi_cum_real_rw = C2UCB_with_intercept(dim, N, R, theta, K, T, simul, simul_n, lamb, sigma, vstd_wi, vs, istuning, tuningT)\n semiComb_cum_exp_rw, semiComb_cum_real_rw = semiComb(dim, N, R, theta, K, T, simul, simul_n, lamb, sigma, vprop, vs, istuning, tuningT)\n\n cumulated_reward_optimal.append(opt_cum_exp_rw)\n cumulated_reward_C2UCB_wo.append(C2UCB_wo_cum_exp_rw)\n cumulated_reward_C2UCB_wi.append(C2UCB_wi_cum_exp_rw)\n cumulated_reward_semiComb.append(semiComb_cum_exp_rw)\n\n # regret\n cumulated_regret_C2UCB_wo.append(cumulated_reward_optimal[simul] - cumulated_reward_C2UCB_wo[simul])\n cumulated_regret_C2UCB_wi.append(cumulated_reward_optimal[simul] - cumulated_reward_C2UCB_wi[simul])\n cumulated_regret_semiComb.append(cumulated_reward_optimal[simul] - cumulated_reward_semiComb[simul])\n\n pd.DataFrame(cumulated_regret_semiComb).to_csv(save_dir + \"/Regret_semiComb_\" + savename, na_rep='NaN', index=False)\n pd.DataFrame(cumulated_regret_C2UCB_wo).to_csv(save_dir + \"/Regret_C2UCB_wo_\" + savename, na_rep=\"NaN\", index=False)\n pd.DataFrame(cumulated_regret_C2UCB_wi).to_csv(save_dir + \"/Regret_C2UCB_wi_\" + savename, na_rep=\"NaN\", index=False)\n pd.DataFrame(cumulated_reward_optimal).to_csv(save_dir + \"/Reward_optimal_\" + savename, na_rep=\"NaN\", index=False)\n pd.DataFrame(cumulated_reward_semiComb).to_csv(save_dir + \"/Reward_semiComb_\" + savename, na_rep=\"NaN\", index=False)\n pd.DataFrame(cumulated_reward_C2UCB_wo).to_csv(save_dir + \"/Reward_C2UCB_wo_\" + savename, na_rep=\"NaN\", index=False)\n pd.DataFrame(cumulated_reward_C2UCB_wi).to_csv(save_dir + 
\"/Reward_C2UCB_wi_\" + savename, na_rep=\"NaN\", index=False)\n\n\n ## plot result\n\n # load regret\n C2UCB_wo_regret = pd.read_csv(save_dir + \"/Regret_C2UCB_wo_\" + savename)\n C2UCB_wi_regret = pd.read_csv(save_dir + \"/Regret_C2UCB_wi_\" + savename)\n semiComb_regret = pd.read_csv(save_dir + \"/Regret_semiComb_\" + savename)\n\n # load reward\n C2UCB_wo_reward = pd.read_csv(save_dir + \"/Reward_C2UCB_wo_\" + savename)\n C2UCB_wi_reward = pd.read_csv(save_dir + \"/Reward_C2UCB_wi_\" + savename)\n semiComb_reward = pd.read_csv(save_dir + \"/Reward_semiComb_\" + savename)\n \n # (2,2) plot\n fig, axes = plt.subplots(2, 2)\n fig.tight_layout(pad=4.0)\n\n # make reward graph (mean, sd)\n steps = np.arange(1, T + 1)\n\n C2UCB_wo_mean = np.mean(C2UCB_wo_reward, axis=0)\n C2UCB_wo_std = np.std(C2UCB_wo_reward, axis=0)\n\n C2UCB_wi_mean = np.mean(C2UCB_wi_reward, axis=0)\n C2UCB_wi_std = np.std(C2UCB_wi_reward, axis=0)\n\n semiComb_mean = np.mean(semiComb_reward, axis=0)\n semiComb_std = np.std(semiComb_reward, axis=0)\n\n axes[0][0].plot(steps, C2UCB_wo_mean, 'r', label=\"C2UCB_wo\")\n axes[0][0].plot(steps, C2UCB_wo_mean + C2UCB_wo_std, 'r', linestyle=\"--\")\n axes[0][0].plot(steps, C2UCB_wo_mean - C2UCB_wo_std, 'r', linestyle=\"--\")\n\n axes[0][0].plot(steps, C2UCB_wi_mean, 'g', label=\"C2UCB_wi\")\n axes[0][0].plot(steps, C2UCB_wi_mean + C2UCB_wi_std, 'g', linestyle=\"--\")\n axes[0][0].plot(steps, C2UCB_wi_mean - C2UCB_wi_std, 'g', linestyle=\"--\")\n\n axes[0][0].plot(steps, semiComb_mean, 'b', label=\"semiComb\")\n axes[0][0].plot(steps, semiComb_mean + semiComb_std, 'b', linestyle=\"--\")\n axes[0][0].plot(steps, semiComb_mean + semiComb_std, 'b', linestyle=\"--\")\n\n axes[0][0].set_xlabel('Decision Point')\n axes[0][0].set_ylabel('Cumulative Reward')\n axes[0][0].set_title('Cumulative Reward')\n axes[0][0].legend(loc='upper left', fontsize = 7)\n\n #print(\"(Reward)\", \"C2UCB : \", np.round(C2UCB_wo_mean[-1], 3), \", semiComb : \", np.round(semiComb_mean[-1], 3))\n\n # make reward graph (median, Q1, Q3)\n steps = np.arange(1, T + 1)\n\n axes[0][1].plot(steps, np.median(C2UCB_wo_reward, axis=0), 'r', label=\"C2UCB_wo\")\n axes[0][1].plot(steps, np.percentile(C2UCB_wo_reward, 25, axis=0), 'r', linestyle=\"--\")\n axes[0][1].plot(steps, np.percentile(C2UCB_wo_reward, 75, axis=0), 'r', linestyle=\"--\")\n\n axes[0][1].plot(steps, np.median(C2UCB_wi_reward, axis=0), 'g', label=\"C2UCB_wi\")\n axes[0][1].plot(steps, np.percentile(C2UCB_wi_reward, 25, axis=0), 'g', linestyle=\"--\")\n axes[0][1].plot(steps, np.percentile(C2UCB_wi_reward, 75, axis=0), 'g', linestyle=\"--\")\n\n axes[0][1].plot(steps, np.median(semiComb_reward, axis=0), 'b', label=\"semiComb\")\n axes[0][1].plot(steps, np.percentile(semiComb_reward, 25, axis=0), 'b', linestyle=\"--\")\n axes[0][1].plot(steps, np.percentile(semiComb_reward, 75, axis=0), 'b', linestyle=\"--\")\n\n axes[0][1].set_xlabel('Decision Point')\n axes[0][1].set_ylabel('Cumulative Reward')\n axes[0][1].set_title('Cumulative Reward with median and Q1, Q3')\n axes[0][1].legend(loc='upper left', fontsize = 7)\n\n # make regret graph (mean, sd)\n steps = np.arange(1, T + 1)\n\n C2UCB_wo_mean = np.mean(C2UCB_wo_regret, axis=0)\n C2UCB_wo_std = np.std(C2UCB_wo_regret, axis=0)\n\n C2UCB_wi_mean = np.mean(C2UCB_wi_regret, axis=0)\n C2UCB_wi_std = np.std(C2UCB_wi_regret, axis=0)\n\n semiComb_mean = np.mean(semiComb_regret, axis=0)\n semiComb_std = np.std(semiComb_regret, axis=0)\n\n axes[1][0].plot(steps, C2UCB_wo_mean, 'r', label=\"C2UCB_wo\")\n 
axes[1][0].plot(steps, C2UCB_wo_mean + C2UCB_wo_std, 'r', linestyle=\"--\")\n axes[1][0].plot(steps, C2UCB_wo_mean - C2UCB_wo_std, 'r', linestyle=\"--\")\n\n axes[1][0].plot(steps, C2UCB_wi_mean, 'g', label=\"C2UCB_wi\")\n axes[1][0].plot(steps, C2UCB_wi_mean + C2UCB_wi_std, 'g', linestyle=\"--\")\n axes[1][0].plot(steps, C2UCB_wi_mean - C2UCB_wi_std, 'g', linestyle=\"--\")\n\n axes[1][0].plot(steps, semiComb_mean, 'b', label=\"semiComb\")\n axes[1][0].plot(steps, semiComb_mean + semiComb_std, 'b', linestyle=\"--\")\n axes[1][0].plot(steps, semiComb_mean - semiComb_std, 'b', linestyle=\"--\")\n\n axes[1][0].set_xlabel('Decision Point')\n axes[1][0].set_ylabel('Cumulative Regret')\n axes[1][0].set_title('Cumulative Regret')\n axes[1][0].legend(loc='upper left', fontsize = 7)\n\n print(\"(Regret)\", \"C2UCB wo : \", np.round(C2UCB_wo_mean[-1], 3),\", C2UCB wi : \", np.round(C2UCB_wi_mean[-1], 3), \", semiComb : \", np.round(semiComb_mean[-1], 3))\n\n # make regret graph (median, Q1, Q3)\n steps = np.arange(1, T + 1)\n\n axes[1][1].plot(steps, np.median(C2UCB_wo_regret, axis=0), 'r', label=\"C2UCB_wo\")\n axes[1][1].plot(steps, np.percentile(C2UCB_wo_regret, 25, axis=0), 'r', linestyle=\"--\")\n axes[1][1].plot(steps, np.percentile(C2UCB_wo_regret, 75, axis=0), 'r', linestyle=\"--\")\n\n axes[1][1].plot(steps, np.median(C2UCB_wi_regret, axis=0), 'g', label=\"C2UCB_wi\")\n axes[1][1].plot(steps, np.percentile(C2UCB_wi_regret, 25, axis=0), 'g', linestyle=\"--\")\n axes[1][1].plot(steps, np.percentile(C2UCB_wi_regret, 75, axis=0), 'g', linestyle=\"--\")\n\n axes[1][1].plot(steps, np.median(semiComb_regret, axis=0), 'b', label=\"semiComb\")\n axes[1][1].plot(steps, np.percentile(semiComb_regret, 25, axis=0), 'b', linestyle=\"--\")\n axes[1][1].plot(steps, np.percentile(semiComb_regret, 75, axis=0), 'b', linestyle=\"--\")\n\n axes[1][1].set_xlabel('Decision Point')\n axes[1][1].set_ylabel('Cumulative Regret')\n axes[1][1].set_title('Cumulative Regret with median and Q1, Q3')\n axes[1][1].legend(loc='upper left', fontsize = 7)\n\n plt.savefig(save_dir + \"/plot_\" + savename + \".png\")", "repo_name": "YunSeo00/combinatorial_MAB", "sub_path": "comb_numerical_simulation.py", "file_name": "comb_numerical_simulation.py", "file_ext": "py", "file_size_in_byte": 14137, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 58, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 130, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 177, 
"usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 178, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 179, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 180, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 188, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 190, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 194, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 296, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.savefig", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}]} +{"seq_id": "33047044613", "text": "__doc__ = \"\"\"\nDescription of this price predictor:\nThis is my Investor's 8ball. It focuses on using news headlines, previous stock prices, and machine\nlearning to calculate the price of an item on the \"Yahoo Finance Market on any date. Currently, we are \nstarting off with an sklearn framework for linear regression, which will be used to predict the stock prices, \nand then, because numbers are not enough to express what happens in the real world, we will use news headlines\nto calculate a weight, which we will multiply each of the stock predictions by to get our final result.\n\nA quick note that the model in question being used is an XGBoost Regressor with the proper parameters configured\nvia a RandomSearchCV, wrapped in an sklearn MultiOutputRegressor wrapper, which then gets wrapped in a biaswrapper\nof my own design.\n\nBy: Karthik Singaravadivelan\n\"\"\"\n\n# All the imports go below\n# These modules are for yahoo finance information purposes and news scraping\nimport yfinance as yf\n\n# My usual machine learning imports:\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\n\n# Accuracy metric:\nfrom sklearn.metrics import mean_squared_error\n\n# These are for visualization\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# XGBoost regressor!\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom xgboost.sklearn import XGBRegressor\n\n# My Python Package!!\nfrom biaswrappers.regressor import BiasRegressor\n\n# linreg for UpDown Classification (starting small, will go to\n# sentiment and more advanced regressors in future)\nfrom sklearn.linear_model import LinearRegression\n\n# imports included with python that I need\nimport os\nimport sys\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\nimport datetime\nimport csv\n\n# This is a quick function to write to a text file\ndef append_new_line(file_name, text_to_append):\n \"\"\"Append given text as a new line at the end of file\"\"\"\n # Open the file in append & read mode ('a+')\n with open(file_name, \"a+\") as file_object:\n # Move read cursor to the start of file.\n file_object.seek(0)\n # If file is not empty then append '\\n'\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n # Append text at the end of file\n file_object.write(text_to_append)\n\n# this function clears the log file\ndef clear_log(filename:str=\"log.txt\"):\n \"\"\"\n Clears the log file\n You can input your own filename with the \"filename\" param\n \"\"\"\n file = open(filename,\"r+\")\n file.truncate(0)\n file.close()\n\n# This function gets a model's name (from scikit-learn ONLY)\ndef get_model_name(model):\n return str(model.__class__).split('.')[-1][:-2]\n\n# STOCK PREDICTOR CLASS BELOW\n\nclass MarketPredictor(object):\n \"\"\"\n My Market Predictor class\n\n By Karthik Singaravadivelan\n\n This class predicts the price of something on the \"Yahoo Finance\" market.\n There is no dataset needed, and there are very little commands involved.\n \n First, you must create an instance of this class with the input of a ticker.\n Then, use the load_data() method to load your dataset for the predicting.\n After you've loaded your data, use the predict method to predict your prices.\n\n Note: The 
accuracy of this specialized model ranges from 0.8 to 0.98 based on the attributes and data given.\n\n You can change the amount of data the model receives; however, it is preferable to leave the \n start and end time variables as \"None\" to receive maximum data. If you end up using the variables,\n then keep the format of the dates in \"YYYY-MM-DD\". Note that the dates are inclusive, so if you\n want the date range to be 2012-05-01 through 2021-05-01, you will have to specify those dates.\n\n \"\"\"\n # the constructor\n def __init__(self, ticker:str, clone_id=0, dayspast:int=60, start_date:str=None, end_date:str=None):\n self.ticker = ticker.upper()\n self.attributes = None\n self.labels = [\"Open\",\"High\",\"Low\",\"Close\", \"Adj Close\"]\n self.model = None # a variable to hold the regression model.\n self.model_path = \"stockmodel.pickle\"\n self.dayspast = dayspast\n self.start_date = start_date\n self.end_date = end_date\n self.data = None # a variable to hold the data python object.\n self.dataset = R\"data\\{}{}.csv\".format(str(clone_id), str(self.ticker))# actual data\n self.future_dataset = R\"data\\{}Future{}.csv\".format(str(clone_id), str(self.ticker))# dataset for predictions\n self.benchmark = R\"data\\{}benchmark.csv\".format(str(clone_id))\n self.benchmark2 = R\"data\\{}benchmark2.csv\".format(str(clone_id))\n self.test_attrs = None\n self.num_test_attrs = None\n self.preds_dict = None\n\n def load_data(self):\n \"\"\"\n This function will load the data from Yahoo Finance using the yfinance package\n and put it in a csv file.\n \"\"\"\n\n def read_cell(x, y):\n filename = self.benchmark\n with open(filename, 'r') as f:\n rows = list(csv.reader(f))\n cell = rows[y][x]\n return cell\n\n with open(\"error.txt\", \"r\") as f:\n ERROR_MESSAGE = str(f.read())\n\n if os.path.exists(self.dataset):\n os.remove(self.dataset)\n\n if os.path.exists(self.future_dataset):\n os.remove(self.future_dataset)\n\n if os.path.exists(self.model_path):\n os.remove(self.model_path)\n\n if os.path.exists(self.benchmark):\n os.remove(self.benchmark)\n \n if os.path.exists(self.benchmark2):\n os.remove(self.benchmark2)\n\n ################################\n # Historical Stock Data\n \n df = yf.download(self.ticker, period=\"max\")\n df = df.drop(\"Volume\", axis=1)\n df.reset_index(level=0, inplace=True)\n df.rename(columns={'Date':'RealDate'}, inplace=True)\n df[\"Date\"] = [1 + idx for idx, row in df.iterrows()]\n new_order = [-1, 1, 2, 3, 4, 5, 0]\n df = df[df.columns[new_order]]\n df.to_csv(self.benchmark, index=False)\n\n with open(self.benchmark, 'r', newline=\"\") as f:\n reader = csv.reader(f)\n benchmark_lines = [row for row in reader]\n\n new_benchmarks = []\n benchmark_dates = []\n for benchmark_line in benchmark_lines:\n benchmark_dates.append(benchmark_line[-1])\n benchmark_line.pop(-1)\n new_benchmarks.append(benchmark_line)\n \n benchmark_lines = new_benchmarks\n if not self.start_date:\n self.start_date = benchmark_dates[1]\n \n if not self.end_date:\n self.end_date = benchmark_dates[-1]\n\n header_template = benchmark_lines.pop(0)\n header_template.pop(0)\n final_header_row = [\"Date\"]\n for num in range(self.dayspast):\n formatted_headers = []\n formatted_headers.extend(header_template)\n if num > 0:\n for idx in range(len(formatted_headers)):\n formatted_headers[idx] = str(formatted_headers[idx]) + str(num)\n final_header_row.extend(formatted_headers)\n final_header_row.append(\"RealDate\")\n\n with open(self.benchmark2, 'a', newline=\"\") as csvfile:\n writer = 
csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)\n writer.writerow(final_header_row)\n\n self.attributes = list(final_header_row)[0:-1]\n self.test_attrs = [x for x in self.attributes if x not in self.labels] # all the attributes for testing the model\n self.num_test_attrs = len(self.test_attrs)\n\n for line_num in range(len(benchmark_lines)):\n try:\n real_line_num = line_num + self.dayspast - 1\n final_line = []\n for day in range(self.dayspast):\n my_val = real_line_num - day\n line = benchmark_lines[my_val]\n final_line.extend(line)\n with open(self.benchmark2, 'a', newline=\"\") as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)\n for num in range(self.dayspast):\n if num > 0:\n idx = (num * 6) - (num - 1)\n final_line.pop(idx)\n cdate = read_cell(6, (real_line_num + 1))\n final_line.append(cdate)\n today = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n formatted_final_line = []\n for elem in final_line:\n if (type(elem) == str):\n try:\n formatted_final_line.append(float(elem))\n except ValueError:\n formatted_final_line.append(elem)\n if cdate != today:\n writer.writerow(final_line)\n except IndexError:\n break\n\n with open(self.dataset, 'a', newline=\"\") as output_file:\n nwriter = csv.writer(output_file)\n with open(self.benchmark2, 'r') as input_file:\n reader = csv.reader(input_file)\n\n line_index = 0 # debugging\n\n for row in reader:\n line_index += 1\n date = row[-1]\n try:\n\n startyear = int(self.start_date[0:4])\n startmonth = int(self.start_date[5:7])\n startday = int(self.start_date[8:10])\n\n endyear = int(self.end_date[0:4])\n endmonth = int(self.end_date[5:7])\n endday = int(self.end_date[8:10])\n\n currentyear = int(date[0:4])\n currentmonth = int(date[5:7])\n currentday = int(date[8:10])\n\n current_obj = datetime.datetime(\n year=currentyear, \n month=currentmonth, \n day=currentday\n )\n\n start_obj = datetime.datetime(\n year=startyear,\n month=startmonth,\n day=startday\n )\n\n end_obj = datetime.datetime(\n year=endyear,\n month=endmonth,\n day=endday\n )\n\n if (current_obj >= start_obj) and (current_obj <= end_obj):\n nwriter.writerow(row)\n except Exception as e:\n if str(e) == \"invalid literal for int() with base 10: 'Real'\":\n nwriter.writerow(row)\n \n else:\n print(e)\n\n os.popen(f'copy {self.dataset} {self.future_dataset}')\n # os.remove(self.benchmark)\n # os.remove(self.benchmark2)\n\n def fit_inital(self, filename:str=None):\n \"\"\"\n Here we are fitting our first model on the original data:\n \"\"\"\n if not filename:\n filename = self.dataset\n\n # Clearing the log\n clear_log()\n\n append_new_line(\"log.txt\", \"\\nFitting the model for the stock predictor. 
\\nStarting log at \"+str(datetime.datetime.now())+\"\\n\")\n\n data = pd.read_csv(filename, sep=\",\")\n data = data[self.attributes]\n\n self.data = data\n\n predict = self.labels\n\n # print(num_test_attrs, test_attrs)\n\n X = data.drop(predict, 1).values\n y = data[predict].values\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n append_new_line(\"log.txt\", \"\\nTraining set: {} samples\".format(X_train.shape))\n append_new_line(\"log.txt\", \"Test set: {} samples\".format(X_test.shape))\n\n inside_model = XGBRegressor(\n objective='reg:squarederror',\n learning_rate=0.045,\n max_depth=5,\n n_estimators=475,\n gamma=6.5e-6,\n subsample=0.75,\n colsample_bytree=0.75,\n min_child_weight=10\n )\n \n multi_model = MultiOutputRegressor(inside_model)\n model = BiasRegressor(multi_model)\n model.fit(X_train, y_train)\n\n test_preds = model.predict(X_test)\n test_preds = test_preds.reshape(-1, len(self.labels))\n rmse = np.sqrt(mean_squared_error(y_test, test_preds))\n self.model = model\n\n append_new_line(\"log.txt\", \"\\nFitting/Tuning Finished at \"+str(datetime.datetime.now()))\n append_new_line(\"log.txt\", \"\\nwith RMSE of \"+str(rmse))\n\n def predict(self, date:str, filename:str=None):\n \"\"\"\n This predictor method predicts stocks, given a date in the form of YYYY-MM-DD.\n It will predict any date, however the farther into the future you choose, the longer it will take to predict the stock price, and the less reliable the machine will be.\n \"\"\"\n \n\n # What this model is doing is, it's predicting one day into the future, \n # then it adds the values it gets to the Future(ticker).csv dataset.\n # Then it trains on the new values, and predicts one more day into the future.\n # This process continues until the date predicted is the date the user asks for, \n # where the console reveals the predicted values.\n\n # We need to define the year, month, and day to find out how many times the loop should be ran.\n\n year = int(date[0:4])\n month = int(date[5:7])\n day = int(date[8:10])\n\n # print(\"Y:{}\\nM:{}\\nD:{}\\n\".format(year, month, day))\n \n def numOfDays(date1, date2):\n return (date2-date1).days\n\n # This function can read any cell in a csv file.\n def read_cell(x, y):\n filename = self.future_dataset\n with open(filename, 'r') as f:\n rows = list(csv.reader(f))\n cell = rows[y][x]\n return cell\n\n def get_next_weekday(date):\n if date.isoweekday() in set((6, 7)):\n date += datetime.timedelta(days=-date.isoweekday() + 8)\n return date\n\n\n # These lines are to see how many times the \"for\" loop should run.\n\n if not self.end_date:\n today = datetime.date.today()\n today = (today - datetime.timedelta(days=1))\n date2 = datetime.date(today.year, today.month, today.day)\n else:\n endyear = int(self.end_date[0:4])\n endmonth = int(self.end_date[5:7])\n endday = int(self.end_date[8:10])\n date2 = datetime.date(endyear, endmonth, endday)\n predicted_date = datetime.date(year, month, day)\n predicted_date = get_next_weekday(predicted_date)\n # print(today.year, today.month, today.day)\n days = int(numOfDays(date2, predicted_date))\n # print(days, \"days\")\n\n stock_days = days + float(read_cell(0, -1)) - 1\n\n print(\"Date Predicted: \" + predicted_date.strftime(\"%Y-%m-%d\"))\n print(\"Model Used For Prediction: \" + str(get_model_name(self.model)))\n\n for day in range(0, days):\n\n # print(day)\n\n \"\"\"\n Here we are fitting our first model:\n \"\"\"\n if not filename:\n filename = self.future_dataset\n\n append_new_line(\"log.txt\", 
\"\\nFitting the model for the stock predictor. \\nStarting log at \"+str(datetime.datetime.now())+\"\\n\")\n\n data = pd.read_csv(filename, sep=\",\")\n data = data[self.attributes]\n\n predict = self.labels\n\n # print(num_test_attrs, test_attrs)\n\n X = np.array(data.drop(predict, 1))\n y = np.array(data[predict])\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n # print (\"\\n\\nTraining set: {} samples\".format(X_train.shape))\n # print (\"Test set: {} samples\".format(X_test.shape))\n \n\n linear = self.model\n\n linear.fit(X_train, y_train)\n\n test_preds = linear.predict(X_test)\n test_preds = test_preds.reshape(-1, len(self.labels))\n rmse = np.sqrt(mean_squared_error(y_test, test_preds))\n\n # print(coef_str)\n # print(y_in_str)\n\n append_new_line(\"log.txt\", \"\\nPredictions Fitting Finished at \"+str(datetime.datetime.now()))\n\n \"\"\"\n Here is where the predicting begins.\n \"\"\"\n current_sd = stock_days - (days - day - 1)\n pred_values = [current_sd]\n # print(days, stock_days)\n \n for day in range(self.dayspast - 1):\n for pred_attr in self.labels:\n pred_attr += str(day) if day > 0 else \"\"\n cell_x = int((self.attributes.index(pred_attr)))\n cell_y = -1\n # print(f\"({cell_x}, {cell_y})\")\n pred_value = read_cell(cell_x, cell_y)\n pred_value = float(pred_value)\n\n pred_values.append(pred_value)\n\n my_values = np.array([pred_values])[0]\n my_values = my_values.reshape(-1, self.num_test_attrs)\n\n formatted_pred_values = []\n for value in my_values.tolist()[0]:\n value = round(float(value), 6)\n formatted_pred_values.append(value)\n\n # my_values = DMatrix(my_values)\n\n prediction = linear.predict(my_values)\n output_values = list(prediction)[0][0]\n ranges = [[rmse + pred, rmse - pred] for pred in output_values]\n\n formatted_output_values = []\n for value in output_values:\n value = round(float(value), 7)\n formatted_output_values.append(value)\n\n attr_d = {\"Input Labels\":self.test_attrs, \"Input Values\":[float(round(x, 2)) for x in formatted_pred_values]}\n attr_df = pd.DataFrame(data=attr_d)\n label_d = {\"Output Labels\":self.labels, \"Output Values\":[float(round(x, 2)) for x in formatted_output_values]}\n label_df = pd.DataFrame(data=label_d)\n\n df_str = str(attr_df) + \"\\n\\n\" + \"Predicted Date: {}\\n\\n\".format(date) + str(label_df)\n append_new_line(\"log.txt\", df_str)\n\n future_data = []\n future_data.extend(pred_values)\n future_data[0] = int(future_data[0]) + 1\n for elem in reversed(list(output_values)):\n future_data.insert(1, round(elem, 5))\n # print(\"\\nData to be plugged into Future Dataset: {}\".format(future_data))\n\n future_data[0] = int(future_data[0])\n\n # print(future_data)\n\n current_stock_date = date\n\n future_data.append(current_stock_date)\n\n with open(self.future_dataset, \"a\", newline='') as fp:\n wr = csv.writer(fp, dialect='excel')\n wr.writerow(future_data)\n\n print(\"\\nSee log.txt for more details about the training and testing of the model. 
(And all the dataframes)\\n\")\n # Writing to the Excel file: predictions.xlsx\n wb_name = 'data\\\\predictions.xlsx'\n try:\n label_df.to_excel(wb_name)\n self.preds_dict = label_d\n return label_df\n except:\n print(\"You have predicted a date that has already been processed by the data.\")\n print(\"To view the corresponding prices for this date, use the 'print_past-date'\")\n print(\"method, please.\")\n exit()\n\n def up_or_down(self, date:str):\n \"\"\"\n Determines sentiment and percentage value for given date.\n \"\"\"\n\n data = pd.read_csv(self.benchmark)\n\n main_data = pd.DataFrame()\n main_data[\"Date\"] = data[\"RealDate\"][1:]\n main_data[\"Index\"] = [x for x in range(1, (len(data[\"RealDate\"][:-1]) + 1))]\n\n percentage_vals = [float((data[\"Close\"][x] - data[\"Close\"][x - 1])/(data[\"Close\"][x]) * 100) for x in range(1, len(data[\"Close\"]))]\n\n main_data[\"PercentChange\"] = percentage_vals\n\n print(main_data.head())\n\n model = LinearRegression()\n\n\n\n def delete_datasets(self):\n if os.path.exists(self.dataset):\n os.remove(self.dataset)\n\n if os.path.exists(self.future_dataset):\n os.remove(self.future_dataset)\n\n if os.path.exists(self.model_path):\n os.remove(self.model_path)\n\n if os.path.exists(self.benchmark):\n os.remove(self.benchmark)\n \n if os.path.exists(self.benchmark2):\n os.remove(self.benchmark2)\n\n def plot_2D(self, x:str, y:str):\n \"\"\"\n This plots a 2D graph of an x and a y, from a data object generated by the 'fit' method.\n Note that both x and y must be either attributes or features of the data object.\n \"\"\"\n x_True = x in self.attributes\n y_True = y in self.attributes\n if not all([x_True, y_True]):\n raise Exception('x and y must be features or targets inside the data object.')\n style.use(\"ggplot\")\n plt.scatter(self.data[x], self.data[y])\n plt.xlabel(x)\n plt.ylabel(y)\n plt.show()\n\n def plot_3D(self, x:str, y:str, z:str):\n \"\"\"\n This plots a 3D graph of an x, y, and z, from a data object generated by the 'fit' method.\n \"\"\"\n x_True = x in self.attributes\n y_True = y in self.attributes\n z_True = z in self.attributes\n if not all([x_True, y_True, z_True]):\n raise Exception('x, y, and z must be features or targets inside the data object.')\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.scatter(self.data[x], self.data[y], self.data[z])\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n ax.set_zlabel(z)\n plt.show()\n\n", "repo_name": "karthiksing05/8ball", "sub_path": "predictor.py", "file_name": "predictor.py", "file_ext": "py", "file_size_in_byte": 22213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "sys.warnoptions", "line_number": 47, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 49, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.remove", 
"line_number": 146, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 152, "usage_type": "call"}, {"api_name": "yfinance.download", "line_number": 157, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 167, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 197, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONNUMERIC", "line_number": 197, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 213, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONNUMERIC", "line_number": 213, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 220, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 220, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 234, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 236, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 257, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 263, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 269, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 284, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 298, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 300, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 312, "usage_type": "call"}, {"api_name": "xgboost.sklearn.XGBRegressor", "line_number": 316, "usage_type": "call"}, {"api_name": "sklearn.multioutput.MultiOutputRegressor", "line_number": 327, "usage_type": "call"}, {"api_name": "biaswrappers.regressor.BiasRegressor", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 333, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 333, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 336, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 336, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 367, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 373, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 380, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 380, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 381, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 382, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 387, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 388, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 409, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 409, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 411, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 419, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 421, "usage_type": 
"call"}, {"api_name": "numpy.sqrt", "line_number": 432, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 432, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 437, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 437, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 457, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 477, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 479, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 500, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 521, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 523, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 533, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 538, "usage_type": "call"}, {"api_name": "os.path", "line_number": 538, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 539, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 541, "usage_type": "call"}, {"api_name": "os.path", "line_number": 541, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 542, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 544, "usage_type": "call"}, {"api_name": "os.path", "line_number": 544, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 545, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 547, "usage_type": "call"}, {"api_name": "os.path", "line_number": 547, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 548, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 550, "usage_type": "call"}, {"api_name": "os.path", "line_number": 550, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 551, "usage_type": "call"}, {"api_name": "matplotlib.style.use", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 562, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 563, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 563, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 564, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 564, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 565, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 565, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 566, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 566, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 577, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 577, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 583, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 583, "usage_type": "name"}]} +{"seq_id": "33069326030", "text": "\n# coding: utf-8\n\n# In[15]:\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nget_ipython().magic('matplotlib inline')\n\nfig=plt.figure()\nax=Axes3D(fig)\nx=np.arange(-1,2,0.25)\ny=np.arange(-10,4,0.5)\nx,y=np.meshgrid(x,y)\nR=np.sqrt(x**2 + y**2)\nZ=np.sin(R)\nZ_plane=(-5+2*x+4*y)\nax.plot_surface (x,y,R,rstride=1,cstride=1, cmap='hot')\nax.plot_surface (x,y,Z_plane,rstride=1,cstride=1, 
cmap='hot')\nplt.show()\n\n\n# In[ ]:\n\n\n\n", "repo_name": "echatziosman/Computational-Geometry", "sub_path": "HesaplamHafta12.py", "file_name": "HesaplamHafta12.py", "file_ext": "py", "file_size_in_byte": 465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "21159380466", "text": "from flask import Flask, render_template, send_file\nfrom flask_flatpages import FlatPages\n\nfrom os.path import join\n\n\nclass Config:\n FLATPAGES_ROOT = 'articles'\n FLATPAGES_EXTENSION = '.md'\n FLATPAGES_MARKDOWN_EXTENSIONS = ['toc']\n FLATPAGES_EXTENSION_CONFIGS = {\n 'toc': {\n 'permalink': 'True',\n 'permalink_class': 'octicon-link'\n }\n }\n\n\napp = Flask(__name__)\napp.config.from_object(Config())\nflatpages = FlatPages(app)\n\n\n@app.route('/articles/')\ndef article(permalink):\n article = flatpages.get_or_404(permalink)\n all_articles = [article for article in flatpages]\n \n return render_template(\n 'article.html',\n article=article,\n all_articles=all_articles\n )\n", "repo_name": "AttractiveVitaminsChaseThrough/presentations", "sub_path": "martian-geology/flask/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 756, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_flatpages.FlatPages", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "71459756721", "text": "import logging\nimport logging.handlers\nfrom logging.config import dictConfig\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_LOGGING = {\"version\": 1, \"disable_existing_loggers\": False}\n\n\ndef configure_logging():\n \"\"\"\n Initialize logging defaults for Project.\n\n :param logfile_path: logfile used to the logfile\n :type logfile_path: string\n\n This function does:\n\n - Assign INFO and DEBUG level to logger file handler and console handler\n\n \"\"\"\n dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"[%(asctime)s] [%(levelname)8s] [%(name)s] [%(funcName)s():%(lineno)s] [PID:%(process)d \"\n \"TID:%(thread)d] -> \"\n \"%(message)s\",\n \"%d/%m/%Y %H:%M:%S\",\n )\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(default_formatter)\n logging.root.setLevel(logging.INFO)\n logging.root.addHandler(console_handler)\n\n # logging.getLogger(\"sqlalchemy.engine\").setLevel(logging.INFO)\n # logging.root.setLevel(logging.DEBUG)\n", "repo_name": "zaxmks/demo-data-compliance-service", "sub_path": 
"src/core/default_logging/log_config.py", "file_name": "log_config.py", "file_ext": "py", "file_size_in_byte": 1106, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.config.dictConfig", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 32, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.root.setLevel", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.root", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.root.addHandler", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.root", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "17140012132", "text": "#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\r\n\"\"\"\r\nMySQL-Plugin to dispatch FMS-, ZVEI- and POCSAG - messages to a MySQL database\r\n\r\n@author: Jens Herrmann\r\n@author: Bastian Schroll\r\n\r\n@requires: MySQL-Configuration has to be set in the config.ini\r\n@requires: Created Database/Tables, see boswatch.sql\r\n\"\"\"\r\n\r\nimport logging # Global logger\r\n\r\nimport mysql\r\nimport mysql.connector\r\n\r\nfrom includes import globalVars # Global variables\r\n\r\nfrom includes.helper import configHandler\r\n\r\ndef isSignal(poc_id):\r\n\t\"\"\"\r\n\t@type poc_id: string\r\n\t@param poc_id: POCSAG Ric\r\n\r\n\t@requires: Configuration has to be set in the config.ini\r\n\r\n\t@return: True if the Ric is Signal, other False\r\n\t@exception: none\r\n\t\"\"\"\r\n\t# If RIC is Signal return True, else False\r\n\tif globalVars.config.get(\"POC\", \"netIdent_ric\"):\r\n\t\tif poc_id in globalVars.config.get(\"POC\", \"netIdent_ric\"):\r\n\t\t\tlogging.info(\"RIC %s is net ident\", poc_id)\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tlogging.info(\"RIC %s is no net ident\", poc_id)\r\n\t\t\treturn False\r\n\r\n\r\n##\r\n#\r\n# onLoad (init) function of plugin\r\n# will be called one time by the pluginLoader on start\r\n#\r\ndef onLoad():\r\n\t\"\"\"\r\n\tWhile loading the plugins by pluginLoader.loadPlugins()\r\n\tthis onLoad() routine is called one time for initialize the plugin\r\n\r\n\t@requires: nothing\r\n\r\n\t@return: nothing\r\n\t\"\"\"\r\n\t# nothing to do for this plugin\r\n\treturn\r\n\r\n\r\n##\r\n#\r\n# Main function of MySQL-plugin\r\n# will be called by the alarmHandler\r\n#\r\ndef run(typ,freq,data):\r\n\t\"\"\"\r\n\tThis function is the implementation of the MySQL-Plugin.\r\n\tIt will store the data to an MySQL database\r\n\r\n\tThe configuration for the MySQL-Connection is set in the config.ini.\r\n\tFor DB- and tablestructure see boswatch.sql\r\n\r\n\t@type typ: string (FMS|ZVEI|POC)\r\n\t@param typ: Typ of the dataset for sending to BosMon\r\n\t@type data: map of data (structure see readme.md in plugin folder)\r\n\t@param data: Contains the parameter for dispatch to BosMon.\r\n\t@type freq: string\r\n\t@keyword freq: frequency is not used in this plugin\r\n\r\n\t@requires: MySQL-Configuration has to be set in the config.ini\r\n\t@requires: Created Database/Tables, see boswatch.sql\r\n\r\n\t@return: nothing\r\n\t\"\"\"\r\n\ttry:\r\n\t\tif 
configHandler.checkConfig(\"MySQL\"): #read and debug the config\r\n\r\n\t\t\ttry:\r\n\t\t\t\t#\r\n\t\t\t\t# Connect to MySQL\r\n\t\t\t\t#\r\n\t\t\t\tlogging.debug(\"connect to MySQL\")\r\n\t\t\t\tconnection = mysql.connector.connect(host = globalVars.config.get(\"MySQL\",\"dbserver\"), port = globalVars.config.get(\"MySQL\",\"dbport\"), user = globalVars.config.get(\"MySQL\",\"dbuser\"), passwd = globalVars.config.get(\"MySQL\",\"dbpassword\"), db = globalVars.config.get(\"MySQL\",\"database\"), charset = 'utf8mb4', collation = 'utf8mb4_general_ci')\r\n\t\t\t\tcursor = connection.cursor()\r\n\t\t\texcept:\r\n\t\t\t\tlogging.error(\"cannot connect to MySQL\")\r\n\t\t\t\tlogging.debug(\"cannot connect to MySQL\", exc_info=True)\r\n\t\t\telse: # Without connection, plugin couldn't work\r\n\t\t\t\ttry:\r\n\t\t\t\t\t#\r\n\t\t\t\t\t# Create and execute SQL-statement\r\n\t\t\t\t\t#\r\n\t\t\t\t\tlogging.debug(\"Insert %s\", typ)\r\n\r\n\t\t\t\t\tif typ == \"FMS\":\r\n\t\t\t\t\t\tcursor.execute(\"INSERT INTO \"+globalVars.config.get(\"MySQL\",\"tableFMS\")+\" (`time`, `fms`, `status`, `direction`, `directionText`, `tsi`, `description`) VALUES (FROM_UNIXTIME(%s),%s,%s,%s,%s,%s,%s)\", (data[\"timestamp\"], data[\"fms\"], data[\"status\"], data[\"direction\"], data[\"directionText\"], data[\"tsi\"], data[\"description\"]))\r\n\r\n\t\t\t\t\telif typ == \"ZVEI\":\r\n\t\t\t\t\t\tcursor.execute(\"INSERT INTO \"+globalVars.config.get(\"MySQL\",\"tableZVEI\")+\" (`time`, `zvei`, `description`) VALUES (FROM_UNIXTIME(%s),%s,%s)\", (data[\"timestamp\"], data[\"zvei\"], data[\"description\"]))\r\n\r\n\t\t\t\t\telif typ == \"POC\":\r\n\t\t\t\t\t\tif isSignal(data[\"ric\"]):\r\n\t\t\t\t\t\t\tif globalVars.config.getint(\"POC\",\"netIdent_history\"):\r\n\t\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO \"+globalVars.config.get(\"MySQL\",\"tableSIG\")+\" (`time`,`ric`) VALUES (NOW(), '\"+data[\"ric\"]+\"');\")\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tcursor.execute(\"UPDATE \"+globalVars.config.get(\"MySQL\",\"tableSIG\")+\" SET time = NOW() WHERE ric = '\"+data[\"ric\"]+\"';\")\r\n\t\t\t\t\t\t\t\tif cursor.rowcount == 0:\r\n\t\t\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO \"+globalVars.config.get(\"MySQL\",\"tableSIG\")+\" (`time`,`ric`) VALUES (NOW(), '\"+data[\"ric\"]+\"');\")\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO \"+globalVars.config.get(\"MySQL\",\"tablePOC\")+\" (`time`, `ric`, `function`, `functionChar`, `msg`, `bitrate`, `description`) VALUES (FROM_UNIXTIME(%s),%s,%s,%s,%s,%s,%s)\", (data[\"timestamp\"], data[\"ric\"], data[\"function\"], data[\"functionChar\"], data[\"msg\"], data[\"bitrate\"], data[\"description\"]))\r\n\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tlogging.warning(\"Invalid Typ: %s\", typ)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tlogging.error(\"cannot Insert %s\", typ)\r\n\t\t\t\t\tlogging.debug(\"cannot Insert %s\", typ, exc_info=True)\r\n\t\t\t\t\treturn\r\n\r\n\t\t\tfinally:\r\n\t\t\t\tlogging.debug(\"close MySQL\")\r\n\t\t\t\ttry:\r\n\t\t\t\t\tcursor.close()\r\n\t\t\t\t\tconnection.close() #Close connection in every case\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\r\n\texcept:\r\n\t\tlogging.error(\"unknown error\")\r\n\t\tlogging.debug(\"unknown error\", exc_info=True)\r\n", "repo_name": "Schrolli91/BOSWatch", "sub_path": "plugins/MySQL/MySQL.py", "file_name": "MySQL.py", "file_ext": "py", "file_size_in_byte": 4938, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 130, "dataset": "github-code", "pt": "75", "api": [{"api_name": "includes.globalVars.config.get", 
"line_number": 34, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 34, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 34, "usage_type": "name"}, {"api_name": "includes.globalVars.config.get", "line_number": 35, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 35, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 35, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "includes.helper.configHandler.checkConfig", "line_number": 87, "usage_type": "call"}, {"api_name": "includes.helper.configHandler", "line_number": 87, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 93, "usage_type": "call"}, {"api_name": "mysql.connector.connect", "line_number": 94, "usage_type": "call"}, {"api_name": "mysql.connector", "line_number": 94, "usage_type": "attribute"}, {"api_name": "includes.globalVars.config.get", "line_number": 94, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 94, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 94, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 97, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 104, "usage_type": "call"}, {"api_name": "includes.globalVars.config.get", "line_number": 107, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 107, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 107, "usage_type": "name"}, {"api_name": "includes.globalVars.config.get", "line_number": 110, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 110, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 110, "usage_type": "name"}, {"api_name": "includes.globalVars.config.getint", "line_number": 114, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 114, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 114, "usage_type": "name"}, {"api_name": "includes.globalVars.config.get", "line_number": 115, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 115, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 115, "usage_type": "name"}, {"api_name": "includes.globalVars.config.get", "line_number": 117, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 117, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 117, "usage_type": "name"}, {"api_name": "includes.globalVars.config.get", "line_number": 119, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 119, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 119, "usage_type": "name"}, {"api_name": "includes.globalVars.config.get", "line_number": 121, "usage_type": "call"}, {"api_name": "includes.globalVars.config", "line_number": 121, "usage_type": "attribute"}, {"api_name": "includes.globalVars", "line_number": 121, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 124, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 126, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 127, "usage_type": "call"}, 
{"api_name": "logging.debug", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 139, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "70522781683", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 1 07:31:22 2018\n\n@author: Administrator\n\"\"\"\n\n# 本模块用于实现一个实时预警器\n \nimport requests\nimport tushare as ts\nfrom requests.exceptions import RequestException\nfrom bs4 import BeautifulSoup\nimport re\nimport time\nimport pandas as pd\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36'}\n\ndef get_one_page(url):\n try:\n response = requests.get(url,headers = headers)\n response.encoding = 'GB2312'\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n return None\n\n\n###############################################################################\n###############################################################################\n# 自动预警逻辑\n'''\n 1.实现对大盘的跟踪观测\n 2.实现对指定个股的跟踪\n 3.实现整个市场资金异动的发掘(暂时不实现)\n'''\n###############################################################################\n# 用户配置参数,可手动修改或者通过AT指令实时修改\nglobal target_id\nglobal rase_th\nglobal volume_rate\nglobal warning_level\nglobal sleep_time\n\ntarget_id = [] #感兴趣的个股列表\nrase_th = 2.5 #涨跌幅预警阈值(%)\nvolume_rate = 1 #量比阈值\nquantity_th = 15 #成交量预警阈值(亿),暂时不管\nsleep_time = 0 #用于控制主线程休眠\n\n'''\nwarning_level 用来实现预警等级\n0 关闭预警功能\n1 只预警实时价格(默认最少会监控大盘状态,如果不设置target_id的话)\n2 预警实时价格+量比\n3 预警实时价格+量比+年月周线\n'''\nwarning_level = 3 #价格预警开关(0:全部关闭,1:只预警实时价格,1:预警周线\n###############################################################################\n# 全局变量区\nglobal avg_info\navg_info = pd.DataFrame(columns=['id', 'avg_price_week', 'avg_price_month'\n 'avg_price_year','volume_min']) #保存平均值\nglobal morning_open_time #用以指示交易时间\nglobal morning_close_time\nglobal afternoon_open_time\nglobal afternoon_close_time\n###############################################################################\n# 系统参数:不可随意更改\nprice_line = 0\nvolume_line = 3\nrate_line = 2\nstock_info = [] #定义的消息类型:[0]当前股价,[1]涨跌幅,[2]成交量\n\navg_line_m = 0\navg_line_w = 1\navg_line_y = 2\nid_line = 3\navg_vol_min = 4\n###############################################################################\n#初始化时获取个股的历史平均水平\ndef init():\n '''\n 根据target_id获取历史平均成交量、均线水平等参数,作为后续check的比对依据\n id:\n avg_price_week:周线\n avg_price_month:月线\n avg_price_year:年线\n volume_min:5日内平均每分钟成交量(用于计算量比)\n '''\n global avg_info\n global morning_open_time\n global morning_close_time\n global afternoon_open_time\n global afternoon_close_time\n# input_id = ['000651','600887','600066','600660']\n input_id = target_id\n \n avg_price_week = [] #周均线,5天\n avg_price_month = [] #月均线,22天\n avg_price_year = [] #年线,250天\n avg_volumn_min = []\n for id_name in input_id:\n a = ts.get_k_data(id_name) #获取基础信息\n temp = a.iloc[-5:]['close']\n avg_price_week.append(temp.sum()/5)\n temp = a.iloc[-22:]['close']\n avg_price_month.append(temp.sum()/22) \n if len(a) > 250:\n temp = a.iloc[-250:]['close']\n avg_price_year.append(temp.sum()/250)\n else:\n temp = a.iloc[:]['close']\n avg_price_year.append(temp.sum()/len(a)) \n a = a[-5:] #计算过去5天内的平均成交量\n b = a.iloc[:]['volume']\n c = b.sum() / len(b) / 240\n avg_volumn_min.append(c)\n \n data = {'id':input_id,\n 'avg_price_week':avg_price_week,\n 'avg_price_month':avg_price_month,\n 
'avg_price_year':avg_price_year,\n 'volume_min':avg_volumn_min\n }\n avg_info = pd.DataFrame(data)\n \n \n now = datetime.now()\n morning_open_time = datetime(now.year,now.month,now.day,9,30)\n morning_close_time = datetime(now.year,now.month,now.day,11,30)\n afternoon_open_time = datetime(now.year,now.month,now.day,13,00)\n afternoon_close_time = datetime(now.year,now.month,now.day,15,00)\n# return pd.DataFrame(data)\n\ndef get_main_market():\n #监测大盘\n url = 'http://hq.sinajs.cn/list=s_sh000001'\n html = get_one_page(url)\n pattern_data = '-?[\\d.]+'\n reobj = re.compile(pattern_data)\n r1 = reobj.findall(html) \n\n url = 'http://hq.sinajs.cn/list=s_sz399001'\n html = get_one_page(url)\n pattern_data = '-?[\\d.]+'\n reobj = re.compile(pattern_data)\n r2 = reobj.findall(html)\n \n name = ['上证综指','深成指数']\n price = [r1[1],r2[1]]\n rise = [r1[3],r2[3]]\n quantity = [round(float(r1[5])/10000,2),round(float(r2[5])/10000,2)]\n data = {'name':name,\n 'price':price,\n 'rise_rate':rise,\n 'quantity':quantity\n }\n r = pd.DataFrame(data)\n return r\n \ndef get_stock_market():\n global target_id\n if len(target_id) < 1:\n return 0\n s = ''\n for l in target_id:\n if (int(l) < 600000) and (int(l) != 1):\n s += 's_sz' #如果此处换为 'sz',则对应详细买卖量信息\n else:\n s += 's_sh'\n s += l\n s += ','\n result = s[:-1]\n# print(result) \n url = 'http://hq.sinajs.cn/list=' + result\n# print(url)\n# url = 'http://hq.sinajs.cn/list=sh600660,sh600006'\n html = get_one_page(url)\n pattern_data = '-?[\\d.]+'\n reobj = re.compile(pattern_data)\n data = reobj.findall(html)\n# print('个股数据'+ str(data))\n\n name = data[::6]\n cur_price = data[1::6]\n rise_rate = data[3::6]\n volume = data[4::6] \n data = {'id':name,\n 'cur_price':cur_price,\n 'rise_rate':rise_rate,\n 'volume':volume\n }\n r = pd.DataFrame(data)\n# print(r)\n return r\n\ndef check(df):\n '''\n 1. 判断涨跌\n 2. 判断成交量\n 3. 
判断是否突破各级均线水平\n '''\n global volume_rate\n global morning_open_time\n global morning_close_time\n global afternoon_open_time\n global afternoon_close_time\n global warning_level\n \n # 如果关闭了预警功能,则直接退出\n if warning_level == 0:\n return 'close warning mode'\n total_info_buff = ''\n \n #监测大盘\n market_th = 1.5 #大盘预警门限比个股要低,而且禁止配置\n \n info = get_main_market()\n rate = float(info.iloc[0]['rise_rate'])\n if abs(rate) >= market_th:\n total_info_buff += '沪市预警:'\n total_info_buff += info.iloc[0]['price']\n total_info_buff += '\\t涨跌(%):'\n total_info_buff += info.iloc[0]['rise_rate']\n total_info_buff += '\\t成交量(亿)'\n total_info_buff += str(info.iloc[0]['quantity'])\n total_info_buff += '\\n\\n'\n \n rate = float(info.iloc[1]['rise_rate'])\n if abs(rate) >= market_th:\n total_info_buff += '深市预警:'\n total_info_buff += info.iloc[1]['price']\n total_info_buff += '\\t涨跌(%):'\n total_info_buff += info.iloc[1]['rise_rate']\n total_info_buff += '\\t成交量(亿)'\n total_info_buff += str(info.iloc[1]['quantity'])\n total_info_buff += '\\n\\n'\n \n # 监测个股\n try:\n s = len(df)\n except:\n return total_info_buff \n \n# if df == 0: #如果没有设置target_id,则只监控大盘状态\n# return total_info_buff\n for indexs in df.index:\n #涨跌判断\n if abs(float(df.loc[indexs].values[rate_line])) > rase_th:\n# print(df.loc[indexs].values[:])\n total_info_buff += '涨跌预警:ID=' + df.loc[indexs].values[1] + \\\n ', 涨跌幅:' + df.loc[indexs].values[2] + '%\\n' \n #均线判断 \n if warning_level > 2: \n if float(df.loc[indexs].values[price_line]) < \\\n float(avg_info.iloc[indexs].values[avg_line_w]):\n total_info_buff += '周线预警:ID=' + df.loc[indexs].values[1] + \\\n ',(当前价<周线):(' + df.loc[indexs].values[0] + '<' + \\\n str(avg_info.iloc[indexs].values[avg_line_w]) + ')\\n' \n # print('week error')\n if float(df.loc[indexs].values[price_line]) < \\\n float(avg_info.iloc[indexs].values[avg_line_m]):\n total_info_buff += '月线预警:ID=' + df.loc[indexs].values[1] + \\\n ',(当前价<月线):(' + df.loc[indexs].values[0] + '<' + \\\n str(round(avg_info.iloc[indexs].values[avg_line_m],2)) + ')\\n' \n # print('month error')\n if float(df.loc[indexs].values[price_line]) < \\\n float(avg_info.iloc[indexs].values[avg_line_y]):\n total_info_buff += '年线预警:ID=' + df.loc[indexs].values[1] + \\\n ',(当前价<年线):(' + df.loc[indexs].values[0] + '<' + \\\n str(round(avg_info.iloc[indexs].values[avg_line_y],2)) + ')\\n' \n# print('year error')\n #量比判断\n if warning_level > 1:\n now = datetime.now()\n if now < morning_close_time:\n diff = now - morning_open_time\n time_elapse = diff.seconds / 60\n elif now <= afternoon_open_time:\n time_elapse = 120\n elif now < afternoon_close_time:\n diff = now - afternoon_open_time\n time_elapse = 120 + diff.seconds / 60\n else:\n time_elapse = 240 \n cur_vol = float(df.loc[indexs].values[volume_line])\n vol_rate = cur_vol / (time_elapse * avg_info.iloc[indexs].values[avg_vol_min])\n \n if vol_rate > volume_rate : #成交量\n total_info_buff += '量比异常:ID=' + df.loc[indexs].values[1] + \\\n ',量比=' + str(round(vol_rate,2)) + ')\\n'\n total_info_buff += '\\n'\n return total_info_buff\n \n###############################################################################\n# API\n# \n###############################################################################\ndef set_target_id(set_id):\n global target_id\n if isinstance(set_id, (list)):\n for i in set_id:\n if i not in target_id:\n target_id.append(i)\n elif isinstance(set_id, (str)):\n if set_id not in target_id:\n target_id.append(set_id) \n else:\n print('input invalid, only support list or str type') \n \ndef 
del_target_id(rm_id):\n global target_id\n if isinstance(rm_id, (list)):\n for i in rm_id:\n if i in target_id:\n target_id.append(i)\n elif isinstance(rm_id, (str)):\n if rm_id in target_id:\n target_id.remove(rm_id) \n else:\n print('input invalid, only support list or str type')\n \ndef show_target_id():\n global target_id\n msg = '当前个股列表:'\n for i in target_id:\n# print(i)\n msg += i\n msg += ','\n return msg\n\ndef clear_target_id():\n global target_id\n# print(target_id)\n# print(len(target_id))\n while len(target_id) > 0:\n target_id.pop()\n#------------------------------------------------------------------------------ \ndef set_param(r_th, v_th = volume_rate):#设置预警阈值\n global rase_th\n global volume_rate\n rase_th = r_th\n volume_rate = v_th \ndef get_param():\n global rase_th\n global volume_rate\n return rase_th,volume_rate \n\ndef set_warning_level(level):\n global warning_level\n if level > 3:\n level = 3\n elif level < 0:\n level = 0\n warning_level = level\n print('全局预警等级设置为',level,'\\n')\n\ndef set_sleep_time(set_time):\n global sleep_time\n sleep_time = set_time\n print('休眠 %s ' % set_time, '分钟')\n return sleep_time\n\ndef get_sleep_time():\n global sleep_time\n return sleep_time\n \ndef clear_sleep_time():\n global sleep_time\n sleep_time = 0\n#最终的信息输出,发送给微信端\n#def output_msg():\n# global out_buff \n# out_buff = total_info_buff + individual_info_buff\n# return out_buff\n###############################################################################\n###############################################################################\n#init() #不初始化会出问题\n#l = ['000001','600066','000651','601012']\n#result = get_stock_market(l)\n#set_param(1,10)\n#check(result)\n#l = get_param()\n\n#clear_target_id()\n#l = ['00031','2']\n#set_target_id('000665')\n#set_target_id(l )\n#show_target_id()\n#del_target_id('00031')\n#del_target_id('2')\n#show_target_id()\n###############################################################################\n###############################################################################\n###############################################################################\n#l = ['600066','000651','601012']\n#set_target_id(l)\n#init() #此处逻辑有问题,必须先执行此句才能实现初始化\n#c = get_stock_market()\n#check(c)\n#print(show_target_id())\n", "repo_name": "YinChao126/anack", "sub_path": "anack/App/M1808/early_warning.py", "file_name": "early_warning.py", "file_ext": "py", "file_size_in_byte": 13592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 39, "dataset": "github-code", "pt": "75", "api": [{"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.exceptions.RequestException", "line_number": 30, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "tushare.get_k_data", "line_number": 109, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 131, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 134, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 138, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 146, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 152, 
"usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 164, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 186, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "name"}]} +{"seq_id": "31279023356", "text": "import pymzml\nimport statistics\nimport pandas as pd\nimport numpy as np\nimport os\nimport threading\nimport logging\nimport xlsxwriter\nimport plotly.express as px\nimport plotly\nimport plotly.graph_objects as go\nimport plotly.offline as offline\nfrom jinja2 import Environment, FileSystemLoader\nfrom scipy.stats import shapiro\nimport time\n\nfrom mod.general_functions import cv, cv_status, check_threshold, groupname, label_outlier, get_outlier_and_cv_status, only_outlier_status, get_series_status, color_list\n\n#-------------------------------------------------------------------------- FUNCTIONS ---------------------------------------------------------------------------\n\ndef get_mzml_list(mzml_dir):\n\n \"\"\"\n Retrieves a list of .mzML files from the specified directory.\n\n Args:\n mzml_dir (str): Directory containing mzML files.\n\n Returns:\n list: List of paths to mzML files in the specified directory.\n \"\"\"\n mzml_list = os.listdir(mzml_dir)\n\n final_mzml_list = []\n\n logging.info(f\"Getting list of mzML files under provided directory path : {mzml_dir}\")\n for mzml_file in mzml_list:\n if not mzml_file.endswith(\".mzML\"):\n logging.info(f\"{mzml_file} is not a mzML file and will not be used for data extraction\")\n continue\n full_filename = f\"{mzml_dir}/{mzml_file}\"\n final_mzml_list.append(full_filename)\n\n logging.info(f\"{len(final_mzml_list)} mzML files have been found under {mzml_dir}\")\n\n return final_mzml_list\n\ndef mzml_extract(mzml_path, mzml_data):\n\n \"\"\"\n Extracts data from an mzML file and appends it to a provided list.\n\n Args:\n mzml_path (str): Path to the mzML file.\n mzml_data (list): List to which extracted data is appended.\n\n Returns:\n list: Updated list with data extracted from the mzML file.\n \"\"\"\n\n logging.info(f\"Extracting file: {mzml_path}\")\n\n data_dict = {}\n basepeak_intensity_list = []\n ms1_spectra = 0\n ms2_spectra = 0\n ms1_tic = 0\n ms2_tic = 0\n\n msrun = pymzml.run.Reader(mzml_path)\n\n for spectrum in msrun:\n #getting basepeak intensity\n if 'base peak intensity' in spectrum: #MS:1000505 --> accession for basepeak intensity\n basepeak_intensity_value = spectrum['base peak intensity']\n basepeak_intensity_list.append(basepeak_intensity_value)\n #getting spectra count + tic\n if 'ms level' and 'total ion current' in spectrum:\n #getting spectra and tic from proteowizard converted mzML files\n if isinstance(spectrum['ms level'], list):\n if int(list(set(spectrum['ms level']))[0]) == 1:\n ms1_spectra += 1\n ms1_tic += spectrum['total ion current']\n if int(list(set(spectrum['ms level']))[0]) == 2:\n ms2_spectra += 1\n ms2_tic += spectrum['total ion current']\n else: #for thermoconverter converted mzML files\n if spectrum['ms level'] == 1:\n ms1_spectra += 1\n ms1_tic += spectrum['total ion current']\n if spectrum['ms level'] == 2:\n ms2_spectra += 1\n ms2_tic += spectrum['total ion current']\n\n data_dict['Filename'] = os.path.split(mzml_path)[1]\n\n #throwing an error if no spectra can be extracted from the mzML file\n if ms1_spectra == 0 and ms2_spectra == 0:\n 
logging.error(f\"Not able to read spectra in the mzML file. Please remove this file: {mzml_path} from all inputs.\")\n sys.exit(1)\n\n else:\n data_dict['MS1 Spectra'] = ms1_spectra\n data_dict['MS2 Spectra'] = ms2_spectra\n data_dict['MS2/MS1 Spectra'] = (ms2_spectra/ms1_spectra)\n\n if ms1_tic != 0:\n data_dict['MS1 TIC'] = ms1_tic\n\n if ms2_tic != 0:\n data_dict['MS2 TIC'] = ms2_tic\n\n if len(basepeak_intensity_list) != 0:\n data_dict['Max Basepeak Intensity'] = max(basepeak_intensity_list)\n\n mzml_data.append(data_dict)\n\n return mzml_data\n\ndef get_mzml_info_dataframe(mzml_list):\n\n \"\"\"\n Processes a list of mzML files in parallel and compiles extracted data into a DataFrame.\n\n Args:\n mzml_list (list): List of mzML file paths.\n\n Returns:\n DataFrame: DataFrame containing compiled data from all mzML files.\n \"\"\"\n\n mzml_data = []\n threads = []\n\n for filename in mzml_list:\n\n job = threading.Thread(target=mzml_extract, args=(filename, mzml_data))\n threads.append(job)\n job.start()\n\n #Finish all threads\n for job in threads:\n job.join()\n\n mzml_dataframe = pd.DataFrame(mzml_data)\n mzml_dataframe = mzml_dataframe.sort_values(\"Filename\")\n\n return mzml_dataframe\n\ndef apply_idfree_thresholds(mzml_df, mzml_threshold_dict):\n\n \"\"\"\n Applies specified thresholds to mzML data and updates the DataFrame with QC status.\n\n Args:\n mzml_df (DataFrame): DataFrame containing mzML data.\n mzml_threshold_dict (dict): Dictionary containing threshold values for various metrics.\n\n Returns:\n DataFrame: Updated mzML DataFrame with added columns for QC status based on thresholds.\n \"\"\"\n\n if mzml_threshold_dict['MS1 TIC Threshold'] and 'MS1 TIC' in mzml_df.columns.tolist():\n mzml_df[f\"MS1TIC QC Threshold = {mzml_threshold_dict['MS1 TIC Threshold']}\"] = mzml_df['MS1 TIC'].apply(check_threshold, args=[mzml_threshold_dict['MS1 TIC Threshold'],])\n\n if mzml_threshold_dict['MS2 TIC Threshold'] and 'MS2 TIC' in mzml_df.columns.tolist():\n mzml_df[f\"MS2TIC QC Threshold = {mzml_threshold_dict['MS2 TIC Threshold']}\"] = mzml_df['MS2 TIC'].apply(check_threshold, args=[mzml_threshold_dict['MS2 TIC Threshold'],])\n\n if mzml_threshold_dict['MS1 Spectra Threshold']:\n mzml_df[f\"MS1Spectra QC Threshold = {mzml_threshold_dict['MS1 Spectra Threshold']}\"] = mzml_df['MS1 Spectra'].apply(check_threshold, args=[mzml_threshold_dict['MS1 Spectra Threshold'],])\n\n if mzml_threshold_dict['MS2 Spectra Threshold']:\n mzml_df[f\"MS2Spectra QC Threshold = {mzml_threshold_dict['MS2 Spectra Threshold']}\"] = mzml_df['MS2 Spectra'].apply(check_threshold, args=[mzml_threshold_dict['MS2 Spectra Threshold'],])\n\n if mzml_threshold_dict['Max Basepeak Intensity Threshold'] and 'Max Basepeak Intensity' in mzml_df.columns.tolist():\n mzml_df[f\"Max Basepeak Intensity QC Threshold = {mzml_threshold_dict['Max Basepeak Intensity Threshold']}\"] = mzml_df['Max Basepeak Intensity'].apply(check_threshold, args=[mzml_threshold_dict['Max Basepeak Intensity Threshold'],])\n\n return mzml_df\n\ndef check_normality(data):\n\n \"\"\"\n Performs a Shapiro-Wilk test to check the normality of the provided data.\n\n Args:\n data (list/array): Data to be tested for normality.\n\n Returns:\n str: Returns \"z-score\" if data is normally distributed; \"iqr\" otherwise.\n \"\"\"\n stat, p_value = shapiro(data)\n alpha = 0.05\n\n if p_value > alpha:\n return \"z-score\"\n else:\n return \"iqr\"\n\ndef zscore_outliers(df, colname, zscore_threshold = 2):\n\n \"\"\"\n Identifies outliers in a DataFrame column based 
on z-score.\n\n Args:\n df (DataFrame): DataFrame containing the data.\n colname (str): Name of the column to check for outliers.\n zscore_threshold (float, optional): The z-score threshold to identify outliers. Default is 2.\n\n Returns:\n tuple: Updated DataFrame with outliers marked and the number of outliers found.\n \"\"\"\n\n #calculating mean and standard deviation of the data\n mean = df[colname].mean()\n std = df[colname].std()\n\n #getting outliers based on z-score outlier\n outliers = []\n\n for value in df[colname].tolist():\n zscore = abs((value-mean)/std)\n if zscore > zscore_threshold:\n outliers.append(value)\n\n df[f\"{colname} Outliers\"] = df[colname].apply(label_outlier, args=[outliers, ])\n\n return (df, len(outliers))\n\ndef iqr_outliers(df, colname):\n\n \"\"\"\n Identifies outliers in a DataFrame column based on the Interquartile Range (IQR).\n\n Args:\n df (DataFrame): DataFrame containing the data.\n colname (str): Name of the column to check for outliers.\n\n Returns:\n tuple: Updated DataFrame with outliers marked and the number of outliers found.\n \"\"\"\n\n #calculating quantiles\n q1=df[colname].quantile(0.25)\n q3=df[colname].quantile(0.75)\n\n #iqr\n IQR=q3-q1\n\n #can set it to 1.5 for more sensitivity\n outliers = df[((df[colname]<(q1-3*IQR)) | (df[colname]>(q3+3*IQR)))][colname].tolist()\n\n df[f\"{colname} Outliers\"] = df[colname].apply(label_outlier, args=[outliers, ])\n\n return (df, len(outliers))\n\ndef outlier_detection(mzml_df, zscore_threshold = 2):\n\n \"\"\"\n Detects outliers in mzML data using either z-score or IQR methods based on data distribution.\n\n Args:\n mzml_df (DataFrame): DataFrame containing mzML data.\n zscore_threshold (float, optional): Z-score threshold for outlier detection. Default is 2.\n\n Returns:\n DataFrame: mzML DataFrame updated with outlier detection results.\n \"\"\"\n\n idfree_metrics = ['MS1 TIC', 'MS2 TIC', 'MS2/MS1 Spectra', 'Max Basepeak Intensity']\n\n for colname in mzml_df.columns.tolist():\n if colname in idfree_metrics:\n\n #check normality and decide outlier method\n logging.info(f\"Checking outliers for {colname}\")\n\n if check_normality(mzml_df[colname].tolist()) == \"z-score\":\n\n logging.info(f\"{colname} values are normally distributed, z-score outlier detection will be used\")\n logging.info(f\"Z-score Threshold has been set to {zscore_threshold}\")\n\n mzml_df, num_outliers = zscore_outliers(mzml_df, colname, zscore_threshold)\n\n if num_outliers == 0:\n logging.info(f\"No outliers found for {colname}\")\n else:\n logging.info(f\"{num_outliers} outliers were found for {colname}\")\n\n elif check_normality(mzml_df[colname].tolist()) == \"iqr\":\n\n logging.info(f\"{colname} values are not normally distributed, iqr outlier detection will be used\")\n mzml_df, num_outliers = iqr_outliers(mzml_df, colname)\n\n if num_outliers == 0:\n logging.info(f\"No outliers found for {colname}\")\n else:\n logging.info(f\"{num_outliers} outliers were found for {colname}\")\n\n return mzml_df\n\ndef calculate_tic_cv(mzml_df, groups, tic_cv_threshold):\n\n \"\"\"\n Calculates the Coefficient of Variation (CV%) for MS1 and MS2 TIC values across different groups.\n\n Args:\n mzml_df (DataFrame): DataFrame containing mzML data.\n groups (dict): Dictionary mapping groups to filenames.\n tic_cv_threshold (float): Threshold for the TIC CV%.\n\n Returns:\n DataFrame: DataFrame containing CV% for MS1 and MS2 TIC values.\n \"\"\"\n\n tic_cv = mzml_df[['Filename','MS1 TIC','MS2 TIC']]\n\n group_cv = {}\n\n for group in 
groups:\n group_subset = tic_cv[tic_cv['Filename'].isin(groups[group])]\n ms1tic_cv = round(cv(group_subset['MS1 TIC'].tolist()),2)\n ms2tic_cv = round(cv(group_subset['MS2 TIC'].tolist()),2)\n group_cv[group] = {\"MS1 TIC CV%\": ms1tic_cv, \"MS2 TIC CV%\": ms2tic_cv}\n\n tic_cv = pd.DataFrame.from_dict(group_cv, orient=\"index\")\n tic_cv.index = tic_cv.index.set_names(['Group'])\n tic_cv.reset_index(drop=False, inplace=True)\n tic_cv = tic_cv.sort_values('Group')\n\n tic_cv[f'MS1 TIC CV% Threshold = {int(tic_cv_threshold)}'] = tic_cv['MS1 TIC CV%'].apply(cv_status, args=[tic_cv_threshold,])\n tic_cv[f'MS2 TIC CV% Threshold = {int(tic_cv_threshold)}'] = tic_cv['MS2 TIC CV%'].apply(cv_status, args=[tic_cv_threshold,])\n\n return tic_cv\n\ndef get_sample_qc(mzml_df, mzml_threshold_dict, groupwise_comparison, groups):\n\n \"\"\"\n Applies QC status checks to mzML data based on thresholds and outlier detection.\n\n Args:\n mzml_df (DataFrame): DataFrame containing mzML data.\n mzml_threshold_dict (dict): Dictionary containing QC thresholds.\n groupwise_comparison (bool): Indicates if group-wise comparison is being used.\n groups (dict): Dictionary mapping groups for analysis.\n\n Returns:\n DataFrame: Updated mzML DataFrame with sample QC status.\n \"\"\"\n\n if 'MS1 TIC' in mzml_df.columns.tolist():\n if mzml_threshold_dict['MS1 TIC Threshold']:\n mzml_df['MS1 TIC Sample QC Status'] = mzml_df[['MS1 TIC Outliers',f\"MS1TIC QC Threshold = {mzml_threshold_dict['MS1 TIC Threshold']}\"]].apply(get_outlier_and_cv_status, axis=1)\n else:\n mzml_df['MS1 TIC Sample QC Status'] = mzml_df['MS1 TIC Outliers'].apply(only_outlier_status)\n\n if 'MS2 TIC' in mzml_df.columns.tolist():\n if mzml_threshold_dict['MS2 TIC Threshold']:\n mzml_df['MS2 TIC Sample QC Status'] = mzml_df[['MS2 TIC Outliers',f\"MS2TIC QC Threshold = {mzml_threshold_dict['MS2 TIC Threshold']}\"]].apply(get_outlier_and_cv_status, axis=1)\n else:\n mzml_df['MS2 TIC Sample QC Status'] = mzml_df['MS2 TIC Outliers'].apply(only_outlier_status)\n\n if mzml_threshold_dict['MS1 Spectra Threshold']:\n mzml_df['MS1 Spectra QC Status'] = mzml_df[['MS2/MS1 Spectra Outliers',f\"MS1Spectra QC Threshold = {mzml_threshold_dict['MS1 Spectra Threshold']}\"]].apply(get_outlier_and_cv_status, axis=1)\n else:\n mzml_df['MS1 Spectra QC Status'] = mzml_df['MS2/MS1 Spectra Outliers'].apply(only_outlier_status)\n\n if mzml_threshold_dict['MS2 Spectra Threshold']:\n mzml_df['MS2 Spectra QC Status'] = mzml_df[['MS2/MS1 Spectra Outliers',f\"MS2Spectra QC Threshold = {mzml_threshold_dict['MS2 Spectra Threshold']}\"]].apply(get_outlier_and_cv_status, axis=1)\n else:\n mzml_df['MS2 Spectra QC Status'] = mzml_df['MS2/MS1 Spectra Outliers'].apply(only_outlier_status)\n\n if 'Max Basepeak Intensity' in mzml_df.columns.tolist():\n if mzml_threshold_dict['Max Basepeak Intensity Threshold']:\n mzml_df['Max Basepeak Intensity QC Status'] = mzml_df[['Max Basepeak Intensity Outliers', f\"Max Basepeak Intensity QC Threshold = {mzml_threshold_dict['Max Basepeak Intensity Threshold']}\"]].apply(get_outlier_and_cv_status, axis=1)\n else:\n mzml_df['Max Basepeak Intensity QC Status'] = mzml_df['Max Basepeak Intensity Outliers'].apply(only_outlier_status)\n\n sample_qc_cols = ['MS1 TIC Sample QC Status', 'MS2 TIC Sample QC Status', 'MS1 Spectra QC Status', 'MS2 Spectra QC Status', 'Max Basepeak Intensity QC Status']\n matched_sample_qc_cols = []\n for colname in mzml_df.columns.tolist():\n if colname in sample_qc_cols:\n matched_sample_qc_cols.append(colname)\n\n mzml_df = 
mzml_df[['Filename'] + matched_sample_qc_cols]\n mzml_df = mzml_df.sort_values('Filename')\n\n return mzml_df\n\ndef get_idfree_grouped_df(mzml_sample_df, tic_cv, tic_cv_threshold, groups):\n \"\"\"\n Compiles a grouped DataFrame for ID-free data, merging TIC CV% and sample QC status.\n\n Args:\n mzml_sample_df (DataFrame): DataFrame containing sample QC status.\n tic_cv (DataFrame): DataFrame containing TIC CV% data.\n tic_cv_threshold (float): Threshold for TIC CV%.\n groups (dict): Dictionary mapping groups to filenames.\n\n Returns:\n DataFrame: Compiled DataFrame with grouped QC status for ID-free data.\n \"\"\"\n\n tic_group_df = tic_cv[['Group',f'MS1 TIC CV% Threshold = {int(tic_cv_threshold)}', f'MS2 TIC CV% Threshold = {int(tic_cv_threshold)}']]\n idfree_status_params = ['MS1 TIC Sample QC Status', 'MS2 TIC Sample QC Status', 'MS1 Spectra QC Status', 'MS2 Spectra QC Status', 'Max Basepeak Intensity QC Status']\n mzml_sample_df['Group'] = mzml_sample_df['Filename'].apply(groupname, args=[groups, ])\n\n group_status_dict = {}\n\n for group in list(set(mzml_sample_df['Group'].tolist())):\n group_subset = mzml_sample_df[mzml_sample_df['Group'] == group]\n col_dict = {}\n\n for colname in ['MS1 TIC Sample QC Status', 'MS2 TIC Sample QC Status', 'MS1 Spectra QC Status', 'MS2 Spectra QC Status', 'Max Basepeak Intensity QC Status']:\n if colname in group_subset.columns.tolist():\n group_colname = colname.replace(\"Sample\", \"Group\")\n if list(set(group_subset[colname].tolist())) == [\"PASS\"]:\n col_dict[group_colname] = \"PASS\"\n else:\n col_dict[group_colname] = \"FAIL\"\n\n group_status_dict[group] = col_dict\n\n grouped_df = pd.DataFrame.from_dict(group_status_dict, orient=\"columns\")\n grouped_df = grouped_df.T\n grouped_df.reset_index(drop=False, inplace=True)\n grouped_df.rename(columns={'index':'Group'}, inplace=True)\n\n grouped_df = pd.merge(grouped_df, tic_group_df, on='Group')\n\n if 'MS1 TIC Group QC Status' in grouped_df.columns.tolist():\n grouped_df['MS1 TIC Group QC Status'] = grouped_df[['MS1 TIC Group QC Status', f'MS1 TIC CV% Threshold = {int(tic_cv_threshold)}']].apply(get_series_status, axis=1)\n grouped_df = grouped_df.drop(f'MS1 TIC CV% Threshold = {int(tic_cv_threshold)}', axis=1)\n\n if 'MS2 TIC Group QC Status' in grouped_df.columns.tolist():\n grouped_df['MS2 TIC Group QC Status'] = grouped_df[['MS2 TIC Group QC Status', f'MS2 TIC CV% Threshold = {int(tic_cv_threshold)}']].apply(get_series_status, axis=1)\n grouped_df = grouped_df.drop(f'MS2 TIC CV% Threshold = {int(tic_cv_threshold)}', axis=1)\n\n grouped_df = grouped_df.sort_values('Group')\n\n return grouped_df\n\n#------------------------------------------------------------------------ PLOT FUNCTIONS ----------------------------------------------------------------------------\n\ndef tic_plots(mzml_df, tic_cv, ms1_tic_threshold, ms2_tic_threshold, tic_cv_threshold, groupwise_comparison, color_list):\n\n df = mzml_df[['Filename','MS1 TIC','MS2 TIC']]\n\n df = df.melt(id_vars=[\"Filename\"],\n var_name=\"Label\",\n value_name=\"TIC\")\n\n tic_line = px.line(df, x='Filename', y=\"TIC\", title=\"Total Ion Current\", color=\"Label\", line_shape=\"spline\", markers=True)\n tic_line.update_xaxes(tickfont_size=6)\n tic_line.update_layout(title={'font': {'size': 9}})\n tic_line.update_layout(\n margin=dict(l=20, r=20, t=20, b=20)\n )\n\n if ms1_tic_threshold:\n tic_line.add_hline(y=ms1_tic_threshold, line_dash=\"dot\", annotation_text=f\"MS1 TIC Threshold = {ms1_tic_threshold}\")\n\n if ms2_tic_threshold:\n 
tic_line.add_hline(y=ms2_tic_threshold, line_dash=\"dot\", annotation_text=f\"MS2 TIC Threshold = {ms2_tic_threshold}\")\n\n tic_plot = plotly.io.to_html(tic_line, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n #tic_plot = offline.plot(tic_line, output_type='div', include_plotlyjs=False)\n\n tic_report_params = {'total_ion_current': True,\n 'tic_plot': tic_plot,\n 'tic_ms_plot_description': 'The total ion current (TIC) is the summed intensity across the entire range of masses being detected in each sample. MS1 and MS2 Total Ion Current Values extracted from spectra within the given mzML files'}\n\n if not list(set(mzml_df['MS1 TIC Outliers'].tolist())) == [0]:\n ms1_outliers = mzml_df[mzml_df['MS1 TIC Outliers'] == 1]['Filename'].tolist()\n ms1_outliers_filenames = \", \".join(ms1_outliers)\n tic_report_params['tic_ms1_outlier_description'] = f\"{mzml_df['MS1 TIC Outliers'].tolist().count(1)} outliers were found. The following files have been detected as outliers: {ms1_outliers_filenames}\"\n\n tic_ms1_outlier = px.scatter(mzml_df, x='Filename', y='MS1 TIC', title=\"MS1 TIC Outliers\", color='MS1 TIC Outliers')\n tic_ms1_outlier.update_layout(title={'font': {'size': 9}})\n tic_ms1_outlier.update_xaxes(tickfont_size=6)\n tic_ms1_outlier.update_layout(\n margin=dict(l=20, r=20, t=20, b=20)\n )\n tic_ms1_outlier.update_traces(marker=dict(line=dict(color='black', width=1)))\n tic_ms1_outlier_plot = plotly.io.to_html(tic_ms1_outlier, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n\n tic_report_params['tic_ms1_outlier_plot'] = tic_ms1_outlier_plot\n\n if not list(set(mzml_df['MS2 TIC Outliers'].tolist())) == [0]:\n ms2_outliers = mzml_df[mzml_df['MS2 TIC Outliers'] == 1]['Filename'].tolist()\n ms2_outliers_filenames = \", \".join(ms2_outliers)\n tic_report_params['tic_ms2_outlier_description'] = f\"{mzml_df['MS2 TIC Outliers'].tolist().count(1)} outliers were found. The following files have been detected as outliers: {ms2_outliers_filenames}\"\n\n tic_ms2_outlier = px.scatter(mzml_df, x='Filename', y='MS2 TIC', title=\"MS2 TIC Outliers\", color='MS2 TIC Outliers')\n tic_ms2_outlier.update_layout(title={'font': {'size': 9}})\n tic_ms2_outlier.update_xaxes(tickfont_size=6)\n tic_ms2_outlier.update_layout(\n margin=dict(l=20, r=20, t=20, b=20)\n )\n tic_ms2_outlier.update_traces(marker=dict(line=dict(color='black', width=1)))\n tic_ms2_outlier_plot = plotly.io.to_html(tic_ms2_outlier, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n\n tic_report_params['tic_ms2_outlier_plot'] = tic_ms2_outlier_plot\n\n if groupwise_comparison:\n tic_report_params['tic_ms_cv_description'] = \"When a grouping file is provided, CV% for TIC values across samples in each group is calculated. 
This provides an insignt into how consistent the samples are within each group.\"\n\n ms1tic_bar = px.bar(tic_cv, x='Group', y=\"MS1 TIC CV%\", title=\"MS1 Total Ion Current - CV%\", color=\"Group\", color_discrete_sequence=color_list)\n ms1tic_bar.update_xaxes(tickfont_size=6)\n ms1tic_bar.update_layout(title={'font': {'size': 9}})\n ms1tic_bar.add_hline(y=tic_cv_threshold, line_dash=\"dot\", annotation_text=f\"TIC CV Threshold = {tic_cv_threshold}\")\n ms1tic_bar.update_layout(\n margin=dict(l=20, r=20, t=20, b=20),\n )\n\n ms1_tic = plotly.io.to_html(ms1tic_bar, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n tic_report_params['tic_ms1_cv_plot'] = ms1_tic\n\n if list(set(tic_cv[f'MS1 TIC CV% Threshold = {int(tic_cv_threshold)}'].tolist())) == ['PASS']:\n tic_report_params['tic_ms1_cv_description'] = 'CV% for MS1 TIC was calculated using TIC values from each sample within a provided group. All groups have passed the CV Threshold'\n else:\n failed_ms1_groups = \", \".join(tic_cv[tic_cv[f'MS1 TIC CV% Threshold = {int(tic_cv_threshold)}'] == 'FAIL']['Group'].tolist())\n tic_report_params['tic_ms1_cv_description'] = f'CV% for MS1 TIC was calculated using TIC values from each sample within a provided group. The following groups have not met the CV Threshold: {failed_ms1_groups}. This represents an inconsistent TIC pattern, please check the samples within the failed groups.'\n\n ms2tic_bar = px.bar(tic_cv, x='Group', y=\"MS2 TIC CV%\", title=\"MS2 Total Ion Current - CV%\", color=\"Group\", color_discrete_sequence=color_list)\n ms2tic_bar.update_xaxes(tickfont_size=6)\n ms2tic_bar.update_layout(title={'font': {'size': 9}})\n ms2tic_bar.add_hline(y=tic_cv_threshold, line_dash=\"dot\", annotation_text=f\"TIC CV Threshold = {tic_cv_threshold}\")\n ms2tic_bar.update_layout(\n margin=dict(l=20, r=20, t=20, b=20),\n )\n\n ms2_tic = plotly.io.to_html(ms2tic_bar, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n tic_report_params['tic_ms2_cv_plot'] = ms2_tic\n\n if list(set(tic_cv[f'MS2 TIC CV% Threshold = {int(tic_cv_threshold)}'].tolist())) == ['PASS']:\n tic_report_params['tic_ms2_cv_description'] = 'CV% for MS2 TIC was calculated using TIC values from each sample within a provided group. All groups have passed the CV Threshold'\n else:\n failed_ms2_groups = \", \".join(tic_cv[tic_cv[f'MS2 TIC CV% Threshold = {int(tic_cv_threshold)}'] == 'FAIL']['Group'].tolist())\n tic_report_params['tic_ms2_cv_description'] = f'CV% for MS1 TIC was calculated using TIC values from each sample within a provided group. The following groups have not met the CV Threshold: {failed_ms2_groups}. 
This represents an inconsistent TIC pattern, please check the samples within the failed groups.'\n\n return tic_report_params\n\ndef spectral_plot(mzml_df):\n\n df = mzml_df[['Filename','MS2/MS1 Spectra', 'MS2/MS1 Spectra Outliers']]\n\n count_line = px.line(df, x='Filename', y=\"MS2/MS1 Spectra\", title=\"MS2/MS1 Spectral Ratio\", line_shape=\"spline\", markers=True)\n count_line.update_xaxes(tickfont_size=6)\n count_line.update_layout(title={'font': {'size': 9}})\n count_line.update_layout(\n margin=dict(l=20, r=20, t=20, b=20),\n )\n\n spectral_count = plotly.io.to_html(count_line, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n spectra_report_params = {'ms2_ms1_spectral_ratio': True,\n 'ms2_ms1_spectral_ratio_plot': spectral_count,\n 'ms2_ms1_spectral_ratio_description': 'MS2/MS1 Spectral Count Ratio extracted from given mzML files'}\n\n if not list(set(mzml_df['MS2/MS1 Spectra Outliers'].tolist())) == [0]:\n spectra_outliers = mzml_df[mzml_df['MS2/MS1 Spectra Outliers'] == 1]['Filename'].tolist()\n spectra_outliers_filenames = \", \".join(spectra_outliers)\n spectra_report_params['ms2_ms1_spectral_ratio_outlier_description'] = f\"{mzml_df['MS2/MS1 Spectra Outliers'].tolist().count(1)} outliers were found. The following files have been detected as outliers: {spectra_outliers_filenames}\"\n\n ms2_ms1_spectral_ratio_outlier = px.scatter(mzml_df, x='Filename', y='MS2/MS1 Spectra', title=\"MS2/MS1 Spectra Outliers\", color='MS2/MS1 Spectra Outliers')\n ms2_ms1_spectral_ratio_outlier.update_xaxes(tickfont_size=6)\n ms2_ms1_spectral_ratio_outlier.update_layout(title={'font': {'size': 9}})\n ms2_ms1_spectral_ratio_outlier.update_layout(\n margin=dict(l=20, r=20, t=20, b=20)\n )\n ms2_ms1_spectral_ratio_outlier.update_traces(marker=dict(line=dict(color='black', width=1)))\n ms2_ms1_spectral_ratio_plot = plotly.io.to_html(ms2_ms1_spectral_ratio_outlier, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n\n spectra_report_params['ms2_ms1_spectral_ratio_outlier_plot'] = ms2_ms1_spectral_ratio_plot\n\n return spectra_report_params\n\ndef basepeak_graph(mzml_df, max_basepeak_intensity_threshold, groups, groupwise_comparison, color_list):\n\n if groupwise_comparison:\n mzml_df['Group'] = mzml_df['Filename'].apply(groupname, args=[groups,])\n mzml_df = mzml_df.sort_values('Group')\n bp_bar = px.bar(mzml_df, x='Filename', y=\"Max Basepeak Intensity\", title=\"Max Base Peak Intensity\", color=\"Group\", color_discrete_sequence=color_list)\n else:\n bp_bar = px.bar(mzml_df, x='Filename', y=\"Max Basepeak Intensity\", title=\"Max Base Peak Intensity\")\n\n bp_bar.update_layout(title={'font': {'size': 9}})\n bp_bar.update_xaxes(tickfont_size=6)\n bp_bar.update_layout(\n margin=dict(l=20, r=20, t=20, b=20),\n )\n\n if max_basepeak_intensity_threshold:\n bp_bar.add_hline(y=max_basepeak_intensity_threshold, line_dash=\"dot\", annotation_text=f\"Max Base Peak Intensity Threshold = {max_basepeak_intensity_threshold}\")\n\n bp_plot = plotly.io.to_html(bp_bar, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n\n basepeak_report_params = {'max_basepeak_intensity' : True,\n 'max_basepeak_intensity_plot': bp_plot}\n\n if not list(set(mzml_df['Max Basepeak Intensity Outliers'].tolist())) == [0]:\n bp_outliers = mzml_df[mzml_df['Max Basepeak Intensity Outliers'] == 1]['Filename'].tolist()\n bp_outliers_filenames = \", \".join(bp_outliers)\n 
basepeak_report_params['max_basepeak_intensity_outlier_description'] = f\"{mzml_df['Max Basepeak Intensity Outliers'].tolist().count(1)} outliers were found. The following files have been detected as outliers: {bp_outliers_filenames}\"\n\n max_basepeak_intensity_outlier = px.scatter(mzml_df, x='Filename', y='Max Basepeak Intensity', title=\"Max Base Peak Intensity Outliers\", color='Max Basepeak Intensity Outliers')\n max_basepeak_intensity_outlier.update_layout(title={'font': {'size': 9}})\n max_basepeak_intensity_outlier.update_xaxes(tickfont_size=6)\n max_basepeak_intensity_outlier.update_layout(\n margin=dict(l=20, r=20, t=20, b=20)\n )\n max_basepeak_intensity_outlier.update_traces(marker=dict(line=dict(color='black', width=1)))\n max_basepeak_intensity_outlier_plot = plotly.io.to_html(max_basepeak_intensity_outlier, include_plotlyjs=False, full_html=False, default_width='900px', default_height='450px')\n\n basepeak_report_params['max_basepeak_intensity_outlier_plot'] = max_basepeak_intensity_outlier_plot\n\n return basepeak_report_params\n\ndef create_graphs(mzml_df, tic_cv, groupwise_comparison, groups, mzml_threshold_dict):\n\n if 'MS1 TIC' and 'MS2 TIC' in mzml_df.columns.tolist():\n tic_report_params = tic_plots(mzml_df, tic_cv, mzml_threshold_dict['MS1 TIC Threshold'], mzml_threshold_dict['MS2 TIC Threshold'], mzml_threshold_dict['TIC CV Threshold'], groupwise_comparison, color_list)\n else:\n logging.info(\"No TIC information was extracted from provided mzML files, no plots for TIC will be generated\")\n tic_report_params = {}\n\n if 'Max Basepeak Intensity' in mzml_df.columns.tolist():\n basepeak_report_params = basepeak_graph(mzml_df, mzml_threshold_dict['Max Basepeak Intensity Threshold'], groups, groupwise_comparison, color_list)\n else:\n logging.info(\"No Basepeak Intensity information was extracted from provided mzML files, no plots for Max Basepeak Intensity will be generated\")\n basepeak_report_params = {}\n\n spectra_report_params = spectral_plot(mzml_df)\n\n idfree_report_parameters = dict(tuple(tic_report_params.items()) + tuple(spectra_report_params.items()) + tuple(basepeak_report_params.items()))\n\n return idfree_report_parameters\n\n#---------------------------------------------------------------------- MAIN FUNCTION CALL -------------------------------------------------------------------------\n\ndef calculate_idfree_metrics(out_dir, reportname, mzml_dir, groupwise_comparison, groups, mzml_threshold_dict):\n\n #getting list of mzML files\n mzml_list = get_mzml_list(mzml_dir)\n\n if len(mzml_list) > 30:\n mzml_list_chunks = [mzml_list[x:x+30] for x in range(0, len(mzml_list), 30)]\n else:\n mzml_list_chunks = [mzml_list]\n\n mzml_df = pd.DataFrame()\n\n for lt in mzml_list_chunks:\n #extracting data from mzml files\n extracted_df = get_mzml_info_dataframe(lt)\n mzml_df = pd.concat([mzml_df, extracted_df], ignore_index=True)\n time.sleep(100)\n\n #applying thresholds + outlier detection\n mzml_df = apply_idfree_thresholds(mzml_df, mzml_threshold_dict)\n mzml_df = outlier_detection(mzml_df)\n\n if groupwise_comparison:\n mzml_df['Group'] = mzml_df['Filename'].apply(groupname, args=[groups, ])\n mzml_df = mzml_df.sort_values(\"Group\")\n tic_cv = calculate_tic_cv(mzml_df, groups, mzml_threshold_dict['TIC CV Threshold'])\n else:\n tic_cv = \"\"\n\n logging.info(f\"Saving ID-Free QC Report to {out_dir}/{reportname}_ID-Free_QC_Report.xlsx\")\n\n #saving dataframes to excel document\n writer = pd.ExcelWriter(f\"{out_dir}/{reportname}_ID-Free_QC_Report.xlsx\", 
engine='xlsxwriter')\n mzml_df.to_excel(writer, index=False, sheet_name=\"ID-Free Metrics Summary\")\n if groupwise_comparison:\n tic_cv.to_excel(writer, index=False, sheet_name='Group TIC CV')\n writer.save()\n\n idfree_report_parameters = create_graphs(mzml_df, tic_cv, groupwise_comparison, groups, mzml_threshold_dict)\n\n mzml_sample_df = get_sample_qc(mzml_df, mzml_threshold_dict, groupwise_comparison, groups)\n if groupwise_comparison:\n idfree_grouped_df = get_idfree_grouped_df(mzml_sample_df, tic_cv, mzml_threshold_dict['TIC CV Threshold'], groups)\n else:\n idfree_grouped_df = \"\"\n\n return (mzml_sample_df, idfree_grouped_df, idfree_report_parameters)\n", "repo_name": "vegesnam/QCeltis", "sub_path": "mod/mzml_extract.py", "file_name": "mzml_extract.py", "file_ext": "py", "file_size_in_byte": 32394, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "pymzml.run.Reader", "line_number": 70, "usage_type": "call"}, {"api_name": "pymzml.run", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 99, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 145, "usage_type": "call"}, {"api_name": "mod.general_functions.check_threshold", "line_number": 164, "usage_type": "argument"}, {"api_name": "mod.general_functions.check_threshold", "line_number": 167, "usage_type": "argument"}, {"api_name": "mod.general_functions.check_threshold", "line_number": 170, "usage_type": "argument"}, {"api_name": "mod.general_functions.check_threshold", "line_number": 173, "usage_type": "argument"}, {"api_name": "mod.general_functions.check_threshold", "line_number": 176, "usage_type": "argument"}, {"api_name": "scipy.stats.shapiro", "line_number": 191, "usage_type": "call"}, {"api_name": "mod.general_functions.label_outlier", "line_number": 225, "usage_type": "argument"}, {"api_name": "mod.general_functions.label_outlier", "line_number": 252, "usage_type": "argument"}, {"api_name": "logging.info", "line_number": 275, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 279, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 280, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 285, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 287, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 291, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 295, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 297, "usage_type": "call"}, {"api_name": "mod.general_functions.cv", "line_number": 321, "usage_type": "call"}, {"api_name": "mod.general_functions.cv", "line_number": 322, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 325, "usage_type": "attribute"}, {"api_name": "mod.general_functions.cv_status", 
"line_number": 330, "usage_type": "argument"}, {"api_name": "mod.general_functions.cv_status", "line_number": 331, "usage_type": "argument"}, {"api_name": "mod.general_functions.get_outlier_and_cv_status", "line_number": 352, "usage_type": "argument"}, {"api_name": "mod.general_functions.only_outlier_status", "line_number": 354, "usage_type": "argument"}, {"api_name": "mod.general_functions.get_outlier_and_cv_status", "line_number": 358, "usage_type": "argument"}, {"api_name": "mod.general_functions.only_outlier_status", "line_number": 360, "usage_type": "argument"}, {"api_name": "mod.general_functions.get_outlier_and_cv_status", "line_number": 363, "usage_type": "argument"}, {"api_name": "mod.general_functions.only_outlier_status", "line_number": 365, "usage_type": "argument"}, {"api_name": "mod.general_functions.get_outlier_and_cv_status", "line_number": 368, "usage_type": "argument"}, {"api_name": "mod.general_functions.only_outlier_status", "line_number": 370, "usage_type": "argument"}, {"api_name": "mod.general_functions.get_outlier_and_cv_status", "line_number": 374, "usage_type": "argument"}, {"api_name": "mod.general_functions.only_outlier_status", "line_number": 376, "usage_type": "argument"}, {"api_name": "mod.general_functions.groupname", "line_number": 405, "usage_type": "argument"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 423, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 423, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 428, "usage_type": "call"}, {"api_name": "mod.general_functions.get_series_status", "line_number": 431, "usage_type": "argument"}, {"api_name": "mod.general_functions.get_series_status", "line_number": 435, "usage_type": "argument"}, {"api_name": "plotly.express.line", "line_number": 452, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 452, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 465, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 465, "usage_type": "attribute"}, {"api_name": "plotly.express.scatter", "line_number": 477, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 477, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 484, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 484, "usage_type": "attribute"}, {"api_name": "plotly.express.scatter", "line_number": 493, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 493, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 500, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 500, "usage_type": "attribute"}, {"api_name": "plotly.express.bar", "line_number": 507, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 507, "usage_type": "name"}, {"api_name": "mod.general_functions.color_list", "line_number": 507, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 515, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 515, "usage_type": "attribute"}, {"api_name": "plotly.express.bar", "line_number": 524, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 524, "usage_type": "name"}, {"api_name": "mod.general_functions.color_list", "line_number": 524, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 532, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 532, "usage_type": "attribute"}, {"api_name": "plotly.express.line", "line_number": 547, "usage_type": "call"}, 
{"api_name": "plotly.express", "line_number": 547, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 554, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 554, "usage_type": "attribute"}, {"api_name": "plotly.express.scatter", "line_number": 564, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 564, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 571, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 571, "usage_type": "attribute"}, {"api_name": "mod.general_functions.groupname", "line_number": 580, "usage_type": "argument"}, {"api_name": "plotly.express.bar", "line_number": 582, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 582, "usage_type": "name"}, {"api_name": "mod.general_functions.color_list", "line_number": 582, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 584, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 584, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 595, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 595, "usage_type": "attribute"}, {"api_name": "plotly.express.scatter", "line_number": 605, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 605, "usage_type": "name"}, {"api_name": "plotly.io.to_html", "line_number": 612, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 612, "usage_type": "attribute"}, {"api_name": "mod.general_functions.color_list", "line_number": 621, "usage_type": "argument"}, {"api_name": "logging.info", "line_number": 623, "usage_type": "call"}, {"api_name": "mod.general_functions.color_list", "line_number": 627, "usage_type": "argument"}, {"api_name": "logging.info", "line_number": 629, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 650, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 655, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 656, "usage_type": "call"}, {"api_name": "mod.general_functions.groupname", "line_number": 663, "usage_type": "argument"}, {"api_name": "logging.info", "line_number": 669, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 672, "usage_type": "call"}]} +{"seq_id": "1944452227", "text": "import logging\nfrom os import path\nfrom pprint import pformat\n\nfrom summary.scope import session, store\nfrom summary.steps_lib.files import get_requested_files, get_downloaded_files\nfrom summary.steps_lib.frame_utils import unique_seqs\nfrom tools.remote_files import RemoteFileTools\nfrom utils import ensure_string\n\n\ndef write_file_from_description(file_path, description):\n missing_chunk_ids = description['MissingChunks']\n error_chunk_ids = description['ErrorChunks']\n downloaded_chunks = description['ChunkFrames']\n\n missing_file_name = file_path + '.missing'\n\n if len(missing_chunk_ids) > 0:\n session.write_artifact(missing_file_name, ', '.join(map(str, missing_chunk_ids)))\n else:\n if session.has_artifact(missing_file_name):\n logging.warning('Removing obsolete missing file {}'.format(missing_file_name))\n session.remove_artifact(missing_file_name)\n\n if len(error_chunk_ids) > 0:\n session.write_artifact(file_path + '.error', ', '.join(map(str, error_chunk_ids)))\n\n (base_name, ext) = path.splitext(file_path)\n\n is_jpg = ext == '.jpg'\n\n if is_jpg:\n jpg_file_name = base_name + '.jpg'\n file_path = base_name + '.raw'\n else:\n jpg_file_name = None\n\n with session.open_artifact(file_path, 'wb') as 
f:\n for chunk in downloaded_chunks:\n f.write(ensure_string(chunk.response))\n\n if is_jpg:\n just_responses = []\n\n for f in description['ChunkFrames']:\n just_responses.append(f.response)\n\n RemoteFileTools.save_photo(session.expand_artifact_path(jpg_file_name), just_responses)\n\n\ndef list_requested_files():\n files = get_requested_files(session.tasklist)\n\n text = pformat(files)\n session.write_artifact('requested_files.txt', text)\n\n\ndef extract_downloaded_files():\n files = get_downloaded_files(session.tasklist, session.all_frames)\n\n for file_name in files.keys():\n logging.info(\n 'Saving extracted file (this session) {} ({} chunks requested, {} downloaded, {} missing, {} errors)'.format(\n file_name,\n len(files[file_name]['RequestedChunks']),\n len(files[file_name]['DownloadedChunks']),\n len(files[file_name]['MissingChunks']),\n len(files[file_name]['ErrorChunks'])\n ))\n write_file_from_description(file_name, files[file_name])\n\n\ndef extract_file(file_name, also=[]):\n sessions = [session]\n\n for id in also:\n sessions.append(store.get_session(id))\n\n file_frames = []\n requested_chunks = set()\n error_chunk_ids = set()\n\n for s in sessions:\n session_frames = s.all_frames\n\n files = get_downloaded_files(s.tasklist, session_frames)\n\n if file_name in files:\n file = files[file_name]\n\n file_frames.extend(file['ChunkFrames'])\n requested_chunks.update(file['RequestedChunks'])\n error_chunk_ids.update(file['ErrorChunks'])\n else:\n logging.warning('File {} not found in session {}'.format(file_name, s.session_number))\n\n file_frames = unique_seqs(file_frames)\n file_frames = sorted(file_frames, key=lambda f: f.seq())\n\n dowloaded_chunk_ids = map(lambda f: f.seq(), file_frames)\n\n file = {\n 'MissingChunks': list(set(requested_chunks).difference(dowloaded_chunk_ids).difference(error_chunk_ids)),\n 'ChunkFrames': file_frames,\n 'RequestedChunks': requested_chunks,\n 'DownloadedChunks': dowloaded_chunk_ids,\n 'ErrorChunks': list(set(error_chunk_ids).difference(dowloaded_chunk_ids))\n }\n\n session_ids = sorted(map(lambda s: str(s.session_number), sessions))\n\n logging.info(\n 'Saving extracted file {} (sessions: {}) ({} chunks requested, {} downloaded, {} missing, {} errors)'.format(\n file_name,\n ', '.join(session_ids),\n len(file['RequestedChunks']),\n len(file['DownloadedChunks']),\n len(file['MissingChunks']),\n len(file['ErrorChunks'])\n ))\n\n write_file_from_description(path.join('assembled', file_name.strip('/')), file)\n", "repo_name": "PW-Sat2/GSControl", "sub_path": "summary/summary_steps/03-files.py", "file_name": "03-files.py", "file_ext": "py", "file_size_in_byte": 4141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "75", "api": [{"api_name": "summary.scope.session.write_artifact", "line_number": 20, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 20, "usage_type": "name"}, {"api_name": "summary.scope.session.has_artifact", "line_number": 22, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 23, "usage_type": "call"}, {"api_name": "summary.scope.session.remove_artifact", "line_number": 24, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 24, "usage_type": "name"}, {"api_name": "summary.scope.session.write_artifact", "line_number": 27, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 27, "usage_type": "name"}, 
{"api_name": "os.path.splitext", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "name"}, {"api_name": "summary.scope.session.open_artifact", "line_number": 39, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 39, "usage_type": "name"}, {"api_name": "utils.ensure_string", "line_number": 41, "usage_type": "call"}, {"api_name": "tools.remote_files.RemoteFileTools.save_photo", "line_number": 49, "usage_type": "call"}, {"api_name": "tools.remote_files.RemoteFileTools", "line_number": 49, "usage_type": "name"}, {"api_name": "summary.scope.session.expand_artifact_path", "line_number": 49, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 49, "usage_type": "name"}, {"api_name": "summary.steps_lib.files.get_requested_files", "line_number": 53, "usage_type": "call"}, {"api_name": "summary.scope.session.tasklist", "line_number": 53, "usage_type": "attribute"}, {"api_name": "summary.scope.session", "line_number": 53, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 55, "usage_type": "call"}, {"api_name": "summary.scope.session.write_artifact", "line_number": 56, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 56, "usage_type": "name"}, {"api_name": "summary.steps_lib.files.get_downloaded_files", "line_number": 60, "usage_type": "call"}, {"api_name": "summary.scope.session.tasklist", "line_number": 60, "usage_type": "attribute"}, {"api_name": "summary.scope.session", "line_number": 60, "usage_type": "name"}, {"api_name": "summary.scope.session.all_frames", "line_number": 60, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "summary.scope.session", "line_number": 75, "usage_type": "name"}, {"api_name": "summary.scope.store.get_session", "line_number": 78, "usage_type": "call"}, {"api_name": "summary.scope.store", "line_number": 78, "usage_type": "name"}, {"api_name": "summary.steps_lib.files.get_downloaded_files", "line_number": 87, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 96, "usage_type": "call"}, {"api_name": "summary.steps_lib.frame_utils.unique_seqs", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "38419682082", "text": "# -*- encoding=utf-8 -*-\nfrom flask import request\nfrom flask_login import login_required\nfrom flask_restful import Resource\n\nfrom backend.app import app\nfrom backend.controller.execute_testcase_controller import ExecuteTestCaseController\nfrom backend.utils.code_utils import CodeUtil\nfrom backend.utils.cron_utils import CronUtil\nfrom backend.utils.exception_utils import REQ_IS_EMPTY_ERROR, REQ_TYPE_ERROR, REQ_KEY_ERROR\nfrom backend.utils.make_response_utils import make_response\n\n\nclass CronJobController:\n\n @classmethod\n # 新增定时任务\n def add_cron(cls, cron_data):\n job = CronUtil.add_cron(ExecuteTestCaseController.execute_testsuite, cron_data)\n return job\n\n @classmethod\n # 查询定时任务的详情数据:\n def query_detail(cls, cron_id):\n return CronUtil.query_detail_cron(cron_id)\n\n @classmethod\n # 搜索定时任务\n def search_cron_by_cronname(cls, cron_name):\n return CronUtil.query_cron_by_name(cron_name)\n\n @classmethod\n # 查询定时任务列表\n def query_list(cls, page=1, size=10):\n return CronUtil.query_list(page, size)\n\n 
@classmethod\n # 修改定时任务\n def modify_cron(cls, cron_data):\n return CronUtil.update_cron(cron_data)\n\n @classmethod\n # 删除定时任务\n def delete_cron(cls, cron_id):\n return CronUtil.delete_cron(cron_id)\n\n\nclass CronJobService(Resource):\n decorators = [login_required]\n\n # 查询类操作\n def get(self):\n if not request.args:\n raise REQ_IS_EMPTY_ERROR()\n if not request.args.get(\"type\"):\n raise REQ_KEY_ERROR()\n\n action_type = request.args.get(\"type\")\n if action_type == \"query_detail\":\n cron_id = request.args.get(\"cron_id\") # 获取cron_id\n response_data = CronJobController.query_detail(cron_id) # 根据cron_id查询定时任务详情\n return make_response(status=CodeUtil.SUCCESS, data=response_data)\n if action_type == \"search\":\n cron_name = request.args.get(\"cron_name\") # 获取cron_name\n response_data = CronJobController.search_cron_by_cronname(cron_name)\n return make_response(status=CodeUtil.SUCCESS, data=response_data)\n if action_type == \"query_list\":\n page = request.args.get(\"page\")\n if page: # 如果传入了page,那么判断page是不是数字,如果不是数字直接返回查询的空结果\n if page.isdigit():\n page = int(page)\n else:\n return make_response(status=CodeUtil.SUCCESS, data=[])\n size = request.args.get(\"size\")\n response_data = CronJobController.query_list(page=1, size=10)\n total_count = len(response_data)\n return make_response(status=CodeUtil.SUCCESS,\n data=response_data,\n total_count=total_count,\n page=page,\n size=size)\n # 如果没有满足的if条件,那么返回json数据的结果\n return make_response(status=CodeUtil.SUCCESS)\n\n # 新增\n def post(self):\n # 公共校验\n if not request.data:\n raise REQ_IS_EMPTY_ERROR()\n if not request.is_json:\n raise REQ_TYPE_ERROR()\n # 添加定时任务\n job = CronJobController.add_cron(request.get_json())\n app.logger.info(f\"CronJobService中添加定时任务返回的job为:{job}\")\n return make_response(status=CodeUtil.SUCCESS, data=request.get_json())\n\n # 修改定时任务\n def put(self):\n if not request.data:\n raise REQ_IS_EMPTY_ERROR()\n if not request.is_json:\n raise REQ_TYPE_ERROR()\n\n # 调用Controller的修改定时任务方法\n job = CronJobController.modify_cron(request.get_json())\n app.logger.info(f\"修改定时任务的结果为:{job}\")\n return make_response(status=CodeUtil.SUCCESS, data=request.get_json())\n\n # 删除\n def delete(self):\n if not request.data:\n raise REQ_IS_EMPTY_ERROR()\n if not request.is_json:\n raise REQ_TYPE_ERROR()\n\n # 删除定时任务\n cron_id = request.get_json().get(\"cron_id\") # 获取要删除的cron_id\n CronJobController.delete_cron(cron_id) # 删除定时任务\n return make_response(status=CodeUtil.SUCCESS, data=None)\n\n # 暂停/恢复定时任务\n def patch(self):\n if not request.data:\n raise REQ_IS_EMPTY_ERROR()\n if not request.is_json:\n raise REQ_TYPE_ERROR()\n action = request.get_json().get(\"action\") # 获取要进行的动作\n cron_id = request.get_json().get(\"cron_id\")\n app.logger.info(f\"暂停和恢复定时任务的cron_id: {cron_id}\")\n # 根据action来判断对定时任务状态进行操作\n if action == 0:\n CronUtil.resume_job() # 恢复全部定时任务\n app.logger.info(f\"恢复全部定时任务后的结果为:{CronUtil.scheduler.state}\")\n if action == 1:\n CronUtil.resume_job(job_id=cron_id) # 指定id进行恢复,可以查看对应cron_id的next_run_time有没有恢复即可\n if action == 2:\n CronUtil.pause_cron(job_id=cron_id) # 指定id进行暂停,可以查看对应cron_id的next_run_time有没有被删除即可\n if action == 3:\n CronUtil.pause_cron() # 暂停全部定时任务\n app.logger.info(f\"暂停全部定时任务后的结果为:{CronUtil.scheduler.state}\")\n return make_response(status=CodeUtil.SUCCESS, data=None)\n", "repo_name": "Seven4li/taitan_interface_platform_origin", "sub_path": "backend/controller/cron_job_controller.py", "file_name": "cron_job_controller.py", "file_ext": "py", "file_size_in_byte": 5703, "program_lang": "python", "lang": "zh", 
"doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "backend.utils.cron_utils.CronUtil.add_cron", "line_number": 19, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 19, "usage_type": "name"}, {"api_name": "backend.controller.execute_testcase_controller.ExecuteTestCaseController.execute_testsuite", "line_number": 19, "usage_type": "attribute"}, {"api_name": "backend.controller.execute_testcase_controller.ExecuteTestCaseController", "line_number": 19, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.query_detail_cron", "line_number": 25, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 25, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.query_cron_by_name", "line_number": 30, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 30, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.query_list", "line_number": 35, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 35, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.update_cron", "line_number": 40, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 40, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.delete_cron", "line_number": 45, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 45, "usage_type": "name"}, {"api_name": "flask_restful.Resource", "line_number": 48, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_IS_EMPTY_ERROR", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_KEY_ERROR", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 62, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 62, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 66, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 66, "usage_type": "attribute"}, {"api_name": 
"backend.utils.code_utils.CodeUtil", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 73, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 73, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 77, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 77, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 77, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 83, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 83, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.data", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_IS_EMPTY_ERROR", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_TYPE_ERROR", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "backend.app.app.logger.info", "line_number": 94, "usage_type": "call"}, {"api_name": "backend.app.app.logger", "line_number": 94, "usage_type": "attribute"}, {"api_name": "backend.app.app", "line_number": 94, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 95, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 95, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.data", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_IS_EMPTY_ERROR", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_TYPE_ERROR", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": 
"backend.app.app.logger.info", "line_number": 106, "usage_type": "call"}, {"api_name": "backend.app.app.logger", "line_number": 106, "usage_type": "attribute"}, {"api_name": "backend.app.app", "line_number": 106, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 107, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 107, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.data", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_IS_EMPTY_ERROR", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_TYPE_ERROR", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 119, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 119, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.request.data", "line_number": 123, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 123, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_IS_EMPTY_ERROR", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 125, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 125, "usage_type": "name"}, {"api_name": "backend.utils.exception_utils.REQ_TYPE_ERROR", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 127, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 128, "usage_type": "name"}, {"api_name": "backend.app.app.logger.info", "line_number": 129, "usage_type": "call"}, {"api_name": "backend.app.app.logger", "line_number": 129, "usage_type": "attribute"}, {"api_name": "backend.app.app", "line_number": 129, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.resume_job", "line_number": 132, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 132, "usage_type": "name"}, {"api_name": "backend.app.app.logger.info", "line_number": 133, "usage_type": "call"}, {"api_name": "backend.app.app.logger", "line_number": 133, "usage_type": "attribute"}, {"api_name": "backend.app.app", "line_number": 133, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.scheduler", "line_number": 133, "usage_type": "attribute"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 133, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.resume_job", "line_number": 135, "usage_type": "call"}, {"api_name": 
"backend.utils.cron_utils.CronUtil", "line_number": 135, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.pause_cron", "line_number": 137, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 137, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.pause_cron", "line_number": 139, "usage_type": "call"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 139, "usage_type": "name"}, {"api_name": "backend.app.app.logger.info", "line_number": 140, "usage_type": "call"}, {"api_name": "backend.app.app.logger", "line_number": 140, "usage_type": "attribute"}, {"api_name": "backend.app.app", "line_number": 140, "usage_type": "name"}, {"api_name": "backend.utils.cron_utils.CronUtil.scheduler", "line_number": 140, "usage_type": "attribute"}, {"api_name": "backend.utils.cron_utils.CronUtil", "line_number": 140, "usage_type": "name"}, {"api_name": "backend.utils.make_response_utils.make_response", "line_number": 141, "usage_type": "call"}, {"api_name": "backend.utils.code_utils.CodeUtil.SUCCESS", "line_number": 141, "usage_type": "attribute"}, {"api_name": "backend.utils.code_utils.CodeUtil", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "14083881536", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = \"Murat Özer\"\n__version__ = \"3.8.9\"\n__email__ = \"ozerr.murat@hotmail.com\"\n\n\nfrom flask import Flask, request, jsonify, abort\nfrom flask_restful import Resource, Api\nfrom flask_cors import CORS\nimport json\n\n\napp = Flask(__name__)\napi = Api(app)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\nDATA_PATH = './data/pcr_region_coverage.json'\n\ndata = []\ndef read_json(path):\n f = open(path)\n data = json.load(f)\n f.close()\n return data;\n\ndef get_paginated_list(results, current_page_number, pagination_size):\n current_page_number = int(current_page_number)\n pagination_size = int(pagination_size)\n total_item_count = len(results)\n if total_item_count < current_page_number or pagination_size < 0:\n abort(404)\n obj = {}\n obj['pagination'] = {}\n obj['pagination']['current_item_count'] = pagination_size\n obj['pagination']['current_page_number'] = current_page_number\n obj['pagination']['pagination_size'] = pagination_size\n obj['pagination']['total_item_count'] = total_item_count\n obj['pagination']['total_page_count'] = total_item_count / pagination_size\n obj['items'] = results[((pagination_size * current_page_number) - pagination_size):(pagination_size * current_page_number)]\n return obj\n\nclass PCR(Resource):\n def get(self):\n return jsonify(get_paginated_list(\n data, \n current_page_number=request.args.get('current_page_number', 1), \n pagination_size=request.args.get('pagination_size', 10)\n ))\n\n# Alternative api usage\n# @app.route('/api/pcr', methods=['GET'])\n# def get_list():\n# return jsonify(get_paginated_list(\n# data, \n# current_page_number=request.args.get('current_page_number', 1), \n# pagination_size=request.args.get('pagination_size', 10)\n# ))\n\napi.add_resource(PCR, '/api/pcr')\ndata = read_json(DATA_PATH)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n ", "repo_name": "ryuz4k1/project-pcr", "sub_path": "pcr-back/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": 
"flask_restful.Api", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 16, "usage_type": "call"}, {"api_name": "json.load", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 32, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "22686111561", "text": "import sys\nfrom asyncio import Queue, create_task, run\nfrom itertools import permutations\nsys.path.append(\"..\")\n\nfrom intcode import IntCode\nfrom common import qp\n\nwith open(\"input.txt\", \"r\") as inp:\n init = tuple(qp(inp.read()))\n\nasync def p1(phase_values):\n input_buffer = Queue()\n start_buffer = input_buffer\n\n tasks = []\n for phase in phase_values:\n output_buffer = Queue()\n input_buffer.put_nowait(phase)\n amp = IntCode(init, input_callback=input_buffer.get,\n output_callback=output_buffer.put)\n tasks.append(create_task(amp.async_run()))\n input_buffer = output_buffer\n\n start_buffer.put_nowait(0)\n for task in tasks:\n await task\n return output_buffer.get_nowait()\n\nasync def p2(phase_values):\n input_buffer = Queue()\n start_buffer = input_buffer\n\n amps = []\n tasks = []\n for phase in phase_values:\n output_buffer = Queue()\n input_buffer.put_nowait(phase)\n amp = IntCode(init, input_callback=input_buffer.get,\n output_callback=output_buffer.put)\n amps.append(amp)\n tasks.append(create_task(amp.async_run()))\n input_buffer = output_buffer\n # Create loop\n amps[-1].output_callback = start_buffer.put\n start_buffer.put_nowait(0)\n\n for task in tasks:\n await task\n return start_buffer.get_nowait()\n\n\nvals = []\nfor perm in permutations((0, 1, 2, 3, 4)):\n vals.append((run(p1(perm)), perm))\nprint(max(vals))\n\nvals = []\nfor perm in permutations((5, 6, 7, 8, 9)):\n vals.append((run(p2(perm)), perm))\nprint(max(vals))\n", "repo_name": "spauka/AOC2019", "sub_path": "d7/d7.py", "file_name": "d7.py", "file_ext": "py", "file_size_in_byte": 1606, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "common.qp", "line_number": 10, "usage_type": "call"}, {"api_name": "asyncio.Queue", "line_number": 13, "usage_type": "call"}, {"api_name": "asyncio.Queue", "line_number": 18, "usage_type": "call"}, {"api_name": "intcode.IntCode", "line_number": 20, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 22, "usage_type": "call"}, {"api_name": "asyncio.Queue", "line_number": 31, "usage_type": "call"}, {"api_name": "asyncio.Queue", "line_number": 37, "usage_type": "call"}, {"api_name": "intcode.IntCode", "line_number": 39, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 42, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 54, "usage_type": "call"}, 
{"api_name": "asyncio.run", "line_number": 55, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 59, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "14368911321", "text": "import yaml\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as plticker\nimport matplotlib\nfrom hist import Hist\nimport hist\nimport mplhep\nmatplotlib.style.use(mplhep.style.ALICE)\n# Quick construction, no other imports needed:\nmatplotlib.use(\"pdf\")\nfrom matplotlib.ticker import FormatStrFormatter\n\n###read full masked data \n\nwith open(\"../summary_data/fhr_summary.yaml\", 'r') as stream:\n dic = yaml.full_load(stream)\n\nwith open(\"../summary_data/fhr_masked_2_summary.yaml\", 'r') as stream:\n dic_masked = yaml.full_load(stream)\n\n\nstaves = list(dic_masked.keys())[:-1]\n\n\nfhr_array_aver = np.array([])\nfhr_array_aver_masked = np.array([])\n\n\nfor i,stave in enumerate(staves):\n mean_arr = np.array(dic[stave]['mean'][\"tuned_fhr\"])\n mean_arr_masked = np.array(dic_masked[stave]['mean'])\n filter_1 = mean_arr > 0\n mask_filter = mean_arr_masked > 0\n fhr_array_aver = np.append(fhr_array_aver, [np.mean(mean_arr[filter_1])], axis=0)\n fhr_array_aver_masked = np.append(fhr_array_aver_masked, [np.mean(mean_arr_masked[mask_filter])], axis=0)\n\n\nmean_masked = np.mean(fhr_array_aver_masked)\nstd_masked = np.std(fhr_array_aver_masked)/np.sqrt(len(fhr_array_aver_masked))\n\n########read dat files with mask stability fhrs\nx = np.loadtxt(\"../summary_data/summary_fhr_mask_stab_5e05.dat\", usecols=(3,4,5)) # mask only common pixel, mask pixel common in 80% of the runs\ncommon_masking_fhr = x[:,0]\ncommon_80_masking_fhr = x[:,1]\ncommon_20_masking_fhr = x[:,2]\nmean_common_masking = np.mean(common_masking_fhr)\nmean_common_80_masking = np.mean(common_80_masking_fhr)\nmean_common_20_masking = np.mean(common_20_masking_fhr)\nstd_common_masking = np.std(common_masking_fhr)\nstd_common_20_masking = np.std(common_20_masking_fhr)\n#############################################àà\n\n\nbins = np.geomspace(1e-11, 1e-6, 20)\n\nh_distr = np.histogram(fhr_array_aver, bins=bins)\nh_distr_masked = np.histogram(fhr_array_aver_masked, bins=bins)\nh_distr_common_masking = np.histogram(common_masking_fhr, bins=bins)\nh_distr_common_80_masking = np.histogram(common_80_masking_fhr, bins=bins)\nh_distr_common_20_masking = np.histogram(common_20_masking_fhr, bins=bins)\n\n\n\n\nplt.figure()\nfig, ax = plt.subplots(figsize=(12, 12))\n\nmplhep.histplot(h_distr, ax=ax, yerr=False,histtype=\"fill\", linewidth=2, alpha=0.2, color=\"grey\", label=\"No masking\")\nmplhep.histplot(h_distr_masked, ax=ax, yerr=False,histtype=\"fill\", linewidth=2, alpha=0.1, color=\"blue\", label=\"Full software masking\")\nmplhep.histplot(h_distr_common_80_masking, ax=ax, yerr=False,histtype=\"step\", linewidth=2, alpha=1, color=\"green\", label=\"Mask bad pixel common to at least 80% of runs\")\nmplhep.histplot(h_distr_common_20_masking, ax=ax, yerr=False,histtype=\"step\", linewidth=2, alpha=1, color=\"red\", label=\"Mask bad pixel common to at least 20% of runs\")\nmplhep.histplot(h_distr_common_masking, ax=ax, yerr=False,histtype=\"step\", linewidth=2, alpha=1, color=\"orange\", label=\"Mask bad pixel common to all runs\")\n\n# mplhep.histplot(h_distr_masked, ax=ax, yerr=False, histtype=\"step\", alpha=1, linestyle = \"--\", color=\"blue\", linewidth=3)\n# mplhep.histplot(h_distr_common_masking, ax=ax, yerr=False, histtype=\"step\", alpha=1, 
linestyle = \"--\", color=\"red\", linewidth=3)\n# mplhep.histplot(h_distr_common_80_masking, ax=ax, yerr=False, histtype=\"step\", alpha=1, linestyle = \"--\", color=\"green\", linewidth=3)\n\n\ntext_x = 2.2e-10\ntext_y = 72\nax.text(text_x, text_y, f'ITS Outer Barrel', color=\"black\", fontsize=40, fontweight=\"bold\")\nax.text(text_x, text_y - 10, 'Max sample: 20 runs/stave', color=\"black\", fontsize=20, fontstyle=\"italic\")\n\nexp_str1 = f'Average FHR : {mean_masked*10**11:.2} x $10^{{-11}}$ hits/event/pixel'\nexp_str2 = f'Average FHR : {mean_common_80_masking*10**10:.2} x $10^{{-10}}$ hits/event/pixel'\nexp_str3 = f'Average FHR : {mean_common_masking*10**9:.2} x $10^{{-9}}$ hits/event/pixel'\n# ax.text(text_x, text_y - 45, exp_str1, color=\"blue\", fontsize=22)\n# ax.text(text_x, text_y - 53, exp_str2, color=\"green\", fontsize=22)\n# ax.text(text_x, text_y - 59, exp_str3, color=\"red\", fontsize=22)\n\n\nax.legend(loc = \"lower left\", bbox_to_anchor=(0.27, 0.67), fontsize=18)\nplt.xlabel(\"Fake Hit Rate (hits/event/pixel)\")\nplt.ylabel(\"Counts\")\nplt.yscale(\"log\")\nplt.xscale(\"log\")\n\nax.tick_params(axis='x', which='major', pad=10)\n\n\nplt.savefig(f'../results/mask_stability.png', bbox_inches = 'tight',\n pad_inches = 0.2)\nplt.savefig(f'../results/mask_stability.pdf', bbox_inches = 'tight',\n pad_inches = 0.2)\nplt.close()", "repo_name": "fmazzasc/OB_plotting", "sub_path": "summary_plots/fhr_mask_stability.py", "file_name": "fhr_mask_stability.py", "file_ext": "py", "file_size_in_byte": 4505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "matplotlib.style.use", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 9, "usage_type": "attribute"}, {"api_name": "mplhep.style", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 11, "usage_type": "call"}, {"api_name": "yaml.full_load", "line_number": 17, "usage_type": "call"}, {"api_name": "yaml.full_load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.geomspace", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.histogram", 
"line_number": 59, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "mplhep.histplot", "line_number": 69, "usage_type": "call"}, {"api_name": "mplhep.histplot", "line_number": 70, "usage_type": "call"}, {"api_name": "mplhep.histplot", "line_number": 71, "usage_type": "call"}, {"api_name": "mplhep.histplot", "line_number": 72, "usage_type": "call"}, {"api_name": "mplhep.histplot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "27888219097", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 1 18:24:35 2020\n\n@author: Logan Rowe\n\"\"\"\n\nfrom __future__ import print_function\n\nimport pygame\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nimport pickle\nimport time\nimport matplotlib.pyplot as plt\nimport glob\nimport gc\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'\n\npygame.init()\n\n\n# =============================================================================\n# INPUT VALUES FOR NEURAL NETWORK ARE OUTPUT VALUES FROM WHAT SNAKE SEES\n# =============================================================================\n\ndef snakeVision(snake,food,obstruction_connections=False):\n '''\n Takes the snake of interest and current food as inputs returns an output\n of 20 values that the snake sees and a list of the locations of obstructions.\n \n The output will be fed as input to the neural net. The list of obstructions\n will be used visualize what the snake sees as it moves (plot red spots on \n obstruction locations).\n \n All distances are normalized to range between -1 and 1 where -1 represents\n a wall that is 30 blocks (the width of the screen) to the left or above the\n snakes head and 1 for 30 blocks to the right or below the snakes head\n \n note: this does not account for diagonal distances being root(2) times\n longer than horizontal or vertical distances... 
should only be of\n minor concern\n \n This in combination with the us of tanh as an activation function should\n assist in speeding up the training process\n \n If two observation points are connected by a series of obstructions it will\n be denoted by 1, if they are not connected -1\n \n i.e. Determine whether obstriction at right is connected to obstruction \n at up-right by a chain of obstructions (i.e. right is wall and \n upright is also wall)=1 or (i.e. right is tail not touching wall \n and up right is wall)=-1\n \n \n outputs=[x dist from snakes head to food,\n y dist from snakes head to food,\n is food to the right of the snake,\n is food above the snake,\n is food to the left of the snake,\n is food below the snake,\n dist to nearest obstruction to the right,\n dist to nearest obstruction to the up-right,\n dist to nearest obstruction to the up,\n dist to nearest obstruction to the up-left,\n dist to nearest obstruction to the left,\n dist to nearest obstruction to the down-left,\n dist to nearest obstruction to the down,\n dist to nearest obstruction to the downright,\n snake is headed to the right 1 for true 0 for false\n snake is headed up 1 for true 0 for false\n snake is headed to the left 1 for true 0 for false\n snake is headed down 1 for true 0 for false\n ] \n '''\n \n outputs=[]\n \n #Add Food location x distance and y distance (consistently using final position - initial position)\n outputs.append(food.position[0]-snake.components[0].position[0])\n outputs.append(food.position[1]-snake.components[0].position[1]) \n \n inline_with_food = False\n if inline_with_food:\n #Add 1 if food is in line with snake (right, above, left, or down)\n outputs.append(grid_rows if (snake.components[0].position[1]==food.position[1] and snake.components[0].position[0]food.position[1]) else 0)\n outputs.append(grid_rows if (snake.components[0].position[1]==food.position[1] and snake.components[0].position[0]>food.position[0]) else 0)\n outputs.append(grid_rows if (snake.components[0].position[0]==food.position[0] and snake.components[0].position[1]food.position[1] else 0)\n outputs.append(grid_rows if snake.components[0].position[0]>food.position[0] else 0)\n outputs.append(grid_rows if snake.components[0].position[1]=0 and x<=grid_columns-1) and (y>=0 and y<=grid_rows-1) and ((x,y) not in snake_space):\n x+=direction_dict[direction][0]\n y+=direction_dict[direction][1]\n dist+=1\n obstructions.append(tuple((x,y)))\n outputs.append(dist)\n \n #Determine whether obstriction at right is connected to obstruction at up-right\n #by a chain of obstructions (i.e. right is wall and upright is also wall)=1\n # or (i.e. 
right is tail not touching wal and up right is wall)=-1\n \n #add another copy of the obstruction to the right, to the end of the list\n obstructions.append(obstructions[0])\n \n if obstruction_connections:\n #check if each obstruction is connected to the one located CCW from it\n for idx,obstruction in enumerate(obstructions[:-1]):\n \n if (obstruction not in snake_space) and (obstructions[idx+1] not in snake_space):\n \n #if both obstructions are on the wall, then yes they are connected\n outputs.append(grid_rows) #will be normalized with other distances later\n \n elif (obstruction in snake_space) and (obstructions[idx+1] in snake_space):\n \n #if both obstructions are on the snake's body, then yes they are connected\n outputs.append(grid_rows)\n \n else:\n snake_is_touching_wall=False\n #If a part of the snake is adjacent to the wall, then yes they are connected\n for position in snake_space:\n x,y=position\n if (x<1 or x>=grid_columns-1) or not (y<2 or y>grid_rows-1):\n snake_is_touching_wall=True\n \n if snake_is_touching_wall:\n outputs.append(grid_rows)\n else:\n #Snake is not touching the wall\n outputs.append(-grid_rows)\n \n #Lastly lets tell the snake what direction it is currently headed:\n #horizontal: (-1,0) --> -1 | (1,0) --> 1 | (0,+/- 1) --> 0\n #vertical: (-1,0) --> 0 | (1,0) --> 0 | (0,+/- 1) --> +/- 1 \n outputs.append(grid_rows if snake.direction[0]==1 else 0) #heading right\n outputs.append(grid_rows if snake.direction[1]==-1 else 0) #heading upward\n outputs.append(grid_rows if snake.direction[0]==-1 else 0) #heading left\n outputs.append(grid_rows if snake.direction[1]==1 else 0) #heading downward\n \n \n #Normalize ouputs\n outputs=[output/grid_rows for output in outputs]\n \n return (outputs,obstructions)\n\ndef drawObstructions(obstructions):\n '''\n Obstructions are given from snake vision, they are (x,y) pairs of locations\n that will cause the snake to die if touched\n \n drawObstructions([(1,2),(7,9),(30,5),...])\n will blit a red square anywhere that is hazardous to the snake\n '''\n \n #Plot a red square slightly larger than the grid square size at each location\n marker_size=0.5\n square_width,square_height=marker_size*grid.square_width,marker_size*grid.square_height\n \n for obstruction in obstructions:\n color=(255,0,0)\n\n x,y=obstruction\n x1,y1=grid.x+x*grid.square_width,grid.y+y*grid.square_height\n \n if x==grid_columns:\n x1-=grid.square_width-square_width\n elif x==-1:\n x1+=grid.square_width\n elif y==grid_rows:\n y1-=square_height\n elif y==-1:\n y1+=grid.square_height\n else:\n x1+=int(0.5*(grid.square_width-square_width))\n y1-=int(0.5*(2*grid.square_width-square_height))\n color=(255,255,255)\n \n pygame.draw.rect(win,color,(x1,y1,square_width,square_height),0)\n \n# =============================================================================\n# REDRAW GAME WINDOW \n# =============================================================================\n\ndef redrawGameWindow():\n \n pygame.draw.rect(win,(0,0,0),(0,0,win_width,win_height))\n \n grid.draw(win)\n severus.draw(win,grid)\n food.draw(win,grid)\n header.draw(win,win_width,severus.energy,snake_icon)\n \n drawObstructions(obstructions)\n \n pygame.display.update()\n\n# =============================================================================\n# INITIAL CONDITIONS FOLLOWED BY RUN LOOP\n# =============================================================================\n\ndef evalGenomes(population, generations, mutation_type='gaussian', food_energy=300, grid_size=(10,10), 
survival_fraction=0.1, fitness_threshold=200, mutation_rate=0.03, mutation_range=[-2,2], nn_shape=[24,30,30,4], activation_functions=['tanh','tanh','tanh','softmax'], initial_config=False, watch=False):\n '''\n population: the number of snakes in each generation\n generations: the number of generations you wish to run the evolution process for\n \n mutation rate: probability of a gene mutating\n mutation_range: the min and max possible mutated value\n nn_shape: the shape of the neural net: input layer, hidden 1, hidden 2, ..., output\n activation_functions: the function that will be used at layer1, layer2, ..., output\n \n initial_config: If False, the neural network will initiate with random weights on generation 1\n If initial_config='configuration_file_name.pkl' then neural net will use\n the weights from the pkl file, thus starting from a partially evolved state\n \n '''\n \n # =============================================================================\n # MAIN LOOP\n # =============================================================================\n '''From here until the run loop is simply initializing game objects such as\n the snake population, the board, food, etc. \n '''\n \n #objects\n global grid, win, severus, food, header\n \n #dimensions\n global grid_rows,grid_columns, win_width, win_height\n \n #flags and values\n global colors, game_on, snake_icon, obstructions, snake_output, gen\n \n gen=0\n \n #SET INITIAL CONDITIONS\n if watch:\n clock=pygame.time.Clock()\n clock.tick(10)\n pygame.time.delay(100)\n win_width=500\n win_height=win_width+50\n if watch:\n win=pygame.display.set_mode((win_width,win_height))\n \n \n #Size of grid for snake to move on\n grid_columns,grid_rows=grid_size\n \n #Range of colors to randomly choose from for snake food\n color_dict={'red':(255,0,0),\n 'orange':(255,127,0),\n 'yellow':(255,255,0),\n 'green':(0,255,0),\n 'blue':(0,0,255),\n 'indigo':(75,0,130),\n 'violet':(148,0,211)}\n \n colorful=True\n if colorful:\n colors=['red','orange','yellow','green','blue','indigo','violet']\n else:\n colors=['green']\n \n \n #flag for whether the snake is alive\n game_on=True\n\n\n #CREATE INITIAL OBJECTS\n #Square grid the same width as the window\n grid=GridBoard(grid_columns,grid_rows,win_width,win_width,(0,win_height-win_width))\n \n \n #Snake\n severus=Snake((1,0),SnakeComponent(int(grid.square_width),(int(0.5*grid_columns),int(0.5*grid_rows)),(0,255,0),shape='circle'),food_energy)\n \n \n #Score Board Snake Icon Resized to fit scoreboard\n snake_icon=pygame.image.load('./images/snake-image-alpha-removed.png')\n snake_icon_ratio=1280/960\n snake_icon=pygame.transform.scale(snake_icon,(int(snake_icon_ratio*0.75*(win_height-win_width)),int(0.75*(win_height-win_width))))\n \n \n #Score Board\n header=ScoreBoard(severus.length(),(win_width,win_height-win_width),win_width,snake_icon)\n \n\n #Add food to the map in a location that the snake does not inhabit\n food_loc=tuple((np.random.randint(1,grid_columns),np.random.randint(1,grid_rows)))\n while food_loc in severus.snake_space():\n food_loc=tuple((np.random.randint(1,grid_columns),np.random.randint(1,grid_rows)))\n food=SnakeFood(int(grid.square_width),food_loc,color_dict[np.random.choice(colors)],shape=severus.components[0].shape,grid=grid)\n\n \n\n # =============================================================================\n # CREATE FIRST POPULATION OF SNAKES AND NEURAL NETWORKS\n # =============================================================================\n #record the history of the 
performance of each generation of snakes\n history={'best' : [],\n 'average' : [],\n 'std' : [],\n 'run_time' : []\n }\n \n nets = []\n snakes = []\n fitness = [0]*population\n \n #if no initial_configuration file is given, randomly generate neural net weights\n if not initial_config:\n for i in range(population):\n snakes.append(Snake((1,0),SnakeComponent(int(grid.square_width),(int(0.5*grid_columns),int(0.5*grid_rows)),(0,255,0),shape='circle'),food_energy))\n \n #weights for connections between nodes\n conn_weights=[scale(np.random.rand(nn_shape[idx],nn_shape[idx+1])) for idx in range(len(nn_shape)-1)]\n \n #bias for each node\n bias_weights=[scale(np.random.rand(nn_shape[idx+1],)) for idx in range(len(nn_shape)-1)]\n \n #create neural net with given weights and activation functions\n nets.append(make_nets(conn_weights,bias_weights,activation_functions))\n else:\n #build population of snakes\n for i in range(population):\n snakes.append(Snake((1,0),SnakeComponent(int(grid.square_width),(int(0.5*grid_columns),int(0.5*grid_rows)),(0,255,0),shape='circle'),food_energy))\n \n #load top 50 nerual nets from previous session\n net_files=glob.glob('ga_snake_history/checkpoint_weights/*.h5')\n net_files=[i.split('\\\\')[-1] for i in net_files]\n \n nets=[]\n #Manually compile the top 50 neural nets from previous session\n for file in net_files:\n print()\n net=keras.models.load_model('./ga_snake_history/checkpoint_weights/'+file)\n flattened_net=flatten_net(net)\n connection_weights,bias_weights=rebuild_net(flattened_net,nn_shape)\n nets.append(make_nets(connection_weights,bias_weights,activation_functions))\n print()\n print('Manually loading, flattening, and rebuilding neural net',file,'from checkpoint.')\n\n \n #Reload the latest history\n with open('./ga_snake_history/history.pkl', 'rb') as file:\n history = pickle.load(file)\n \n #if the population is larger than 50, expand on the loaded neural nets to fill the population\n for i in range(population-len(nets)):\n nets.append(np.random.choice(nets)) \n \n print('nets')\n print(len(nets))\n\n\n \n #Decide how much the snake should be rewarded for each positive/negative action\n reward_food = 2\n reward_move = 0.01\n reward_hit_wall = - 0\n \n for gen in range(generations):\n t_start=time.time()\n gen+=1\n \n #Delete unreferenced objects\n gc.collect()\n\n #run loop\n snake_count=0\n for index,severus in enumerate(snakes):\n \n #Progress bar\n if snake_count%100==0:\n empty=' '*50\n full='|'*50\n progress=float(snake_count)/float(population)\n print('|'+full[:int(progress*50)]+empty[:int((1-progress)*50)]+'|')\n snake_count+=1\n\n run=True \n while run:\n \n #Set the speed the game runs at playing: (50,20) | training (0,comment out)\n if watch:\n pygame.time.delay(50)\n clock.tick(15)\n \n #Every time step, severus loses one energy [kcal]\n severus.energy-=1\n \n #get list of all events that happen i.e. 
keyboard, mouse, ...\n for event in pygame.event.get():\n #Check if the red X was clicked\n if event.type==pygame.QUIT:\n run=False\n \n #keep track of where the snakes tail is before movement incase it eats food\n severus.tail=severus.components[-1]\n \n \n # =============================================================================\n # CONTROL SNAKE USING NEURAL NET \n # =============================================================================\n #Increase the snakes fitness for each frame it has lived \n severus.fitness += reward_move\n \n #Output the snake vision to the neural net\n snake_output,obstructions = snakeVision(severus,food)\n \n \n snake_output=np.reshape(np.array(snake_output),(1,-1))\n \n #Ask neural net what snake should do based on snake's vision\n nn_output = nets[index].predict(snake_output)\n \n #Perform action suggested by nn_output\n snake_actions={0:'RIGHT',1:'UP',2:'LEFT',3:'DOWN',4:'NONE'}\n \n #OUTPUT FROM NEURAL NET (NN_OUTPUT) DRIVES THE SNAKE\n if (snake_actions[np.argmax(nn_output)]=='LEFT' and severus.direction!=(1,0)):\n #Only allow a left turn if the snake is not going right\n \n #Update the snakes tail components position to be to the left of the snakes head This will create the illusion of the snake progressing forward\n severus.components[-1].position=(severus.components[0].position[0]-1,severus.components[0].position[1])\n #Move the tail component to the head position of the snake\n severus.components=[severus.components.pop()]+severus.components\n #Change the direction of the snake to left\n severus.direction=(-1,0)\n \n \n if (snake_actions[np.argmax(nn_output)]=='RIGHT' and severus.direction!=(-1,0)):\n severus.components[-1].position=(severus.components[0].position[0]+1,severus.components[0].position[1])\n severus.components=[severus.components.pop()]+severus.components\n severus.direction=(1,0)\n \n if (snake_actions[np.argmax(nn_output)]=='UP' and severus.direction!=(0,1)):\n severus.components[-1].position=(severus.components[0].position[0],severus.components[0].position[1]-1)\n severus.components=[severus.components.pop()]+severus.components\n severus.direction=(0,-1)\n \n if (snake_actions[np.argmax(nn_output)]=='DOWN' and severus.direction!=(0,-1)):\n severus.components[-1].position=(severus.components[0].position[0],severus.components[0].position[1]+1)\n severus.components=[severus.components.pop()]+severus.components\n severus.direction=(0,1)\n \n \n #If the snake finds food it will grow by lenght 1\n if severus.components[0].position==food.position:\n #elongate snake with color of food\n severus.components.append(SnakeComponent(grid.square_width,severus.tail.position,food.color,shape=food.shape))\n \n #update the score\n header.score=severus.length()\n \n if header.score>=header.high_score:\n header.high_score=header.score\n \n #generate new food at a location not on the snake\n food_loc=tuple((np.random.randint(1,grid_columns),np.random.randint(1,grid_rows)))\n while food_loc in severus.snake_space():\n food_loc=tuple((np.random.randint(1,grid_columns),np.random.randint(1,grid_rows)))\n food=SnakeFood(int(grid.square_width),food_loc,color_dict[np.random.choice(colors)],grid=grid)\n \n #Increase snakes energy after eating food\n severus.energy+=food_energy\n \n #Increase the snakes fitness for finding food\n severus.fitness += reward_food\n \n #Pygame snakes cannot store more than 999 kilocalories, excess is not metabolized\n if severus.energy>999:\n severus.energy=999 \n else:\n #If the snake bites its tail or wanders into the 
hunting zone the snake becomes injured\n #note if snake does not move off of food in one frame it will register as biting its own tail\n x,y=severus.components[0].position[0],severus.components[0].position[1]\n if (x<0 or x>=grid_columns) or (y<1 or y>grid_rows) or ((x,y) in severus.snake_space()[1:]):\n #game over because of biting tail or out of bounds\n severus.fitness += reward_hit_wall\n game_on=False\n \n #If the snake tries to go out of bounds reset the head to the tail\n #if (x<0 or x>=grid_columns) or (y<1 or y>grid_rows):\n # severus=Snake((1,0),SnakeComponent(int(grid.square_width),(int(0.5*grid_columns),int(0.5*grid_rows)),(0,255,0),shape='circle'),food_energy)\n \n \n #The snake starved before finding food\n if severus.energy<=0:\n game_on=False\n \n \n #If snake died of starvation, bit its tail or hit a wall\n if not game_on:\n #print('snake injured at ('+str(x)+','+str(y)+')')\n if header.high_score<=severus.length():\n header.high_score=severus.length()\n \n #record how fit the snake was\n fitness[index]=severus.fitness \n \n #reset snake\n severus=Snake((1,0),SnakeComponent(int(grid.square_width),(int(0.5*grid_columns),int(0.5*grid_rows)),(0,255,0),shape='circle'),food_energy)\n \n #reset food\n food_loc=tuple((np.random.randint(1,grid_columns),np.random.randint(1,grid_rows)))\n while food_loc in severus.snake_space():\n food_loc=tuple((np.random.randint(1,grid_columns),np.random.randint(1,grid_rows)))\n food=SnakeFood(int(grid.square_width),food_loc,color_dict[np.random.choice(colors)],shape=severus.components[0].shape,grid=grid)\n\n \n #update score\n header.score=severus.length()\n \n #run=False: kill game | game_on=True: reset snake\n #run=False\n game_on=True\n \n #Break from while loop and continue with the next snake\n break\n \n \n #REDRAW GAME WINDOW\n if watch:\n redrawGameWindow()\n \n # =============================================================================\n # SELECT THE MOST FIT PARENTS TO SURVIVE AND BREED\n # =============================================================================\n print('Selecting the top snakes to breed...')\n #Agent[0]=(net[0],fitness[0])\n agents=selection(nets, fitness, survival_fraction)\n \n print('Agents:',str(len(agents)))\n \n # =============================================================================\n # PERFORM CROSSOVER TO MAKE CHILD NEURAL NETS FROM TOP PERFORMING PARENTS\n # =============================================================================\n print('Performing crossover...')\n nets=[agent[0] for agent in agents]\n \n nets.extend(crossover(agents, nn_shape, activation_functions,population))\n \n print('Nets:',str(len(nets)))\n \n\n # =============================================================================\n # RECORD STATISTICS \n # =============================================================================\n history['best'].append(np.max(fitness))\n history['average'].append(np.mean(fitness))\n history['std'].append(np.std(fitness))\n history['run_time'].append(time.time()-t_start)\n \n reporter(history)\n print()\n \n \n print('Recording history...')\n #Save the ost recent copy of the history dictionary\n with open('./ga_snake_history/history.pkl','wb') as file:\n pickle.dump(history, file, protocol=pickle.HIGHEST_PROTOCOL)\n \n \n # =============================================================================\n # SAVE THE BEST FIT PARENT TO MONITOR HOW THE POPULATION GREW FROM GENERATION TO GENERATION\n # =============================================================================\n 
print('Saving a copy of the best snake...')\n #Save a copy of the best neural network from each generation\n #nets[0].save_weights('./ga_snake_history/best/'+str(gen)+'_best')\n nets[0].save('./ga_snake_history/best/'+str(len(history['best'])+1)+'_best.h5')\n \n # =============================================================================\n # ADD RANDOM MUTATIONS\n # =============================================================================\n print('Adding mutations...\\n')\n #Only mutate children, leave parents alone\n child_nets=mutate(nets[int(len(nets)*survival_fraction):], mutation_type=mutation_type, mutation_range=mutation_range, mutation_rate=mutation_rate, nn_shape=nn_shape, activation_functions=activation_functions)\n nets=nets[:int(len(nets)*survival_fraction)]+child_nets\n \n # =============================================================================\n # SAVE THE NEWLY CREATED POPULATION OF SNAKE NEURAL NETS AS A CHECKPOINT\n # =============================================================================\n print('Storing a backup copy of the top 500 snakes neural net weights...')\n #Save the most recent copy of the top 500 snakes\n save_count=0\n for net in nets:\n #net.save_weights('./ga_snake_history/checkpoint_weights/'+str(save_count)+'_weights')\n net.save('./ga_snake_history/checkpoint_weights/'+str(save_count)+'_weights.h5')\n save_count+=1\n if save_count==500:\n break\n print('Checkpoint process complete, OK to exit script.\\n')\n \n # =============================================================================\n # IF A SATISFACTORY SNAKE EXISTS, BREAK (i.e. snake can reach a score of 200)\n # =============================================================================\n if max(fitness)>fitness_threshold:\n print('A super snake has been born.')\n break\n \n #reset snake population and fitness values for next round\n snakes = []\n fitness = [0]*population\n \n print('Repopulating snake bodies for next gen...')\n for i in range(population):\n snakes.append(Snake((1,0),SnakeComponent(int(grid.square_width),(int(0.5*grid_columns),int(0.5*grid_rows)),(0,255,0),shape='circle'),food_energy))\n\n\nif __name__ == '__main__':\n from ga_tools import *\n from game_objects import *\n import settings_training\n settings=settings_training.settings\n \n print(settings)\n \n evalGenomes(**settings)\n \n pygame.quit()\n \n #main()", "repo_name": "LPRowe/artificial-intelligence-snake", "sub_path": "ga_snake_train.py", "file_name": "ga_snake_train.py", "file_ext": "py", "file_size_in_byte": 30081, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "75", "api": [{"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 214, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 222, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 222, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 231, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 273, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 273, 
"usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 275, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 275, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 279, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 279, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 315, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 315, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 317, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 317, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 325, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 327, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 328, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 352, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 355, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 365, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 372, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 372, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 372, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 386, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 399, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 403, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 422, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 422, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 429, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 429, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 431, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 478, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 496, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 498, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 499, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 499, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 542, 
"usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 544, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 545, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 545, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 586, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 588, "usage_type": "call"}, {"api_name": "time.time", "line_number": 589, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 598, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 598, "usage_type": "attribute"}, {"api_name": "settings_training.settings", "line_number": 651, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 657, "usage_type": "call"}]} +{"seq_id": "14606476059", "text": "import discord\nfrom discord import app_commands\nfrom discord.ext import commands\nfrom f.stuff.shopitems import shopitems\nfrom f.__index__ import *\nfrom db.db import db\nfrom db.sql import *\n\nclass Economy(commands.Cog):\n\tdef __init__(self, bot: commands.Bot):\n\t\tself.bot = bot\n\t\tself.currency = \"⚇\"\n\t\tself.shopitems = shopitems\n\t\n\tgroup = app_commands.Group(name=\"econ\", description=f\"Economy commands: earn coins, spend coins, and more\")\n\n\t# some of the economy commands will be in other cogs\n\t# levels: levelling up gives you some $$$\n\t# admin: archivemonth also removes equipped products\n\n\t# inventory stuff\n\tclass ItemActions(discord.ui.View):\n\t\tdef __init__(\n\t\t\tself, \n\t\t\tcurrency: str,\n\t\t\tembed: discord.Embed,\n\t\t\towner: discord.User,\n\t\t\titem: str,\n\t\t\tequippable: bool = False,\n\t\t\tsellable: bool = False\n\t\t):\n\t\t\tself.currency = currency\n\t\t\tself.embed = embed\n\t\t\tself.owner = owner\n\t\t\tself.item = item\n\t\t\tself.equippable = equippable\n\t\t\tself.sellable = sellable\n\n\t\t\tprice = shopitems[self.item][\"price\"]\n\t\t\t# get 65% of the price\n\t\t\tself.price = int(price * 0.75)\n\n\t\t\tsuper().__init__()\n\n\t\t\t# disable some buttons\n\t\t\tself.disable_buttons()\n\t\t\n\t\tasync def on_timeout(self) -> None:\n\t\t\tfor item in self.children:\n\t\t\t\titem.disabled = True\n\n\t\t\tawait self.message.edit(view=self)\n\t\t\n\t\tdef disable_buttons(self):\n\t\t\tall_buttons = self.children\n\t\t\tequip = discord.utils.get(all_buttons, custom_id=\"equip\")\n\t\t\tsell = discord.utils.get(all_buttons, custom_id=\"sell\")\n\t\t\tsell.label = f\"sell for {self.price} {self.currency}\"\n\t\t\tequip.disabled = not self.equippable\n\t\t\tsell.disabled = not self.sellable\n\n\t\t@discord.ui.button(custom_id=\"equip\", label='equip', style=discord.ButtonStyle.green)\n\t\tasync def equip(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\t\t# assuming that this is the inventory\n\t\t\t# and that the item is equippable\n\t\t\tguildid = interaction.guild.id\n\t\t\townerid = self.owner.id\n\n\t\t\t# check stuff to avoid errors\n\t\t\tawait psql.check_user(ownerid, guildid)\n\n\t\t\tif interaction.user == self.owner:\n\t\t\t\t# get the user's inventory\n\t\t\t\trow = await psql.db.fetchrow(\n\t\t\t\t\t\"\"\"--sql\n\t\t\t\t\tSELECT inventory, equipped FROM users\n\t\t\t\t\tWHERE userid = $1 and guildid = $2\n\t\t\t\t\t\"\"\",\n\t\t\t\t\townerid, guildid\n\t\t\t\t)\n\t\t\t\tinventory = psql.commasplit(row[\"inventory\"])\n\t\t\t\tequipped = 
psql.commasplit(row[\"equipped\"])\n\n\t\t\t\t# find the index of the item in the inventory\n\t\t\t\tindex = inventory.index(self.item)\n\n\t\t\t\t# remove from the inventory\n\t\t\t\tinventory.pop(index)\n\t\t\t\t# add to equipped\n\t\t\t\tequipped.append(self.item)\n\n\t\t\t\tif self.item in inventory:\n\t\t\t\t\t# it's still in there, so can still be equipped and sold\n\t\t\t\t\tself.equippable = True\n\t\t\t\t\tself.sellable = True\n\t\t\t\telse:\n\t\t\t\t\tself.equippable = False\n\t\t\t\t\t# you can't sell a non-existent item already equipped\n\t\t\t\t\tself.sellable = False\n\t\t\t\t\n\t\t\t\tself.disable_buttons()\n\n\t\t\t\t# save the data\n\t\t\t\tconnection = await psql.db.acquire()\n\t\t\t\tasync with connection.transaction():\n\t\t\t\t\tawait psql.db.execute(\n\t\t\t\t\t\t\"\"\"--sql\n\t\t\t\t\t\tUPDATE users\n\t\t\t\t\t\tSET inventory = $1, equipped = $2\n\t\t\t\t\t\tWHERE userid = $3 and guildid = $4\n\t\t\t\t\t\t\"\"\",\n\t\t\t\t\t\tpsql.commasjoin(inventory), psql.commasjoin(equipped),\n\t\t\t\t\t\townerid, guildid\n\t\t\t\t\t)\n\t\t\t\tawait psql.db.release(connection)\n\n\t\t\t\t# update the embed\n\t\t\t\ttitle = self.embed.title\n\t\t\t\tdescription = self.embed.description\n\n\t\t\t\t# log stuff in the description\n\t\t\t\t# if ``` .* ``` is in the description, replace the inside with stuff\n\t\t\t\tmessage = f\"Equipped x1 of {self.item}\"\n\t\t\t\tif \"```\" in description:\n\t\t\t\t\t# find the start and end of the code block\n\t\t\t\t\tstart = description.find(\"```\")\n\t\t\t\t\tend = description.find(\"```\", start + 3)\n\t\t\t\t\t# get the code block\n\t\t\t\t\tlogs = description[start + 3:end]\n\t\t\t\t\t# replace the code block with the new stuff\n\t\t\t\t\tdescription = description.replace(logs, logs + \"\\n\" + message)\n\t\t\t\telse:\n\t\t\t\t\tdescription += \"\\n```\\n\" + f\"{message}```\"\n\n\t\t\t\t# find out how many of the item is in the inventory now\n\t\t\t\tinventory = inventory\n\t\t\t\tif self.item in inventory:\n\t\t\t\t\tintcount = inventory.count(self.item)\n\t\t\t\t\tif intcount > 0:\n\t\t\t\t\t\tcount = f\" (x{intcount})\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tcount = \" [Not in the inventory]\"\n\t\t\t\t\ttitle = title.replace(f\" (x{intcount + 1})\", count)\n\t\t\t\telse:\n\t\t\t\t\tintcount = 0\n\t\t\t\t\tcount = \" [Not in the inventory]\"\n\t\t\t\t\tif f\" (x{intcount + 1})\" in title:\n\t\t\t\t\t\ttitle = title.replace(f\" (x{intcount + 1})\", count)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttitle += count\n\t\t\t\tself.embed.__setattr__(\"title\", title)\n\t\t\t\tself.embed.__setattr__(\"description\", description)\n\n\n\t\t\t\tawait interaction.response.edit_message(embed=self.embed, view=self)\n\t\t\telse:\n\t\t\t\tembed = discord.Embed(\n\t\t\t\t\ttitle=\"Only the owner of this item can do this!\",\n\t\t\t\t\tcolor = theme.colours.red\n\t\t\t\t)\n\t\t\t\tawait interaction.response.send_message(embed=embed, ephemeral=True)\n\n\t\t@discord.ui.button(custom_id=\"sell\", label='sell', style=discord.ButtonStyle.red)\n\t\tasync def sell(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\t\t# assuming that this is the inventory\n\t\t\t# and that the item is equippable\n\t\t\tguildid = interaction.guild.id\n\t\t\townerid = self.owner.id\n\n\t\t\t# check stuff to avoid errors\n\t\t\tawait psql.check_user(ownerid, guildid)\n\n\t\t\tif interaction.user == self.owner:\n\t\t\t\t# get the user's inventory\n\t\t\t\trow = await psql.db.fetchrow(\n\t\t\t\t\t\"\"\"--sql\n\t\t\t\t\tSELECT inventory, equipped, balance FROM users\n\t\t\t\t\tWHERE 
userid = $1 and guildid = $2\n\t\t\t\t\t\"\"\",\n\t\t\t\t\townerid, guildid\n\t\t\t\t)\n\t\t\t\t\n\t\t\t\tinventory = psql.commasplit(row[\"inventory\"])\n\t\t\t\tequipped = psql.commasplit(row[\"equipped\"])\n\t\t\t\tbalance = row[\"balance\"]\n\n\t\t\t\t# find the index of the item in the inventory\n\t\t\t\tindex = inventory.index(self.item)\n\n\t\t\t\t# remove from the inventory\n\t\t\t\tinventory.pop(index)\n\t\t\t\t# instead of equipping, sell the item\n\t\t\t\tprice = self.price\n\t\t\t\t# add to the user's balance\n\t\t\t\tbalance = row[\"balance\"] + price\n\n\t\t\t\t# save the data\n\t\t\t\tconnection = await psql.db.acquire()\n\t\t\t\tasync with connection.transaction():\n\t\t\t\t\tawait psql.db.execute(\n\t\t\t\t\t\t\"\"\"--sql\n\t\t\t\t\t\tUPDATE users\n\t\t\t\t\t\tSET inventory = $1, balance = $2\n\t\t\t\t\t\tWHERE userid = $3 and guildid = $4\n\t\t\t\t\t\t\"\"\",\n\t\t\t\t\t\tpsql.commasjoin(inventory), balance,\n\t\t\t\t\t\townerid, guildid\n\t\t\t\t\t)\n\t\t\t\tawait psql.db.release(connection)\n\n\t\t\t\tif self.item in inventory:\n\t\t\t\t\t# it's still in there, so it can still be sold; whether it can be equipped is unchanged\n\t\t\t\t\tself.sellable = True\n\t\t\t\telse:\n\t\t\t\t\tself.equippable = False\n\t\t\t\t\t# you can't sell an item that is no longer in the inventory\n\t\t\t\t\tself.sellable = False\n\t\t\t\t\n\t\t\t\tself.disable_buttons()\n\n\t\t\t\t# update the embed\n\t\t\t\ttitle = self.embed.title\n\t\t\t\tdescription = self.embed.description\n\n\t\t\t\t# log stuff in the description\n\t\t\t\t# if ``` .* ``` is in the description, replace the inside with stuff\n\t\t\t\tmessage = f\"Sold x1 of {self.item} for {price} {self.currency}\"\n\t\t\t\tif \"```\" in description:\n\t\t\t\t\t# find the start and end of the code block\n\t\t\t\t\tstart = description.find(\"```\")\n\t\t\t\t\tend = description.find(\"```\", start + 3)\n\t\t\t\t\t# get the code block\n\t\t\t\t\tlogs = description[start + 3:end]\n\t\t\t\t\t# replace the code block with the new stuff\n\t\t\t\t\tdescription = description.replace(logs, logs + \"\\n\" + message)\n\t\t\t\telse:\n\t\t\t\t\tdescription += \"\\n```\\n\" + f\"{message}```\"\n\n\t\t\t\t# find out how many of the item is in the inventory now\n\t\t\t\tinventory = inventory\n\t\t\t\tif self.item in inventory:\n\t\t\t\t\tintcount = inventory.count(self.item)\n\t\t\t\t\tif intcount > 0:\n\t\t\t\t\t\tcount = f\" (x{intcount})\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tcount = \" [Not in the inventory]\"\n\t\t\t\t\ttitle = title.replace(f\" (x{intcount + 1})\", count)\n\t\t\t\telse:\n\t\t\t\t\tintcount = 0\n\t\t\t\t\tcount = \" [Not in the inventory]\"\n\t\t\t\t\tif f\" (x{intcount + 1})\" in title:\n\t\t\t\t\t\ttitle = title.replace(f\" (x{intcount + 1})\", count)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttitle += count\n\t\t\t\tself.embed.__setattr__(\"title\", title)\n\t\t\t\tself.embed.__setattr__(\"description\", description)\n\n\n\t\t\t\tawait interaction.response.edit_message(embed=self.embed, view=self)\n\t\t\telse:\n\t\t\t\tembed = discord.Embed(\n\t\t\t\t\ttitle=\"Only the owner of this item can do this!\",\n\t\t\t\t\tcolor = theme.colours.red\n\t\t\t\t)\n\t\t\t\tawait interaction.response.send_message(embed=embed, ephemeral=True)\n\n\tclass InventoryDropdown(discord.ui.Select):\n\t\tdef __init__(self, currency: str, equipped: bool, user: discord.User, inventorylist: list, ephemeral: bool, ItemActions):\n\t\t\tself.currency = currency\n\t\t\tself.equipped = equipped # 1 for 
equipped items, 2 for inventory items\n\t\t\tself.user = user\n\t\t\tself.ephemeral = ephemeral\n\t\t\tself.ItemActions = ItemActions\n\n\t\t\t# inventorylist is a list of items in the inventory\n\t\t\t# make it a dict {\"name\": amount}\n\t\t\tself.items_with_count = {}\n\t\t\tfor item in inventorylist:\n\t\t\t\tif item in self.items_with_count:\n\t\t\t\t\tself.items_with_count[item] += 1\n\t\t\t\telse:\n\t\t\t\t\tself.items_with_count[item] = 1\n\n\t\t\t# {\"item\": count}\n\t\t\t# Set the options that will be presented inside the dropdown\n\t\t\toptions = []\n\t\t\tfor item in self.items_with_count.keys():\n\t\t\t\tcount = self.items_with_count[item]\n\t\t\t\tif count == 1:\n\t\t\t\t\tcount = \"\"\n\t\t\t\telse:\n\t\t\t\t\tcount = f\"(x{count}) \"\n\n\t\t\t\tdescription = f\"{count}{shopitems[item]['description']}\"\n\t\t\t\t# only get the first 100 characters of the description\n\t\t\t\tif len(description) > 100:\n\t\t\t\t\tdescription = description[:97] + \"...\"\n\n\t\t\t\toptions.append(\n\t\t\t\t\tdiscord.SelectOption(\n\t\t\t\t\t\tlabel = item,\n\t\t\t\t\t\tdescription = description\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\n\t\t\tif equipped == True:\n\t\t\t\tplaceholder = \"View equipped items...\"\n\t\t\telif equipped == False:\n\t\t\t\tplaceholder = \"View inventory items...\"\n\n\t\t\tsuper().__init__(\n\t\t\t\tplaceholder=placeholder,\n\t\t\t\tmin_values=1,\n\t\t\t\tmax_values=1,\n\t\t\t\toptions=options\n\t\t\t\t)\n\n\t\tasync def callback(self, interaction: discord.Interaction):\n\t\t\t# self.values = list of selected options\n\t\t\titem = self.values[0]\n\t\t\t# get the item's description\n\t\t\tdescription = shopitems[item][\"description\"]\n\t\t\tuse = shopitems[item][\"use\"]\n\t\t\tequip = shopitems[item][\"equip\"]\n\n\t\t\tabout = self.items_with_count[item]\n\t\t\tif self.equipped == True:\n\t\t\t\tif about == 1:\n\t\t\t\t\tabout = \" [Equipped]\"\n\t\t\t\telse:\n\t\t\t\t\tabout = f\" [Equipped x{about}]\"\n\t\t\telif self.equipped == False:\n\t\t\t\tif about == 1:\n\t\t\t\t\tabout = \"\"\n\t\t\t\telse:\n\t\t\t\t\tabout = f\" (x{about})\"\n\t\t\t\n\t\t\tdescription = f\"{description} {use}\"\n\t\t\tif equip and item != \"python\":\n\t\t\t\tdescription = f\"{description} Can be equipped until the monthly reset.\"\n\t\t\t\n\t\t\tembed = discord.Embed(\n\t\t\t\t# capitalise item name\n\t\t\t\ttitle = f\"{item.capitalize()}{about}\",\n\t\t\t\tdescription = description,\n\t\t\t\tcolor = theme.colours.primary\n\t\t\t)\n\t\t\tembed.set_footer(\n\t\t\t\ttext = f\"Owned by {self.user.name}#{self.user.discriminator}\",\n\t\t\t)\n\n\t\t\tif not self.equipped and equip == True:\n\t\t\t\t# not equipped yet / can be equipped\n\t\t\t\tequippable = True\n\t\t\telse:\n\t\t\t\t# it's equipped already / can't be equipped\n\t\t\t\tequippable = False\n\t\t\t\n\t\t\tif not self.equipped:\n\t\t\t\t# can be sold\n\t\t\t\tsellable = True\n\t\t\telse:\n\t\t\t\tsellable = False\n\n\t\t\tview = self.ItemActions(\n\t\t\t\tcurrency = self.currency,\n\t\t\t\tembed = embed,\n\t\t\t\towner = self.user,\n\t\t\t\titem = item,\n\t\t\t\tequippable = equippable,\n\t\t\t\tsellable = sellable\n\t\t\t)\n\n\t\t\tview.message = await interaction.response.edit_message(\n\t\t\t\tembed = embed,\n\t\t\t\tview = view\n\t\t\t)\n\t\n\tclass InventoryDropdownView(discord.ui.View):\n\t\tdef __init__(self, InventoryDropdown, currency: str, user: discord.User, inventorylist: list, equippedlist: list, ephemeral: bool, ItemActions):\n\t\t\tsuper().__init__()\n\n\t\t\t# Adds the dropdown to our view object.\n\t\t\tif 
len(equippedlist) > 0:\n\t\t\t\tself.add_item(\n\t\t\t\t\tInventoryDropdown(currency, True, user, equippedlist, ephemeral, ItemActions)\n\t\t\t\t)\n\t\t\tif len(inventorylist) > 0:\n\t\t\t\tself.add_item(\n\t\t\t\t\tInventoryDropdown(currency, False, user, inventorylist, ephemeral, ItemActions)\n\t\t\t\t)\n\t\t\n\t\tasync def on_timeout(self) -> None:\n\t\t\tfor item in self.children:\n\t\t\t\titem.disabled = True\n\n\t\t\tawait self.message.edit(view=self)\n\n\t@group.command(name=\"stats\")\n\t@app_commands.describe(\n\t\tmember = \"the member to get the stats of\"\n\t)\n\tasync def stats(\n\t\tself,\n\t\tinteraction: discord.Interaction,\n\t\tmember: discord.User = None,\n\t\tephemeral: bool = False\n\t\t) -> None:\n\t\t\"\"\"\n\t\tGets the balance and inventory of a user. \"\"\"\n\n\t\tif member == None:\n\t\t\tmember = interaction.user\n\t\telse:\n\t\t\tmember = member\n\t\t\n\t\tif member.bot:\n\t\t\tawait interaction.response.defer(ephemeral=True)\n\n\t\t\tembed = discord.Embed(\n\t\t\t\ttitle = \"You can't rank bots!\", \n\t\t\t\tcolour = theme.colours.red\n\t\t\t)\n\t\t\tawait interaction.followup.send(embed=embed)\n\t\telse:\n\t\t\tawait interaction.response.defer(ephemeral=ephemeral)\n\t\t\tguildid = interaction.guild.id\n\t\t\tmemberid = member.id\n\n\t\t\t# avoid errors\n\t\t\tawait psql.check_user(memberid, guildid)\n\n\t\t\trow = await psql.db.fetchrow(\n\t\t\t\t\"\"\"--sql\n\t\t\t\tSELECT balance, xp, inventory, equipped FROM users\n\t\t\t\tWHERE userid = $1 AND guildid = $2\n\t\t\t\t\"\"\",\n\t\t\t\tmemberid, guildid\n\t\t\t)\n\n\t\t\tcoins = row[\"balance\"]\n\t\t\txp = row[\"xp\"]\n\t\t\tinventory = row[\"inventory\"]\n\t\t\tequipped = row[\"equipped\"]\n\n\t\t\tinventorylist = psql.commasplit(inventory)\n\t\t\tequippedlist = psql.commasplit(equipped)\n\n\t\t\t# sort lists alphabetically\n\t\t\tinventorylist.sort()\n\t\t\tequippedlist.sort()\n\n\t\t\t# get the kill and xp multipliers\n\t\t\tmulti = calc_multi(equippedlist, xp)\n\t\t\tkill_multi = multi.kill_multi\n\t\t\txp_multi = multi.xp_multi\n\n\t\t\tnewline = \"\\n\"\n\n\t\t\tgeneral = [\n\t\t\t\tf\"balance: {coins} {self.currency}\",\n\t\t\t\tf\"hunting multiplier: {kill_multi - 1}\",\n\t\t\t\tf\"xp multiplier: {xp_multi - 1}\"\n\t\t\t]\n\t\t\tgeneral = \"\\n\".join(general)\n\n\t\t\togequipped = equippedlist.copy()\n\t\t\toginventory = inventorylist.copy()\n\n\t\t\tembed = discord.Embed(\n\t\t\t\ttitle = f\"{member.name}'s stats\",\n\t\t\t\tdescription = general,\n\t\t\t\tcolor = theme.colours.primary\n\t\t\t)\n\n\t\t\tif len(inventorylist) == 0:\n\t\t\t\tembed.set_footer(\n\t\t\t\t\ttext = \"This user's inventory is empty. Items can be bought in the shop.\"\n\t\t\t\t)\n\t\t\telif len(equippedlist) == 0:\n\t\t\t\tembed.set_footer(\n\t\t\t\t\ttext = \"This user has no equipped items. 
Items can be equipped by selecting them in the inventory.\"\n\t\t\t\t)\n\t\t\t\n\t\t\tif len(equippedlist) == 0:\n\t\t\t\tequippedlist.append(\"[There are no equipped items.]\")\n\t\t\tif len(inventorylist) == 0:\n\t\t\t\tinventorylist.append(\"[This inventory is empty.]\")\n\n\t\t\tview = self.InventoryDropdownView(\n\t\t\t\tself.InventoryDropdown,\n\t\t\t\tcurrency = self.currency,\n\t\t\t\tuser = member,\n\t\t\t\tinventorylist = oginventory,\n\t\t\t\tequippedlist = ogequipped,\n\t\t\t\tephemeral = ephemeral,\n\t\t\t\tItemActions = self.ItemActions\n\t\t\t)\n\t\t\tview.message = await interaction.followup.send(embed=embed, view=view)\n\n\t# leaderboard\n\tclass LeaderboardView(discord.ui.View):\n\t\t\"\"\"\n\t\tButtons for the /leaderboard command \"\"\"\n\n\t\tdef __init__(self, guild: discord.Guild, currency: str, users_per_page: int = 5):\n\t\t\t# if you want to see top 1-10, then 1\n\t\t\t# top 11-20, then 11\n\t\t\t# and so on\n\t\t\tself.leaderboard = []\n\t\t\tself.leaderboard_index = 0\n\t\t\tself.guild = guild\n\t\t\tself.max_per_page = users_per_page\n\t\t\tself.currency = currency\n\t\t\t\n\t\t\t# gonna dump stuff here\n\t\t\t# ◁ ▷\n\n\t\t\tsuper().__init__() \n\t\t\t# apparently I must do this or stuff breaks\n\t\t\n\t\tdef disable_buttons(self):\n\t\t\tmax_per_page = self.max_per_page\n\t\t\t# update the buttons\n\t\t\tall_buttons = self.children\n\n\t\t\tleftbutton = discord.utils.get(all_buttons, custom_id=\"left\")\n\t\t\trightbutton = discord.utils.get(all_buttons, custom_id=\"right\")\n\n\t\t\tif self.leaderboard_index - max_per_page < 0:\n\t\t\t\tleftbutton.disabled = True\n\t\t\telse:\n\t\t\t\tleftbutton.disabled = False\n\t\t\t\n\t\t\tif self.leaderboard_index + max_per_page >= len(self.leaderboard):\n\t\t\t\trightbutton.disabled = True\n\t\t\telse:\n\t\t\t\trightbutton.disabled = False\n\t\t\n\t\tasync def on_timeout(self) -> None:\n\t\t\tfor item in self.children:\n\t\t\t\titem.disabled = True\n\n\t\t\tawait self.message.edit(view=self)\n\n\t\tasync def generate_leaderboard(self, guild: discord.Guild) -> list:\n\t\t\t\"\"\"\n\t\t\tGives a list of the leaderboard.\n\t\t\t\n\t\t\t`guild` is the server to get the leaderboard for \n\t\t\t\n\t\t\tReturns a list of dicts, each dict containing the member and $$$.\n\n\t\t\t```py\n\t\t\t[\n\t\t\t\t{\n\t\t\t\t\t\"member\": discord.User,\n\t\t\t\t\t\"$$$\": int\n\t\t\t\t}, {}, {} # etc\n\t\t\t] \n\t\t\t```\"\"\"\n\n\t\t\t# get the data\n\n\t\t\trows = await psql.db.fetch(\n\t\t\t\t\"\"\"--sql\n\t\t\t\tSELECT userid, balance FROM users\n\t\t\t\tWHERE guildid = $1\n\t\t\t\tORDER BY balance DESC\n\t\t\t\t\"\"\",\n\t\t\t\tguild.id\n\t\t\t)\n\n\t\t\tfor row in rows:\n\t\t\t\tmember = guild.get_member(row[\"userid\"])\n\t\t\t\tif member is not None:\n\t\t\t\t\tbalance = row[\"balance\"]\n\n\t\t\t\t\t# add the user to the leaderboard\n\t\t\t\t\tself.leaderboard.append(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"member\": member,\n\t\t\t\t\t\t\t\"$$$\": balance\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\n\t\t\t# sort the users by most $$$ to least $$$\n\t\t\t# I have no idea how to use lambda, but GitHub copilot said to use it\n\t\t\t# I think this lambda basically returns the xp of the dict, x being the dict\n\t\t\tself.leaderboard.sort(key=lambda x: x[\"$$$\"], reverse=True)\n\n\t\t\treturn self.leaderboard\n\n\t\tdef get_leaderboard_embed(self, guild: discord.Guild, startindex: int) -> discord.Embed:\n\t\t\t\"\"\"\n\t\t\tReturns an embed of the leaderboard.\n\n\t\t\t`guild` is the server to get the leaderboard for\n\t\t\t\n\t\t\t`startindex` is the 
index where the leaderboard should start, eg 0 would show top 1; 10 would show top 11 \"\"\"\n\n\t\t\t# list slicing to get the users\n\t\t\tleaderboard = self.leaderboard[startindex:startindex + self.max_per_page]\n\n\t\t\t# I love list slicing but I can never remember how\n\n\t\t\t# make the description\n\t\t\tdescription_list = []\n\n\t\t\trank = startindex + 1\n\n\t\t\tfor user in leaderboard:\n\t\t\t\t# get the user's data\n\t\t\t\tmember = user[\"member\"]\n\t\t\t\tbal = user[\"$$$\"]\n\n\t\t\t\t# make the description\n\t\t\t\tdescription_list.append(\n\t\t\t\t\tf\"**`#{rank}`** | {member.mention} | {bal} {self.currency}\"\n\t\t\t\t)\n\n\t\t\t\trank += 1\n\t\t\t\n\t\t\tdescription = \"\\n\".join(description_list)\n\n\n\t\t\t# make the embed\n\t\t\tembed = discord.Embed(\n\t\t\t\ttitle = f\"{guild.name}'s leaderboard\",\n\t\t\t\tdescription = description,\n\t\t\t\tcolor = theme.colours.primary\n\t\t\t)\n\n\t\t\treturn embed\n\n\t\t\n\t\t# Define the actual button\n\t\t@discord.ui.button(\n\t\t\tlabel='◄',\n\t\t\tstyle=discord.ButtonStyle.secondary,\n\t\t\tcustom_id='left',\n\t\t\tdisabled=True\n\t\t\t)\n\t\tasync def left(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\t\t# which part of the leaderboard to see\n\t\t\tmax_per_page = self.max_per_page\n\t\t\tguild = interaction.guild\n\n\t\t\t# eg your index is 10 and max_per_page is 10\n\t\t\t# so make index -10 so 0\n\t\t\tself.leaderboard_index -= max_per_page\n\n\t\t\t# get the leaderboard\n\t\t\tembed = self.get_leaderboard_embed(\n\t\t\t\tguild = guild,\n\t\t\t\tstartindex = self.leaderboard_index\n\t\t\t)\n\n\t\t\t# update the buttons\n\t\t\tself.disable_buttons()\n\n\t\t\t# Make sure to update the message with our updated selves\n\t\t\tawait interaction.response.edit_message(embed=embed, view=self)\n\t\t\n\t\t@discord.ui.button(\n\t\t\tlabel='top users',\n\t\t\tstyle=discord.ButtonStyle.secondary,\n\t\t\tcustom_id='top',\n\t\t\tdisabled=False\n\t\t\t)\n\t\tasync def top(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\t\t# which part of the leaderboard to see\n\t\t\tmax_per_page = self.max_per_page\n\t\t\tguild = interaction.guild\n\n\t\t\t# eg your index is 10 and max_per_page is 10\n\t\t\t# so make index -10 so 0\n\t\t\tself.leaderboard_index = 0\n\n\t\t\t# get the leaderboard\n\t\t\tembed = self.get_leaderboard_embed(\n\t\t\t\tguild = guild,\n\t\t\t\tstartindex = self.leaderboard_index\n\t\t\t)\n\n\t\t\t# update the buttons\n\t\t\tself.disable_buttons()\n\n\t\t\t# Make sure to update the message with our updated selves\n\t\t\tawait interaction.response.edit_message(embed=embed, view=self)\n\t\t\n\t\t# right button now\n\t\t@discord.ui.button(\n\t\t\tlabel='►',\n\t\t\tstyle=discord.ButtonStyle.secondary,\n\t\t\tcustom_id='right',\n\t\t\tdisabled=False\n\t\t\t)\n\t\tasync def right(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\t\t# which part of the leaderboard to see\n\t\t\tmax_per_page = self.max_per_page\n\t\t\tguild = interaction.guild\n\n\t\t\t# eg your index is 10 and max_per_page is 10\n\t\t\t# so make index +10 so 20\n\t\t\tself.leaderboard_index += max_per_page\n\n\t\t\t# get the leaderboard\n\t\t\tembed = self.get_leaderboard_embed(\n\t\t\t\tguild = guild,\n\t\t\t\tstartindex = self.leaderboard_index\n\t\t\t)\n\n\t\t\t# update the buttons\n\t\t\tall_buttons = self.children\n\n\t\t\tleftbutton = discord.utils.get(all_buttons, custom_id=\"left\")\n\t\t\trightbutton = discord.utils.get(all_buttons, 
custom_id=\"right\")\n\n\t\t\tself.disable_buttons()\n\n\n\t\t\t# Make sure to update the message with our updated selves\n\t\t\tawait interaction.response.edit_message(embed=embed, view=self)\n\n\t@group.command(name=\"leaderboard\")\n\t@app_commands.describe(\n\t\tephemeral = \"whether or not others should see the bot's reply\",\n\t\tusersperpage = \"the number of users to show per page\"\n\t)\n\tasync def leaderboard(self, interaction: discord.Interaction, ephemeral: bool = False, usersperpage: int = 5) -> None:\n\t\t\"\"\"\n\t\tViews the economy leaderboard. \"\"\"\n\t\tawait interaction.response.defer(ephemeral=ephemeral)\n\n\t\tlb = self.LeaderboardView(interaction.guild, self.currency, usersperpage)\n\n\t\tawait lb.generate_leaderboard(interaction.guild)\n\n\t\tembed = lb.get_leaderboard_embed(\n\t\t\tguild = interaction.guild,\n\t\t\tstartindex = 0\n\t\t)\n\n\t\t# disable some buttons\n\t\tall_buttons = lb.children\n\n\t\trightbutton = discord.utils.get(all_buttons, custom_id=\"right\")\n\n\t\tif lb.max_per_page >= len(lb.leaderboard):\n\t\t\trightbutton.disabled = True\n\n\t\tlb.message = await interaction.followup.send(\n\t\t\tembed = embed,\n\t\t\tview = lb\n\t\t)\n\n\t# shop\n\tclass ShopDrop(discord.ui.Select):\n\t\tdef __init__(self, currency: str):\n\t\t\tself.currency = currency\n\t\t\titemkeys = shopitems.keys()\n\t\t\t# sort keys by price\n\t\t\titemkeys = sorted(itemkeys, key=lambda x: shopitems[x]['price'])\n\n\t\t\toptions = []\n\n\t\t\tfor itemkey in itemkeys:\n\t\t\t\tname = shopitems[itemkey]['name']\n\t\t\t\tdescription = shopitems[itemkey]['description']\n\t\t\t\tprice = f\"{shopitems[itemkey]['price']} {self.currency}\"\n\n\t\t\t\tdescription = f\"[{price}] {description}\"\n\t\t\t\t# only get the first 100 characters of the description\n\t\t\t\tif len(description) > 100:\n\t\t\t\t\tdescription = description[:97] + \"...\"\n\n\t\t\t\toptions.append(\n\t\t\t\t\tdiscord.SelectOption(\n\t\t\t\t\t\tlabel = name,\n\t\t\t\t\t\tdescription = description\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\n\t\t\tsuper().__init__(\n\t\t\t\tplaceholder='View shop items...',\n\t\t\t\tmin_values=1,\n\t\t\t\tmax_values=1,\n\t\t\t\toptions=options\n\t\t\t)\n\t\t\n\n\t\tasync def callback(self, interaction: discord.Interaction):\n\t\t\t# Use the interaction object to send a response message containing\n\t\t\t# the user's favourite colour or choice. The self object refers to the\n\t\t\t# Select object, and the values attribute gets a list of the user's\n\t\t\t# selected options. 
We only want the first one.\n\t\t\titem = self.values[0]\n\t\t\titemdict = shopitems[item]\n\n\t\t\tname = itemdict[\"name\"]\n\t\t\tdescription = itemdict[\"description\"]\n\t\t\tuse = itemdict[\"use\"]\n\t\t\tprice = itemdict[\"price\"]\n\t\t\tkill_multi = itemdict.get(\"kill_multi\")\n\t\t\txp_multi = itemdict.get(\"xp_multi\")\n\t\t\tequip = itemdict[\"equip\"]\n\n\t\t\tdescription = f\"{description} {use}\"\n\t\t\tif equip:\n\t\t\t\tdescription = f\"{description} Can be equipped for effects.\"\n\n\t\t\tembed = discord.Embed(\n\t\t\t\ttitle = f\"{name.capitalize()} [{price} {self.currency}]\",\n\t\t\t\tdescription = description,\n\t\t\t\tcolor = theme.colours.primary\n\t\t\t)\n\t\t\t\n\t\t\tview = self.view\n\t\t\tview.shopitem = item.lower()\n\t\t\tall_buttons = view.children\n\t\t\tbuybutton = discord.utils.get(all_buttons, custom_id=\"buy\")\n\t\t\tbalancebutton = discord.utils.get(all_buttons, custom_id=\"balance\")\n\t\t\tif buybutton == None:\n\t\t\t\tview.add_item(view.buy)\n\t\t\tif balancebutton == None:\n\t\t\t\tview.add_item(view.balance)\n\t\t\t# get it again\n\t\t\tall_buttons = view.children\n\t\t\tbuybutton = discord.utils.get(all_buttons, custom_id=\"buy\")\n\t\t\tbuybutton.label = f\"buy this item for {price} {self.currency}\"\n\n\t\t\tawait interaction.response.edit_message(embed=embed, view=view)\n\n\tclass ShopDropView(discord.ui.View):\n\t\tdef __init__(self, ShopDrop, currency: str):\n\t\t\tsuper().__init__()\n\n\t\t\t# Adds the dropdown to our view object.\n\t\t\tself.currency = currency\n\t\t\tself.shopitem = None\n\t\t\tself.add_item(ShopDrop(currency))\n\n\t\t\t# remove the buy button\n\t\t\tall_buttons = self.children\n\t\t\tbuybutton = discord.utils.get(all_buttons, custom_id=\"buy\")\n\t\t\tbalancebutton = discord.utils.get(all_buttons, custom_id=\"balance\")\n\t\t\tself.remove_item(buybutton)\n\t\t\tself.remove_item(balancebutton)\n\t\t\n\t\tasync def on_timeout(self) -> None:\n\t\t\tfor item in self.children:\n\t\t\t\titem.disabled = True\n\n\t\t\tawait self.message.edit(view=self)\n\n\t\t@discord.ui.button(label='buy', style=discord.ButtonStyle.primary, custom_id=\"buy\", row=2)\n\t\tasync def buy(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\t\tguildid = interaction.guild.id\n\t\t\tuserid = interaction.user.id\n\t\t\titemdict = shopitems[self.shopitem]\n\n\t\t\tprice = itemdict[\"price\"]\n\n\t\t\tawait psql.check_user(userid, guildid)\n\n\t\t\trow = await psql.db.fetchrow(\n\t\t\t\t\"\"\"--sql\n\t\t\t\tSELECT balance, inventory FROM users\n\t\t\t\tWHERE guildid = $1 AND userid = $2\n\t\t\t\t\"\"\",\n\t\t\t\tguildid, userid\n\t\t\t)\n\n\t\t\tcash = row['balance']\n\t\t\tinventory = psql.commasplit(row['inventory'])\n\t\t\t\n\t\t\tif cash >= price:\n\t\t\t\t# remove the money and add the item\n\t\t\t\tcash -= price\n\t\t\t\tinventory.append(self.shopitem)\n\n\t\t\t\tconnection = await psql.db.acquire()\n\t\t\t\tasync with connection.transaction():\n\t\t\t\t\tawait psql.db.execute(\n\t\t\t\t\t\t\"\"\"--sql\n\t\t\t\t\t\tUPDATE users\n\t\t\t\t\t\tSET balance = $1, inventory = $2\n\t\t\t\t\t\tWHERE guildid = $3 AND userid = $4\n\t\t\t\t\t\t\"\"\",\n\t\t\t\t\t\tcash, psql.commasjoin(inventory),\n\t\t\t\t\t\tguildid, userid\n\t\t\t\t\t)\n\t\t\t\tawait psql.db.release(connection)\n\n\t\t\t\tembed = discord.Embed(\n\t\t\t\t\ttitle = f\"You bought x1 of {self.shopitem}\",\n\t\t\t\t\tdescription = f\"You now have {cash} {self.currency}\",\n\t\t\t\t\tcolor = theme.colours.green\n\t\t\t\t)\n\t\t\t\tawait 
interaction.response.send_message(embed=embed, ephemeral=True)\n\t\t\telse:\n\t\t\t\tembed = discord.Embed(\n\t\t\t\t\ttitle = f\"You don't have enough money to buy that.\",\n\t\t\t\t\tdescription = \"\\n\".join([f\"You only have {cash} {self.currency}.\",\n\t\t\t\t\t\tf\"You'll need {price - cash} more to buy it.\"]),\n\t\t\t\t\tcolor = theme.colours.red\n\t\t\t\t)\n\t\t\t\tawait interaction.response.send_message(embed=embed, ephemeral=True)\n\n\t\t@discord.ui.button(label='view my stats', style=discord.ButtonStyle.secondary, custom_id=\"balance\", row=2)\n\t\tasync def balance(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\t\tguildid = interaction.guild.id\n\t\t\tuserid = interaction.user.id\n\n\t\t\trow = await psql.db.fetchrow(\n\t\t\t\t\"\"\"--sql\n\t\t\t\tSELECT balance, inventory FROM users\n\t\t\t\tWHERE guildid = $1 AND userid = $2\n\t\t\t\t\"\"\",\n\t\t\t\tguildid, userid\n\t\t\t)\n\n\t\t\tbalance = row['balance']\n\t\t\tinventory = psql.commasplit(row['inventory'])\n\n\t\t\titemdict = shopitems[self.shopitem]\n\t\t\tprice = itemdict[\"price\"]\n\n\t\t\tif balance >= price:\n\t\t\t\tcolour = theme.colours.green\n\t\t\telse:\n\t\t\t\tcolour = theme.colours.red\n\t\t\t\n\t\t\t# how many of this item do you have?\n\t\t\tcount = inventory.count(self.shopitem)\n\n\t\t\tembed = discord.Embed(\n\t\t\t\ttitle = f\"You have {balance} {self.currency}\",\n\t\t\t\tdescription = \"\\n\".join([\n\t\t\t\t\tf\"If you buy this item, you'll have **{balance - price} {self.currency}** left.\",\n\t\t\t\t\tf\"You have **{count}** of this item ({self.shopitem}) in your inventory.\"\n\t\t\t\t\t]),\n\t\t\t\tcolor = colour\n\t\t\t)\n\n\t\t\tawait interaction.response.send_message(embed=embed, ephemeral=True)\n\t\n\t@group.command(name=\"shop\")\n\t@app_commands.describe(\n\t\tephemeral = \"whether or not others should see the bot's reply\",\n\t)\n\tasync def shop(self, interaction: discord.Interaction, ephemeral: bool = True) -> None:\n\t\t\"\"\"\n\t\tViews the shop and its items.\"\"\"\n\t\tawait interaction.response.defer(ephemeral=ephemeral)\n\n\t\t# random from shopitems keys\n\t\tshopitems_keys = list(shopitems.keys())\n\t\trandom_key = random.choice(shopitems_keys)\n\n\t\tfeature = shopitems[random_key]\n\t\tname = feature[\"name\"].capitalize()\n\t\tdescription = feature[\"description\"]\n\t\tprice = feature[\"price\"]\n\n\t\tembed = discord.Embed(\n\t\t\ttitle = f\"{self.bot.user.name} Shop\",\n\t\t\tdescription = f\"**{name}:** {description} For {price} {self.currency} only!\",\n\t\t\tcolor = theme.colours.primary\n\t\t)\n\n\t\tview = self.ShopDropView(self.ShopDrop, self.currency)\n\n\t\tview.message = await interaction.followup.send(embed=embed, view=view)\n\nasync def setup(bot: commands.Bot):\n await bot.add_cog(Economy(bot))", "repo_name": "writeblankspace/pytree", "sub_path": "cogs/economy.py", "file_name": "economy.py", "file_ext": "py", "file_size_in_byte": 27675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "75", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 9, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 9, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 10, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 13, "usage_type": "name"}, {"api_name": "discord.app_commands.Group", "line_number": 15, "usage_type": 
"call"}, {"api_name": "discord.app_commands", "line_number": 15, "usage_type": "name"}, {"api_name": "discord.ui", "line_number": 22, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 26, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 27, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 39, "usage_type": "name"}, {"api_name": "discord.utils.get", "line_number": 56, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 56, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 57, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 57, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 63, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 63, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 157, "usage_type": "call"}, {"api_name": "discord.ui.button", "line_number": 62, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 62, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 62, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 164, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 164, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 264, "usage_type": "call"}, {"api_name": "discord.ui.button", "line_number": 163, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 163, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 163, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 270, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 271, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 297, "usage_type": "name"}, {"api_name": "discord.SelectOption", "line_number": 303, "usage_type": "call"}, {"api_name": "discord.Interaction", "line_number": 321, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 325, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 326, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 327, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 345, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 382, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 383, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 408, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 409, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 423, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 473, "usage_type": "call"}, {"api_name": "discord.app_commands.describe", "line_number": 403, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 403, "usage_type": "name"}, {"api_name": "discord.ui", "line_number": 505, "usage_type": "attribute"}, {"api_name": "discord.Guild", "line_number": 509, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 530, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 530, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 531, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 531, "usage_type": "attribute"}, {"api_name": "discord.Guild", "line_number": 549, "usage_type": "attribute"}, 
{"api_name": "discord.Guild", "line_number": 597, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 631, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 597, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 647, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 647, "usage_type": "attribute"}, {"api_name": "discord.ui.button", "line_number": 641, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 641, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 643, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 674, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 674, "usage_type": "attribute"}, {"api_name": "discord.ui.button", "line_number": 668, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 668, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 670, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 702, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 702, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 720, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 720, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 721, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 721, "usage_type": "attribute"}, {"api_name": "discord.ui.button", "line_number": 696, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 696, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 698, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 734, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 751, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 751, "usage_type": "attribute"}, {"api_name": "discord.app_commands.describe", "line_number": 730, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 730, "usage_type": "name"}, {"api_name": "discord.ui", "line_number": 762, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems.keys", "line_number": 765, "usage_type": "call"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 765, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 767, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 772, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 773, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 774, "usage_type": "name"}, {"api_name": "discord.SelectOption", "line_number": 782, "usage_type": "call"}, {"api_name": "discord.Interaction", "line_number": 796, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 802, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 816, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 825, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 825, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 826, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 826, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 833, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 833, "usage_type": "attribute"}, {"api_name": "discord.ui", 
"line_number": 838, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 849, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 849, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 850, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 850, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 861, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 861, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 864, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 899, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 906, "usage_type": "call"}, {"api_name": "discord.ui.button", "line_number": 860, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 860, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 860, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 915, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 915, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 930, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 941, "usage_type": "call"}, {"api_name": "discord.ui.button", "line_number": 914, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 914, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 914, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 956, "usage_type": "attribute"}, {"api_name": "f.stuff.shopitems.shopitems.keys", "line_number": 962, "usage_type": "call"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 962, "usage_type": "name"}, {"api_name": "f.stuff.shopitems.shopitems", "line_number": 965, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 970, "usage_type": "call"}, {"api_name": "discord.app_commands.describe", "line_number": 953, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 953, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 980, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 980, "usage_type": "name"}]}