diff --git "a/1261.jsonl" "b/1261.jsonl" new file mode 100644--- /dev/null +++ "b/1261.jsonl" @@ -0,0 +1,452 @@ +{"seq_id": "300969791", "text": "# from da\nfrom flask import request, session, current_app as app\n\nfrom wtforms import Form, FloatField, StringField\nfrom wtforms.validators import InputRequired, ValidationError, StopValidation\nfrom fractions import Fraction\nfrom datetime import datetime\nfrom pytz import timezone\nimport wtforms_json\n\nfrom openprocurement.auction.utils import prepare_extra_journal_fields\n\nwtforms_json.init()\n\n\ndef validate_bid_value(form, field):\n \"\"\"\n Bid must be greater then 0\n \"\"\"\n if field.data <= 0.0 and field.data != -1:\n raise ValidationError(u'Too low value')\n\n\ndef validate_bid_change_on_bidding(form, field):\n \"\"\"\n Bid must be lower then previous bidder bid amount minus minimalStep amount\n \"\"\"\n stage_id = form.document['current_stage']\n if form.auction.features:\n minimal_bid = form.document['stages'][stage_id]['amount_features']\n minimal = Fraction(minimal_bid) * form.auction.bidders_coeficient[form.data['bidder_id']]\n minimal -= Fraction(form.document['minimalStep']['amount'])\n if field.data > minimal:\n raise ValidationError(u'Too high value')\n else:\n minimal_bid = form.document['stages'][stage_id]['amount']\n if field.data > (minimal_bid - form.document['minimalStep']['amount']):\n raise ValidationError(u'Too high value')\n\n\ndef validate_bidder_id_on_bidding(form, field):\n stage_id = form.document['current_stage']\n if field.data != form.document['stages'][stage_id]['bidder_id']:\n raise StopValidation(u'Not valid bidder')\n\n\nclass BidsForm(Form):\n bidder_id = StringField('bidder_id',\n [InputRequired(message=u'No bidder id'), ])\n\n bid = FloatField('bid', [InputRequired(message=u'Bid amount is required'),\n validate_bid_value])\n\n def validate_bid(self, field):\n stage_id = self.document['current_stage']\n if self.document['stages'][stage_id]['type'] == 'bids':\n validate_bid_change_on_bidding(self, field)\n else:\n raise ValidationError(u'Stage not for bidding')\n\n def validate_bidder_id(self, field):\n stage_id = self.document['current_stage']\n if self.document['stages'][stage_id]['type'] == 'bids':\n validate_bidder_id_on_bidding(self, field)\n\n\ndef form_handler():\n auction = app.config['auction']\n with auction.bids_actions:\n form = app.bids_form.from_json(request.json)\n form.auction = auction\n form.document = auction.db.get(auction.auction_doc_id)\n current_time = datetime.now(timezone('Europe/Kiev'))\n if form.validate():\n # write data\n auction.add_bid(form.document['current_stage'],\n {'amount': form.data['bid'],\n 'bidder_id': form.data['bidder_id'],\n 'time': current_time.isoformat()})\n if form.data['bid'] == -1.0:\n app.logger.info(\"Bidder {} with client_id {} canceled bids in stage {} in {}\".format(\n form.data['bidder_id'], session['client_id'],\n form.document['current_stage'], current_time.isoformat()\n ), extra=prepare_extra_journal_fields(request.headers))\n else:\n app.logger.info(\"Bidder {} with client_id {} placed bid {} in {}\".format(\n form.data['bidder_id'], session['client_id'],\n form.data['bid'], current_time.isoformat()\n ), extra=prepare_extra_journal_fields(request.headers))\n return {'status': 'ok', 'data': form.data}\n else:\n app.logger.info(\"Bidder {} with client_id {} wants place bid {} in {} with errors {}\".format(\n request.json.get('bidder_id', 'None'), session['client_id'],\n request.json.get('bid', 'None'), current_time.isoformat(),\n 
repr(form.errors)\n ), extra=prepare_extra_journal_fields(request.headers))\n return {'status': 'failed', 'errors': form.errors}\n", "sub_path": "openprocurement/auction/worker/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 4062, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "wtforms_json.init", "line_number": 13, "usage_type": "call"}, {"api_name": "wtforms.validators.ValidationError", "line_number": 21, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 31, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 32, "usage_type": "call"}, {"api_name": "wtforms.validators.ValidationError", "line_number": 34, "usage_type": "call"}, {"api_name": "wtforms.validators.ValidationError", "line_number": 38, "usage_type": "call"}, {"api_name": "wtforms.validators.StopValidation", "line_number": 44, "usage_type": "call"}, {"api_name": "wtforms.Form", "line_number": 47, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 48, "usage_type": "call"}, {"api_name": "wtforms.validators.InputRequired", "line_number": 49, "usage_type": "call"}, {"api_name": "wtforms.FloatField", "line_number": 51, "usage_type": "call"}, {"api_name": "wtforms.validators.InputRequired", "line_number": 51, "usage_type": "call"}, {"api_name": "wtforms.validators.ValidationError", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.current_app.bids_form.from_json", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.current_app.bids_form", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.current_app.logger.info", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 82, "usage_type": "name"}, {"api_name": "openprocurement.auction.utils.prepare_extra_journal_fields", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 87, "usage_type": "name"}, {"api_name": "openprocurement.auction.utils.prepare_extra_journal_fields", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 92, "usage_type": 
"call"}, {"api_name": "flask.current_app.logger", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 94, "usage_type": "name"}, {"api_name": "openprocurement.auction.utils.prepare_extra_journal_fields", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "73971414", "text": "import sys, pygame\nfrom pygame.locals import *\n\ndef main():\n screen = pygame.display.set_mode((400,400))\n pygame.display.set_caption('Prueba Pygame')\n\n # crear color\n # para dibujar cualquier figura geometrica hay que poyarnos en \n # line muchos valores\n # 1param,el primer valor es donde se va dibujar la línea\n # 2param,de que color se dibujará la linea\n # 3.param tupla en pos X y Y, SE DIBUJARA UNA LINEA RECTA HASTA ENCONTRAR EL PUNTO FINAL\n \n while True:\n for eventos in pygame.event.get():\n pygame.draw.rect(screen,(255,0,0),(0,0,50,50))\n pygame.draw.line(screen,(255,0,0),(0,0),(400,400))\n pygame.draw.line(screen,(255,0,0),(400,0),(0,400))\n pygame.draw.line(screen,(255,0,0),(200,0),(200,400))\n pygame.draw.line(screen,(255,0,0),(0,200),(400,200))\n if eventos.type == QUIT:\n sys.exit(0)\n pygame.display.update()\n return 0\n\nif __name__ == '__main__':\n pygame.init()\n main()\n\n\"\"\"\nLa ventana es una matriz\nlos cuadrados en pygame son llamados pixeles\nhay que poyarnos de las coordenadas\ntenemos coordenadas en X y en Y\nsiempre empezando de 0\nes importante cuando estemos trabajando con imagenes\n\"\"\"", "sub_path": "pygame/basico/1_movimientos_basicos.py", "file_name": "1_movimientos_basicos.py", "file_ext": "py", "file_size_in_byte": 1244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pygame.display.set_mode", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 21, 
"usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "46259476", "text": "#!/usr/bin/python\n# coding:utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modules.layers import Highway, Initialized_Conv1d\n\nclass Embedding(nn.Module):\n def __init__(self, wemb_dim, cemb_dim, d_model,\n dropout_w=0.1, dropout_c=0.05):\n super().__init__()\n self.conv2d = nn.Conv2d(cemb_dim, d_model, kernel_size = (1,5), padding=0, bias=True)\n nn.init.kaiming_normal_(self.conv2d.weight, nonlinearity='relu')\n self.conv1d = Initialized_Conv1d(wemb_dim + d_model, d_model, bias=False)\n self.high = Highway(2, d_model)\n self.dropout_w = dropout_w\n self.dropout_c = dropout_c\n\n def forward(self, ch_emb, wd_emb, length):\n ch_emb = ch_emb.permute(0, 3, 1, 2)\n ch_emb = F.dropout(ch_emb, p=self.dropout_c, training=self.training)\n ch_emb = self.conv2d(ch_emb)\n ch_emb = F.relu(ch_emb)\n ch_emb, _ = torch.max(ch_emb, dim=3)\n\n wd_emb = F.dropout(wd_emb, p=self.dropout_w, training=self.training)\n wd_emb = wd_emb.transpose(1, 2)\n emb = torch.cat([ch_emb, wd_emb], dim=1)\n emb = self.conv1d(emb)\n emb = self.high(emb)\n return emb\n", "sub_path": "modules/embedding.py", "file_name": "embedding.py", "file_ext": "py", "file_size_in_byte": 1200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "modules.layers.Initialized_Conv1d", "line_number": 14, "usage_type": "call"}, {"api_name": "modules.layers.Highway", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "419120205", "text": "#!C:\\Users\\groso\\AppData\\Local\\Programs\\Python\\Python37\\python.exe\n# -*- coding: utf-8 -*-\n\nimport save_db as sd\nimport pymysql\nimport json\n\n\"\"\"\nModulo que leera la cantidad de censos realizados por fechas, para la realizacion del grafico\n\"\"\"\n\nprint(\"Content-type: text/html; charset=UTF-8\")\nprint(\"\")\n\n# conexion a base de datos\ndb = sd.AnimalitosDb(\"root\", \"\")\n\ntry:\n\n messages = db.read_mascotas()\n msg = {}\n mascotas = []\n 
cantidad = []\n k = 0\n for i in messages:\n\n mascotas.append(messages[k][0])\n cantidad.append(messages[k][1])\n k += 1\n\n msg[0] = mascotas\n msg[1] = cantidad\n print(json.dumps(msg))\nexcept pymysql.Error as e:\n mensaje = 'Error con base de datos: {0} {1} '.format(e.args[0], e.args[1])\n\n", "sub_path": "cgi-bin/read_mascotas.py", "file_name": "read_mascotas.py", "file_ext": "py", "file_size_in_byte": 761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "save_db.AnimalitosDb", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "pymysql.Error", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "572478091", "text": "import argparse\n\nfrom pydataweaver.engines import engine_list\nfrom pydataweaver.lib.defaults import VERSION\n\n# Create the parser\nparser = argparse.ArgumentParser(prog=\"pydataweaver\")\n\n# Add first level arguments\nparser.add_argument(\"-v\", \"--version\", action=\"version\", version=VERSION)\nparser.add_argument(\"-q\",\n \"--quiet\",\n help=\"suppress command-line output\",\n action=\"store_true\")\n\n# ..............................................................\n# subparsers\n# ..............................................................\n\nsubparsers = parser.add_subparsers(help=\"sub-command help\", dest=\"command\")\nhelp_parser = subparsers.add_parser(\"help\", help=\"\")\n\nls_parser = subparsers.add_parser(\"ls\", help=\"display a list all available datasets\")\ncitation_parser = subparsers.add_parser(\"citation\", help=\"view citation\")\nlicense_parser = subparsers.add_parser(\"license\", help=\"view dataset licenses\")\njoin_parser = subparsers.add_parser(\"join\",\n help=\"integrate data using a data package script\")\nupdate_parser = subparsers.add_parser(\n \"update\", help=\"download updated versions of data package scripts\")\nreset_parser = subparsers.add_parser(\"reset\", help=\"reset pydataweaver: deletes scripts\")\n\n# ..............................................................\n# subparsers with Arguments\n# ...............................................................\n\ncitation_parser.add_argument(\"dataset\",\n help=\"pydataweaver citation or dataset citation\",\n nargs=\"?\",\n default=None)\nlicense_parser.add_argument(\"dataset\",\n help=\"pydataweaver license or dataset licenses\",\n nargs=\"?\",\n default=None)\n\nls_parser.add_argument(\"-l\",\n help=\"search datasets with specific license(s)\",\n nargs=\"*\",\n default=False)\nls_parser.add_argument(\"-k\",\n help=\"search datasets with keyword(s)\",\n nargs=\"*\",\n default=False)\nls_parser.add_argument(\"-v\",\n help=\"verbose list of all datasets\",\n nargs=\"*\",\n default=False)\njoin_parser.add_argument(\"--debug\", help=\"run in debug mode\", action=\"store_true\")\njoin_subparsers = join_parser.add_subparsers(help=\"engine-specific help\", dest=\"engine\")\nreset_parser.add_argument(\"scope\", help=\"things to reset: scripts\", choices=[\"scripts\"])\n\nfor engine in engine_list:\n join_engine_parser = join_subparsers.add_parser(engine.abbreviation, help=engine.name)\n join_engine_parser.add_argument(\"dataset\", help=\"file name\")\n\n abbreviations = set(\"h\")\n for arg in engine.required_opts:\n arg_name, help_msg, default = arg[:3]\n potential_abbreviations = [char for char in arg_name if not char in abbreviations]\n if potential_abbreviations:\n abbreviation = potential_abbreviations[0]\n 
abbreviations.add(abbreviation)\n else:\n abbreviation = \"-%s\" % arg_name\n join_engine_parser.add_argument(\n \"--%s\" % arg_name,\n \"-%s\" % abbreviation,\n help=help_msg,\n nargs=\"?\",\n default=default,\n )\n", "sub_path": "pydataweaver/lib/get_opts.py", "file_name": "get_opts.py", "file_ext": "py", "file_size_in_byte": 3384, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "pydataweaver.lib.defaults.VERSION", "line_number": 10, "usage_type": "name"}, {"api_name": "pydataweaver.engines.engine_list", "line_number": 61, "usage_type": "name"}]}
+{"seq_id": "415166346", "text": "from usage import dataset, weights, weights_stochastic\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# separate instances by labels\nclass_0_points = dataset.loc[dataset['label'] == 0].iloc[:, [1, 2]].values\nclass_1_points = dataset.loc[dataset['label'] == 1].iloc[:, [1, 2]].values\n\n# plot data points\nplt.scatter(class_0_points[:, 0], class_0_points[:, 1], c='red', marker='s')\nplt.scatter(class_1_points[:, 0], class_1_points[:, 1], c='blue')\n\n# plot decision boundary - gradient descent\nx = np.arange(min(dataset.iloc[:, 1].values), max(dataset.iloc[:, 1].values), 0.1)\n# we set the input to the sigmoid function equal to 0 and solve for x2\nx2 = (-weights[0]-weights[1]*x)/weights[2]\nplt.plot(x, x2)\n\n# plot decision boundary - stochastic gradient descent\nx2_s = (-weights_stochastic[0]-weights_stochastic[1]*x)/weights_stochastic[2]\nplt.plot(x, x2_s)\n\nplt.show()", "sub_path": "classification/logistic_regression/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "usage.dataset.loc", "line_number": 6, "usage_type": "attribute"}, {"api_name": "usage.dataset", "line_number": 6, "usage_type": "name"}, {"api_name": "usage.dataset.loc", "line_number": 7, "usage_type": "attribute"}, {"api_name": "usage.dataset", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 14, "usage_type": "call"}, {"api_name": "usage.dataset.iloc", "line_number": 14, "usage_type": "attribute"}, {"api_name": "usage.dataset", "line_number": 14, "usage_type": "name"}, {"api_name": "usage.weights", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "usage.weights_stochastic", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "75005324", "text": "from flask_mail import Mail, Message\nimport os\nfrom flask import render_template\n\nmail_settings = {\n \"MAIL_SERVER\": 'smtp.gmail.com',\n \"MAIL_PORT\": 587,\n
\"MAIL_USE_TLS\": True,\n \"MAIL_USERNAME\": \"amansinghbawa@gmail.com\",\n \"MAIL_PASSWORD\": \"Bawa7800\"\n}\n\n\nclass EmailService:\n def __init__(self, app):\n self.app = app\n app.config.update(mail_settings)\n self.mail = Mail(app)\n\n def send_mail(self, recipient, name, message):\n msg = Message(subject=f\"Query from {recipient}\",\n sender=self.app.config.get(\"MAIL_USERNAME\"),\n recipients=[self.app.config.get(\"MAIL_USERNAME\"), ], # replace with your email for testing\n body=message)\n self.mail.send(msg)\n\n msg = Message(subject=\"Baba iron and cement store - Query submitted\",\n sender=self.app.config.get(\"MAIL_USERNAME\"),\n recipients=[recipient, ] # replace with your email for testing\n # body=f\"Hi,\\nYour query is submitted we will contact you soon\\n\\nDetails:\\n{message}\\n\\nThank you,\\nBaba Iron and cement store\\n\"\n )\n msg.html = render_template(\"mail/contact_us_customer.html\", **{\"name\": name,\"message\": message})\n self.mail.send(msg)\n", "sub_path": "utils/email.py", "file_name": "email.py", "file_ext": "py", "file_size_in_byte": 1306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask_mail.Mail", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_mail.Message", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_mail.Message", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "605100930", "text": "import requests\nimport dryscrape\nfrom bs4 import BeautifulSoup\nimport json\nimport re\nimport sys\n\nif 'linux' in sys.platform:\n\tdryscrape.start_xvfb()\n\ndef __is_info(p):\n\tpattern = re.compile(r'(.*\\n)?(.+:.+\\n)+')\n\tr = pattern.match(p)\n\treturn True if r is not None else False\n\ndef __extract_info(p_all):\n\tresult = []\n\tfor p in p_all:\n\t\tif __is_info(p.text) is True:\n\t\t\tresult.append(p)\n\treturn result\n\ndef robot_vegetarianStore(timeStamp = '2016-03-01'):\n\tWEBSITE = 'http://clnote.tw/'\n\tresults = list()\n\treq = requests.get(WEBSITE)\n\tsoup = BeautifulSoup(req.text, 'html.parser')\n\tarticles = soup.find('div',{'id':'main', 'class':'site-main'}).find_all('article')\n\n\n\tAPI_KEY = ''\n\twith open('/home/ubuntu/documents/robot-vegetarian/geocoding_apikey.txt','r') as rfile:\n\t\tAPI_KEY = rfile.read().split('\\n')[0]\n\n\tfor idx, article in enumerate(articles):\n\t\ttitle = article.h1.text.encode('latin1').decode('utf8')\n\t\tpublishedTime = article.span.a.find('time',{'class':'entry-date published'}).text\n\t\tcontent_url = article.h1.a['href']\n\n\t\tif publishedTime <= timeStamp:\n\t\t\tcontinue\n\n\t\tprint (\"drive to article content \"+content_url)\n\t\tsession = dryscrape.Session(base_url = 'http://google.com')\n\t\tsession.visit(content_url)\n\t\tcontent = BeautifulSoup(session.body(), 'html.parser')\n\t\tp_all = content.find('div',{'class':'entry-content'}).find_all('p')\n\n\t\tinfos = __extract_info(p_all)\n\n\t\tinfo = infos[0] # Extract one for example if there are many infos\n\t\t# Extract info\n\t\tstoreName = info.strong.text\n\t\taddress = info.text.split('\\n')[1].split(':')[1][:-4]\n\t\topeningTime = info.text.split('\\n')[2].split(':')[1]\n\t\tphone = info.text.split('\\n')[3].split(':')[1]\n\t\twebsite = info.text.split('\\n')[4].split(':')[1]\n\n\n\t\tgeocoding_url = 'https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(address,API_KEY)\n\t\tgeocode = requests.get(geocoding_url)\n\t\tjsonGeo = 
json.loads(geocode.text)\n\t\tlng = jsonGeo['results'][0]['geometry']['location']['lng']\n\t\tlat = jsonGeo['results'][0]['geometry']['location']['lat']\n\n\t\tprint (\"{0}\\n{1}\\n{2}\\n{3}\\n{4},{5}\\n\\n\".format(title,publishedTime,content_url,address,lng,lat))\n\t\tresults.append((title,storeName,publishedTime,info.text,lng,lat,content_url,website))\n\t\t\n\t\tif idx == 1:\n\t\t\tbreak\t\t\n\treturn results\n\n\nif __name__ == '__main__':\n\ttimeStamp = ''\n\twith open ('/home/ubuntu/documents/robot-vegetarian/timeStamp.txt','r') as rfile:\n\t\ttimeStamp = rfile.read().split('\\n')[0]\n\tprint (timeStamp)\n\tresults = robot_vegetarianStore()\n\tprint (results[0])\n\tif timeStamp < results[0][2]:\n\t\tprint ('New found store')\n\n\n\n", "sub_path": "robot.py", "file_name": "robot.py", "file_ext": "py", "file_size_in_byte": 2572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.platform", "line_number": 8, "usage_type": "attribute"}, {"api_name": "dryscrape.start_xvfb", "line_number": 9, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 27, "usage_type": "call"}, {"api_name": "dryscrape.Session", "line_number": 44, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 61, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "221341546", "text": "# def create_dictionary():\n# new_dictionary = {\"day\": \"between sunrise and sunset\",\n# \"night\": \"when the moon is out\"\n# }\n# return new_dictionary\n\n\ndef create_patient():\n new_patient = {\n \"first name\":\"Smith\",\n \"age\":60,\n \"married\":False,\n \"test_results\":[0,16,23,2.3]\n }\n #test_one = new_patient[\"test results\"][1]\n #print(test_one)\n return new_patient\n\ndef save_Json(patient):\n import json\n filename = \"patient_data.txt\"\n out_file = open(filename,'w')\n json.dump(patient,out_file)\n out_file.close()\n\n# def read_dictionary(my_dict):\n# my_key = \"day\"\n# y = my_dict[my_key]\n# print(\"The definition of {} is {}\".format(my_key,y))\n# return y\n#\n# def add_info(my_dict):\n# my_dict[\"lunch\"] = \"The meal I eat in the middle of the day\"\n# my_dict[\"day\"] = \"when I am not sleeping\"\n# return my_dict\n\n\nif __name__ == \"__main__\":\n # x = create_dictionary()\n # read_dictionary(x)\n # print(x)\n # x = add_info(x)\n # print(x)\n # print(type(x))\n # z = x.get(\"dinner\")\n # print(z)\n x = create_patient()\n save_Json(x)", "sub_path": "dictionary.py", "file_name": "dictionary.py", "file_ext": "py", "file_size_in_byte": 1149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "json.dump", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "83275334", "text": "\n#from __future__ import division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport caffe\nimport progressbar\nimport h5py\nfrom sklearn import preprocessing\n\nimport ipdb\n\nbar = progressbar.ProgressBar()\ncaffe_root = './caffe/' # this file should be run from {caffe_root}/examples (otherwise change this line)\nsys.path.insert(0, caffe_root + 'python')\ncaffe.set_device(0) # if we have multiple GPUs, pick the first one\ncaffe.set_mode_gpu()\n\nmodel_def = 
'./deploy_single.prototxt'\nmodel_weights = './training/single_iter_20000.caffemodel'\n\nnet = caffe.Net(model_def, # defines the structure of the model\n model_weights, # contains the trained weights\n caffe.TEST) # use test mode (e.g., don't perform dropout)\n\ntransformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\ntransformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension\ntransformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR\n\n\n\ndataroot = \"/mnt/hdd/dataset/audioset/eval_spectrogram_25ms_6frame/\"\nwith open('./data_val_single.txt') as f:\n content = f.readlines()\nprobs = np.zeros((len(content),527)) \ni = 0\nlabel = []\nfor row in bar(content):\n\timg_file = row.split(' ')[0]\n\timage = caffe.io.load_image(dataroot + img_file)#image.shape = 50 96 3\n\ttransformed_image = transformer.preprocess('data', image) #trainsformed_image = 3 50 96\n\tnet.blobs['data'].data[...] = transformed_image\n\tnet.forward()\n\toutput_prob = net.blobs['score'].data[0] #(527,)\n\tprobs[i] = output_prob\n\tlabel.append(int(row.split(' ')[1]))\n\ti +=1\n\n#with h5py.File('./label_valf6.h5', 'r') as f:\n#\tlabel = f['rabel'][()]\t#label.shape (1027365, 527)\n#label = np.load('./label_valf6_short.npy','r')\n\n#blabel = MultiLabelBinarizer().fit_transform(label) \nlb = preprocessing.LabelBinarizer()\nlb.fit(label)\nblabel = lb.transform(label)\nprobi = (probs >= 0.5).astype(float)\nacc = (blabel == probi).astype(float)\n\nfor i in xrange(probi.shape[1]):\n\tprint(' %d th acc is %f\\n'%(i, sum(acc[:,i]) / acc.shape[0]) )\n\nprint(' total acc is %f\\n'%(sum(sum(acc)) / np.prod(acc.shape))) \n", "sub_path": "inference_single.py", "file_name": "inference_single.py", "file_ext": "py", "file_size_in_byte": 2140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "progressbar.ProgressBar", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "caffe.set_device", "line_number": 18, "usage_type": "call"}, {"api_name": "caffe.set_mode_gpu", "line_number": 19, "usage_type": "call"}, {"api_name": "caffe.Net", "line_number": 24, "usage_type": "call"}, {"api_name": "caffe.TEST", "line_number": 26, "usage_type": "attribute"}, {"api_name": "caffe.io.Transformer", "line_number": 28, "usage_type": "call"}, {"api_name": "caffe.io", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "caffe.io.load_image", "line_number": 42, "usage_type": "call"}, {"api_name": "caffe.io", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "351619058", "text": "from aiohttp import web\nimport psycopg2.errors\nfrom urllib.parse import urlencode\n\nfrom .config import db_block, web_routes\n\n@web_routes.post('/action/xuanke/add')\nasync def xuanke_add(request):\n params = await request.post()\n xstu_sn = params.get(\"stu_sn\")\n plan_sn = params.get(\"plan_sn\")\n plan_xueqi = params.get(\"plan_xueqi\")\n \n with db_block() as db:\n db.execute(\"\"\"\n INSERT INTO xuanke (xstu_sn, plan_sn) \n VALUES ( %(xstu_sn)s, %(plan_sn)s)\n 
\"\"\", dict(xstu_sn=xstu_sn, plan_sn = plan_sn))\n \n\n return web.HTTPFound(location=\"/xuanke\")\n\n@web_routes.post('/action/xuanke/delete/{xstu_sn}/{plan_sn}')\nasync def action_xuanke_delete(request):\n \n xstu_sn = request.match_info.get(\"xstu_sn\")\n plan_sn = request.match_info.get(\"plan_sn\")\n \n with db_block() as db:\n db.execute(\"\"\"\n DELETE FROM xuanke\n WHERE xstu_sn = %(xstu_sn)s AND plan_sn = %(plan_sn)s\n \"\"\", dict(xstu_sn=xstu_sn, plan_sn = plan_sn))\n return web.HTTPFound(location=\"/xuanke\")\n ", "sub_path": "serv/xuanke_actions.py", "file_name": "xuanke_actions.py", "file_ext": "py", "file_size_in_byte": 1063, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "config.db_block", "line_number": 14, "usage_type": "call"}, {"api_name": "aiohttp.web.HTTPFound", "line_number": 21, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 21, "usage_type": "name"}, {"api_name": "config.web_routes.post", "line_number": 7, "usage_type": "call"}, {"api_name": "config.web_routes", "line_number": 7, "usage_type": "name"}, {"api_name": "config.db_block", "line_number": 29, "usage_type": "call"}, {"api_name": "aiohttp.web.HTTPFound", "line_number": 34, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 34, "usage_type": "name"}, {"api_name": "config.web_routes.post", "line_number": 23, "usage_type": "call"}, {"api_name": "config.web_routes", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "11066747", "text": "# -*- coding: UTF-8 -*- \r\n#!/usr/bin/env python\r\n# \r\n# Copyright 2010- Hui Zhang\r\n# E-mail: hui.zh012@gmail.com\r\n#\r\n# Distributed under the terms of the GPL (GNU Public License)\r\n#\r\n# you can redistribute it and/or modify it under the terms of the GNU \r\n# General Public License as published by the Free Software Foundation;\r\n# either version 2 of the License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r\n\r\nfrom datetime import date\r\nfrom zlib import decompress\r\nfrom xy.core import bytestr, utils\r\n\r\ndef d2n(d=None):\r\n if not d: d = date.today()\r\n return d.year*10000 + d.month*100 + d.day\r\n\r\ndef n2d(n):\r\n y, d = divmod(n, 10000)\r\n m, d = divmod(d, 100)\r\n return date(y, m, d)\r\n\r\ndef m2c(market):\r\n return ['sz', 'sh'].index(market.lower())\r\n\r\ndef c2m(mcode):\r\n return ['sz', 'sh'][mcode]\r\n\r\nResponseMagic = '\\xb1\\xcb\\x74\\x00'\r\n\r\nclass Cmds:\r\n #commands\r\n HeartBeat = '\\x04\\x00'\r\n StockCount = '\\x4e\\x04'\r\n StockList = '\\x50\\x04'\r\n FileInfo = '\\xc5\\x02'\r\n GetFile = '\\xb9\\x06'\r\n Tran_DD = '\\xc5\\x0f' ## 分笔明细? deal divided? 
i.e. tick-by-tick trade detail\r\n    Tran_TD = '\\x1d\\x05' ## time-sliced (intraday) detail\r\n KData = '\\x2c\\x05'\r\n Quote = '\\x3e\\x05'\r\n \r\n #magic numbers\r\n\r\n_protocol_reg = {}\r\n\r\ndef protocol(klass):\r\n _protocol_reg[klass.cmd] = klass\r\n return klass\r\n\r\ndef build_request(seqno, cmd, *args, **kwargs):\r\n return _protocol_reg[cmd].build_request(seqno, *args, **kwargs)\r\n\r\ndef parse_response(cmd, data):\r\n return _protocol_reg[cmd].parse_response(data)\r\n\r\ndef merge_requests(cmd, requests):\r\n return _protocol_reg[cmd]._merge_requests(requests)\r\n\r\ndef split_response(stream):\r\n resps = []\r\n while 1:\r\n bs = bytestr(stream)\r\n if len(stream) < 16: break # don't even have a complete header\r\n respmagic, compresstype, seqno, packtype, cmd, datalen, origdatalen = bs.read_struct(\"=4sBIB2sHH\")\r\n if respmagic != ResponseMagic:\r\n try:\r\n stream = stream[stream.index(ResponseMagic):]\r\n except:\r\n stream = ''\r\n continue\r\n \r\n bs.cut()\r\n if len(bs.buf) < datalen: break\r\n stream = bs.buf[datalen:]\r\n \r\n data = bs.read_s(datalen)\r\n if compresstype==0x1c:\r\n try:\r\n data = decompress(data)\r\n except:\r\n data = ''\r\n\r\n if origdatalen != len(data):\r\n data = ''\r\n \r\n resps.append((seqno, cmd, data)) \r\n return tuple(resps), stream\r\n \r\nclass BaseProtocol(object):\r\n #cmd = None #must be defined by subclass\r\n \r\n @classmethod\r\n def build_request(cls, seqno, *args, **kwargs):\r\n '''\r\n char zip; // always 0x0c: data-uncompressed\r\n uint seq_id; // seq_id for the same kind of command.\r\n char packet_type; // 00: response. 1,2,3... request count\r\n ushort len; // data length\r\n ushort len1; // data length repeated\r\n #ushort cmd; // b4 bf: minute bars.. b5 bf single trades\r\n '''\r\n bs = bytestr()\r\n bs.write_s(cls.cmd)\r\n cls._build_request(bs, *args, **kwargs)\r\n length = len(bs.buf)\r\n bs.pos = 0\r\n bs.write_struct(\"=BIBHH\", 0x0c, seqno, 1, length, length)\r\n return bs.buf\r\n \r\n @classmethod\r\n def parse_response(cls, resp):\r\n bs = bytestr(resp)\r\n return cls._parse_response(bs)\r\n \r\n @classmethod\r\n def _build_request(cls, bs, *args, **kwargs):\r\n raise NotImplementedError('request builder for %s' %utils.bth(cls.cmd))\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n return bs.buf\r\n @classmethod\r\n def _merge_requests(cls, requests):\r\n return None\r\n\r\n@protocol\r\nclass HeartBeat(BaseProtocol):\r\n cmd = Cmds.HeartBeat\r\n @classmethod\r\n def _build_request(cls, bs):\r\n pass\r\n @classmethod\r\n def _merge_requests(cls, requests):\r\n # compress the heart beat package\r\n return ((), {}), tuple([r[0] for r in requests])\r\n\r\n@protocol\r\nclass StockCount(BaseProtocol):\r\n cmd = Cmds.StockCount\r\n @classmethod\r\n def _build_request(cls, bs, market):\r\n bs.write_struct('=HI', m2c(market), d2n())\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n return bs.read_struct('=H')\r\n\r\n@protocol\r\nclass StockList(BaseProtocol):\r\n cmd = Cmds.StockList\r\n @classmethod\r\n def _build_request(cls, bs, market, begin):\r\n bs.write_struct('=HH', m2c(market), begin)\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n count = bs.read_struct('=H')\r\n stocks = []\r\n for i in range(count):\r\n code, unit, name, unknown, dim, price, basemap, gbbqmap = bs.read_struct('=6sH8sIBfHH')\r\n name = name.strip('\\x00').decode('gbk')\r\n stocks.append((code, (name, price, basemap, gbbqmap)))\r\n return stocks\r\n\r\n@protocol\r\nclass FileInfo(BaseProtocol):\r\n cmd = Cmds.FileInfo\r\n @classmethod\r\n def _build_request(cls, bs, filename):\r\n bs.write_s(filename)\r\n
bs.write_s('\\x00' * (40-len(filename)))\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n filelen, packtype = bs.read_struct('=IB')\r\n hexmd5 = bs.read_s()[:-1]\r\n return (filelen, hexmd5)\r\n \r\n@protocol\r\nclass FileDown(BaseProtocol):\r\n cmd = Cmds.GetFile\r\n @classmethod\r\n def _build_request(cls, bs, filename, begin, size):\r\n bs.write_struct('=II', begin, size)\r\n bs.write_s(filename)\r\n bs.write_s('\\x00' * (100-len(filename)))\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n length = bs.read_struct('=I')\r\n data = bs.read_s()\r\n if len(data) == length:\r\n return data\r\n\r\n@protocol\r\nclass Quote(BaseProtocol):\r\n cmd = Cmds.Quote\r\n @classmethod\r\n def _build_request(cls, bs, *tickers):\r\n bs.write_s('\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\r\n bs.write_struct('=H', len(tickers))\r\n for market, code in tickers:\r\n bs.write_struct('=B6s', market, code)\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n magic = bs.read_s(2)\r\n # if magic != '\\xb1\\xcb':\r\n # print 'strange: response bs magic is %s, instead of \"B1 CB\"' % ' '.join(map(lambda x:hex(ord(x)), magic))\r\n records = []\r\n num = bs.read_struct('=H')\r\n unknowns = []\r\n for i in range(num):\r\n market, code, liveness = bs.read_struct('=B6sH')\r\n market = 'sh' if market==1 else 'sz'\r\n price = bs.read_varint()\r\n c, o, h, l = map(lambda v: v+price, bs.read_varint(4))\r\n unknowns.extend(bs.read_varint(2))\r\n vol, lastbid_vol = bs.read_varint(2)\r\n turnover = bs.read_struct('=f')\r\n inner_vol, outer_vol = bs.read_varint(2)\r\n unknowns.extend(bs.read_varint(2))\r\n bid5 = bs.read_varint(20)\r\n buy5 = [(bid5[i*4], bid5[i*4+2]) for i in range(5)]\r\n sell5 = [(bid5[i*4+1], bid5[i*4+3]) for i in range(5)]\r\n unknowns.extend(bs.read_struct('=HB'))\r\n unknowns.extend(bs.read_varint(3))\r\n unknowns.extend(bs.read_struct('=HH'))\r\n \r\n # data = dictattr()\r\n # data.market = market\r\n # data.code = code\r\n # data.liveness = liveness\r\n # data.prices = (c, o, h, l, price)\r\n # data.volums = (vol, lastbid_vol, inner_vol, outer_vol)\r\n # data.turnover = turnover\r\n # data.bid5 = (buy5, sell5)\r\n # data.unknowns = unknowns\r\n # records.append(data)\r\n records.append((code, market, price, turnover, vol,\r\n (c, o, h, l), \r\n (lastbid_vol, inner_vol, outer_vol),\r\n buy5, sell5,\r\n ))\r\n return tuple(records)\r\n", "sub_path": "xyproj/src/xyearn/tdx/hqproto.py", "file_name": "hqproto.py", "file_ext": "py", "file_size_in_byte": 8481, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "datetime.date.today", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 33, "usage_type": "call"}, {"api_name": "xy.core.bytestr", "line_number": 75, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 92, "usage_type": "call"}, {"api_name": "xy.core.bytestr", "line_number": 115, "usage_type": "call"}, {"api_name": "xy.core.bytestr", "line_number": 125, "usage_type": "call"}, {"api_name": "xy.core.utils.bth", "line_number": 130, "usage_type": "call"}, {"api_name": "xy.core.utils", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "631642884", "text": "import threading\nimport random\nimport logging\n\nlogging.basicConfig(level=__debug__,format='[%(levelname)s] (%(threadName)-10s) %(message)s')\nexitFlag = False\nlst = [\"apple\",\"mango\",\"grapes\",\"orange\",\"pineapple\"]\ndef 
print_list():\n global exitFlag\n if not exitFlag:\n with queue:\n if not len(lst) == 0:\n name = lst.pop(0)\n logging.debug(\"printing name %s\",name)\n # logging.debug(\"\")\n # print name\n else:\n logging.debug(\"setting flag exitFlag to True\")\n exitFlag = True\n\nqueue = threading.Lock()\n\nwhile not exitFlag:\n for i in range(3):\n logging.debug(\"Starting thread %d\",i)\n t = threading.Thread(name=\"thread-\"+str(i),target=print_list)\n t.start()\n\nmain_thread = threading.currentThread()\nif exitFlag:\n for i in threading.enumerate():\n if i is not main_thread:\n i.join()\n logging.debug(\"exited\")", "sub_path": "python/threads/thread_sync.py", "file_name": "thread_sync.py", "file_ext": "py", "file_size_in_byte": 971, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.basicConfig", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 18, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 25, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 26, "usage_type": "call"}, {"api_name": "threading.currentThread", "line_number": 29, "usage_type": "call"}, {"api_name": "threading.enumerate", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "371538345", "text": "\"\"\"\nLoad and prepare text documents for machine learning\nThis file has been modified by Muhammad Mahir Latif\nOriginally written by Patrick Coady (pcoady@alum.mit.edu)\n\n1. load_book(): Return list of words and word counter for document.\n Also basic document statistics.\n2. build_dict(): Build word -> integer dictionary\n3. doc2num(): Transform document word list to integer numpy array\n4. build_word_array(): Convenience function that runs 3 functions\n above to build an integer numpy word array from a file.\n5. save_word_array(): Save a word array and dictionary to file for\n future fast loading.\n6. load_word_array(): Load previously saved word array and dictionary.\n\"\"\"\nimport collections\nimport numpy as np\nimport pickle\nimport re\n\n\ndef load_books(filenames):\n \"\"\"\n Read files and count number of occurrences of each unique word in the\n file. Also return the document as a list of words in the same order\n as the original document.\n Notes:\n The following punctuation are treated as separate words: ;:-()&.,!?'\"\n All letters changed to lower-case\n Contractions (e.g. don't, we'll) returned as-is (i.e. ' treated as\n letter). 
This could cause problems for text that uses single\n quotes (') for other purposes\n :param filenames: list of filenames (including path, if needed)\n :return: tuple:\n 0) collections.Counter() with unique word counts\n 1) list with document words in order\n 2) tuples: (number of lines read, number of words read)\n \"\"\"\n word_counter = collections.Counter()\n word_list = []\n num_lines, num_words = (0, 0)\n for filename in filenames:\n with open(filename, 'r') as f:\n for line in f.readlines():\n # TODO: check reg-exp below\n words = re.findall(\"[\\\\w']+|[;:\\-\\(\\)&.,!?\\\"]\", line.lower().strip('\\n'))\n word_counter.update(words)\n word_list.extend(words)\n num_lines += 1\n num_words += len(words)\n\n return word_counter, word_list, num_lines, num_words\n\n\ndef build_dict(word_counter, vocab_size=50000):\n \"\"\"\n Builds dictionary and reverse dictionary of most common words in word_counter.\n Number of words to include in the dictionary is set by dict_size.\n :param word_counter: collections.Counter() with keys = word and values = number of\n occurrences. Case sensitive.\n :param vocab_size: Upper limit on vocabulary size. If number of unique words\n greater than vocab_size, will take most commonly occurring words\n :return: tuple:\n 0) dictionary of words to integers (most common word is 0, next most\n common is 1, ...)\n 1) reverse dictionary of integers to words (same integer to word mapping as\n \"forward dictionary\"\n \"\"\"\n top_words = word_counter.most_common(vocab_size)\n top_words.sort(key=lambda t: -t[1])\n dictionary = dict()\n for idx, word in enumerate(map(lambda t: t[0], top_words)):\n dictionary[word] = idx\n\n return dictionary\n\n\ndef doc2num(word_list, dictionary):\n \"\"\"\n Maps list of words to np.array of integers using key/value pairs in\n dictionary. Words not found in dictionary will be mapped to len(dictionary)\n (i.e. 1 larger than biggest value in dictionary).\n :param word_list: List of words\n :param dictionary: Dictionary mapping words to integers\n :return: return numpy array of type np.int32 corresponding to integer mapping\n of words, with words not appearing in dictionary being mapped to\n largest integer in dictionary (i.e. len(dictionary)-1)\n \"\"\"\n word_array = []\n unknown_val = len(dictionary)\n for word in word_list:\n word_array.append(dictionary.get(word, unknown_val))\n\n return np.array(word_array, dtype=np.int32)\n\n\ndef build_word_array(filenames, vocab_size):\n \"\"\"\n Convenience function that runs: 1) load_books(), 2) build_dict(),\n and doc2num() in sequence and returns integer word array of documents,\n a dictionary and basic document statistics.\n :param filenames: list of file names (including path, if needed)\n :param vocab_size: Upper limit on vocabulary size. If number of unique words\n greater than vocab_size, will take most commonly occurring words\n :param gutenberg: Set flag to True for .txt files from Project Gutenberg.\n Loader will then skip Gutenberg preamble and license text at end of\n file.\n :return: 3-tuple:\n 0) numpy array of type np.int32 corresponding to integer mapping\n of words in documents. Words not in dictionary are mapped to\n largest integer in dictionary (i.e. 
len(dictionary)-1)\n 1) dictionary: word -> int dictionary\n 2) 2-tuple: (number of lines read, number of words read)\n Note: no integration coverage\n \"\"\"\n word_counter, word_list, num_lines, num_words = load_books(filenames)\n dictionary = build_dict(word_counter, vocab_size)\n word_array = doc2num(word_list, dictionary)\n num_unique_words = len(word_counter.keys())\n return word_array, dictionary, num_lines, num_words, num_unique_words\n\n\ndef save_word_array(filename, word_array, dictionary,num_lines, num_words, num_unique_words):\n \"\"\"\n Save word array and dictionary for faster load.\n :param filename: Filename, with path. Saved as python pickle file.\n :param word_array: Numpy integer word array of document\n :param dictionary: Word -> int document\n :return: None\n Note: no unit test coverage\n \"\"\"\n word_array_dict = dict()\n word_array_dict['word_array'] = word_array\n word_array_dict['dictionary'] = dictionary\n word_array_dict['num_lines'] = num_lines\n word_array_dict['num_words'] = num_words\n word_array_dict['num_unique_words'] = num_unique_words\n\n with open(filename + '.p', 'wb') as f:\n pickle.dump(word_array_dict, f)\n\n\ndef load_word_array(filename):\n \"\"\"\n Load integer word array and dictionary saved by save_word_array()\n :param filename: Same filename used with save_word_array()\n :return: 2-tuple\n 0) Numpy word array of integers (document representation)\n 1) Word -> int dictionary\n Note: no unit test coverage\n \"\"\"\n with open(filename + '.p', 'rb') as f:\n word_array_dict = pickle.load(f)\n\n return word_array_dict['word_array'], word_array_dict['dictionary'], word_array_dict['num_lines'], \\\n word_array_dict['num_words'], word_array_dict['num_unique_words']\n", "sub_path": "src/docload.py", "file_name": "docload.py", "file_ext": "py", "file_size_in_byte": 6501, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "collections.Counter", "line_number": 39, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 140, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "30557785", "text": "from django.template.loader import render_to_string\n\nfrom django.http import HttpResponseRedirect\nfrom django.http import JsonResponse\nfrom django.core import urlresolvers\n\nfrom django.db.models import Q\n\nfrom .forms import GuestForm, RsvpFormSet\nfrom .models import Guest\n\ndef get_guest(request):\n if 'guest_id' not in request.session:\n return None\n return Guest.filter(id=request.session['guest_id']).first()\n\ndef rsvp_form(request):\n guest = Guest.objects.get(id=request.session['guest_id'])\n if guest.group:\n guests = guest.group.guest_set.filter(~Q(id=guest.id)).order_by(\"last_name\", \"first_name\")\n guests = [guest] + list(guests)\n else:\n guests = [guest]\n\n if request.method == 'POST':\n formset = RsvpFormSet(request.POST)\n # Only allow submitter to change allowed guests\n for form in formset:\n form.fields['guest'].queryset = guests\n if form.is_valid():\n for form in formset:\n form.full_clean()\n guest = form.clean()['guest']\n guest.attending = form.clean()['attending']\n guest.email = form.clean()['email']\n guest.save()\n\n return JsonResponse({\n 'redirect': '/thanks'\n })\n else:\n 
for form in formset:\n form.initial = {'guest': Guest.objects.get(id=form['guest'].value())}\n else:\n formset = RsvpFormSet(initial=[{'guest': g, 'email': g.email, 'attending': g.attending} for g in guests])\n\n\n\n return JsonResponse({\n 'content': render_to_string('rsvp/rsvp_form.html', {'rsvp_formset': formset, 'action': urlresolvers.reverse('rsvp-form')}, request=request)\n })\n\ndef guest_form(request):\n if 'guest_id' in request.session:\n return HttpResponseRedirect(urlresolvers.reverse('rsvp-form'))\n\n if request.method == 'POST':\n form = GuestForm(request.POST)\n if form.is_valid():\n guests = Guest.objects.filter(\n first_name__iexact=form.cleaned_data['first_name'],\n last_name__iexact=form.cleaned_data['last_name'])\n\n if len(guests) < 1:\n form.add_error(\"__all__\", \"No guest matches name, please ensure it is spelt the same as your invitation\")\n else:\n guest = guests.first()\n request.session['guest_id'] = guest.id\n\n if form.is_valid():\n return HttpResponseRedirect(urlresolvers.reverse('rsvp-form'))\n else:\n return JsonResponse({\n 'content': render_to_string('rsvp/form.html', {'form': form, 'action': urlresolvers.reverse('guest-form')}, request=request)\n })\n\n form = GuestForm()\n\n return JsonResponse({\n 'content': render_to_string('rsvp/form.html', {'form': form, 'action': urlresolvers.reverse('guest-form')}, request=request)\n })\n", "sub_path": "api/wedding_api/rsvp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "models.Guest.filter", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Guest", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Guest.objects.get", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Guest.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Guest", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 20, "usage_type": "call"}, {"api_name": "forms.RsvpFormSet", "line_number": 26, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Guest.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Guest.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Guest", "line_number": 43, "usage_type": "name"}, {"api_name": "forms.RsvpFormSet", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 50, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.core.urlresolvers", "line_number": 50, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 55, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "django.core.urlresolvers", "line_number": 55, "usage_type": "name"}, {"api_name": "forms.GuestForm", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Guest.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Guest.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Guest", "line_number": 60, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 71, 
"usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 71, "usage_type": "call"}, {"api_name": "django.core.urlresolvers", "line_number": 71, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 73, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 74, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 74, "usage_type": "call"}, {"api_name": "django.core.urlresolvers", "line_number": 74, "usage_type": "name"}, {"api_name": "forms.GuestForm", "line_number": 77, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 79, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 80, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 80, "usage_type": "call"}, {"api_name": "django.core.urlresolvers", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "198393345", "text": "import tensorflow as tf\nimport numpy as np\nimport itertools\n\nfrom tensorflow.python.training import moving_averages\n\n\n######## LAYERS ########\ndef dense(input_data, output_dim, name):\n input_dim = input_data.get_shape().as_list()[-1]\n \"\"\"NN fully connected layer.\"\"\"\n with tf.variable_scope(name): \n W = tf.get_variable(\"W\", [input_dim, output_dim],\n initializer=tf.contrib.layers.xavier_initializer()) \n b = tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0))\n return tf.matmul(input_data, W, name=\"matmul\") + b\n\ndef batch_normalization(input_data, is_train, name='BatchNormalization'):\n \"\"\"NN batch normalization layer.\"\"\"\n x = input_data\n BN_DECAY = 0.9997\n BN_EPSILON = 0.001\n x_shape = x.get_shape()\n params_shape = x_shape[-1:]\n with tf.variable_scope(name):\n axis = list(range(len(x_shape) - 1))\n beta = tf.get_variable('beta',\n params_shape,\n initializer=tf.zeros_initializer)\n gamma = tf.get_variable('gamma',\n params_shape,\n initializer=tf.ones_initializer)\n moving_mean = tf.get_variable('moving_mean',\n params_shape,\n initializer=tf.zeros_initializer,\n trainable=False)\n moving_variance = tf.get_variable('moving_variance',\n params_shape,\n initializer=tf.ones_initializer,\n trainable=False)\n\n # These ops will only be preformed when training.\n mean, variance = tf.nn.moments(x, axis)\n update_moving_mean = moving_averages.assign_moving_average(moving_mean,\n mean, BN_DECAY)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, BN_DECAY)\n tf.add_to_collection('update_ops', update_moving_mean)\n tf.add_to_collection('update_ops', update_moving_variance)\n\n mean, variance = tf.cond(\n is_train, lambda: (mean, variance),\n lambda: (moving_mean, moving_variance))\n\n x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)\n\n return x\n\ndef dense_relu_batch(input_data, N, H, is_train, name):\n \"\"\"NN dense relu batch layer.\"\"\"\n with tf.variable_scope(name):\n affine = dense(input_data, N, H, \"dense\")\n bn = batch_normalization(affine, is_train, \"batch\")\n return tf.nn.relu(bn, \"relu\")\n\ndef dense_relu(input_data, N, H, name):\n \"\"\"NN dense relu layer\"\"\"\n with tf.variable_scope(name):\n affine = dense(input_data, N, H, \"dense\")\n return tf.nn.relu(affine, \"relu\")\n\ndef multi_dense_relu_batch(input_data, N, Hs, is_train, name):\n \"\"\"NN multi dense relu batch layer.\"\"\"\n with tf.variable_scope(name):\n output = input_data\n for i, H in 
enumerate(itertools.izip([N] + Hs, Hs)):\n output = dense_relu_batch(output, H[0], H[1], is_train, \"fc_\" + str(i))\n return output\n\ndef conv2d(input_data, filter_size, stride, name):\n \"\"\"NN 2D convolutional layer.\"\"\"\n with tf.variable_scope(name):\n W = tf.get_variable(\"W\", filter_size,\n initializer=tf.contrib.layers.xavier_initializer_conv2d())\n conv = tf.nn.conv2d(input_data, W,\n [1, stride, stride, 1], \"SAME\", name=\"conv2d\")\n biases = tf.get_variable(\"b\", shape=filter_size[-1])\n bias = tf.reshape(tf.nn.bias_add(conv, biases),\n conv.get_shape().as_list())\n\n return bias\n\ndef conv_words(input_data, window_size, num_filters, name):\n \"\"\"NN convolution over window_size words across entire embedding dimension\"\"\"\n batch_size, sentence_length, embedding_dim = input_data.get_shape().as_list()\n input_data = tf.reshape(input_data,\n [batch_size, sentence_length, embedding_dim, 1])\n with tf.variable_scope(name):\n filter_size = [window_size, embedding_dim, 1, num_filters]\n W = tf.get_variable(\"W\", filter_size,\n initializer=tf.contrib.layers.xavier_initializer())\n conv = tf.nn.conv2d(input_data, W, [1,1,1,1], padding='VALID')\n biases = tf.get_variable(\"b\", shape=filter_size[-1])\n bias = tf.reshape(tf.nn.bias_add(conv, biases),\n conv.get_shape().as_list())\n return bias\n\ndef maxpool2d(input_data, stride, name):\n \"\"\"NN 2D max pooling layer.\"\"\"\n with tf.variable_scope(name):\n filter_size = [1, stride, stride, 1]\n return tf.nn.max_pool(input_data, filter_size,\n filter_size, \"SAME\", name=\"max_pool\")\n\ndef conv2d_relu_batch(input_data, filter_size, stride, is_train, name):\n with tf.variable_scope(name):\n conv = conv2d(input_data, filter_size, stride, \"conv2d\")\n bn = batch_normalization(conv, is_train, \"batch\")\n return tf.nn.relu(bn, \"relu\")\n\ndef conv2d_relu(input_data, filter_size, stride, name):\n with tf.variable_scope(name):\n conv = conv2d(input_data, filter_size, stride, \"conv2d\")\n return tf.nn.relu(conv, \"relu\")\n\ndef softmax_loss(logits, labels):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits,\n labels, name='cross_entropy')\n cross_entropy_mean = tf.reduce_mean(\n cross_entropy, name='mean_cross_entropy')\n tf.add_to_collection('losses', cross_entropy_mean)\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\n", "sub_path": "classifiers/cnn_model/src/layer_utils.py", "file_name": "layer_utils.py", "file_ext": "py", "file_size_in_byte": 5313, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "tensorflow.variable_scope", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 30, "usage_type": "call"}, 
{"api_name": "tensorflow.ones_initializer", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.ones_initializer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.moments", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.python.training.moving_averages.assign_moving_average", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.python.training.moving_averages", "line_number": 44, "usage_type": "name"}, {"api_name": "tensorflow.python.training.moving_averages.assign_moving_average", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.python.training.moving_averages", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.add_to_collection", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.add_to_collection", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.cond", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.nn.batch_normalization", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 74, "usage_type": "call"}, {"api_name": "itertools.izip", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer_conv2d", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.nn.bias_add", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", 
"line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.nn.bias_add", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 124, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 127, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.add_to_collection", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.add_n", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.get_collection", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "125648072", "text": "\"\"\"This module provides functions which determine various observatory\nspecific policies for JWST:\n\n1. How to convert reference file basenames to fully specified paths.\n\n2. How to manage parameters for reference file Validator objects used\nin the certification of reference files. \n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport os.path\nimport re\n\nfrom crds import (log, rmap, data_file, config, utils, timestamp)\nfrom . import tpn\n\n# =======================================================================\n\ntry:\n from jwst.datamodels import DataModel\n MODEL = DataModel()\nexcept Exception:\n MODEL = None\n\n# =======================================================================\n\nHERE = os.path.dirname(__file__) or \"./\"\n\n# =======================================================================\n\ndef test():\n \"\"\"Run the module doctests.\"\"\"\n import doctest\n from . 
import locate\n return doctest.testmod(locate)\n\n# =======================================================================\n\n# These two functions decouple the generic reference file certifier program \n# from observatory-unique ways of specifying and caching Validator parameters.\n\nfrom crds.jwst.tpn import get_tpninfos # reference_name_to_validator_key, mapping_validator_key defined here.\nfrom crds.jwst import TYPES, INSTRUMENTS, FILEKINDS, EXTENSIONS\n\nreference_name_to_validator_key = TYPES.reference_name_to_validator_key \n# mapping_validator_key = TYPES.mapping_validator_key\nget_row_keys = TYPES.get_row_keys\nget_row_keys_by_instrument = TYPES.get_row_keys_by_instrument\nget_item = TYPES.get_item\nsuffix_to_filekind = TYPES.suffix_to_filekind\n\n# =======================================================================\n\ndef match_context_key(key):\n \"\"\"Set the case of a context key appropriately for this project, JWST\n always uses upper case.\n \"\"\"\n return key.upper()\n\n# =======================================================================\n\ndef mapping_validator_key(mapping):\n \"\"\"For now, just use instrument based constraints.\"\"\"\n return (mapping.instrument + \"_all_ld.tpn\", mapping.name)\n\n# =======================================================================\n\nREF_EXT_RE = re.compile(r\"\\.fits|\\.r\\dh$\")\n\ndef get_file_properties(filename):\n \"\"\"Figure out (instrument, filekind, serial) based on `filename` which\n should be a mapping or FITS reference file.\n\n >> get_file_properties(\"./hst_acs_biasfile_0001.rmap\")\n ('acs', 'biasfile')\n\n >> get_file_properties(\"./hst_acs_biasfile_0001.pmap\")\n Traceback (most recent call last):\n ...\n AssertionError: Invalid .pmap filename './hst_acs_biasfile_0001.pmap'\n\n >> get_file_properties(\"test_data/s7g1700gl_dead.fits\")\n \"\"\"\n if rmap.is_mapping(filename):\n try:\n return decompose_newstyle_name(filename)[2:4]\n except Exception:\n # NOTE: load_mapping more conservative than fetch_mapping used in properties_from_mapping\n mapping = rmap.load_mapping(filename)\n return mapping.instrument, mapping.filekind\n elif config.is_reference(filename):\n result = get_reference_properties(filename)[2:4]\n else:\n try:\n result = properties_inside_mapping(filename)\n except Exception as exc:\n result = get_reference_properties(filename)[2:4]\n assert result[0] in INSTRUMENTS+[\"\"], \"Bad instrument \" + \\\n repr(result[0]) + \" in filename \" + repr(filename)\n assert result[1] in FILEKINDS+[\"\"], \"Bad filekind \" + \\\n repr(result[1]) + \" in filename \" + repr(filename)\n return result\n\ndef decompose_newstyle_name(filename):\n \"\"\"\n >> decompose_newstyle_name(\"./hst.pmap\")\n ('.', 'hst', '', '', '', '.pmap')\n\n >> decompose_newstyle_name(\"./hst_0001.pmap\")\n ('.', 'hst', '', '', '0001', '.pmap')\n\n >> decompose_newstyle_name(\"./hst_acs.imap\")\n ('.', 'hst', 'acs', '', '', '.imap')\n\n >> decompose_newstyle_name(\"./hst_acs_0001.imap\")\n ('.', 'hst', 'acs', '', '0001', '.imap')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile.rmap\")\n ('.', 'hst', 'acs', 'biasfile', '', '.rmap')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile_0001.rmap\")\n ('.', 'hst', 'acs', 'biasfile', '0001', '.rmap')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile.fits\")\n ('.', 'hst', 'acs', 'biasfile', '', '.fits')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile_0001.fits\")\n ('.', 'hst', 'acs', 'biasfile', '0001', '.fits')\n \"\"\"\n path, parts, ext = _get_fields(filename)\n 
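# e.g. \"./hst_acs_biasfile_0001.rmap\" --> path \".\", parts [\"hst\", \"acs\", \"biasfile\", \"0001\"], ext \".rmap\" (editor's note, derived from the doctests above)\n 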
observatory = parts[0]\n serial = list_get(parts, 3, \"\")\n\n if ext == \".pmap\":\n assert len(parts) in [1,2], \"Invalid .pmap filename \" + repr(filename)\n instrument, filekind = \"\", \"\"\n serial = list_get(parts, 1, \"\")\n elif ext == \".imap\":\n assert len(parts) in [2,3], \"Invalid .imap filename \" + repr(filename)\n instrument = parts[1]\n filekind = \"\"\n serial = list_get(parts, 2, \"\")\n else:\n assert len(parts) in [3,4], \"Invalid filename \" + repr(filename)\n instrument = parts[1]\n filekind = parts[2]\n serial = list_get(parts, 3, \"\")\n\n # Don't include filename in these or it messes up crds.certify unique error tracking.\n\n assert instrument in INSTRUMENTS+[\"\"], \"Invalid instrument \" + repr(instrument)\n assert filekind in FILEKINDS+[\"\"], \"Invalid filekind \" + repr(filekind)\n assert re.match(r\"\\d*$\", serial), \"Invalid id field \" + repr(serial)\n # extension may vary for upload temporary files.\n\n return path, observatory, instrument, filekind, serial, ext\n\ndef properties_inside_mapping(filename):\n \"\"\"Load `filename`s mapping header to discover and \n return (instrument, filekind).\n \"\"\"\n mapping = rmap.fetch_mapping(filename)\n if mapping.filekind == \"PIPELINE\":\n result = \"\", \"\"\n elif mapping.filekind == \"INSTRUMENT\":\n result = mapping.instrument, \"\"\n else:\n result = mapping.instrument, mapping.filekind\n return result\n\ndef _get_fields(filename):\n path = os.path.dirname(filename)\n name = os.path.basename(filename)\n name, ext = os.path.splitext(name)\n parts = name.split(\"_\")\n return path, parts, ext\n\ndef list_get(l, index, default):\n try:\n return l[index]\n except IndexError:\n return default\n\nCDBS_DIRS_TO_INSTR = {\n \"/jref/\":\"acs\",\n \"/oref/\":\"stis\",\n \"/iref/\":\"wfc3\",\n \"/lref/\":\"cos\",\n \"/nref/\":\"nicmos\",\n \n \"/upsf/\":\"wfpc2\",\n \"/uref/\":\"wfpc2\",\n \"/uref_linux/\":\"wfpc2\",\n \n \"/yref/\" : \"fos\",\n \"/zref/\" : \"hrs\",\n \n}\n\ndef get_reference_properties(filename):\n \"\"\"Figure out FITS (instrument, filekind, serial) based on `filename`.\n \"\"\"\n try: # Hopefully it's a nice new standard filename, easy\n return decompose_newstyle_name(filename)\n except AssertionError: # cryptic legacy paths & names, i.e. reality\n pass\n # If not, dig inside the FITS file, slow\n return ref_properties_from_header(filename)\n\n# =======================================================================\n\ndef ref_properties_from_header(filename):\n \"\"\"Look inside FITS `filename` header to determine instrument, filekind.\n \"\"\"\n # For legacy files, just use the root filename as the unique id\n path, parts, ext = _get_fields(filename)\n serial = os.path.basename(os.path.splitext(filename)[0])\n header = data_file.get_free_header(filename, observatory=\"jwst\")\n instrument = utils.header_to_instrument(header).lower()\n assert instrument in INSTRUMENTS, \"Invalid instrument \" + repr(instrument)\n filekind = utils.get_any_of(header, [\"REFTYPE\", \"TYPE\", \"META.TYPE\", \"META.REFFILE.TYPE\"], \"UNDEFINED\").lower()\n assert filekind in FILEKINDS, \"Invalid file type \" + repr(filekind)\n return path, \"jwst\", instrument, filekind, serial, ext\n\n# =============================================================================\n\ndef reference_keys_to_dataset_keys(rmapping, header):\n \"\"\"Given a header dictionary for a reference file, map the header back to keys\n relevant to datasets. So for ACS biasfile the reference says BINAXIS1 but\n the dataset says NUMCOLS. 
This would convert { \"BINAXIS1\": 1024 } to {\n \"NUMCOLS\" : 1024 }.\n \n In general, rmap parkeys are matched against datset values and are defined\n as dataset header keywords. For refactoring though, what's initially\n available are reference file keywords... which need to be mapped into the\n terms rmaps know: dataset keywords.\n \"\"\"\n header = dict(header)\n try:\n translations = rmapping.reference_to_dataset\n except AttributeError:\n pass\n else:\n # Add replacements for translations *if* the existing untranslated value\n # is poor and the translated value is better defined. This is to do\n # translations w/o replacing valid/concrete DM values with something \n # like guessed values of \"UNDEFINED\" or \"N/A\".\n for rkey in translations:\n if rkey in header:\n dkey = translations[rkey]\n dval = header.get(translations[rkey], None)\n rval = header[rkey]\n if dval in [None, \"N/A\", \"UNDEFINED\"] and rval not in [None, \"UNDEFINED\"]:\n header[dkey] = rval\n if \"USEAFTER\" in header: # and \"DATE-OBS\" not in header:\n reformatted = timestamp.reformat_useafter(rmapping, header).split()\n header[\"DATE-OBS\"] = header[\"META.OBSERVATION.DATE\"] = reformatted[0]\n header[\"TIME-OBS\"] = header[\"META.OBSERVATION.TIME\"] = reformatted[1]\n return header\n\n# =============================================================================\n\ndef condition_matching_header(rmapping, header):\n \"\"\"Normalize header values for .rmap reference insertion.\"\"\"\n return dict(header) # NOOP for JWST, may have to revisit\n\n# ============================================================================\n\nclass MissingDependencyError(Exception):\n \"\"\"A required package is missing.\"\"\"\n\ndef fits_to_parkeys(fits_header):\n \"\"\"Map a FITS header onto rmap parkeys appropriate for JWST.\"\"\"\n if MODEL is None:\n raise MissingDependencyError(\"JWST data models are not installed. 
Cannot fits_to_parkeys().\")\n parkeys = {}\n for key, value in fits_header.items():\n key, value = str(key), str(value)\n if not key.lower().startswith(\"meta.\"):\n pk = cached_dm_find_fits_keyword(key)\n if not pk:\n pk = key\n else:\n assert len(pk) == 1, \"CRDS JWST Data Model ambiguity on \" + \\\n repr(key) + \" = \" + repr(pk)\n pk = pk[0]\n else:\n pk = key\n pk = pk.upper()\n parkeys[pk] = value\n return parkeys\n\n@utils.cached\ndef cached_dm_find_fits_keyword(key):\n \"\"\"Return the SSB JWST data model path for the specified non-path keyword, nominally\n a FITS or json or ASDF bare keyword.\n \"\"\"\n return MODEL.find_fits_keyword(key.upper(), return_result=True)\n# ============================================================================\n\ndef get_env_prefix(instrument):\n \"\"\"Return the environment variable prefix (IRAF prefix) for `instrument`.\"\"\"\n return \"crds://\"\n\ndef locate_file(refname, mode=None):\n \"\"\"Given a valid reffilename in CDBS or CRDS format, return a cache path for the file.\n The aspect of this which is complicated is determining instrument and an instrument\n specific sub-directory for it based on the filename alone, not the file contents.\n \"\"\"\n if mode is None:\n mode = config.get_crds_ref_subdir_mode(observatory=\"jwst\")\n if mode == \"instrument\":\n instrument = utils.file_to_instrument(refname)\n rootdir = locate_dir(instrument, mode)\n elif mode == \"flat\":\n rootdir = config.get_crds_refpath(\"jwst\")\n else:\n raise ValueError(\"Unhandled reference file location mode \" + repr(mode))\n return os.path.join(rootdir, os.path.basename(refname))\n\ndef locate_dir(instrument, mode=None):\n \"\"\"Locate the instrument specific directory for a reference file.\"\"\"\n if mode is None:\n mode = config.get_crds_ref_subdir_mode(observatory=\"jwst\")\n else:\n config.check_crds_ref_subdir_mode(mode)\n crds_refpath = config.get_crds_refpath(\"jwst\")\n if mode == \"instrument\": # use simple names inside CRDS cache.\n rootdir = os.path.join(crds_refpath, instrument.lower())\n if not os.path.exists(rootdir):\n utils.ensure_dir_exists(rootdir + \"/locate_dir.fits\")\n elif mode == \"flat\": # use original flat cache structure, all instruments in same directory.\n rootdir = crds_refpath\n else:\n raise ValueError(\"Unhandled reference file location mode \" + repr(mode))\n return rootdir\n\n# ============================================================================\ndef load_all_type_constraints():\n \"\"\"Load all the JWST type constraint files.\"\"\"\n tpn.get_tpninfos(\"miri_flat.tpn\", \"foo.fits\") # With core schema, one type loads all\n", "sub_path": "crds/jwst/locate.py", "file_name": "locate.py", "file_ext": "py", "file_size_in_byte": 13060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "jwst.datamodels.DataModel", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "doctest.testmod", "line_number": 36, "usage_type": "call"}, {"api_name": "crds.jwst.TYPES.reference_name_to_validator_key", "line_number": 46, "usage_type": "attribute"}, {"api_name": "crds.jwst.TYPES", "line_number": 46, "usage_type": "name"}, {"api_name": "crds.jwst.TYPES.get_row_keys", "line_number": 48, "usage_type": "attribute"}, {"api_name": "crds.jwst.TYPES", 
"line_number": 48, "usage_type": "name"}, {"api_name": "crds.jwst.TYPES.get_row_keys_by_instrument", "line_number": 49, "usage_type": "attribute"}, {"api_name": "crds.jwst.TYPES", "line_number": 49, "usage_type": "name"}, {"api_name": "crds.jwst.TYPES.get_item", "line_number": 50, "usage_type": "attribute"}, {"api_name": "crds.jwst.TYPES", "line_number": 50, "usage_type": "name"}, {"api_name": "crds.jwst.TYPES.suffix_to_filekind", "line_number": 51, "usage_type": "attribute"}, {"api_name": "crds.jwst.TYPES", "line_number": 51, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 69, "usage_type": "call"}, {"api_name": "crds.rmap.is_mapping", "line_number": 85, "usage_type": "call"}, {"api_name": "crds.rmap", "line_number": 85, "usage_type": "name"}, {"api_name": "crds.rmap.load_mapping", "line_number": 90, "usage_type": "call"}, {"api_name": "crds.rmap", "line_number": 90, "usage_type": "name"}, {"api_name": "crds.config.is_reference", "line_number": 92, "usage_type": "call"}, {"api_name": "crds.config", "line_number": 92, "usage_type": "name"}, {"api_name": "crds.jwst.INSTRUMENTS", "line_number": 99, "usage_type": "name"}, {"api_name": "crds.jwst.FILEKINDS", "line_number": 101, "usage_type": "name"}, {"api_name": "crds.jwst.INSTRUMENTS", "line_number": 152, "usage_type": "name"}, {"api_name": "crds.jwst.FILEKINDS", "line_number": 153, "usage_type": "name"}, {"api_name": "re.match", "line_number": 154, "usage_type": "call"}, {"api_name": "crds.rmap.fetch_mapping", "line_number": 163, "usage_type": "call"}, {"api_name": "crds.rmap", "line_number": 163, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 173, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 174, "usage_type": "name"}, {"api_name": "os.path.path.splitext", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 175, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 218, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 218, "usage_type": "name"}, {"api_name": "os.path.path.splitext", "line_number": 218, "usage_type": "call"}, {"api_name": "crds.data_file.get_free_header", "line_number": 219, "usage_type": "call"}, {"api_name": "crds.data_file", "line_number": 219, "usage_type": "name"}, {"api_name": "crds.utils.header_to_instrument", "line_number": 220, "usage_type": "call"}, {"api_name": "crds.utils", "line_number": 220, "usage_type": "name"}, {"api_name": "crds.jwst.INSTRUMENTS", "line_number": 221, "usage_type": "name"}, {"api_name": "crds.utils.get_any_of", "line_number": 222, "usage_type": "call"}, {"api_name": "crds.utils", "line_number": 222, "usage_type": "name"}, {"api_name": "crds.jwst.FILEKINDS", "line_number": 223, "usage_type": "name"}, {"api_name": "crds.timestamp.reformat_useafter", "line_number": 257, "usage_type": "call"}, {"api_name": "crds.timestamp", "line_number": 257, "usage_type": "name"}, {"api_name": "crds.utils.cached", "line_number": 294, "usage_type": "attribute"}, {"api_name": "crds.utils", "line_number": 294, "usage_type": "name"}, 
{"api_name": "crds.config.get_crds_ref_subdir_mode", "line_number": 312, "usage_type": "call"}, {"api_name": "crds.config", "line_number": 312, "usage_type": "name"}, {"api_name": "crds.utils.file_to_instrument", "line_number": 314, "usage_type": "call"}, {"api_name": "crds.utils", "line_number": 314, "usage_type": "name"}, {"api_name": "crds.config.get_crds_refpath", "line_number": 317, "usage_type": "call"}, {"api_name": "crds.config", "line_number": 317, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 320, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 320, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 320, "usage_type": "call"}, {"api_name": "crds.config.get_crds_ref_subdir_mode", "line_number": 325, "usage_type": "call"}, {"api_name": "crds.config", "line_number": 325, "usage_type": "name"}, {"api_name": "crds.config.check_crds_ref_subdir_mode", "line_number": 327, "usage_type": "call"}, {"api_name": "crds.config", "line_number": 327, "usage_type": "name"}, {"api_name": "crds.config.get_crds_refpath", "line_number": 328, "usage_type": "call"}, {"api_name": "crds.config", "line_number": 328, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 330, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 330, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 331, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 331, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 331, "usage_type": "name"}, {"api_name": "crds.utils.ensure_dir_exists", "line_number": 332, "usage_type": "call"}, {"api_name": "crds.utils", "line_number": 332, "usage_type": "name"}]} +{"seq_id": "557537337", "text": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OdooBro - odoobro.contact@gmail.com\n#\n##############################################################################\n\nimport logging\n\nfrom openerp import api, models\n_logger = logging.getLogger('openerp')\n\n\nclass UpdateFunctionData(models.TransientModel):\n _name = \"update.function.data\"\n\n @api.model\n def update_sale_config_settings(self):\n _logger.info(\"===== START: UPDATE SALE CONFIG SETTINGS =====\")\n # For group\n config_data = {\n 'sale_pricelist_setting': 'formula',\n 'group_pricelist_item': True,\n 'group_sale_pricelist': True,\n 'group_product_pricelist': False\n }\n SaleConfig = self.env['sale.config.settings']\n fs = dict(SaleConfig._fields)\n vals = SaleConfig.default_get(fs)\n vals.update(config_data)\n sale_config = SaleConfig.create(vals)\n sale_config.execute()\n _logger.info(\"===== END: UPDATE SALE CONFIG SETTINGS =====\")\n return True\n", "sub_path": "loyalty_card/data/update_function_data.py", "file_name": "update_function_data.py", "file_ext": "py", "file_size_in_byte": 1092, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "openerp.models.TransientModel", "line_number": 14, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 14, "usage_type": "name"}, {"api_name": "openerp.api.model", "line_number": 17, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 17, "usage_type": 
"name"}]} +{"seq_id": "80389065", "text": "# Copyright 2008-2018 Univa Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mock\nimport pytest\n\nfrom tortuga.db.models.node import Node\nfrom tortuga.db.models.softwareProfile import SoftwareProfile\nfrom tortuga.exceptions.operationFailed import OperationFailed\nfrom tortuga.node.nodeManager import NodeManager\nfrom .osUtilityMock import get_os_object_factory\n\n\n@mock.patch('tortuga.os_utility.osUtility.getOsObjectFactory',\n side_effect=get_os_object_factory)\nclass TestNodeManager:\n\n def test_simple_validate_delete_nodes_request(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Simple delete of multiple nodes in same software profile\n \"\"\"\n\n swprofile = SoftwareProfile(name='swprofile1', lockedState='Unlocked')\n\n nodes = [\n Node(name='compute-01', softwareprofile=swprofile),\n Node(name='compute-02', softwareprofile=swprofile),\n ]\n\n NodeManager()._NodeManager__validate_delete_nodes_request(nodes, False)\n\n def test_validate_delete_nodes_request_alt(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Simple delete of multiple nodes with one profile locked and one not\n \"\"\"\n\n swprofile1 = SoftwareProfile(name='swprofile1', lockedState='Unlocked')\n swprofile2 = SoftwareProfile(name='swprofile1', lockedState='SoftLocked')\n\n nodes = [\n Node(name='compute-01', softwareprofile=swprofile1),\n Node(name='compute-02', softwareprofile=swprofile2),\n ]\n\n with pytest.raises(OperationFailed):\n NodeManager()._NodeManager__validate_delete_nodes_request(\n nodes, False)\n\n def test_simple_validate_delete_nodes_request_alt(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Delete from soft locked software profile without force\n \"\"\"\n\n nodes = [\n Node(name='compute-01',\n softwareprofile=SoftwareProfile(name='swprofile1',\n lockedState='SoftLocked')),\n ]\n\n with pytest.raises(OperationFailed):\n NodeManager()._NodeManager__validate_delete_nodes_request(\n nodes, False)\n\n\n def test_simple_validate_delete_nodes_request_alt_with_force(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Delete from soft locked software profile with force\n \"\"\"\n\n nodes = [\n Node(name='compute-01',\n softwareprofile=SoftwareProfile(name='swprofile1',\n lockedState='SoftLocked')),\n ]\n\n NodeManager()._NodeManager__validate_delete_nodes_request(nodes, True)\n\n\n def test_simple_validate_delete_nodes_request_alt2(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Delete from hard locked software profile\n \"\"\"\n\n nodes = [\n Node(name='compute-01',\n softwareprofile=SoftwareProfile(name='swprofile1',\n lockedState='HardLocked')),\n ]\n\n with pytest.raises(OperationFailed):\n NodeManager()._NodeManager__validate_delete_nodes_request(\n nodes, False)\n", "sub_path": "src/installer/tests/test_NodeManager.py", "file_name": 
"test_NodeManager.py", "file_ext": "py", "file_size_in_byte": 3987, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "tortuga.db.models.softwareProfile.SoftwareProfile", "line_number": 36, "usage_type": "call"}, {"api_name": "tortuga.db.models.node.Node", "line_number": 39, "usage_type": "call"}, {"api_name": "tortuga.db.models.node.Node", "line_number": 40, "usage_type": "call"}, {"api_name": "tortuga.node.nodeManager.NodeManager", "line_number": 43, "usage_type": "call"}, {"api_name": "tortuga.db.models.softwareProfile.SoftwareProfile", "line_number": 52, "usage_type": "call"}, {"api_name": "tortuga.db.models.softwareProfile.SoftwareProfile", "line_number": 53, "usage_type": "call"}, {"api_name": "tortuga.db.models.node.Node", "line_number": 56, "usage_type": "call"}, {"api_name": "tortuga.db.models.node.Node", "line_number": 57, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 60, "usage_type": "call"}, {"api_name": "tortuga.exceptions.operationFailed.OperationFailed", "line_number": 60, "usage_type": "argument"}, {"api_name": "tortuga.node.nodeManager.NodeManager", "line_number": 61, "usage_type": "call"}, {"api_name": "tortuga.db.models.node.Node", "line_number": 72, "usage_type": "call"}, {"api_name": "tortuga.db.models.softwareProfile.SoftwareProfile", "line_number": 73, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 77, "usage_type": "call"}, {"api_name": "tortuga.exceptions.operationFailed.OperationFailed", "line_number": 77, "usage_type": "argument"}, {"api_name": "tortuga.node.nodeManager.NodeManager", "line_number": 78, "usage_type": "call"}, {"api_name": "tortuga.db.models.node.Node", "line_number": 90, "usage_type": "call"}, {"api_name": "tortuga.db.models.softwareProfile.SoftwareProfile", "line_number": 91, "usage_type": "call"}, {"api_name": "tortuga.node.nodeManager.NodeManager", "line_number": 95, "usage_type": "call"}, {"api_name": "tortuga.db.models.node.Node", "line_number": 106, "usage_type": "call"}, {"api_name": "tortuga.db.models.softwareProfile.SoftwareProfile", "line_number": 107, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 111, "usage_type": "call"}, {"api_name": "tortuga.exceptions.operationFailed.OperationFailed", "line_number": 111, "usage_type": "argument"}, {"api_name": "tortuga.node.nodeManager.NodeManager", "line_number": 112, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 25, "usage_type": "call"}, {"api_name": "osUtilityMock.get_os_object_factory", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "4344267", "text": "from django.urls import path\nfrom .import views\n\nurlpatterns=[\n path('',views.main,name=\"main\"),\n path('index/',views.index,name=\"index\"),\n path('Problem///',views.Problem,name=\"Problem\"),\n path('Profile/',views.Profile,name=\"Profile\"),\n path('login/',views.login,name=\"login\"),\n path('register/',views.register,name=\"register\"),\n path('explore/',views.explore,name= \"explore\"),\n\n path('questionsList//',views.questionsList,name=\"questionsList\"),\n\n\n \n\n]\n\n\n", "sub_path": "JudgeSystemProject/JudgeSystemApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": 
"django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "572475584", "text": "#!python\n\nimport zipfile\nimport urllib\nimport time\nimport glob\nimport os\nimport os.path\nfrom os.path import join\nfrom os.path import isdir\n\ndef zipdir(zip, dir, prefix='', pat='*'):\n cwd = os.getcwd()\n os.chdir(dir)\n for f in glob.glob( pat ):\n if isdir( join(dir, f) ):\n zipdir(zip, join(dir, f), join(prefix, f), pat)\n else:\n zip.write(f, join(prefix, f))\n os.chdir(cwd)\n\nbdir = 'build/'\nhost = \"http://localhost/\"\nfiles = 'index.html,p21.html,p22.html,p23.html,p31.html,p32.html,p321.html,p322.html,p330.html,p331.html,p332.html,p333.html,p41.html,p42.html,p51.html,p52.html'.split(',')\n\nzipName = \"{}/{}.zip\".format(bdir, time.strftime('%Y%m%d_%H_%M_%S') )\nzip = zipfile.ZipFile(zipName, 'w')\n\nfor i in files:\n url = host + i\n #print 'fetching {}'.format(url)\n file = bdir+i\n fo = urllib.urlopen(url)\n zip.writestr( i, fo.read().replace('/static/page/', '') )\n\nfor d in 'js,img,css'.split(','):\n zipdir(zip, 'page/static/page/'+d, d )\nzip.close()\n\n\n\n", "sub_path": "build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.getcwd", "line_number": 13, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 14, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 20, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 26, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "179093169", "text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimg = mpimg.imread('lab1.jpg')\nimgplot = plt.imshow(img)\nplt.show()\nred, blue, green = img.copy(), img.copy(), img.copy()\nred[:,:, 1], red[:, :, 2] = 0, 0\nplt.imshow(red)\nplt.show()\nblue[:, :, 0], blue[:, :, 1] = 0, 0\nplt.imshow(blue)\nplt.show()\ngreen[:, :, 0], green[:, :, 2] = 0, 0\nplt.imshow(green)\nplt.show()\nlum_img = img[:,:,0]\nimgplot = plt.imshow(lum_img, clim=(0.0, 50.0))\nplt.show()\nlum_img1 = img[:,:,0]\nimgplot = plt.imshow(lum_img, clim=(0.0, 255.0))\nplt.show()\nimgplot = plt.imshow(np.dot(img[...,:3], [0.33, 0.33, 0.33]), cmap=\"gray\")\nplt.show()", "sub_path": "Lab_5/Lab_5.3.py", "file_name": "Lab_5.3.py", "file_ext": "py", "file_size_in_byte": 645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "matplotlib.image.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 4, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.imshow", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 6, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "232905880", "text": "import logging # 引入logging模块\nimport os.path\nimport time\n# 第一步,创建一个logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO) # Log等级总开关\n# 第二步,创建一个handler,用于写入日志文件\n#rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\nrq = time.strftime('%Y%m%d', time.localtime(time.time()))\nprint (rq)\n#log_path = os.path.dirname(os.getcwd()) + '/Logs/'\nlog_path = ''\nlog_name = log_path + rq + '.log'\nlogfile = log_name\n# mode a 追加写入, w 覆盖写入\nfh = logging.FileHandler(logfile, mode='a')\nfh.setLevel(logging.DEBUG) # 输出到file的log等级的开关\n\nch = logging.StreamHandler()\nch.setLevel(logging.WARNING) #输出到控制台\n\n# 第三步,定义handler的输出格式\nformatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\nfh.setFormatter(formatter)\n\n#日志输出到控制台:\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n# 第四步,将logger添加到handler里面\nlogger.addHandler(fh)\n# 日志\nlogger.debug('this is a logger debug message')\nlogger.info('this is a logger info message')\nlogger.warning('this is a logger warning message')\nlogger.error('this is a logger error message')\nlogger.critical('this is a logger critical message')\n\n\n", "sub_path": "基本语法/logging_out_logtxt.py", "file_name": "logging_out_logtxt.py", "file_ext": "py", 
"file_size_in_byte": 1301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 9, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 9, "usage_type": "call"}, {"api_name": "time.time", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 20, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "592093696", "text": "from typing import Callable\n\nfrom jschon.exceptions import CatalogueError\nfrom jschon.json import AnyJSONCompatible, JSON\nfrom jschon.jsonschema import Keyword, JSONSchema, Scope\n\n__all__ = [\n 'FormatKeyword',\n 'FormatValidator',\n]\n\nFormatValidator = Callable[[AnyJSONCompatible], None]\n\n\nclass FormatKeyword(Keyword):\n key = \"format\"\n\n def __init__(self, parentschema: JSONSchema, value: str):\n super().__init__(parentschema, value)\n\n from jschon.catalogue import Catalogue\n try:\n self.validator: FormatValidator = Catalogue.get_format_validator(value)\n except CatalogueError:\n self.validator = None\n\n def evaluate(self, instance: JSON, scope: Scope) -> None:\n scope.annotate(instance, self.key, self.json.value)\n if self.validator is not None:\n try:\n self.validator(instance.value)\n except ValueError as e:\n scope.fail(instance, f'The instance is invalid against the \"{self.json.value}\" format: {e}')\n else:\n scope.noassert()\n", "sub_path": "jschon/vocabulary/format.py", "file_name": "format.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "typing.Callable", "line_number": 12, "usage_type": "name"}, {"api_name": "jschon.json.AnyJSONCompatible", "line_number": 12, "usage_type": "name"}, {"api_name": "jschon.jsonschema.Keyword", "line_number": 15, "usage_type": "name"}, {"api_name": "jschon.jsonschema.JSONSchema", "line_number": 18, "usage_type": "name"}, {"api_name": "jschon.catalogue.Catalogue.get_format_validator", "line_number": 23, "usage_type": "call"}, {"api_name": "jschon.catalogue.Catalogue", "line_number": 23, "usage_type": "name"}, {"api_name": "jschon.exceptions.CatalogueError", "line_number": 24, "usage_type": "name"}, {"api_name": "jschon.json.JSON", "line_number": 27, "usage_type": "name"}, {"api_name": "jschon.jsonschema.Scope", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "89957105", "text": "from .singleton_driver import SingleDriver\nfrom .utils.json_utils import JsonUtils\nfrom .logger import logger\n\n\nclass Browser:\n URL = JsonUtils('config.json').get_data('link')\n IMPLICITLY_WAIT = JsonUtils('config.json').get_data('implicitly_wait')\n\n @staticmethod\n def get_browser():\n browser = SingleDriver().get_driver()\n return browser\n\n @staticmethod\n def open_url(url=None):\n if url is None:\n url = Browser.URL\n logger.info('Try to get driver')\n browser = Browser.get_browser()\n logger.info(f'Try to open url \"{url}\"')\n browser.get(url)\n browser.implicitly_wait(Browser.IMPLICITLY_WAIT)\n\n 
@staticmethod\n def get_current_url():\n browser = Browser.get_browser()\n return browser.current_url\n\n @staticmethod\n def get_cookies():\n browser = Browser.get_browser()\n logger.info('Getting cookies')\n return browser.get_cookies()\n\n @staticmethod\n def get_screenshot(name):\n browser = Browser.get_browser()\n browser.get_screenshot_as_file(name)\n\n @staticmethod\n def get_cookie(name):\n browser = Browser.get_browser()\n logger.info(f'Getting cookie named as \"{name}\"')\n return browser.get_cookie(name)\n\n @staticmethod\n def add_cookie(cookie):\n browser = Browser.get_browser()\n logger.info('Adding cookies')\n browser.add_cookie(cookie)\n\n @staticmethod\n def change_cookie(name_cookie_to_change, new_value):\n browser = Browser.get_browser()\n logger.info(f'Changing cookies \"{name_cookie_to_change}\" to new value \"{new_value}\"')\n cookie_to_change = browser.get_cookie(name_cookie_to_change)\n cookie_to_change['value'] = new_value\n browser.add_cookie(cookie_to_change)\n\n @staticmethod\n def delete_cookie(name):\n browser = Browser.get_browser()\n logger.info(f'Deleting cookie named as \"{name}\"')\n browser.delete_cookie(name)\n\n @staticmethod\n def delete_all_cookies():\n browser = Browser.get_browser()\n logger.info('Deleting all cookies')\n browser.delete_all_cookies()\n\n @staticmethod\n def maximize():\n browser = SingleDriver().get_driver()\n logger.info('Maximize window')\n browser.maximize_window()\n\n @staticmethod\n def refresh():\n browser = SingleDriver().get_driver()\n logger.info('Refresh window')\n browser.refresh()\n\n @staticmethod\n def quit_browser():\n driver = SingleDriver()\n browser = driver.get_driver()\n browser.quit()\n driver.del_driver()\n\n @staticmethod\n def switch_to_top():\n browser = SingleDriver().get_driver()\n logger.info('Switching to default frame')\n browser.switch_to.default_content()\n\n @staticmethod\n def switch_to_frame(element):\n browser = SingleDriver().get_driver()\n logger.info('Switching to IFrame')\n browser.switch_to.frame(element.find_element())\n\n @staticmethod\n def go_to_other_tab():\n browser = SingleDriver().get_driver()\n if len(browser.window_handles) > 1:  # compare the number of open windows, not the length of a handle string\n logger.info('Switching to other tab')\n browser.switch_to.window(browser.window_handles[1])\n\n @staticmethod\n def go_to_main_tab():\n browser = SingleDriver().get_driver()\n logger.info('Switching to main tab')\n browser.switch_to.window(browser.window_handles[0])\n\n @staticmethod\n def confirm_alert():\n logger.info('Switching to alert and confirm')\n Browser.get_browser().switch_to.alert.accept()\n\n @staticmethod\n def dismiss_alert():\n logger.info('Switching to alert and dismiss')\n Browser.get_browser().switch_to.alert.dismiss()\n\n @staticmethod\n def get_text_from_alert():\n logger.info('Getting text from alert')\n return Browser.get_browser().switch_to.alert.text\n\n @staticmethod\n def input_text_into_alert(text):\n logger.info(f'Prompting \"{text}\" into alert')\n Browser.get_browser().switch_to.alert.send_keys(text)\n", "sub_path": "framework/browser.py", "file_name": "browser.py", "file_ext": "py", "file_size_in_byte": 4089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "utils.json_utils.JsonUtils", "line_number": 7, "usage_type": "call"}, {"api_name": "utils.json_utils.JsonUtils", "line_number": 8, "usage_type": "call"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 12, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 19, 
"usage_type": "call"}, {"api_name": "logger.logger", "line_number": 19, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 21, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 21, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 33, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 33, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 44, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 44, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 50, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 50, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 56, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 56, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 64, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 64, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 70, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 70, "usage_type": "name"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 75, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 76, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 76, "usage_type": "name"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 81, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 82, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 82, "usage_type": "name"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 87, "usage_type": "call"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 94, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 95, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 95, "usage_type": "name"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 100, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 101, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 101, "usage_type": "name"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 106, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 108, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 108, "usage_type": "name"}, {"api_name": "singleton_driver.SingleDriver", "line_number": 113, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 114, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 114, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 119, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 119, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 124, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 124, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 129, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 129, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 134, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "283623863", "text": "# GNU MediaGoblin -- federated, autonomous media hosting\n# Copyright (C) 2011, 2012 MediaGoblin contributors. 
See AUTHORS.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\nfrom functools import wraps\n\nfrom urlparse import urljoin\nfrom werkzeug.exceptions import Forbidden, NotFound\nfrom werkzeug.urls import url_quote\n\nfrom mediagoblin import mg_globals as mgg\nfrom mediagoblin.db.models import MediaEntry, User\nfrom mediagoblin.tools.response import redirect, render_404\n\n\ndef require_active_login(controller):\n \"\"\"\n Require an active login from the user.\n \"\"\"\n @wraps(controller)\n def new_controller_func(request, *args, **kwargs):\n if request.user and \\\n request.user.status == u'needs_email_verification':\n return redirect(\n request, 'mediagoblin.user_pages.user_home',\n user=request.user.username)\n elif not request.user or request.user.status != u'active':\n next_url = urljoin(\n request.urlgen('mediagoblin.auth.login',\n qualified=True),\n request.url)\n\n return redirect(request, 'mediagoblin.auth.login',\n next=next_url)\n\n return controller(request, *args, **kwargs)\n\n return new_controller_func\n\ndef active_user_from_url(controller):\n \"\"\"Retrieve User() from URL pattern and pass in as url_user=...\n\n Returns a 404 if no such active user has been found\"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = User.query.filter_by(username=request.matchdict['user']).first()\n if user is None:\n return render_404(request)\n\n return controller(request, *args, url_user=user, **kwargs)\n\n return wrapper\n\n\ndef user_may_delete_media(controller):\n \"\"\"\n Require user ownership of the MediaEntry to delete.\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n uploader_id = kwargs['media'].uploader\n if not (request.user.is_admin or\n request.user.id == uploader_id):\n raise Forbidden()\n\n return controller(request, *args, **kwargs)\n\n return wrapper\n\n\ndef user_may_alter_collection(controller):\n \"\"\"\n Require user ownership of the Collection to modify.\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n creator_id = request.db.User.find_one(\n {'username': request.matchdict['user']}).id\n if not (request.user.is_admin or\n request.user.id == creator_id):\n raise Forbidden()\n\n return controller(request, *args, **kwargs)\n\n return wrapper\n\n\ndef uses_pagination(controller):\n \"\"\"\n Check request GET 'page' key for wrong values\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n try:\n page = int(request.GET.get('page', 1))\n if page < 0:\n return render_404(request)\n except ValueError:\n return render_404(request)\n\n return controller(request, page=page, *args, **kwargs)\n\n return wrapper\n\n\ndef get_user_media_entry(controller):\n \"\"\"\n Pass in a MediaEntry based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = User.query.filter_by(username=request.matchdict['user']).first()\n if not user:\n raise NotFound()\n\n media = None\n\n # 
might not be a slug, might be an id, but whatever\n media_slug = request.matchdict['media']\n\n # if it starts with id: it actually isn't a slug, it's an id.\n if media_slug.startswith(u'id:'):\n try:\n media = MediaEntry.query.filter_by(\n id=int(media_slug[3:]),\n state=u'processed',\n uploader=user.id).first()\n except ValueError:\n raise NotFound()\n else:\n # no magical id: stuff? It's a slug!\n media = MediaEntry.query.filter_by(\n slug=media_slug,\n state=u'processed',\n uploader=user.id).first()\n\n if not media:\n # Didn't find anything? Okay, 404.\n raise NotFound()\n\n return controller(request, media=media, *args, **kwargs)\n\n return wrapper\n\n\ndef get_user_collection(controller):\n \"\"\"\n Pass in a Collection based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = request.db.User.find_one(\n {'username': request.matchdict['user']})\n\n if not user:\n return render_404(request)\n\n collection = request.db.Collection.find_one(\n {'slug': request.matchdict['collection'],\n 'creator': user.id})\n\n # Still no collection? Okay, 404.\n if not collection:\n return render_404(request)\n\n return controller(request, collection=collection, *args, **kwargs)\n\n return wrapper\n\n\ndef get_user_collection_item(controller):\n \"\"\"\n Pass in a CollectionItem based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = request.db.User.find_one(\n {'username': request.matchdict['user']})\n\n if not user:\n return render_404(request)\n\n collection_item = request.db.CollectionItem.find_one(\n {'id': request.matchdict['collection_item'] })\n\n # Still no collection item? Okay, 404.\n if not collection_item:\n return render_404(request)\n\n return controller(request, collection_item=collection_item, *args, **kwargs)\n\n return wrapper\n\n\ndef get_media_entry_by_id(controller):\n \"\"\"\n Pass in a MediaEntry based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n media = MediaEntry.query.filter_by(\n id=request.matchdict['media_id'],\n state=u'processed').first()\n # Still no media? 
Okay, 404.\n if not media:\n return render_404(request)\n\n given_username = request.matchdict.get('user')\n if given_username and (given_username != media.get_uploader.username):\n return render_404(request)\n\n return controller(request, media=media, *args, **kwargs)\n\n return wrapper\n\n\ndef get_workbench(func):\n \"\"\"Decorator, passing in a workbench as kwarg which is cleaned up afterwards\"\"\"\n\n @wraps(func)\n def new_func(*args, **kwargs):\n with mgg.workbench_manager.create() as workbench:\n return func(*args, workbench=workbench, **kwargs)\n\n return new_func\n", "sub_path": "decorators.py", "file_name": "decorators.py", "file_ext": "py", "file_size_in_byte": 7301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "mediagoblin.tools.response.redirect", "line_number": 36, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 40, "usage_type": "call"}, {"api_name": "mediagoblin.tools.response.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 32, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.User.query.filter_by", "line_number": 58, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.User.query", "line_number": 58, "usage_type": "attribute"}, {"api_name": "mediagoblin.db.models.User", "line_number": 58, "usage_type": "name"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 60, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 56, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.Forbidden", "line_number": 76, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 71, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.Forbidden", "line_number": 93, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 87, "usage_type": "call"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 109, "usage_type": "call"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 111, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 104, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.User.query.filter_by", "line_number": 124, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.User.query", "line_number": 124, "usage_type": "attribute"}, {"api_name": "mediagoblin.db.models.User", "line_number": 124, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.NotFound", "line_number": 126, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.MediaEntry.query.filter_by", "line_number": 136, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.MediaEntry.query", "line_number": 136, "usage_type": "attribute"}, {"api_name": "mediagoblin.db.models.MediaEntry", "line_number": 136, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.NotFound", "line_number": 141, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.MediaEntry.query.filter_by", "line_number": 144, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.MediaEntry.query", "line_number": 144, "usage_type": "attribute"}, {"api_name": "mediagoblin.db.models.MediaEntry", "line_number": 144, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.NotFound", "line_number": 151, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 122, "usage_type": "call"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 168, "usage_type": "call"}, {"api_name": 
"mediagoblin.tools.response.render_404", "line_number": 176, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 162, "usage_type": "call"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 193, "usage_type": "call"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 200, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 187, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.MediaEntry.query.filter_by", "line_number": 213, "usage_type": "call"}, {"api_name": "mediagoblin.db.models.MediaEntry.query", "line_number": 213, "usage_type": "attribute"}, {"api_name": "mediagoblin.db.models.MediaEntry", "line_number": 213, "usage_type": "name"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 218, "usage_type": "call"}, {"api_name": "mediagoblin.tools.response.render_404", "line_number": 222, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 211, "usage_type": "call"}, {"api_name": "mediagoblin.mg_globals.workbench_manager.create", "line_number": 234, "usage_type": "call"}, {"api_name": "mediagoblin.mg_globals.workbench_manager", "line_number": 234, "usage_type": "attribute"}, {"api_name": "mediagoblin.mg_globals", "line_number": 234, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 232, "usage_type": "call"}]} +{"seq_id": "544195178", "text": "from marshmallow import fields\nfrom .....messaging.agent_message import AgentMessage, AgentMessageSchema\nfrom ..message_types import PRESENT_PROOF, PROTOCOL_PACKAGE\n\nHANDLER_CLASS = f\"{PROTOCOL_PACKAGE}.handlers.present_proof.PresentProofHandler\"\n\n\nclass PresentProof(AgentMessage):\n class Meta:\n handler_class = HANDLER_CLASS\n schema_class = \"PresentProofSchema\"\n message_type = PRESENT_PROOF\n\n def __init__(\n self,\n _id: str = None,\n *,\n credential_presentation=None,\n prover_public_did=None,\n decision: bool = True,\n **kwargs,\n ):\n \"\"\"Initialize credential issue object.\"\"\"\n super().__init__(_id=_id, **kwargs)\n self.credential_presentation = credential_presentation\n self.prover_public_did = prover_public_did\n self.decision = decision\n\n\nclass PresentProofSchema(AgentMessageSchema):\n \"\"\"Credential schema.\"\"\"\n\n class Meta:\n \"\"\"Credential schema metadata.\"\"\"\n\n model_class = PresentProof\n\n credential_presentation = fields.Str(required=False)\n prover_public_did = fields.Str(required=False)\n decision = fields.Bool(required=True)\n", "sub_path": "aries_cloudagent/protocols/present_proof/v1_1/messages/present_proof.py", "file_name": "present_proof.py", "file_ext": "py", "file_size_in_byte": 1177, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "message_types.PROTOCOL_PACKAGE", "line_number": 5, "usage_type": "name"}, {"api_name": "messaging.agent_message.AgentMessage", "line_number": 8, "usage_type": "name"}, {"api_name": "message_types.PRESENT_PROOF", "line_number": 12, "usage_type": "name"}, {"api_name": "messaging.agent_message.AgentMessageSchema", "line_number": 30, "usage_type": "name"}, {"api_name": "marshmallow.fields.Str", "line_number": 38, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 38, "usage_type": "name"}, {"api_name": "marshmallow.fields.Str", "line_number": 39, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 39, "usage_type": "name"}, {"api_name": "marshmallow.fields.Bool", "line_number": 40, "usage_type": "call"}, 
{"api_name": "marshmallow.fields", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "522476109", "text": "from copy import deepcopy\nfrom pathlib import Path\n\nfrom scrapli_replay.server.collector import ScrapliCollector\n\nimport scrapli\nfrom scrapli.driver.core.cisco_nxos.base_driver import PRIVS\n\nTEST_DATA_DIR = f\"{Path(scrapli.__file__).parents[1]}/tests/test_data\"\n\n\ndef main():\n privs = deepcopy(PRIVS)\n privs.pop(\"exec\")\n privs[\"privilege_exec\"].previous_priv = \"\"\n privs[\"privilege_exec\"].escalate = \"\"\n privs[\"privilege_exec\"].escalate_prompt = \"\"\n\n scrapli_kwargs = {\n \"host\": \"localhost\",\n \"port\": 22022,\n \"ssh_config_file\": False,\n \"auth_strict_key\": False,\n \"auth_username\": \"vrnetlab\",\n \"auth_password\": \"VR-netlab9\",\n \"auth_secondary\": \"VR-netlab9\",\n \"platform\": \"cisco_nxos\",\n \"privilege_levels\": privs,\n \"timeout_ops\": 120.0,\n \"timeout_socket\": 120.0,\n \"timeout_transport\": 120.0,\n \"comms_ansi\": True,\n }\n\n collector = ScrapliCollector(\n channel_inputs=[\"show version\", \"show run\"],\n interact_events=[\n [\n (\"clear logg onboard\", \"Do you want to continue? (y/n) [n]\", False),\n (\"y\", \"switch#\", False),\n ]\n ],\n paging_indicator=\"--More--\",\n paging_escape_string=\"q\",\n collector_session_filename=f\"{TEST_DATA_DIR}/mock_server_sessions/nxos.yaml\",\n **scrapli_kwargs,\n )\n\n collector.open()\n collector.collect()\n collector.close()\n collector.dump()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "tests/integration/collect/nxos.py", "file_name": "nxos.py", "file_ext": "py", "file_size_in_byte": 1524, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pathlib.Path", "line_number": 9, "usage_type": "call"}, {"api_name": "scrapli.__file__", "line_number": 9, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 13, "usage_type": "call"}, {"api_name": "scrapli.driver.core.cisco_nxos.base_driver.PRIVS", "line_number": 13, "usage_type": "argument"}, {"api_name": "scrapli_replay.server.collector.ScrapliCollector", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "437604960", "text": "\"\"\"\r\n\r\nUsage:\r\n main.py [--data --folder ] [--batch-size ] [--bptt ]\r\n [--embed ] [--hidden ] [--cell ] [--num-layers ] [--resume ]\r\n [--dropout-embed ] [--dropout-input ] [--dropout-hidden ]\r\n [--dropconnect ] [--dropout-other ]\r\n [--epochs ] [--lr ] [--l2 ] [--ar ] [--tar ]\r\n [--clip ] [--device ]\r\n [--seed ]\r\n\r\nData Options:\r\n --data Dataset to use [default: ptb]\r\n --folder Directory to load dataset [default: ../../data/ptb]\r\n --batch-size Training batch size [default: 80]\r\n --bptt Base length of variable truncated BPTT [default: 70]\r\n\r\nModel Options:\r\n --embed Word embedding size [default: 400]\r\n --hidden Recurrent hidden layer size [default: 1150]\r\n --cell Recurrent cell type [default: LSTM]\r\n --num-layers Number of recurrent hidden layers [default: 3]\r\n --resume Path to resume model parameters\r\n\r\nModel Dropout Options:\r\n --dropout-embed Dropout rate in embedding layer [default: 0.1]\r\n --dropout-input Dropout rate of embedding input [default: 0.65]\r\n --dropout-hidden Dropout rate of hidden output of each recurrent layer [default: 0.3]\r\n --dropconnect DropConnect rate of weights of recurrent layers [default: 0.5]\r\n --dropout-other Dropout rate of other parts [default: 0.4]\r\n\r\nTraining Options:\r\n --epochs Number of epochs to train 
[default: 8000]\r\n --lr Learning rate [default: 30]\r\n --l2 L2 regularization scale [default: 1.2e-6]\r\n --ar Acticvation regularization scale [default: 2]\r\n --tar Temporal activation regularization scale [default: 1]\r\n --clip Gradient clipping scale [default: 0.25]\r\n --device GPU device to run\r\n\r\nRandom Options:\r\n --seed Random seed [default: 1]\r\n\r\n\"\"\"\r\nimport numpy as np\r\nimport torch\r\nimport copy\r\n\r\nimport nnlearn.dataset as Dataset\r\nimport nnlearn.dataset.transform as Transform\r\nimport nnlearn.model as Model\r\nimport nnlearn.optimizer as Optimizer\r\nimport nnlearn.learner as Learner\r\nimport nnlearn.callback as Callback\r\n\r\nfrom splitcross import SplitCrossEntropyLoss\r\n\r\n\r\n# Hook Function Loading Penn Treebank Dataset\r\ndef PennTreebank(learner, folder, bptt, batch_size):\r\n transform = Transform.TokenToTensor()\r\n\r\n train_data = Dataset.BPTTDataLoader(\r\n Dataset.PennTreebank(folder, mode='train', transform=transform),\r\n bptt=bptt, batch_size=batch_size, shuffle=False)\r\n valid_data = Dataset.BPTTDataLoader(\r\n Dataset.PennTreebank(folder, mode='valid', transform=transform),\r\n bptt=bptt, batch_size=10, shuffle=False)\r\n test_data = Dataset.BPTTDataLoader(\r\n Dataset.PennTreebank(folder, mode='test', transform=transform),\r\n bptt=bptt, batch_size=1, shuffle=False)\r\n\r\n # update number of tokens for model initialization\r\n learner.model_args['num_tokens'] = len(train_data.dataset.dictionary)\r\n\r\n return train_data, valid_data, test_data\r\n\r\n# Hook Function Using NT-ASGD\r\nclass NTASGD(Callback):\r\n def epoch_midterm(self):\r\n \"\"\"Operation between training an epoch and evaluation of that epoch\"\"\"\r\n if self.learner.epoch == 2:\r\n buffer = []\r\n buffer.append(\"Parameter After Epoch {}\".format(self.learner.epoch))\r\n buffer += [itr.cpu().data for itr in self.learner.model.parameters()]\r\n torch.save(buffer, 'buffer.pt')\r\n exit()\r\n else:\r\n pass\r\n\r\n # switch parameters to averaged weights from NT-ASGD initial point\r\n self.param_buffer = {}\r\n for param_group in self.learner.optimizer.param_groups:\r\n if 't0' in param_group:\r\n for param in param_group:\r\n self.param_buffer[param] = param.data.clone()\r\n param.data = self.learner.optimizer.state[param]['ax'].clone()\r\n else:\r\n pass\r\n\r\n def epoch_end(self):\r\n \"\"\"Operation after the end of a whole epoch\"\"\"\r\n # switch parameters back to continue training\r\n for param in self.learner.model.parameters():\r\n if param in self.param_buffer:\r\n param.data = self.param_buffer[param].clone()\r\n else:\r\n pass\r\n\r\n # switch to use ASGD if necessary (no copy in buffer means no averaging)\r\n loss_buffer = self.learner.loss_buffer['valid']\r\n is_asgd = isinstance(self.learner.optimizer, Optimizer.ASGD)\r\n is_averaged = (len(self.param_buffer) > 0)\r\n is_nonmono = len(loss_buffer) > self.train_args['nonmono'] + 1\r\n # MAYBE ERROR OF ORIGINAL CODE\r\n # ============================\r\n # It should compare current loss with n steps before (including current)\r\n # rather than all but n steps before.\r\n # ```\r\n # history = loss_buffer[-self.learner.train_args['nonmono']:]\r\n # ```\r\n # Force to fit original design.\r\n if len(self.learner.loss_buffer['valid']) > 0:\r\n current = loss_buffer[-1]\r\n history = loss_buffer[:-self.learner.train_args['nonmono'] - 1]\r\n is_better = (current > min(history))\r\n else:\r\n is_better = False\r\n if (not is_asgd) and (not is_averaged) and is_nonmono and is_better:\r\n 
self.learner.logger.info('Switch To NT-ASGD')\r\n self.learner.optimizer = Optimizer.ASGD(\r\n self.learner.model.parameters(), t0=0, lambd=0., **self.optimizer_args)\r\n else:\r\n pass\r\n\r\n# Main Process\r\ndef main(args):\r\n # parse arguments\r\n dataset = args['--data']\r\n folder = args['--folder']\r\n bptt = args['--bptt']\r\n bsz = args['--batch-size']\r\n num_embed = args['--embed']\r\n num_hidden = args['--hidden']\r\n cell = args['--cell']\r\n num_layers = args['--num-layers']\r\n resume = args['--resume']\r\n drop_embed = args['--dropout-embed']\r\n drop_input = args['--dropout-input']\r\n drop_hidden = args['--dropout-hidden']\r\n dropconnect = args['--dropconnect']\r\n drop_other = args['--dropout-other']\r\n num_epochs = args['--epochs']\r\n lr = args['--lr']\r\n l2 = args['--l2']\r\n ar = args['--ar']\r\n tar = args['--tar']\r\n clip = args['--clip']\r\n device = args['--device']\r\n seed = args['--seed']\r\n\r\n learner = Learner.LanguageAWDLearner(\r\n device=device, random_seed=seed,\r\n dataset_hook=PennTreebank, dataset_args=dict(\r\n folder=folder, bptt=bptt, batch_size=bsz),\r\n model=Model.LangAWD, model_args=dict(\r\n input=num_embed, hidden=num_hidden, recurrent=cell, num_layers=num_layers,\r\n dropout=dict(\r\n embed=drop_embed, input=drop_input, hidden=drop_hidden,\r\n weight=dropconnect, other=drop_other)),\r\n criterion=SplitCrossEntropyLoss, criterion_args=dict(\r\n hidden_size=num_embed, splits=[], verbose=False),\r\n optimizer=Optimizer.SGD, optimizer_args=dict(lr=lr, weight_decay=l2),\r\n metrics=['ppl'],\r\n callbacks=[NTASGD],\r\n train_args=dict(alpha=ar, beta=tar, clip=clip, bptt=bptt))\r\n learner.fit(num_epochs=num_epochs, eval_init=False, eval_train=False)\r\n\r\nif __name__ == '__main__':\r\n import docopt\r\n from schema import Schema, Use, And, Or\r\n args = docopt.docopt(__doc__, version='NNLearn Package Test (AWD-LSTM + (PennTreebank + WikiText))')\r\n requirements = {\r\n '--batch-size' : And(Use(int), lambda x: x > 0,\r\n error='Training batch size should be integer > 0'),\r\n '--bptt' : And(Use(int), lambda x: x > 0,\r\n error='Base length of variable truncated BPTT should be integer > 0'),\r\n '--embed' : And(Use(int), lambda x: x > 0,\r\n error='Word embedding size should be integer > 0'),\r\n '--hidden' : And(Use(int), lambda x: x > 0,\r\n error='Recurrent hidden layer size should be integer > 0'),\r\n '--num-layers' : And(Use(int), lambda x: x > 0,\r\n error='Number of recurrent hidden layers should be integer > 0'),\r\n '--dropout-embed' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropout-input' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropout-hidden' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropconnect' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropout-other' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--epochs' : And(Use(int), lambda x: x > 0,\r\n error='Number of epochs should be integer > 0'),\r\n '--lr' : And(Use(float), lambda x: x > 0,\r\n error='Learning rate should be float > 0'),\r\n '--l2' : And(Use(float), lambda x: x > 0,\r\n error='L2 regularization scale should be float > 0'),\r\n '--ar' : And(Use(float), lambda x: x > 0,\r\n error='Activation regularization scale should be float > 0'),\r\n '--tar' : 
And(Use(float), lambda x: x > 0,\r\n error='Temporal activation regularization should be float > 0'),\r\n '--clip' : And(Use(float), lambda x: x > 0,\r\n error='Gradient clipping scale should be float > 0'),\r\n '--device' : Or(None, And(Use(int), lambda x: x >= 0),\r\n error='GPU device should be integer >= 0'),\r\n '--seed' : And(Use(int), lambda x: x > 0,\r\n error='Random seed should be integer > 0'),\r\n object : object,\r\n }\r\n args = Schema(requirements).validate(args)\r\n main(args)\r\n", "sub_path": "validate/validate-3/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 10703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "nnlearn.dataset.transform.TokenToTensor", "line_number": 61, "usage_type": "call"}, {"api_name": "nnlearn.dataset.transform", "line_number": 61, "usage_type": "name"}, {"api_name": "nnlearn.dataset.BPTTDataLoader", "line_number": 63, "usage_type": "call"}, {"api_name": "nnlearn.dataset", "line_number": 63, "usage_type": "name"}, {"api_name": "nnlearn.dataset.PennTreebank", "line_number": 64, "usage_type": "call"}, {"api_name": "nnlearn.dataset", "line_number": 64, "usage_type": "name"}, {"api_name": "nnlearn.dataset.BPTTDataLoader", "line_number": 66, "usage_type": "call"}, {"api_name": "nnlearn.dataset", "line_number": 66, "usage_type": "name"}, {"api_name": "nnlearn.dataset.PennTreebank", "line_number": 67, "usage_type": "call"}, {"api_name": "nnlearn.dataset", "line_number": 67, "usage_type": "name"}, {"api_name": "nnlearn.dataset.BPTTDataLoader", "line_number": 69, "usage_type": "call"}, {"api_name": "nnlearn.dataset", "line_number": 69, "usage_type": "name"}, {"api_name": "nnlearn.dataset.PennTreebank", "line_number": 70, "usage_type": "call"}, {"api_name": "nnlearn.dataset", "line_number": 70, "usage_type": "name"}, {"api_name": "nnlearn.callback", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 86, "usage_type": "call"}, {"api_name": "nnlearn.optimizer.ASGD", "line_number": 112, "usage_type": "attribute"}, {"api_name": "nnlearn.optimizer", "line_number": 112, "usage_type": "name"}, {"api_name": "nnlearn.optimizer.ASGD", "line_number": 131, "usage_type": "call"}, {"api_name": "nnlearn.optimizer", "line_number": 131, "usage_type": "name"}, {"api_name": "nnlearn.learner.LanguageAWDLearner", "line_number": 162, "usage_type": "call"}, {"api_name": "nnlearn.learner", "line_number": 162, "usage_type": "name"}, {"api_name": "nnlearn.model.LangAWD", "line_number": 166, "usage_type": "attribute"}, {"api_name": "nnlearn.model", "line_number": 166, "usage_type": "name"}, {"api_name": "splitcross.SplitCrossEntropyLoss", "line_number": 171, "usage_type": "name"}, {"api_name": "nnlearn.optimizer.SGD", "line_number": 173, "usage_type": "attribute"}, {"api_name": "nnlearn.optimizer", "line_number": 173, "usage_type": "name"}, {"api_name": "docopt.docopt", "line_number": 182, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 184, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 184, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 186, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 186, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 188, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 188, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 190, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 190, "usage_type": 
"call"}, {"api_name": "schema.And", "line_number": 192, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 192, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 194, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 194, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 196, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 196, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 198, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 198, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 200, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 200, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 202, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 202, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 204, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 204, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 206, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 206, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 208, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 208, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 210, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 210, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 212, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 212, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 214, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 214, "usage_type": "call"}, {"api_name": "schema.Or", "line_number": 216, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 216, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 216, "usage_type": "call"}, {"api_name": "schema.And", "line_number": 218, "usage_type": "call"}, {"api_name": "schema.Use", "line_number": 218, "usage_type": "call"}, {"api_name": "schema.Schema", "line_number": 222, "usage_type": "call"}]} +{"seq_id": "251898852", "text": "# This file should be in /front-end/django-project/sim_worker/celery.py as well as in /worker/sim_worker/celery.py\n# If you modify one, please copy/paste the modification in the other one\n\nfrom celery import Celery\n\napp = Celery('sim_worker',\n broker='redis://128.3.144.76:6379/0',\n backend='redis://128.3.144.76:6379/0',\n include=['sim_worker.tasks'])\n\n# Optional configuration, see the application user guide.\napp.conf.update(\n CELERY_TASK_RESULT_EXPIRES=3600,\n CELERY_TRACK_STARTED=True,\n)\n\nif __name__ == '__main__':\n app.start()\n", "sub_path": "front_end/django-project/sim_worker/celery.py", "file_name": "celery.py", "file_ext": "py", "file_size_in_byte": 579, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "celery.Celery", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "180436459", "text": "# Copyright 2021 AI Singapore. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport dash_table\n\n\ndef plot_probabilities_spread_pattern(df_specific_label: pd.DataFrame):\n '''\n Display scatter plot for probabilities comparison on correct data point vs miss-predicted data point\n for each class label\n\n Arguments:\n df_specific_label (:obj:`~pd.DataFrame`):\n dataframe of 1 specific label of 1 model type, output from int_miss_predictions\n\n Returns:\n :obj:`~plotly.graph_objects.Figure`:\n figure displaying scatter plot outlining probabilities comparison on correct data point vs miss-predicted data point \\\n for each class label\n '''\n label = list(df_specific_label.columns)[1]\n model_name = df_specific_label['model'].values[0]\n\n fig = px.scatter(df_specific_label,\n x=list(df_specific_label.index),\n y=df_specific_label[label],\n color='pred_state',\n category_orders={\"pred_state\": [\"correct\", \"miss-predict\"]},\n color_discrete_sequence=px.colors.qualitative.D3)\n\n fig.update_layout(\n title=f'Class {label}
<br>[ {model_name} ]<br><br>
',\n title_x=0.6,\n yaxis_title=f\"Probability is_class_{label} \",\n yaxis_showgrid=False,\n xaxis_title=\"data_point index\",\n xaxis_showgrid=False,\n legend_title=\"\", \n legend=dict(orientation=\"h\", yanchor=\"bottom\", y=1.03, xanchor=\"right\", x=0.8), \n width=250,\n height=600,\n margin=dict(t=170, b=0, l=12, r=12, pad=10))\n\n fig.update_yaxes(range=[0, 1])\n fig.update_xaxes(rangemode=\"tozero\")\n fig.add_hline(y=0.5, line_dash=\"dot\")\n\n # iterate through all traces, to ensure all label-class have consistent format\n for i in range(len(fig.data)):\n if fig.data[i]['legendgroup'] == 'correct':\n fig.data[i]['marker']['color'] = '#1f77b4'\n fig.data[i]['hovertemplate'] = \"Index %{x}
\" + \"[ correct ]

\" + \\\n \"probability: %{y:.4f}
\" + \"\"\n\n elif fig.data[i]['legendgroup'] == 'miss-predict':\n fig.data[i]['marker']['color'] = '#FF7F0E'\n fig.data[i]['hovertemplate'] = \"Index %{x}
\" + \"[ miss-predict ]

\" + \\\n \"probability: %{y:.4f}
\" + \"\"\n return fig\n\n\ndef plot_simple_probs_spread_overview(df_label_state: pd.DataFrame):\n '''\n Display data table listing simple stats on ss, %correct, % wrong, accuracy for each label class\n\n Arguments:\n df_label_state (:obj:`~pd.DataFrame`):\n dataframe containing info on simple stats, output from int_miss_predictions\n\n Returns:\n :obj:`~dash_table.DataTable`:\n table object outlining simple stats on ss, %correct, % wrong, accuracy for each label class\n '''\n fig = dash_table.DataTable(\n id='table', \n columns=[{'id': c, 'name': c} for c in df_label_state.columns], \n style_cell={'font-family': 'verdana', \n 'font-size': '14px', \n 'border': 'none', \n 'minWidth': '100px'},\n style_header={'display': 'none'},\n style_table={'width': '550', 'margin': 'auto'},\n style_data={'lineHeight': '15px'},\n style_data_conditional=[{'if': {'column_id': 'index'}, 'textAlign': 'left'},\n {'if': {'column_id': 'state_value'}, 'textAlign': 'right'}],\n data=df_label_state.to_dict('records'))\n return fig\n\n\ndef plot_prediction_offset_overview(df: pd.DataFrame):\n '''\n Display scatter plot for overview on prediction offset values\n\n Arguments:\n df (:obj:`~pd.DataFrame`):\n dataframe containing calculated offset values, output from int_miss_predictions\n\n Returns:\n :obj:`~plotly.graph_objects.Figure`:\n figure displaying scatter plot outlining overview on prediction offset values by index\n '''\n pred_cols = [col for col in df.columns if 'yPred_' in col]\n offset_cols = [col for col in df.columns if 'offset_' in col]\n corrected_legend_names = [col.replace('yPred_', '') for col in pred_cols]\n df.insert(0, 'index', list(df.index))\n\n fig = px.scatter(df, x='index', y=offset_cols[0], custom_data=['index'], color_discrete_sequence=px.colors.qualitative.D3)\n fig.data[0].name = corrected_legend_names[0]\n fig.update_traces(showlegend=True, hovertemplate=\"Data Index : %{x}
Prediction Offset : %{y}\")\n\n    if len(pred_cols) > 1: # Bimodal\n        fig.add_trace(go.Scatter(\n            x=df['index'], \n            y=df[offset_cols[1]], \n            name=corrected_legend_names[1], \n            mode='markers',\n            marker=dict(color='#FF7F0E'),\n            hovertemplate=\"Data Index : %{x}<br>
Prediction Offset : %{y}\"))\n\n # add reference baseline [mainly to have baseline included in legend]\n fig.add_trace(go.Scatter(\n x=[0, len(df)], \n y=[0] * 2, \n name=\"Baseline [Prediction - Actual]\", \n visible=True, \n hoverinfo='skip',\n mode='lines',\n line=dict(color=\"green\", dash=\"dot\")))\n # referece baseline [mainly for the dotted line in graph, but no legend generated]\n fig.add_hline(y=0, line_dash=\"dot\")\n\n fig.update_layout(\n title='Prediction Offset Overview by Datapoint Index', \n xaxis_title='Datapoint Index', \n yaxis_title='Offset from baseline', \n title_x=0.5,\n legend=dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1), \n width=1000,\n height=550,\n margin=dict(t=110), \n clickmode='event+select')\n\n return fig\n", "sub_path": "src/rarity/visualizers/miss_predictions.py", "file_name": "miss_predictions.py", "file_ext": "py", "file_size_in_byte": 6469, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "attribute"}, {"api_name": "plotly.express.scatter", "line_number": 38, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 38, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 43, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 43, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "attribute"}, {"api_name": "dash_table.DataTable", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "attribute"}, {"api_name": "plotly.express.scatter", "line_number": 121, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 121, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 121, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 126, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 126, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 135, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "324081070", "text": "__author__ = 'XChen'\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport MySQLdb\n#import pymysql\nfrom sklearn.linear_model import LinearRegression\n\n\nsdax = MySQLdb.connect(host='sdax-retail-dev.cd50doqou1ik.eu-west-1.rds.amazonaws.com', port=3306, user='wil', passwd='821027Si$', db='sdax_dev')\ncur = sdax.cursor()\n\n# execute SQL query using execute() method.\ncur.execute (\"select JAN_2012,FEB_2012,MAR_2012,APR_2012,MAY_2012,JUN_2012,JUL_2012,AUG_2012,SEP_2012,OCT_2012,NOV_2012,DEC_2012,JAN_2013,FEB_2013,MAR_2013,APR_2013,MAY_2013,JUN_2013,JUL_2013,AUG_2013,SEP_2013,OCT_2013,NOV_2013,DEC_2013,JAN_2014,FEB_2014,MAR_2014,APR_2014,MAY_2014,JUN_2014,JUL_2014,AUG_2014,SEP_2014,OCT_2014,NOV_2014,DEC_2014,JAN_2015,FEB_2015,MAR_2015,APR_2015,MAY_2015,JUN_2015 from NIELSEN_With_Zero\")\n\ndf = cur.fetchall()\nlengthDF = len(df)\nexampleDF = df[8822][:]\n\n##### Plot the example time series\n# fig = plt.figure(figsize=(12,8))\n# ax = fig.add_subplot(111)\n# plt.plot(exampleDF)\n# plt.title('Sales example')\n# plt.xlabel('Month')\n# plt.ylabel('Sales')\n# plt.show()\n# legend = ax.legend(loc = 'upper left')\n# legend.prop.set_size(20)\n\n\n# for row in df:\n# print(row[0], 
row[1])\n\n\n# disconnect from server\ncur.close()\nsdax.close()\n\n######Process the data\ndef Process_data(DF, window, horizon):\n \"\"\"\n Input:\n to_forecast, univariate time series organized as numpy array\n window, number of items to use in the forecast window\n horizon, horizon of the forecast\n Output:\n X, a matrix where each row contains a forecast window\n y, the target values for each row of X\n \"\"\"\n shape = DF.shape[:-1] + (DF.shape[-1] - window + 1, window)\n strides = DF.strides + (DF.strides[-1],)\n X = np.lib.stride_tricks.as_strided(DF,\n shape=shape,\n strides=strides)\n Y = np.array([X[i+horizon][-1] for i in range(len(X)-horizon)])\n return X[:-horizon], Y\n\ndef mape(Esti, Gtrue):\n \"\"\" returns the mean absolute percentage error \"\"\"\n idx = Gtrue != 0.0\n return 100*np.mean(np.abs(Esti[idx]-Gtrue[idx])/Gtrue[idx])\n\nk = 18 # number of previous observations to use\nh = 1 # forecast horizon\nvectorPred = [None] * 11\n\nfor ii in range(0,10):\n exDF = np.asarray(df[ii][:])\n X,Y = Process_data(exDF, k, h)\n\n m = len(exDF)-k-h # number of samples to take in account\n regressor = LinearRegression(normalize=True)\n regressor.fit(X[:m], Y[:m])\n predResult = regressor.predict(X[m:])\n flagP = sum(exDF[37:])\n if flagP == 0:\n predResult = 0.00000\n else:\n continue\n\n # print('The error of line is %0.2f%%' % mape(regressor.predict(X[m:]),Y[m:]))\n vectorPred[ii] = predResult\n\n\n# import statsmodels.api as sm\n# arma_res = sm.tsa.ARMA(exDF, order=(2,2)).fit()\n# preds, stderr, ci = arma_res.forecast(1)\n\n\n# plt.figure(figsize=(8,6))\n# plt.plot(Y, label='True data', color='#377EB8', linewidth=2)\n# plt.plot(regressor.predict(X),\n# '--', color='#EB3737', linewidth=3, label='Prediction')\n# plt.plot(Y[:m], label='True data', color='#3700B8', linewidth=2)\n# # plt.xticks(arange(len(dates))[1::4],dates[1::4], rotation=45)\n# plt.legend(loc='upper left')\n# # ylabel('beer consumed (millions of litres)')\n# plt.show()\n\n\nvectorP = np.asarray(vectorPred)\nnp.savetxt(\"Nielsen_OneStepPred_With_Zero_B.csv\", vectorP, delimiter=\",\")\n\n\n", "sub_path": "PredictionCode/Nielsen_With_Zero.py", "file_name": "Nielsen_With_Zero.py", "file_ext": "py", "file_size_in_byte": 3441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "MySQLdb.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.lib.stride_tricks.as_strided", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.lib", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "559888820", "text": "#https://www.hackerrank.com/challenges/ctci-bfs-shortest-reach/problem\nimport sys\nclass Vertex():\n def __init__(self,node):\n self.id = node\n self.neighbors = []\n def addNeighbor(self,node1):\n v = Vertex(node1)\n self.neighbors.append(v)\n v.neighbors.append(self)\n \nfrom collections import deque\nclass Graph():\n \n def __init__(self,n):\n self.vertices = {x : [] for x in range(n)}\n self.edges = 
[]\n self.edgeLength = 6\n \n def connect(self,v1,v2):\n if v1!=v2:\n self.edges.append((v1,v2))\n self.vertices[v1].append(v2)\n self.vertices[v2].append(v1)\n \n def find_all_distances(self,start):\n q = deque();\n q.append(start);\n \n distances = [-1 for x in range (len(self.vertices))]\n distances[start] = 0\n \n while(q):\n v = q.popleft();\n for each in self.vertices[v]:\n if distances[each] == -1:\n distances[each] = distances[v]+self.edgeLength\n q.append(each)\n \n return distances\n \n \n \n\nt = int(input()) #No. of queries\nfor i in range(t): # i will be 0,1,..t-1\n n,m = [int(value) for value in input().split()]\n graph = Graph(n)\n for i in range(m):\n x,y = [int(x) for x in input().split()]\n graph.connect(x-1,y-1) \n s = int(input())\n d = graph.find_all_distances(s-1)\n for each in d:\n if each != 0:\n sys.stdout.write(str(each) + \" \")\n print()\n \n'''\nSample Input\n\n2\n4 2\n1 2\n1 3\n1\n3 1\n2 3\n2\nSample Output\n\n6 6 -1\n-1 6\n'''", "sub_path": "python/Graphs/BFSShortestpath.py", "file_name": "BFSShortestpath.py", "file_ext": "py", "file_size_in_byte": 1620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "collections.deque", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "295573738", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport logging\nimport threading\nimport time\nimport socket\nimport random\n\nimport rflib.ipc.IPC as IPC\nimport rflib.ipc.MongoIPC as MongoIPC\nfrom rflib.ipc.RFProtocol import *\nfrom rflib.ipc.RFProtocolFactory import RFProtocolFactory\nfrom rflib.defs import *\nfrom rflib.types.Match import *\nfrom rflib.types.Action import *\nfrom rflib.types.Option import *\n\n\nclass RFMonitor(RFProtocolFactory, IPC.IPCMessageProcessor):\n \"\"\"Monitors all the controller instances for failiure\n\n Attributes-\n controllers: A dictionary mapping controller address and\n port to controller role and number of devices \n it is connected to.\n monitors: A dictionary mapping controllers to monitor objects\n responsible for scheduling tests.\n eligible_masters: A dictionary mapping controllers to the maximum\n count of devices they are connected too.\n\n \"\"\"\n def __init__(self, *arg, **kwargs):\n self.controllers = dict()\n self.monitors = dict()\n self.eligible_masters = dict()\n self.controllerLock = threading.Lock()\n self.ipc = MongoIPC.MongoIPCMessageService(MONGO_ADDRESS,\n MONGO_DB_NAME,\n RFMONITOR_ID,\n threading.Thread,\n time.sleep)\n self.ipc.listen(RFMONITOR_RFPROXY_CHANNEL, self, self, False)\n self.log = logging.getLogger(\"rfmonitor\")\n self.log.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(logging.Formatter(logging.BASIC_FORMAT))\n self.log.addHandler(ch)\n self.test_controllers()\n\n def process(self, _from, to, channel, msg):\n \"\"\"Process messages sent by controllers.\n\n Types of messages being handled:\n CONTROLLER_REGISTER -- Register Controller details with RFMonitor.\n\n \"\"\" \n type_ = msg.get_type()\n address = msg.get_ct_addr()\n port = msg.get_ct_port()\n role = msg.get_ct_role()\n if type_ == CONTROLLER_REGISTER:\n self.controllerLock.acquire()\n try:\n if ((address + ':' + str(port)) not in \n self.controllers):\n self.controllers[address + ':' + str(port)] = {\n 'role': role,\n 'count': 1\n }\n self.log.info(\"A %s 
controller at %s:%s is up\",\n role, address, port)\n else:\n self.controllers[msg.get_ct_addr() + ':'\n + str(msg.get_ct_port())]['count'] += 1\n controller_count = self.controllers[address + ':' \n + str(port)]['count']\n\n if not self.eligible_masters:\n self.eligible_masters[address + ':' + str(port)] = \\\n controller_count\n else:\n maximum_controller_count = self.eligible_masters.values()[0]\n if maximum_controller_count < controller_count:\n self.eligible_masters = {}\n self.eligible_masters[address + ':' + str(port)] = \\\n controller_count\n elif maximum_controller_count == controller_count:\n self.eligible_masters[address + ':' + str(port)] = \\\n controller_count\n\n finally:\n self.controllerLock.release()\n\n def test_controllers(self):\n \"\"\"Invoke test on all the controllers\"\"\"\n while True:\n #Extract all the keys from self.controllers first so that \n #the main thread does not block the IPC thread\n self.controllerLock.acquire()\n try:\n controllers = self.controllers.keys()\n finally:\n self.controllerLock.release()\n for controller in controllers:\n host, port = controller.split(':')\n port = int(port)\n if controller in self.monitors:\n monitor = self.monitors[controller]\n #check if scheduled time has passed\n if monitor.timeout < time.time():\n self.test(host, port)\n monitor.schedule_test()\n else:\n continue\n else:\n monitor = Monitor(host, port, callback_time=5000)\n self.monitors[controller] = monitor\n\n def test(self, host, port):\n \"\"\"Test if a controller is up.\n\n Keyword Arguments:\n host -- host ip address at which controller is listening.\n port -- port at which the controller is listening at `host` address.\n\n \"\"\"\n s = socket(AF_INET, SOCK_STREAM)\n s.settimeout(1)\n result = s.connect_ex((host, port))\n\n if result != 0:\n self.log.info(\"Controller listening at %s:%s died\", host, port)\n self.handle_controller_death(host, port)\n s.close()\n\n def handle_controller_death(self, host, port):\n \"\"\"Remove all entries coresponding to a controller and \n elect new master if master controller is dead\n\n Keyword Arguments:\n host -- host ip address at which controller was listening.\n port -- port at which the controller was listening at `host` address.\n\n \"\"\"\n master = False\n self.controllerLock.acquire()\n try:\n if self.controllers[host + ':' + str(port)]['role'] == \"master\":\n master = True\n self.controllers.pop(host + ':' + str(port), None)\n self.monitors.pop(host + ':' + str(port), None)\n self.eligible_masters.pop(host + ':' + str(port), None)\n finally:\n self.controllerLock.release()\n if master:\n self.elect_new_master()\n\n def elect_new_master(self):\n \"\"\"Elect new master controller and inform to rfproxy\"\"\"\n master_key = random.randint(0, len(self.eligible_masters)-1)\n new_master = self.eligible_masters.keys()[master_key]\n self.log.info(\"The new master is %s\", new_master)\n host, port = new_master.split(\":\")\n msg = ElectMaster(ct_addr=host, ct_port=port)\n self.ipc.send(RFMONITOR_RFPROXY_CHANNEL, str(0), msg)\n\n\nclass Monitor(object):\n \"\"\"Monitors each controller individually\"\"\"\n def __init__(self, host, port, callback_time=1000):\n \"\"\"Initialize Monitor\n\n Keyword Arguments:\n host -- host ip address at which controller is listening.\n port -- port at which the controller is listening at `host` address.\n test -- callback function to be called periodically.\n callback_time -- time interval (in milliseconds) at which `test` is run.\n\n \"\"\"\n super(Monitor, self).__init__()\n self.host = 
host\n self.port = port\n self.callback_time = callback_time\n self.timeout = time.time()\n self.schedule_test()\n\n def schedule_test(self):\n \"\"\"Schedule the next test\"\"\"\n current_time = time.time()\n if self.timeout <= current_time:\n self.timeout += self.callback_time/1000.00\n\n\nif __name__ == \"__main__\":\n description = 'RFMonitor monitors RFProxy instances for failiure'\n epilog = 'Report bugs to: https://github.com/routeflow/RouteFlow/issues'\n RFMonitor()\n", "sub_path": "rfserver/rfmonitor.py", "file_name": "rfmonitor.py", "file_ext": "py", "file_size_in_byte": 7702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "rflib.ipc.RFProtocolFactory.RFProtocolFactory", "line_number": 20, "usage_type": "name"}, {"api_name": "rflib.ipc.IPC.IPCMessageProcessor", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rflib.ipc.IPC", "line_number": 20, "usage_type": "name"}, {"api_name": "threading.Lock", "line_number": 37, "usage_type": "call"}, {"api_name": "rflib.ipc.MongoIPC.MongoIPCMessageService", "line_number": 38, "usage_type": "call"}, {"api_name": "rflib.ipc.MongoIPC", "line_number": 38, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 41, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 47, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.BASIC_FORMAT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 162, "usage_type": "call"}, {"api_name": "time.time", "line_number": 186, "usage_type": "call"}, {"api_name": "time.time", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "297481231", "text": "import leather\n\ncolumn_data = [\n ('Hello', 3),\n ('How', 5),\n ('Are', 9),\n ('You', 4)\n]\n\nline_data = [\n ('Hello', 1),\n ('How', 5),\n ('Are', 4),\n ('You', 3)\n]\n\ndot_data = [\n ('Hello', 3),\n ('How', 5),\n ('Are', 9),\n ('You', 4)\n]\n\nchart = leather.Chart('Mixed shapes')\nchart.add_columns(column_data)\nchart.add_lines(line_data)\nchart.add_dots(dot_data)\nchart.to_svg('examples/charts/mixed_shapes.svg')\n", "sub_path": "examples/mixed_shapes.py", "file_name": "mixed_shapes.py", "file_ext": "py", "file_size_in_byte": 433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "leather.Chart", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "10475513", "text": "# -*- coding: utf-8 -*-\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.contrib.redirects.models import Redirect\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core import management\nfrom django.conf import settings\nfrom livesettings import config_value \nfrom cc.pages.models import models\nfrom cc.shop.models.products import Product\nfrom cc.core.models import VisibleContentType\nfrom cc.conf import coresettings\nfrom cc.pages.models import Page\n#\n# the core 
tests\n#\nclass CoreTest(TestCase):\n fixtures = ['core.json', 'pages.json']\n \n def test_default_users(self):\n \"Test the default users are being installed correctly\"\n users = User.objects.all()\n self.assertEquals(users.count(), 3)\n \n \n \n def test_redirects(self):\n \"ensure that redirects are working\"\n if 'cc.pages' not in settings.INSTALLED_APPS:\n return True\n page = Page.objects.get(pk=1)\n old_url = page.get_absolute_url()\n response = self.client.get(old_url)\n self.failUnlessEqual(response.status_code, 200)\n # move the page with a new slug\n page.slug=\"and-form-my-next-trick-i-shall-move-somewhere\"\n page.save()\n new_url = page.get_absolute_url()\n response = self.client.get(old_url)\n self.failUnlessEqual(response.status_code, 301)\n response = self.client.get(new_url)\n self.failUnlessEqual(response.status_code, 200)\n # make sure thre was a redirect planted into the database\n self.failUnlessEqual(Redirect.objects.count(), 1)\n \n \n def test_visbility(self):\n \"test the visibility manager on the models\"\n if 'cc.pages' not in settings.INSTALLED_APPS:\n return True\n all_pages = Page.objects.all()\n visible_pages = Page.objects.visible()\n # all should be visible at this stage\n self.failUnlessEqual(all_pages.count(), visible_pages.count())\n # now make one invisible\n invisible_page = all_pages[1]\n invisible_page.visible = False\n invisible_page.save()\n self.failIfEqual(all_pages.count(), visible_pages.count())\n # make the all invisible\n for obj in visible_pages:\n obj.visible = False\n obj.save()\n # get them again\n visible_pages = Page.objects.visible()\n self.failUnlessEqual(visible_pages.count(),0)\n \n\n ", "sub_path": "cc/core/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 2557, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.test.TestCase", "line_number": 19, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "cc.pages.models.Page.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "cc.pages.models.Page.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cc.pages.models.Page", "line_number": 33, "usage_type": "name"}, {"api_name": "django.contrib.redirects.models.Redirect.objects.count", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.redirects.models.Redirect.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.contrib.redirects.models.Redirect", "line_number": 46, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 51, "usage_type": "name"}, {"api_name": "cc.pages.models.Page.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "cc.pages.models.Page.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cc.pages.models.Page", "line_number": 53, "usage_type": "name"}, {"api_name": "cc.pages.models.Page.objects.visible", "line_number": 54, "usage_type": 
"call"}, {"api_name": "cc.pages.models.Page.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cc.pages.models.Page", "line_number": 54, "usage_type": "name"}, {"api_name": "cc.pages.models.Page.objects.visible", "line_number": 67, "usage_type": "call"}, {"api_name": "cc.pages.models.Page.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cc.pages.models.Page", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "194096019", "text": "#important imports\nimport torch\nfrom torchvision import datasets,transforms\n#data loading\ntr = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=tr )\ntrainLoader = torch.utils.data.DataLoader(trainset,batch_size = 64,shuffle=True)\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/',download=True, train = False, transform=tr)\ntestLoader = torch.utils.data.DataLoader(testset,batch_size = 64,shuffle = True)\n\nfrom torch import nn\nimport torch.nn.functional as F\nclass model(nn.Module):\n def __init__(self,inputSize,outputSize,hiddenLayers,dropOut):\n super().__init__()\n self.hiddenlayer= nn.ModuleList([nn.Linear(inputSize,hiddenLayers[0])])\n layers = zip(hiddenLayers[:-1],hiddenLayers[1:])\n self.hiddenlayer.extend([nn.Linear(h1,h2) for h1,h2 in layers])\n self.output = nn.Linear(hiddenLayers[-1],outputSize)\n self.dropout = nn.Dropout(p=dropOut)\n def forward(self,x):\n x=x.view(x.shape[0],-1)\n for l in self.hiddenlayer:\n x = F.relu(l(x))\n x = self.dropout(x)\n x = (F.log_softmax(self.output(x),dim=1))\n return x\n\nneuNet = model(784,10,[256,128,64],0.2)\nprint(neuNet)\n\nfrom torch import optim\noptimizer = optim.Adam(neuNet.parameters(),lr = 0.003)\n\ncriterion = nn.NLLLoss()\n\nfor e in range(1):\n for images,labels in trainLoader:\n optimizer.zero_grad()\n loss = criterion(neuNet(images),labels)\n loss.backward()\n optimizer.step()\n print(\"epoch {0}/{1} completed\".format(e+1,5))\nprint(\"end of the training\")\n\nprint(\"the model parameters {0}\".format(neuNet.state_dict().keys()))\n\ntorch.save(neuNet.state_dict(),'checkpoint.pth')\n\nstate_dict = torch.load('checkpoint.pth')\nmodel = neuNet.load_state_dict(state_dict)\n\nprint(model)\nprint(neuNet)\n", "sub_path": "savingModel.py", "file_name": "savingModel.py", "file_ext": "py", "file_size_in_byte": 1924, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 5, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 5, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 5, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 5, "usage_type": "call"}, {"api_name": "torchvision.datasets.FashionMNIST", "line_number": 6, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.FashionMNIST", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 9, "usage_type": "attribute"}, 
{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "489850879", "text": "from django.core.management.base import BaseCommand\nfrom eveuniverse.models import EveType\nfrom eveuniverse.tasks import update_or_create_eve_object\n\nfrom allianceauth.services.hooks import get_extension_logger\nfrom app_utils.logging import LoggerAddTag\n\nfrom ... import __title__, constants\nfrom ...models import EveOreType\nfrom . import get_input\n\nlogger = LoggerAddTag(get_extension_logger(__name__), __title__)\n\n\nclass Command(BaseCommand):\n help = \"Preloads data like ore types from ESI.\"\n\n def handle(self, *args, **options):\n self.stdout.write(\"Loading all ore types from ESI. This can take a while.\")\n ore_types_count = EveOreType.objects.count()\n self.stdout.write(\n f\"You currently have {ore_types_count} ore types in your database.\"\n )\n self.stdout.write()\n user_input = get_input(\"Are you sure you want to proceed? 
(y/N)?\")\n\n if user_input.lower() == \"y\":\n self.stdout.write(\"Tasks for loading ore types have been started.\")\n update_or_create_eve_object.delay(\n model_name=\"EveCategory\",\n id=constants.EVE_CATEGORY_ID_ASTEROID,\n include_children=True,\n enabled_sections=[\n EveType.Section.DOGMAS,\n EveType.Section.TYPE_MATERIALS,\n ],\n )\n self.stdout.write(self.style.SUCCESS(\"Done\"))\n else:\n self.stdout.write(self.style.WARNING(\"Aborted\"))\n", "sub_path": "moonmining/management/commands/moonmining_load_eve.py", "file_name": "moonmining_load_eve.py", "file_ext": "py", "file_size_in_byte": 1494, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "app_utils.logging.LoggerAddTag", "line_number": 12, "usage_type": "call"}, {"api_name": "allianceauth.services.hooks.get_extension_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 15, "usage_type": "name"}, {"api_name": "models.EveOreType.objects.count", "line_number": 20, "usage_type": "call"}, {"api_name": "models.EveOreType.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.EveOreType", "line_number": 20, "usage_type": "name"}, {"api_name": "eveuniverse.tasks.update_or_create_eve_object.delay", "line_number": 29, "usage_type": "call"}, {"api_name": "eveuniverse.tasks.update_or_create_eve_object", "line_number": 29, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveType.Section", "line_number": 34, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveType", "line_number": 34, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveType.Section", "line_number": 35, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveType", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "457687116", "text": "# load libraries\r\nimport trimesh\r\nimport torch\r\nimport json\r\nimport os\r\nfrom tqdm import tqdm as tqdm\r\nimport matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\nfrom types import SimpleNamespace\r\nfrom utils import render\r\nfrom shapenet_dataloader import ShapeNetMesh, FixedPointsCachedDataset\r\nfrom shapeflow.layers.deformation_layer import NeuralFlowDeformer\r\nfrom shapenet_embedding import LatentEmbedder\r\nimport shapeflow.utils.train_utils as utils\r\nfrom torch.utils.data import DataLoader\r\nimport pickle\r\nimport time\r\nfrom utils import render\r\n\r\n\r\ndef export_obj_cpu(filename, pc, colors=None, random_trans=[0,0,0]):\r\n # random_trans = random.uniform(1, 2)\r\n with open('%s'%(filename), 'w') as f:\r\n for i,p in enumerate(pc):\r\n x,y,z = p\r\n x += random_trans[0]\r\n y += random_trans[1]\r\n z += random_trans[2]\r\n r,g,b = [1,0,0]\r\n if colors is not None:\r\n r,g,b = colors[i]\r\n f.write('v {:.4f} {:.4f} {:.4f} \\\r\n {:.4f} {:.4f} {:.4f} \\n'.format(x, y, z, r, g, b))\r\n\r\n\r\n# choice of checkpoint to load\r\nrun_dir = \"/media/andy/Elements/Shapeflow_data/runs/pretrained_ckpt\"\r\ncheckpoint = \"checkpoint_latest.pth.tar_deepdeform_100.pth.tar\"\r\ndevice = torch.device(\"cuda\")\r\n\r\n\r\n# load training args\r\nargs = SimpleNamespace(**json.load(open(os.path.join(run_dir, 'params.json'), 'r')))\r\n\r\n# setup model\r\ndeformer = NeuralFlowDeformer(latent_size=args.lat_dims, f_width=args.deformer_nf, s_nlayers=2, \r\n s_width=5, method=args.solver, nonlinearity=args.nonlin, arch='imnet',\r\n adjoint=args.adjoint, rtol=args.rtol, atol=args.atol, via_hub=True,\r\n no_sign_net=(not args.sign_net), 
symm_dim=(2 if args.symm else None))\r\nlat_params = torch.nn.Parameter(torch.randn(4746, args.lat_dims)*1e-1, requires_grad=True)\r\ndeformer.add_lat_params(lat_params)\r\ndeformer.to(device)\r\n\r\n# load checkpoint\r\nresume_dict = torch.load(os.path.join(run_dir, checkpoint))\r\nstart_ep = resume_dict[\"epoch\"]\r\nglobal_step = resume_dict[\"global_step\"]\r\ntracked_stats = resume_dict[\"tracked_stats\"]\r\ndeformer.load_state_dict(resume_dict[\"deformer_state_dict\"])\r\nsample_points = 300\r\n# dataloader\r\ndata_root = args.data_root.replace('shapenet_watertight', 'shapenet_simplified')\r\nmesh_dataset = ShapeNetMesh(data_root=data_root, split=\"train\", category='chair', \r\n normals=False)\r\npoint_dataset = FixedPointsCachedDataset(\"/media/andy/Elements/Shapeflow_data/data/shapenet_pointcloud/train/03001627.pkl\", npts=sample_points)\r\n\r\n\r\n# take a sample point cloud from a shape\r\np = pickle.load(open(\"/media/andy/Elements/Shapeflow_data/data/shapenet_pointcloud/val/03001627.pkl\", \"rb\"))\r\nname = list(p.keys())[2]\r\ninput_points = p[name]\r\nmesh_gt = trimesh.load(\"/media/andy/Elements/Shapeflow_data/data/shapenet_simplified/val/03001627/bcc73b8ff332b4df3d25ee35360a1f4d/model.ply\")\r\n\r\n# view point\r\neye_1 = [.8, .4, .5]\r\neye_2 = [.3, .4, .9]\r\ncenter = [0, 0, 0]\r\nup = [0, 1, 0]\r\n\r\ndef rgb2rgba(rgb):\r\n \"\"\"remove white background.\"\"\"\r\n rgb = rgb.copy() / 255.\r\n alpha = np.linalg.norm(1-rgb, axis=-1) != 0\r\n alpha = alpha.astype(np.float32)[..., None]\r\n rgba = np.concatenate([rgb, alpha], axis=-1)\r\n return rgba\r\n\r\n# subsample points\r\npoint_subsamp = mesh_gt.sample(sample_points)\r\nexport_obj_cpu('inputs_fullpc.obj',mesh_gt.sample(2048),random_trans=[-3,0,0])\r\n\r\n# img_mesh, _, _, _ = render.render_trimesh(mesh_gt, eye_1, center, up, light_intensity=3)\r\n# img_pt_sub, _, _, _ = render.render_trimesh(trimesh.PointCloud(point_subsamp), \r\n# eye_1, center, up, light_intensity=3, point_size=8)\r\n# # virtual scan (view 2) and unproject depth\r\n# _, scan_depth, world2cam, cam2img = render.render_trimesh(mesh_gt, eye_2, center, up, res=(112, 112))\r\n# points_unproj = render.unproject_depth_img(scan_depth, cam2img, world2cam)\r\n# img_pt_dep, _, _, _ = render.render_trimesh(trimesh.PointCloud(points_unproj), \r\n# eye_1, center, up, light_intensity=3, point_size=5)\r\n\r\n# size_per_fig = 8\r\n# fig, axes = plt.subplots(figsize=(size_per_fig*4, size_per_fig), ncols=4)\r\n# axes[0].imshow(rgb2rgba(img_mesh))\r\n# axes[0].axis('off')\r\n# # axes[0].set_title(\"Ground Truth Mesh\")\r\n\r\n# axes[1].imshow(rgb2rgba(img_pt_sub))\r\n# axes[1].axis('off')\r\n# # axes[1].set_title(\"Sparse Point Samples\")\r\n\r\n# d = scan_depth.copy()\r\n# d[scan_depth==0] = np.nan\r\n# axes[2].imshow(d, cmap='coolwarm')\r\n# axes[2].axis('off')\r\n# # axes[2].set_title(\"Depth Scan\")\r\n\r\n# axes[3].imshow(rgb2rgba(img_pt_dep))\r\n# axes[3].axis('off')\r\n# # axes[3].set_title(\"Scanned Points (view 1)\")\r\n\r\n# plt.show()\r\n\r\nembedder = LatentEmbedder(point_dataset, mesh_dataset, deformer, topk=5)\r\n\r\n# inputs = input_points[:2048] \r\n# inputs = points_unproj\r\ninputs = mesh_gt.sample(sample_points) + np.random.randn(sample_points, 3) * 0.005\r\nprint(inputs.shape)\r\nexport_obj_cpu('inputs_subsampled.obj',inputs,random_trans=[-1.5,0,0])\r\n\r\ninput_pts = torch.tensor(inputs)[None].to(device)\r\nlat_codes_pre, lat_codes_post = embedder.embed(input_pts, matching=\"two_way\", verbose=True, lr=1e-2, embedding_niter=30, finetune_niter=30, 
bs=8, seed=1)\r\n\r\n# retrieve, save results\r\ndeformed_meshes, orig_meshes, dist = embedder.retrieve(lat_codes_post, tar_pts=inputs, matching=\"two_way\")\r\n\r\nasort = np.argsort(dist)\r\ndist = [dist[i] for i in asort]\r\ndeformed_meshes_ = [deformed_meshes[i] for i in asort]\r\norig_meshes_ = [orig_meshes[i] for i in asort]\r\n\r\n# pick_idx = np.argmin(dist)\r\nfor pick_idx in range(5):\r\n v, f = deformed_meshes_[pick_idx]\r\n mesh = trimesh.Trimesh(v, f)\r\n vo, fo = orig_meshes_[pick_idx]\r\n mesh_o = trimesh.Trimesh(vo, fo)\r\n # img_orig, _, _, _ = render.render_trimesh(mesh_o.copy(), eye_1, center, up, res=(512,512), light_intensity=8)\r\n colors = np.zeros_like(inputs[:sample_points]); colors[:, 1] = 1.;\r\n export_obj_cpu(\"latent-opt_deformed_%d.obj\"%(pick_idx), v,random_trans=[pick_idx*1.5,0,0])\r\n export_obj_cpu(\"latent-opt_orig_%d.obj\"%(pick_idx), vo,random_trans=[pick_idx*1.5,2,0])\r\n # img_def, _, _, _ = render.render_trimesh([mesh.copy(),\r\n # ],#trimesh.PointCloud(inputs[:512], colors=colors)], \r\n # eye_1, center, up, res=(512,512), light_intensity=8,\r\n # point_size=5)\r\n # img_gt, _, _, _ = render.render_trimesh(mesh_gt.copy(), eye_1, center, up, res=(512,512), light_intensity=8)\r\n # fig, axes = plt.subplots(ncols=3, nrows=1, figsize=(24, 8))\r\n # best = \" (best)\" if pick_idx == np.argmin(dist) else \"\"\r\n # axes[0].imshow(rgb2rgba(img_orig))\r\n # axes[0].axis('off')\r\n # axes[0].set_title(\"Retrieved Shape\"+best)\r\n # axes[1].imshow(rgb2rgba(img_def))\r\n # axes[1].axis('off')\r\n # axes[1].set_title(\"Deformed Shape\"+best)\r\n # axes[2].imshow(rgb2rgba(img_gt))\r\n # axes[2].axis('off')\r\n # axes[2].set_title(\"GT Shape\"+best)\r\n # plt.axis('off')\r\n # plt.show()\r\n\r\nlat_codes_ = torch.tensor(lat_codes_post).to(embedder.device)\r\nlat_src = torch.zeros_like(lat_codes_)\r\nlat_src_tar = torch.stack([lat_src, lat_codes_], dim=1)\r\n_ = embedder.deformer.net.update_latents(lat_src_tar)\r\n\r\n# create query grid\r\nr0, r1, r2 = 6, 11, 6\r\nb = mesh.bounding_box.bounds\r\ns = 0.05\r\nxyz_grid = torch.stack(torch.meshgrid(torch.linspace(b[0,0]-s, b[1,0]+s, r0),\r\n torch.linspace(b[0,1]-s, b[1,1]+s, r1),\r\n torch.linspace(b[0,2]-s, b[1,2]+s, r2)), dim=-1)\r\nxyz_pt = xyz_grid.reshape(1, -1, 3).to(embedder.device)\r\nvel = embedder.deformer.net(torch.tensor(0.5), xyz_pt)\r\nvel_np = vel.detach().cpu().numpy().reshape(r0, r1, r2, 3)\r\nxyz_np = xyz_pt.detach().cpu().numpy().reshape(r0, r1, r2, 3)\r\n\r\n# from mpl_toolkits.mplot3d import Axes3D\r\n# from matplotlib import cm\r\n# import matplotlib.pyplot as plt\r\n# import numpy as np\r\n\r\n# def set_axes_equal(ax):\r\n# '''Make axes of 3D plot have equal scale so that spheres appear as spheres,\r\n# cubes as cubes, etc.. 
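The velocity-field probe in latent_optimization.py above (a padded torch.meshgrid over the mesh's bounding box, flattened into one batched network call) is a reusable pattern. A self-contained sketch follows; the helper name and defaults are illustrative, and indexing='ij' is spelled out where the record relies on the old implicit default.

import torch

def make_query_grid(bounds, res=(6, 11, 6), pad=0.05):
    # bounds: (2, 3) array of min/max corners, e.g. mesh.bounding_box.bounds
    lo, hi = bounds[0], bounds[1]
    axes = [torch.linspace(float(lo[i]) - pad, float(hi[i]) + pad, res[i]) for i in range(3)]
    grid = torch.stack(torch.meshgrid(*axes, indexing='ij'), dim=-1)  # (r0, r1, r2, 3)
    return grid.reshape(1, -1, 3)  # one batch of N query points

The flattened (1, N, 3) tensor can be fed to the deformer in a single call and the result reshaped back to (r0, r1, r2, 3), exactly as the record does with xyz_np and vel_np.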
This is one possible solution to Matplotlib's\r\n# ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\r\n\r\n# Input\r\n# ax: a matplotlib axis, e.g., as output from plt.gca().\r\n# '''\r\n\r\n# x_limits = [-.2, .2] # ax.get_xlim3d()\r\n# y_limits = [-.2, .2] # ax.get_ylim3d()\r\n# z_limits = [-.5, .5] # ax.get_zlim3d()\r\n\r\n# x_range = abs(x_limits[1] - x_limits[0])\r\n# x_middle = np.mean(x_limits)\r\n# y_range = abs(y_limits[1] - y_limits[0])\r\n# y_middle = np.mean(y_limits)\r\n# z_range = abs(z_limits[1] - z_limits[0])\r\n# z_middle = np.mean(z_limits)\r\n\r\n# # The plot bounding box is a sphere in the sense of the infinity\r\n# # norm, hence I call half the max range the plot radius.\r\n# plot_radius = 0.5*max([x_range, y_range, z_range])\r\n\r\n# ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])\r\n# ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])\r\n# ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])\r\n\r\n# fig = plt.figure(figsize=(10, 10))\r\n# ax = fig.gca(projection='3d')\r\n# ax.view_init(elev=30, azim=-30)\r\n\r\n# v = mesh.copy().vertices\r\n# xyz = xyz_np.reshape(-1, 3)\r\n# uvw = vel_np.reshape(-1, 3)\r\n\r\n# ax.plot_trisurf(v[:, 0], v[:, 2], v[:, 1], triangles=mesh.faces, color=np.ones(3), linewidth=0.2)\r\n# ax.quiver(xyz[:, 0], xyz[:, 2], xyz[:, 1],\r\n# uvw[:, 0], uvw[:, 2], uvw[:, 1],\r\n# length=0.05, color=\"black\", normalize=True)\r\n\r\n# ax.set_axis_off()\r\n# set_axes_equal(ax)\r\n# # plt.savefig(\"flow.pdf\")\r\n\r\n", "sub_path": "ShapeFlow/latent_optimization.py", "file_name": "latent_optimization.py", "file_ext": "py", "file_size_in_byte": 9719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.device", "line_number": 40, "usage_type": "call"}, {"api_name": "types.SimpleNamespace", "line_number": 44, "usage_type": "call"}, {"api_name": "json.load", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "shapeflow.layers.deformation_layer.NeuralFlowDeformer", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "shapenet_dataloader.ShapeNetMesh", "line_number": 64, "usage_type": "call"}, {"api_name": "shapenet_dataloader.FixedPointsCachedDataset", "line_number": 66, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 70, "usage_type": "call"}, {"api_name": "trimesh.load", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 86, "usage_type": "call"}, {"api_name": "shapenet_embedding.LatentEmbedder", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": 
"attribute"}, {"api_name": "torch.tensor", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 138, "usage_type": "call"}, {"api_name": "trimesh.Trimesh", "line_number": 146, "usage_type": "call"}, {"api_name": "trimesh.Trimesh", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.meshgrid", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "649442658", "text": "\"\"\"Low-level MediaFire API Client\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport requests\nimport logging\n\nimport six\n\nfrom six.moves.urllib.parse import urlencode\n\nfrom requests_toolbelt import MultipartEncoder\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.exceptions import RequestException\n\nAPI_BASE = 'https://www.mediafire.com'\nAPI_VER = '1.3'\n\nUPLOAD_MIMETYPE = 'application/octet-stream'\nFORM_MIMETYPE = 'application/x-www-form-urlencoded'\n\n# Retries on connection errors/timeouts\nAPI_ERROR_MAX_RETRIES = 5\n\nlogger = logging.getLogger(__name__)\n\n# Each API call may have lots of parameters, so disable warning\n# pylint: disable=too-many-arguments\n\n\nclass QueryParams(dict):\n \"\"\"dict tailored for MediaFire requests.\n\n * won't store None values\n * boolean values are converted to 'yes'/'no'\n \"\"\"\n\n def __init__(self, defaults=None):\n super(QueryParams, self).__init__()\n if defaults is not None:\n for key, value in defaults.items():\n self.__setitem__(key, value)\n\n def __setitem__(self, key, value):\n \"\"\"Set dict item, handling booleans\"\"\"\n if value is not None:\n if value is True:\n value = 'yes'\n elif value is False:\n value = 'no'\n dict.__setitem__(self, key, value)\n\n\nclass MediaFireError(Exception):\n \"\"\"Base class for MediaFire-related errors\"\"\"\n pass\n\n\nclass MediaFireApiError(MediaFireError):\n \"\"\"Base class for API errors\"\"\"\n def __init__(self, message, code=None):\n \"\"\"Initialize exception\"\"\"\n self.code = code\n self.message = message\n super(MediaFireApiError, self).__init__(message, code)\n\n def __str__(self):\n \"\"\"Stringify exception\"\"\"\n return \"{}: {}\".format(self.code, self.message)\n\n\nclass MediaFireConnectionError(MediaFireError):\n \"\"\"Low level connection errors\"\"\"\n pass\n\n\nclass MediaFireApi(object): # pylint: disable=too-many-public-methods\n \"\"\"Low-level HTTP API Client\"\"\"\n\n def __init__(self):\n \"\"\"Initialize MediaFire Client\"\"\"\n\n self.http = requests.Session()\n self.http.mount('https://',\n HTTPAdapter(max_retries=API_ERROR_MAX_RETRIES))\n\n self._session = None\n self._action_tokens = {}\n\n @staticmethod\n def _build_uri(action):\n \"\"\"Build endpoint URI from action\"\"\"\n return '/api/' + API_VER + '/' + action + '.php'\n\n def _build_query(self, uri, params=None, action_token_type=None):\n \"\"\"Prepare query string\"\"\"\n\n if params is None:\n params = 
QueryParams()\n\n params['response_format'] = 'json'\n\n session_token = None\n\n if action_token_type in self._action_tokens:\n # Favor action token\n using_action_token = True\n session_token = self._action_tokens[action_token_type]\n else:\n using_action_token = False\n if self._session:\n session_token = self._session['session_token']\n\n if session_token:\n params['session_token'] = session_token\n\n # make order of parameters predictable for testing\n keys = list(params.keys())\n keys.sort()\n\n query = urlencode([tuple([key, params[key]]) for key in keys])\n\n if not using_action_token and self._session:\n secret_key_mod = int(self._session['secret_key']) % 256\n\n signature_base = (str(secret_key_mod) +\n self._session['time'] +\n uri + '?' + query).encode('ascii')\n\n query += '&signature=' + hashlib.md5(signature_base).hexdigest()\n\n return query\n\n def request(self, action, params=None, action_token_type=None,\n upload_info=None, headers=None):\n \"\"\"Perform request to MediaFire API\n\n action -- \"category/name\" of method to call\n params -- dict of parameters or query string\n action_token_type -- action token to use: None, \"upload\", \"image\"\n upload_info -- in case of upload, dict of \"fd\" and \"filename\"\n headers -- additional headers to send (used for upload)\n\n session_token and signature generation/update is handled automatically\n \"\"\"\n\n uri = self._build_uri(action)\n\n if isinstance(params, six.text_type):\n query = params\n else:\n query = self._build_query(uri, params, action_token_type)\n\n if headers is None:\n headers = {}\n\n if upload_info is None:\n # Use request body for query\n data = query\n headers['Content-Type'] = FORM_MIMETYPE\n else:\n # Use query string for query since payload is file\n uri += '?' 
+ query\n\n if \"filename\" in upload_info:\n data = MultipartEncoder(\n fields={'file': (\n upload_info[\"filename\"],\n upload_info[\"fd\"],\n UPLOAD_MIMETYPE\n )}\n )\n headers[\"Content-Type\"] = data.content_type\n else:\n data = upload_info[\"fd\"]\n headers[\"Content-Type\"] = UPLOAD_MIMETYPE\n\n logger.debug(\"uri=%s query=%s\",\n uri, query if not upload_info else None)\n\n try:\n # bytes from now on\n url = (API_BASE + uri).encode('utf-8')\n if isinstance(data, six.text_type):\n # request's data is bytes, dict, or filehandle\n data = data.encode('utf-8')\n\n response = self.http.post(url, data=data,\n headers=headers, stream=True)\n except RequestException as ex:\n logger.exception(\"HTTP request failed\")\n raise MediaFireConnectionError(\n \"RequestException: {}\".format(ex))\n\n return self._process_response(response)\n\n def _process_response(self, response):\n \"\"\"Parse response\"\"\"\n\n forward_raw = False\n content_type = response.headers['Content-Type']\n if content_type != 'application/json':\n logger.debug(\"headers: %s\", response.headers)\n # API BUG: text/xml content-type with json payload\n # http://forum.mediafiredev.com/showthread.php?136\n if content_type == 'text/xml':\n # we never request xml, so check it quacks like JSON\n if not response.text.lstrip().startswith('{'):\n forward_raw = True\n else:\n # _process_response can't deal with non-json,\n # return response as is\n forward_raw = True\n\n if forward_raw:\n response.raise_for_status()\n return response\n\n logger.debug(\"response: %s\", response.text)\n\n # if we are here, then most likely have json\n try:\n response_node = response.json()['response']\n except ValueError:\n # promised JSON but failed\n raise MediaFireApiError(\"JSON decode failure\")\n\n if response_node.get('new_key', 'no') == 'yes':\n self._regenerate_secret_key()\n\n # check for errors\n if response_node['result'] != 'Success':\n raise MediaFireApiError(response_node['message'],\n response_node['error'])\n\n return response_node\n\n def _regenerate_secret_key(self):\n \"\"\"Regenerate secret key\n\n http://www.mediafire.com/developers/core_api/1.3/getting_started/#call_signature\n \"\"\"\n # Don't regenerate the key if we have none\n if self._session and 'secret_key' in self._session:\n self._session['secret_key'] = (\n int(self._session['secret_key']) * 16807) % 2147483647\n\n @property\n def session(self):\n \"\"\"Returns current session information\"\"\"\n return self._session\n\n @session.setter\n def session(self, value):\n \"\"\"Set session token\n\n value -- dict returned by user/get_session_token\"\"\"\n\n # unset session token\n if value is None:\n self._session = None\n return\n\n if not isinstance(value, dict):\n raise ValueError(\"session info is required\")\n\n session_parsed = {}\n\n for key in [\"session_token\", \"time\", \"secret_key\"]:\n if key not in value:\n raise ValueError(\"Missing parameter: {}\".format(key))\n session_parsed[key] = value[key]\n\n for key in [\"ekey\", \"pkey\"]:\n # nice to have, but not mandatory\n if key in value:\n session_parsed[key] = value[key]\n\n self._session = session_parsed\n\n @session.deleter\n def session(self):\n \"\"\"Unset session\"\"\"\n self._session = None\n\n def set_action_token(self, type_=None, action_token=None):\n \"\"\"Set action tokens\n\n type_ -- either \"upload\" or \"image\"\n action_token -- string obtained from user/get_action_token,\n set None to remove the token\n \"\"\"\n if action_token is None:\n del self._action_tokens[type_]\n else:\n 
self._action_tokens[type_] = action_token\n\n def user_fetch_tos(self):\n \"\"\"user/fetch_tos\n\n http://www.mediafire.com/developers/core_api/1.3/user/#fetch_tos\n \"\"\"\n\n return self.request(\"user/fetch_tos\")\n\n def user_accept_tos(self, acceptance_token):\n \"\"\"user/accept_tos\n\n http://www.mediafire.com/developers/core_api/1.3/user/#user_top\n \"\"\"\n\n return self.request(\"user/accept_tos\", QueryParams({\n \"acceptance_token\": acceptance_token\n }))\n\n def user_get_session_token(self, app_id=None, email=None, password=None,\n ekey=None, fb_access_token=None,\n tw_oauth_token=None,\n tw_oauth_token_secret=None, api_key=None):\n \"\"\"user/get_session_token\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_session_token\n \"\"\"\n\n if app_id is None:\n raise ValueError(\"app_id must be defined\")\n\n params = QueryParams({\n 'application_id': str(app_id),\n 'token_version': 2,\n 'response_format': 'json'\n })\n\n if fb_access_token:\n params['fb_access_token'] = fb_access_token\n signature_keys = ['fb_access_token']\n elif tw_oauth_token and tw_oauth_token_secret:\n params['tw_oauth_token'] = tw_oauth_token\n params['tw_oauth_token_secret'] = tw_oauth_token_secret\n signature_keys = ['tw_oauth_token',\n 'tw_oauth_token_secret']\n elif (email or ekey) and password:\n signature_keys = []\n if email:\n signature_keys.append('email')\n params['email'] = email\n\n if ekey:\n signature_keys.append('ekey')\n params['ekey'] = ekey\n\n params['password'] = password\n signature_keys.append('password')\n else:\n raise ValueError(\"Credentials not provided\")\n\n signature_keys.append('application_id')\n\n signature = hashlib.sha1()\n for key in signature_keys:\n signature.update(str(params[key]).encode('ascii'))\n\n # Note: If the app uses a callback URL to provide its API key,\n # or if it does not have the \"Require Secret Key\" option checked,\n # then the API key may be omitted from the signature\n if api_key:\n signature.update(api_key.encode('ascii'))\n\n query = urlencode(params)\n query += '&signature=' + signature.hexdigest()\n\n return self.request('user/get_session_token', params=query)\n\n def user_renew_session_token(self):\n \"\"\"user/renew_session_token:\n\n http://www.mediafire.com/developers/core_api/1.3/user/#renew_session_token\n \"\"\"\n return self.request('user/renew_session_token')\n\n def user_get_action_token(self, type_=None, lifespan=None):\n \"\"\"user/get_action_token\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_action_token\n \"\"\"\n return self.request('user/get_action_token', QueryParams({\n 'type': type_,\n 'lifespan': lifespan\n }))\n\n def user_destroy_action_token(self, action_token=None):\n \"\"\"user/destroy_action_token\n\n http://www.mediafire.com/developers/core_api/1.3/user/#destroy_action_token\n \"\"\"\n return self.request('user/destroy_action_token', QueryParams({\n 'action_token': action_token\n }))\n\n def user_get_avatar(self):\n \"\"\"user/get_avatar\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_avatar\n \"\"\"\n return self.request(\"user/get_avatar\")\n\n def user_get_info(self):\n \"\"\"user/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_info\n \"\"\"\n return self.request(\"user/get_info\")\n\n def user_get_limits(self):\n \"\"\"user/get_limits\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_limits\n \"\"\"\n return self.request(\"user/get_limits\")\n\n def user_get_settings(self):\n \"\"\"user/get_settings\n\n 
http://www.mediafire.com/developers/core_api/1.3/user/#get_settings\n \"\"\"\n return self.request(\"user/get_settings\")\n\n def user_set_avatar(self, action=None, quick_key=None, url=None):\n \"\"\"user/set_avatar\n\n http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar\n \"\"\"\n return self.request(\"user/set_avatar\", QueryParams({\n \"action\": action,\n \"quick_key\": quick_key,\n \"url\": url\n }))\n\n def user_update(self, display_name=None, first_name=None, last_name=None,\n email=None, password=None, current_password=None,\n birth_date=None, gender=None, website=None, subdomain=None,\n location=None, newsletter=None, primary_usage=None,\n timezone=None):\n \"\"\"\n user/update\n\n http://www.mediafire.com/developers/core_api/1.3/user/#update\n \"\"\"\n return self.request(\"user/update\", QueryParams({\n \"display_name\": display_name,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"email\": email,\n \"password\": password,\n \"current_password\": current_password,\n \"birth_date\": birth_date,\n \"gender\": gender,\n \"website\": website,\n \"subdomain\": subdomain,\n \"location\": location,\n \"newsletter\": newsletter,\n \"primary_usage\": primary_usage,\n \"timezone\": timezone\n }))\n\n def folder_get_info(self, folder_key=None, device_id=None, details=None):\n \"\"\"folder/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#get_info\n \"\"\"\n return self.request('folder/get_info', QueryParams({\n 'folder_key': folder_key,\n 'device_id': device_id,\n 'details': details\n }))\n\n def folder_get_content(self, folder_key=None, content_type=None,\n filter_=None, device_id=None, order_by=None,\n order_direction=None, chunk=None, details=None,\n chunk_size=None):\n \"\"\"folder/get_content\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#get_content\n \"\"\"\n return self.request('folder/get_content', QueryParams({\n 'folder_key': folder_key,\n 'content_type': content_type,\n 'filter': filter_,\n 'device_id': device_id,\n 'order_by': order_by,\n 'order_direction': order_direction,\n 'chunk': chunk,\n 'details': details,\n 'chunk_size': chunk_size\n }))\n\n def folder_update(self, folder_key, foldername=None, description=None,\n privacy=None, privacy_recursive=None, mtime=None):\n \"\"\"folder/update\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#update\n \"\"\"\n return self.request('folder/update', QueryParams({\n 'folder_key': folder_key,\n 'foldername': foldername,\n 'description': description,\n 'privacy': privacy,\n 'privacy_recursive': privacy_recursive,\n 'mtime': mtime\n }))\n\n def folder_create(self, foldername=None, parent_key=None,\n action_on_duplicate=None, mtime=None):\n \"\"\"folder/create\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#create\n \"\"\"\n return self.request('folder/create', QueryParams({\n 'foldername': foldername,\n 'parent_key': parent_key,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime\n }))\n\n def folder_delete(self, folder_key):\n \"\"\"folder/delete\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#delete\n \"\"\"\n return self.request('folder/delete', QueryParams({\n 'folder_key': folder_key\n }))\n\n def folder_purge(self, folder_key):\n \"\"\"folder/purge\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#purge\n \"\"\"\n return self.request('folder/purge', QueryParams({\n 'folder_key': folder_key\n }))\n\n def folder_move(self, folder_key_src, folder_key_dst=None):\n \"\"\"folder/move\n\n 
http://www.mediafire.com/developers/core_api/1.3/folder/#move\n \"\"\"\n return self.request('folder/move', QueryParams({\n 'folder_key_src': folder_key_src,\n 'folder_key_dst': folder_key_dst\n }))\n\n def upload_check(self, filename=None, folder_key=None, filedrop_key=None,\n size=None, hash_=None, path=None, resumable=None):\n \"\"\"upload/check\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#check\n \"\"\"\n return self.request('upload/check', QueryParams({\n 'filename': filename,\n 'folder_key': folder_key,\n 'filedrop_key': filedrop_key,\n 'size': size,\n 'hash': hash_,\n 'path': path,\n 'resumable': resumable\n }))\n\n def upload_simple(self, fd, filename, folder_key=None, path=None,\n filedrop_key=None, action_on_duplicate=None,\n mtime=None, file_size=None, file_hash=None):\n \"\"\"upload/simple\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#simple\n \"\"\"\n action = 'upload/simple'\n\n params = QueryParams({\n 'folder_key': folder_key,\n 'path': path,\n 'filedrop_key': filedrop_key,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime\n })\n\n headers = QueryParams({\n 'X-Filesize': str(file_size),\n 'X-Filehash': file_hash,\n 'X-Filename': filename.encode('utf-8')\n })\n\n upload_info = {\n \"fd\": fd,\n }\n\n return self.request(action, params, action_token_type=\"upload\",\n upload_info=upload_info, headers=headers)\n\n # pylint: disable=too-many-locals\n # The API requires us to provide all of that\n def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,\n unit_size, quick_key=None, action_on_duplicate=None,\n mtime=None, version_control=None, folder_key=None,\n filedrop_key=None, path=None, previous_hash=None):\n \"\"\"upload/resumable\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#resumable\n \"\"\"\n action = 'upload/resumable'\n\n headers = {\n 'x-filesize': str(filesize),\n 'x-filehash': filehash,\n 'x-unit-hash': unit_hash,\n 'x-unit-id': str(unit_id),\n 'x-unit-size': str(unit_size)\n }\n\n params = QueryParams({\n 'quick_key': quick_key,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime,\n 'version_control': version_control,\n 'folder_key': folder_key,\n 'filedrop_key': filedrop_key,\n 'path': path,\n 'previous_hash': previous_hash\n })\n\n upload_info = {\n \"fd\": fd,\n \"filename\": \"chunk\"\n }\n\n return self.request(action, params, action_token_type=\"upload\",\n upload_info=upload_info, headers=headers)\n # pylint: enable=too-many-locals\n\n def upload_instant(self, filename, size, hash_, quick_key=None,\n folder_key=None, filedrop_key=None, path=None,\n action_on_duplicate=None, mtime=None,\n version_control=None, previous_hash=None):\n \"\"\"upload/instant\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#instant\n \"\"\"\n return self.request('upload/instant', QueryParams({\n 'filename': filename,\n 'size': size,\n 'hash': hash_,\n 'quick_key': quick_key,\n 'folder_key': folder_key,\n 'filedrop_key': filedrop_key,\n 'path': path,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime,\n 'version_control': version_control,\n 'previous_hash': previous_hash\n }))\n\n def upload_poll(self, key):\n \"\"\"upload/poll\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#poll_upload\n \"\"\"\n return self.request('upload/poll_upload', QueryParams({\n 'key': key\n }))\n\n def file_get_info(self, quick_key=None):\n \"\"\"file/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/file/#get_info\n \"\"\"\n return self.request('file/get_info', 
QueryParams({\n 'quick_key': quick_key\n }))\n\n def file_get_links(self, quick_key, link_type=None):\n \"\"\"file/get_links\n\n http://www.mediafire.com/developers/core_api/1.3/file/#get_links\n \"\"\"\n return self.request('file/get_links', QueryParams({\n 'quick_key': quick_key,\n 'link_type': link_type,\n }))\n\n def file_update(self, quick_key, filename=None, description=None,\n mtime=None, privacy=None):\n \"\"\"file/update\n\n http://www.mediafire.com/developers/core_api/1.3/file/#update\n \"\"\"\n return self.request('file/update', QueryParams({\n 'quick_key': quick_key,\n 'filename': filename,\n 'description': description,\n 'mtime': mtime,\n 'privacy': privacy\n }))\n\n def file_update_file(self, quick_key, file_extension=None, filename=None,\n description=None, mtime=None, privacy=None,\n timezone=None):\n \"\"\"file/update_file\n\n http://www.mediafire.com/developers/core_api/1.3/file/#update_file\n \"\"\"\n return self.request('file/update', QueryParams({\n 'quick_key': quick_key,\n 'file_extension': file_extension,\n 'filename': filename,\n 'description': description,\n 'mtime': mtime,\n 'privacy': privacy,\n 'timezone': timezone\n }))\n\n def file_delete(self, quick_key):\n \"\"\"file/delete\n\n http://www.mediafire.com/developers/core_api/1.3/file/#delete\n \"\"\"\n return self.request('file/delete', QueryParams({\n 'quick_key': quick_key\n }))\n\n def file_move(self, quick_key, folder_key=None):\n \"\"\"file/move\n\n http://www.mediafire.com/developers/core_api/1.3/file/#move\n \"\"\"\n return self.request('file/move', QueryParams({\n 'quick_key': quick_key,\n 'folder_key': folder_key\n }))\n\n def file_purge(self, quick_key):\n \"\"\"file/purge\n\n http://www.mediafire.com/developers/core_api/1.3/file/#purge\n \"\"\"\n return self.request('file/purge', QueryParams({\n 'quick_key': quick_key\n }))\n\n def file_zip(self, keys, confirm_download=None, meta_only=None):\n \"\"\"file/zip\n\n http://www.mediafire.com/developers/core_api/1.3/file/#zip\n \"\"\"\n return self.request('file/zip', QueryParams({\n 'keys': keys,\n 'confirm_download': confirm_download,\n 'meta_only': meta_only\n }))\n\n def system_get_info(self):\n \"\"\"system/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/system/#get_info\n \"\"\"\n return self.request('system/get_info')\n\n def system_get_status(self):\n \"\"\"system/get_status\n\n http://www.mediafire.com/developers/core_api/1.3/system/#get_status\n \"\"\"\n return self.request('system/get_status')\n", "sub_path": "mediafire/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 25118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 85, "usage_type": "call"}, {"api_name": "requests.adapters.HTTPAdapter", "line_number": 87, "usage_type": "call"}, {"api_name": "six.moves.urllib.parse.urlencode", "line_number": 123, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 132, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 151, "usage_type": "attribute"}, {"api_name": "requests_toolbelt.MultipartEncoder", "line_number": 168, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 186, "usage_type": "attribute"}, {"api_name": "requests.exceptions.RequestException", "line_number": 192, "usage_type": "name"}, {"api_name": "hashlib.sha1", "line_number": 361, "usage_type": "call"}, {"api_name": 
"six.moves.urllib.parse.urlencode", "line_number": 371, "usage_type": "call"}]} +{"seq_id": "171947755", "text": "#encoding=utf-8\n\nfrom django.shortcuts import render\nfrom blogs.models import Tag, Category, BaseModel\nfrom common.helpers import paged_items, ok_json\nfrom common.pc_m import judge_pc_or_mobile\nfrom blogs.models import Category, Article\n\n\ndef blogs(request):\n nav_bar = \"blog\"\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n cat_id = int(request.GET.get(\"cat_id\", 0))\n blog_cat_list = Category.objects.all()\n blog_list = Article.objects.filter(is_active=True).order_by(\"-id\")\n if cat_id not in [\"0\", 0, \"\"]:\n blog_list = blog_list.filter(category__id=cat_id)\n if user_agt is False:\n blog_list = paged_items(request, blog_list)\n return render(request, 'web/pages/blog/blog.html', locals())\n else:\n blog_list = paged_items(request, blog_list)\n return render(request, 'web/pages/blog/blog.html', locals())\n\n\ndef blog_detail(request, id):\n nav_bar = \"blog\"\n blog_dtl = Article.objects.filter(id=id).first()\n blog_dtl.views += 1\n blog_dtl.save()\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n if user_agt is False:\n return render(request, 'web/pages/blog/blog_detail.html', locals())\n else:\n return render(request, 'web/pages/blog/blog_detail.html', locals())", "sub_path": "blogs/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "common.pc_m.judge_pc_or_mobile", "line_number": 12, "usage_type": "call"}, {"api_name": "blogs.models.Category.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "blogs.models.Category.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "blogs.models.Category", "line_number": 14, "usage_type": "name"}, {"api_name": "blogs.models.Article.objects.filter", "line_number": 15, "usage_type": "call"}, {"api_name": "blogs.models.Article.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "blogs.models.Article", "line_number": 15, "usage_type": "name"}, {"api_name": "common.helpers.paged_items", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "common.helpers.paged_items", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "blogs.models.Article.objects.filter", "line_number": 28, "usage_type": "call"}, {"api_name": "blogs.models.Article.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "blogs.models.Article", "line_number": 28, "usage_type": "name"}, {"api_name": "common.pc_m.judge_pc_or_mobile", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "55163435", "text": "import os\nimport sqlite3\nimport tqdm\nfrom tqdm import trange\n\n\ndef datewriter(tag, type, start, end, word, path, filename):\n with open(path + '/' + filename + \".ann\", 'a') as f:\n newline = [tag, type, str(start), str(end), word]\n newline = tag+'\\t'+type+' '+str(start)+' '+str(end)+'\\t'+word+'\\n'\n f.write(newline)\n\n\ndef dataopener(dbpath, path, dataname):\n connection = sqlite3.connect(dbpath)\n database = connection.cursor()\n\n #!start code block\n 
database.execute(\"select id from \" + dataname)\n ID = database.fetchall()\n ID = [x[0] for x in ID]\n\n # ?Their assume sql is in below format\n # ?id|tag|filename|type|start|end|word\n print(\"Start Writing ann file\")\n for x in ID:\n database.execute(\n \"select * from \" + dataname + \" where id == %d\" % (x))\n dataline = database.fetchall()[0]\n tag = dataline[1]\n filename = dataline[2]\n filename = os.path.splitext(filename)[0]\n type = dataline[3]\n start = dataline[4]\n end = dataline[5]\n word = dataline[6]\n datewriter(tag, type, start, end, word, path, filename)\n print(\"Finish transfor data into ann file\")\n #!end code block\n\n database.close()\n connection.commit()\n connection.close()\n\n return \"All Done\"\n", "sub_path": "TianChI/COMP1/src/dataFormter.py", "file_name": "dataFormter.py", "file_ext": "py", "file_size_in_byte": 1303, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sqlite3.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "654494025", "text": "import numpy as np ##Import numpy\nimport cv2 ##Import computer vision library\n\n\nvideo= cv2.VideoCapture(0) ##Create a new video object, conected to the first webcam\n\nframe_width = int(video.get(3))\nframe_height = int(video.get(4))\nout = cv2.VideoWriter('prueba1_.avi',cv2.VideoWriter_fourcc('M','J','P','G'),10,(frame_width,frame_height))\n\nfin = False\ncentro_x = 0\n\nwhile(1):\n okay,image = video.read() #Save video frame on image and status in okay\n\n if okay: #if frame image is completted\n blur = cv2.GaussianBlur(image,(5,5),0) ##GaussianBlur for filtering signal\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV) ##BGR to HSV for easier identification of object color\n\n #Select lower and upper color detection range\n lower_color = np.array([40,70,70])\n upper_color = np.array([80,200,200])\n\n #save in mask hsv image with filter color\n mask = cv2.inRange(hsv,lower_color,upper_color)\n mask = cv2.GaussianBlur(mask,(5,5),0)\n output = cv2.bitwise_and(image, image, mask = mask)\n\n moments = cv2.moments(mask)\n m00 = moments['m00']\n centro_x,centro_y = -1,-1\n if m00 != 0:\n centro_x = int(moments['m10']/m00)\n centro_y = int(moments['m01']/m00)\n\n if centro_y != -1 and centro_y != -1:\n ctr = (centro_x,centro_y)\n cv2.circle(image,ctr,5,(255,0,0),4)\n\n #cv2.circle(output,(300,250),50,(0,255,0))\n cv2.putText(image,\"X=\"+str(centro_x)+\", Y=\"+str(centro_y),(5,470),cv2.FONT_ITALIC,.4,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(image,\"Instituto Tecnologico de Ciudad Guzman\",(316,450),cv2.FONT_ITALIC,.5,(255,255,255),1,cv2.LINE_AA)\n cv2.putText(image,\"Ball and Plate Project - Ing. 
Electronica\",(310,470),cv2.FONT_ITALIC,.5,(255,255,255),1,cv2.LINE_AA)\n #cv2.imshow(\"Images\",np.hstack([image,output]))\n cv2.imshow(\"Images\",image)\n out.write(image)\n if cv2.waitKey(1) & 0xFF == ord('b'):\n fin = True\n break\nout.release()\nvideo.release()\ncv2.destroyAllWindows()\n", "sub_path": "object-detection/test/color_tracking.py", "file_name": "color_tracking.py", "file_ext": "py", "file_size_in_byte": 2097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.VideoCapture", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.moments", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.FONT_ITALIC", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.FONT_ITALIC", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.FONT_ITALIC", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "566907894", "text": "#!/usr/bin/env python3\n\nimport json\nimport random\n\nimport pandas as pd\nimport numpy as np\n\nfrom eval_ddos_block_sim import block_traffic_sim_both, block_sim_both_plot, block_traffic_sim_friend_tier1, \\\n block_sim_friend_tier1_plot\nfrom eval_ddos_route_sim import route_sim_multiprocess as route_sim\nfrom eval_ddos_utils import load_as_graph, get_stub_as_list, get_as_country, asn_lookup, get_vaild_dns_lists, \\\n get_non_stub_as_list\n\n\ndef generate_stat(stat_file, target_num = 10, dns_server_num = 3000, target_country_set=None, dns_country_set=None):\n stubs = get_stub_as_list()\n\n if target_country_set:\n asn_country, country_asn_set = get_as_country()\n select_country_as = set()\n for i in target_country_set:\n select_country_as |= country_asn_set[i]\n target_stubs = set(stubs) & select_country_as\n else:\n target_stubs = set(stubs)\n\n target_stubs = random.sample(target_stubs, target_num)\n print(\"target_stubs\", target_stubs)\n\n nameservers = pd.read_csv(\"data/nameservers.csv\", dtype=str)\n\n country_col = nameservers.columns[2]\n ip_col = nameservers.columns[0]\n dns_ip_list = []\n for i, row in 
nameservers.iterrows():\n ip = str(row[ip_col])\n if \":\" in ip: # ipv6\n continue\n if dns_country_set:\n country = str(row[country_col])\n if country not in dns_country_set:\n continue\n dns_ip_list.append(ip)\n print(\"dns ip list length\", len(dns_ip_list))\n\n g = load_as_graph()\n\n stat={}\n for asn in target_stubs:\n a = stat[asn] = {}\n random.shuffle(dns_ip_list)\n i = 0\n count = 0\n while count < dns_server_num:\n ip = dns_ip_list[i]\n asn = asn_lookup(ip)\n if asn and asn in g.nodes:\n a[ip] = {\"vol\": 1.0, \"as\":asn}\n count += 1\n i+=1\n if i>=len(dns_ip_list):\n break\n print(\"select dns list length\",len(a))\n\n vaild_dns_lists = get_vaild_dns_lists()\n for k,v in stat.items():\n vaild_dns_list = vaild_dns_lists.get(k,[])\n for dns in vaild_dns_list:\n if dns in v:\n v[dns][\"inwhitelist\"] = True\n print(\"inwhitelist\", dns)\n\n json.dump(stat, open(stat_file, 'w'), indent=4)\n print(\"dumped\")\n\n\ndef main():\n common = \"gen-stat-20World-10000World-01301200\"\n\n stat_file = \"result/%s.json\" % common\n sim_route_file = 'result/%s-sim-route.json' % common\n sim_block_file_both = 'result/%s-sim-block.csv' % common\n\n #generate_stat(stat_file, 20, 10000, None, None)\n #route_sim(stat_file, sim_route_file)\n #block_traffic_sim_both(sim_route_file, sim_block_file_both, np.linspace(0.05, 0.5, 19) , 50, incremental=200)\n #print(len(get_non_stub_as_list()))\n block_sim_both_plot(sim_block_file_both, fig_save=True, name_prefix=common + \"-\")\n\ndef main1():\n common = \"result/gen2-stat-500AS-5000DNS-01301900\"\n\n stat_file = \"%s.json\" % common\n sim_route_file = '%s-sim-route.json' % common\n sim_block_file = '%s-sim-block.csv' % common\n\n # generate_stat(stat_file, 500, 5000, None, None)\n # route_sim(stat_file, sim_route_file)\n # block_traffic_sim_friend_tier1(sim_route_file, sim_block_file, mp=True)\n block_sim_friend_tier1_plot(sim_block_file, fig_save=True, figpath_prefix=common)\n\n\n\nif __name__ == \"__main__\":\n main()\n\n", "sub_path": "eval_ddos.py", "file_name": "eval_ddos.py", "file_ext": "py", "file_size_in_byte": 3423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "eval_ddos_utils.get_stub_as_list", "line_number": 17, "usage_type": "call"}, {"api_name": "eval_ddos_utils.get_as_country", "line_number": 20, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "eval_ddos_utils.load_as_graph", "line_number": 47, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 52, "usage_type": "call"}, {"api_name": "eval_ddos_utils.asn_lookup", "line_number": 57, "usage_type": "call"}, {"api_name": "eval_ddos_utils.get_vaild_dns_lists", "line_number": 66, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 74, "usage_type": "call"}, {"api_name": "eval_ddos_block_sim.block_sim_both_plot", "line_number": 89, "usage_type": "call"}, {"api_name": "eval_ddos_block_sim.block_sim_friend_tier1_plot", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "535207929", "text": "import facebook\n\ntoken = '1172179256135929|1MatpThkY-qP-6zo_9Xe0LI-qCo'\ndetail = \"checkins,category,category_list,name,id,location\"\nmin_checkins = 20\n\ndef setCheckin(value):\n min_checkins = value\n\nclass Place():\n def __init__(self, name, id):\n self.name = name\n self.id = id\n self.checkins = 0\n self.root = None\n self.key = None\n self.location = {}\n 
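As an aside on the nameserver scan in eval_ddos.py above: the row-by-row iterrows() pass can be expressed as one vectorized pandas filter with the same semantics. A hedged sketch, reusing the record's positional columns (IP in column 0, country in column 2); the function name is illustrative.

import pandas as pd

def dns_ip_candidates(csv_path, dns_country_set=None):
    nameservers = pd.read_csv(csv_path, dtype=str)
    ip_col, country_col = nameservers.columns[0], nameservers.columns[2]
    mask = ~nameservers[ip_col].astype(str).str.contains(':')  # drop ipv6, same test as the record
    if dns_country_set:
        mask &= nameservers[country_col].astype(str).isin(dns_country_set)
    return nameservers.loc[mask, ip_col].tolist()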
self.link = None\n\nclass Result():\n def __init__(self, key):\n self.root = \"Facebook\"\n self.key = key\n self.latlng = list()\n self.place = {}\n\ndef request(lat,lng,distance,key):\n graph = facebook.GraphAPI(token)\n location_facebook = graph.request('search', {'q': str(key), 'type': 'place', 'center': str(lat)+','+str(lng),'limit': str(1000), 'distance': str(distance) , 'fields': detail})\n \n fb = Result(key)\n \n for i in range(len(location_facebook['data'])):\n for j in range(len(location_facebook['data'][i]['category_list'])):\n if location_facebook['data'][i].get('checkins','None') == 'None':\n break\n if location_facebook['data'][i]['checkins'] >= min_checkins and location_facebook['data'][i]['category'] != 'City':\n object = Place(location_facebook['data'][i]['name'].encode('utf8'),location_facebook['data'][i]['id'])\n object.checkins = location_facebook['data'][i]['checkins']\n object.location = location_facebook['data'][i]['location']\n object.root = \"Facebook\"\n object.key = key\n \n fb.latlng.insert(len(fb.latlng), [float(object.location['latitude']), float(object.location['longitude'])])\n fb.place.update({str(object.location['latitude'])+\",\"+str(object.location['longitude']): object})\n break\n return fb\n", "sub_path": "ApiFacebook/ApiFacebook.py", "file_name": "ApiFacebook.py", "file_ext": "py", "file_size_in_byte": 1849, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "facebook.GraphAPI", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "449251636", "text": "from django import forms\nfrom django.urls import reverse_lazy\nfrom rest_framework.fields import ChoiceField\n\nfrom BioPyApp.models import Batch, Class, Event, Process, Variable\nfrom BioPyApp.widgets import CRUDWidgetWrapper\n\nunfolding_method_options = (\n (\"idx\",\"Indexed\"),\n (\"ts\",\"Timeseries\"),\n (\"bw_tv\",\"Batch-wise (TxV)\"),\n (\"bw_vt\",\"Batch-wise (VxT)\"),\n (\"vw_bt\",\"Variable-wise (BxT)\"),\n (\"vw_tb\",\"Variable-wise (TxB)\"),\n (\"tw_bv\",\"Time-wise (BxV)\"),\n (\"tw_vb\",\"Time-wise (VxB)\"),\n)\nunfolding_axis_options = ((\"x\",\"X Axis (Column)\"),(\"y\",\"Y Axis (Row)\"))\ntime_reference_options = [(\"elapsed\",\"Elapsed\"),(\"timestamp\",\"Absolute\")]\ncompression_options = (('sparse',\"Sparse\"),('dense','Dense'))\nfile_format_options=[\"parquet\",\"pickle\",\"csv\",\"hdf\",\"xlsx\",\"json\",\"feather\",\"stata\",\"msgpack\"]\n\n\nclass SelectVariablePredictorsForm(forms.Form):\n\n predictors = forms.MultipleChoiceField()\n \n def __init__(self, *args, **kwargs):\n batches = kwargs.pop('batches')\n super(SelectVariablePredictorsForm,self).__init__(*args, **kwargs)\n preds=Variable.objects.filter(batch__in=batches).values_list('name',flat=True).distinct()\n choices = [(p,p) for p in preds ]\n self.fields['predictors'].choices = choices\n self.fields['predictors'].widget = CRUDWidgetWrapper(\n self.fields['predictors'].widget\n ,reverse_lazy('create_variable'),None,None)\n\nclass SelectEventPredictorsForm(forms.Form):\n\n predictors = forms.MultipleChoiceField()\n \n def __init__(self, *args, **kwargs):\n batches = kwargs.pop('batches')\n super(SelectEventPredictorsForm,self).__init__(*args, **kwargs)\n preds=Event.objects.filter(batch__in=batches).values_list('name',flat=True).distinct()\n choices = [(p,p) for p in preds]\n self.fields['predictors'].choices = choices\n self.fields['predictors'].widget = CRUDWidgetWrapper(\n self.fields['predictors'].widget\n 
,reverse_lazy('create_event'),None,None)\n\nclass SelectClassPredictorsForm(forms.Form):\n\n predictors = forms.MultipleChoiceField()\n \n def __init__(self, *args, **kwargs):\n batches = kwargs.pop('batches')\n super(SelectClassPredictorsForm,self).__init__(*args, **kwargs)\n preds=Class.objects.filter(batch__in=batches).values_list('name',flat=True).distinct()\n choices = [(p,p) for p in preds]\n self.fields['predictors'].choices = choices\n self.fields['predictors'].widget = CRUDWidgetWrapper(\n self.fields['predictors'].widget\n ,reverse_lazy('create_class'),None,None)\n\nclass SelectSingleProcessDataframeOptionsForm(forms.Form):\n unfolding_method = forms.ChoiceField(choices=unfolding_method_options)\n unfolding_axis = forms.ChoiceField(choices=unfolding_axis_options)\n time_reference = forms.ChoiceField(choices=time_reference_options)\n compression = forms.ChoiceField(choices=compression_options)\n file_format = forms.ChoiceField(choices=zip(file_format_options,[o.title() for o in file_format_options]))\n\nclass MultiProcessForm(forms.Form):\n pass\n", "sub_path": "BioPyApp/forms/dataframe.py", "file_name": "dataframe.py", "file_ext": "py", "file_size_in_byte": 3121, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.forms.Form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.MultipleChoiceField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "BioPyApp.models.Variable.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "BioPyApp.models.Variable.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "BioPyApp.models.Variable", "line_number": 31, "usage_type": "name"}, {"api_name": "BioPyApp.widgets.CRUDWidgetWrapper", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 36, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 38, "usage_type": "name"}, {"api_name": "django.forms.MultipleChoiceField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 40, "usage_type": "name"}, {"api_name": "BioPyApp.models.Event.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "BioPyApp.models.Event.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "BioPyApp.models.Event", "line_number": 45, "usage_type": "name"}, {"api_name": "BioPyApp.widgets.CRUDWidgetWrapper", "line_number": 48, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 50, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 52, "usage_type": "name"}, {"api_name": "django.forms.MultipleChoiceField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 54, "usage_type": "name"}, {"api_name": "BioPyApp.models.Class.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "BioPyApp.models.Class.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "BioPyApp.models.Class", "line_number": 59, "usage_type": "name"}, {"api_name": "BioPyApp.widgets.CRUDWidgetWrapper", "line_number": 62, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", 
"line_number": 64, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 66, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 67, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 68, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 69, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 70, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 71, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 71, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "135549437", "text": "# 회귀분석 모델 : 자동차 연비 예측\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import layers\r\n\r\ndataset = pd.read_csv(\"https://raw.githubusercontent.com/pykwon/python/master/testdata_utf8/auto-mpg.csv\")\r\nprint(dataset.head(3))\r\ndel dataset['car name']\r\nprint(dataset.head(3))\r\npd.set_option('display.max_columns', 100)\r\nprint(dataset.corr())\r\ndataset.drop(['cylinders', 'acceleration', 'model year','origin'], axis='columns', inplace=True)\r\nprint()\r\nprint(dataset.head(2))\r\ndataset['horsepower'] = dataset['horsepower'].apply(pd.to_numeric, errors='coerce') # data중에 ? 
가 있어 형변환 하면 NaN이 생김\r\nprint(dataset.info())\r\nprint(dataset.isna().sum()) # 6\r\ndataset = dataset.dropna()\r\n\r\nimport seaborn as sns\r\n# sns.pairplot(dataset[['mpg','displacement','horsepower','weight']], diag_kind='kde')\r\n# plt.show()\r\n\r\n# train / test\r\ntrain_dataset = dataset.sample(frac=0.7, random_state=123)\r\ntest_dataset = dataset.drop(train_dataset.index)\r\nprint(train_dataset.shape) # (274, 4)\r\nprint(test_dataset.shape) # (118, 4)\r\n\r\n# 표준화 작업(수식을 직접 사용)을 위한 준비\r\ntrain_stat = train_dataset.describe()\r\n# print(train_stat)\r\ntrain_stat.pop('mpg')\r\ntrain_stat = train_stat.transpose()\r\nprint(train_stat)\r\n\r\n# label : mpg\r\ntrain_labels = train_dataset.pop('mpg')\r\nprint(train_labels[:2])\r\ntest_labels = test_dataset.pop('mpg')\r\nprint(test_labels[:2])\r\n\r\ndef st_func(x):\r\n return ((x - train_stat['mean']) / train_stat['std'])\r\n\r\n# print(st_func(10))\r\n# print(train_dataset[:3])\r\n# print(st_func(train_dataset[:3]))\r\nst_train_data = st_func(train_dataset) # train feature\r\nst_test_data = st_func(test_dataset) # test feature\r\n# ------------ 모델에 적용할 dataset 준비 완료 -------------------\r\n\r\n# Model\r\ndef build_model():\r\n network = tf.keras.Sequential([\r\n layers.Dense(units=64, input_shape = [3], activation='linear'),\r\n layers.Dense(64, activation='linear'), # relu\r\n layers.Dense(1, activation='linear'),\r\n ])\r\n # opti = tf.keras.optimizers.RMSprop(0.01)\r\n opti = tf.keras.optimizers.Adam(0.01)\r\n network.compile(optimizer = opti, loss='mean_squared_error',\\\r\n metrics=['mean_absolute_error','mean_squared_error'])\r\n return network\r\n \r\nprint(build_model().summary())\r\nmodel = build_model()\r\n# fit() 전에 모델을 실행해 볼 수도 있다.\r\nprint(model.predict(st_train_data[:1]))\r\n\r\n# 훈련\r\nepochs = 5000\r\n\r\n# 학습 조기 종료 \r\nearly_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)\r\n\r\nhistory = model.fit(st_train_data, train_labels, batch_size=32,\\\r\n epochs=epochs, validation_split=0.2, verbose=1)\r\ndf = pd.DataFrame(history.history)\r\nprint(df.head(3))\r\nprint(df.columns)\r\n\r\n# 시각화\r\ndef plot_history(history):\r\n hist = pd.DataFrame(history.history)\r\n hist['epoch'] = history.epoch\r\n plt.figure(figsize = (8, 12))\r\n \r\n plt.subplot(2, 1, 1)\r\n plt.xlabel('epoch')\r\n plt.ylabel('Mean Abs Error[MPG]')\r\n plt.plot(hist['epoch'], hist['mean_absolute_error'], label='train error')\r\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label='val error')\r\n plt.legend()\r\n \r\n plt.subplot(2, 1, 2)\r\n plt.xlabel('epoch')\r\n plt.ylabel('Mean Abs Error[MPG]')\r\n plt.plot(hist['epoch'], hist['mean_squared_error'], label='train error')\r\n plt.plot(hist['epoch'], hist['val_mean_squared_error'], label='val error')\r\n plt.legend()\r\n plt.show()\r\n \r\nplot_history(history)\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "python_tensorflow/tf_test2/ke11cars.py", "file_name": "ke11cars.py", "file_ext": "py", "file_size_in_byte": 3608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", 
"line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 59, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 60, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 61, "usage_type": "name"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.EarlyStopping", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "241370090", "text": "import dpp\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.distributions as td\nfrom copy import deepcopy\ntorch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n\n### Config\n\nseed = 0\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n\n## General data config\ndataset_name = 'synth/hawkes2' # other: 
[ 'yelp_toronto', 'wikipedia', 'mooc', 'stack_overflow', 'lastfm',\n # 'reddit', 'synth/poisson', 'synth/renewal', 'synth/self_correcting',\n # 'synth/hawkes1', 'synth/hawkes2']\n\nsplit = 'whole_sequences' # How to split the sequences (other 'each_sequence' -- split every seq. into train/val/test)\n\n## General model config\nuse_history = True # Whether to use RNN to encode history\nhistory_size = 64 # Size of the RNN hidden vector\nrnn_type = 'RNN' # Which RNN cell to use (other: ['GRU', 'LSTM'])\nuse_embedding = False # Whether to use sequence embedding (should use with 'each_sequence' split)\nembedding_size = 32 # Size of the sequence embedding vector\n # IMPORTANT: when using split = 'whole_sequences', the model will only learn embeddings\n # for the training sequences, and not for validation / test\ntrainable_affine = False # Train the final affine layer\n\n## Decoder config\ndecoder_name = 'LogNormMix' # other: ['RMTPP', 'FullyNeuralNet', 'Exponential', 'SOSPolynomial', 'DeepSigmoidalFlow']\nn_components = 64 # Number of components for a mixture model\nhypernet_hidden_sizes = [] # Number of units in MLP generating parameters ([] -- affine layer, [64] -- one layer, etc.)\n\n## Flow params\n# Polynomial\nmax_degree = 3 # Maximum degree value for Sum-of-squares polynomial flow (SOS)\nn_terms = 4 # Number of terms for SOS flow\n# DSF / FullyNN\nn_layers = 2 # Number of layers for Deep Sigmoidal Flow (DSF) / Fully Neural Network flow (Omi et al., 2019)\nlayer_size = 64 # Number of mixture components / units in a layer for DSF and FullyNN\n\n## Training config\nregularization = 1e-5 # L2 regularization parameter\nlearning_rate = 1e-3 # Learning rate for Adam optimizer\nmax_epochs = 1000 # For how many epochs to train\ndisplay_step = 50 # Display training statistics after every display_step\npatience = 50 # After how many consecutive epochs without improvement of val loss to stop training\n\n\n\n### Data\n\nprint('Loading data...')\nif '+' not in dataset_name:\n    dataset = dpp.data.load_dataset(dataset_name)\nelse:\n    # If '+' in dataset_name, load all the datasets together and concatenate them\n    # For example, dataset_name='synth/poisson+synth/renewal' loads poisson and renewal datasets\n    dataset_names = [d.strip() for d in dataset_name.split('+')]\n    dataset = dpp.data.load_dataset(dataset_names.pop(0))\n    for d in dataset_names:\n        dataset += dpp.data.load_dataset(d)\n\n# Split into train/val/test, on each sequence or assign whole sequences to different sets\nif split == 'each_sequence':\n    d_train, d_val, d_test = dataset.train_val_test_split_each(seed=seed)\nelif split == 'whole_sequences':\n    d_train, d_val, d_test = dataset.train_val_test_split_whole(seed=seed)\nelse:\n    raise ValueError(f'Unsupported dataset split {split}')\n\n# Calculate mean and std of the input inter-event times and normalize only input\nmean_in_train, std_in_train = d_train.get_mean_std_in()\nstd_out_train = 1.0\nd_train.normalize(mean_in_train, std_in_train, std_out_train)\nd_val.normalize(mean_in_train, std_in_train, std_out_train)\nd_test.normalize(mean_in_train, std_in_train, std_out_train)\n\n# Break down long train sequences for faster batch training and create torch DataLoaders\nd_train.break_down_long_sequences(128)\ncollate = dpp.data.collate\ndl_train = torch.utils.data.DataLoader(d_train, batch_size=64, shuffle=True, collate_fn=collate)\ndl_val = torch.utils.data.DataLoader(d_val, batch_size=1, shuffle=False, collate_fn=collate)\ndl_test = torch.utils.data.DataLoader(d_test, batch_size=1, 
shuffle=False, collate_fn=collate)\n\n# Set the parameters for affine normalization layer depending on the decoder (see Appendix D.3 in the paper)\nif decoder_name in ['RMTPP', 'FullyNeuralNet', 'Exponential']:\n    _, std_out_train = d_train.get_mean_std_out()\n    mean_out_train = 0.0\nelse:\n    mean_out_train, std_out_train = d_train.get_log_mean_std_out()\n\n\n\n### Model setup\nprint('Building model...')\n\n# General model config\ngeneral_config = dpp.model.ModelConfig(\n    use_history=use_history,\n    history_size=history_size,\n    rnn_type=rnn_type,\n    use_embedding=use_embedding,\n    embedding_size=embedding_size,\n    num_embeddings=len(dataset),\n)\n\n# Decoder specific config\ndecoder = getattr(dpp.decoders, decoder_name)(general_config,\n                          n_components=n_components,\n                          hypernet_hidden_sizes=hypernet_hidden_sizes,\n                          max_degree=max_degree,\n                          n_terms=n_terms,\n                          n_layers=n_layers,\n                          layer_size=layer_size,\n                          shift_init=mean_out_train,\n                          scale_init=std_out_train,\n                          trainable_affine=trainable_affine)\n\n# Define model\nmodel = dpp.model.Model(general_config, decoder)\nmodel.use_history(general_config.use_history)\nmodel.use_embedding(general_config.use_embedding)\n\n# Define optimizer\nopt = torch.optim.Adam(model.parameters(), weight_decay=regularization, lr=learning_rate)\n\n\n### Training\nprint('Starting training...')\n\n# Function that calculates the loss for the entire dataloader\ndef get_total_loss(loader):\n    loader_log_prob, loader_lengths = [], []\n    for input in loader:\n        loader_log_prob.append(model.log_prob(input).detach())\n        loader_lengths.append(input.length.detach())\n    return -model.aggregate(loader_log_prob, loader_lengths)\n\nimpatient = 0\nbest_loss = np.inf\nbest_model = deepcopy(model.state_dict())\ntraining_val_losses = []\n\nfor epoch in range(max_epochs):\n    model.train()\n    for input in dl_train:\n        opt.zero_grad()\n        log_prob = model.log_prob(input)\n        loss = -model.aggregate(log_prob, input.length)\n        loss.backward()\n        opt.step()\n\n    model.eval()\n    loss_val = get_total_loss(dl_val)\n    training_val_losses.append(loss_val.item())\n\n    if (best_loss - loss_val) < 1e-4:\n        impatient += 1\n        if loss_val < best_loss:\n            best_loss = loss_val.item()\n            best_model = deepcopy(model.state_dict())\n    else:\n        best_loss = loss_val.item()\n        best_model = deepcopy(model.state_dict())\n        impatient = 0\n\n    if impatient >= patience:\n        print(f'Breaking due to early stopping at epoch {epoch}')\n        break\n\n    if (epoch + 1) % display_step == 0:\n        print(f\"Epoch {epoch+1:4d}, loss_train_last_batch = {loss:.4f}, loss_val = {loss_val:.4f}\")\n\n### Evaluation\n\nmodel.load_state_dict(best_model)\nmodel.eval()\n\npdf_loss_train = get_total_loss(dl_train)\npdf_loss_val = get_total_loss(dl_val)\npdf_loss_test = get_total_loss(dl_test)\n\nprint(f'Time NLL\\n'\n      f' - Train: {pdf_loss_train:.4f}\\n'\n      f' - Val: {pdf_loss_val.item():.4f}\\n'\n      f' - Test: {pdf_loss_test.item():.4f}')\n", "sub_path": "code/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 7423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.set_default_tensor_type", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 14, "usage_type": "call"}, {"api_name": "dpp.data.load_dataset", "line_number": 59, 
"usage_type": "call"}, {"api_name": "dpp.data", "line_number": 59, "usage_type": "attribute"}, {"api_name": "dpp.data.load_dataset", "line_number": 64, "usage_type": "call"}, {"api_name": "dpp.data", "line_number": 64, "usage_type": "attribute"}, {"api_name": "dpp.data.load_dataset", "line_number": 66, "usage_type": "call"}, {"api_name": "dpp.data", "line_number": 66, "usage_type": "attribute"}, {"api_name": "dpp.data", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 88, "usage_type": "attribute"}, {"api_name": "dpp.model.ModelConfig", "line_number": 103, "usage_type": "call"}, {"api_name": "dpp.model", "line_number": 103, "usage_type": "attribute"}, {"api_name": "dpp.decoders", "line_number": 113, "usage_type": "attribute"}, {"api_name": "dpp.model.Model", "line_number": 125, "usage_type": "call"}, {"api_name": "dpp.model", "line_number": 125, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 145, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 146, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 166, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "481037018", "text": "import urllib\nimport csv\nimport os.path as pth\nimport os\nimport xml.etree.ElementTree as ET\n\n\nfiles = []\ndataset_dir = \"/home/jupyter/dataset/train\"\ntry:\n for filename in os.listdir(dataset_dir):\n if not filename.endswith('.xml'):\n continue\n\n path = os.path.join(dataset_dir, filename)\n if pth.isfile(path):\n tree = ET.parse(path)\n root = tree.getroot()\n for file_name in root.iter('filename'):\n file_name.text = filename[:-3] + \"jpg\"\n for width in root.iter('width'):\n width.text = str(round(int(width.text)/2))\n for height in root.iter('height'):\n height.text = str(round(int(height.text)/2))\n for xmin in root.iter('xmin'):\n xmin.text = str(round(int(xmin.text)/2))\n for ymin in root.iter('ymin'):\n ymin.text = str(round(int(ymin.text)/2))\n for xmax in root.iter('xmax'):\n xmax.text = str(round(int(xmax.text)/2))\n for ymax in root.iter('ymax'):\n ymax.text = str(round(int(ymax.text)/2))\n print(root)\n tree.write(path)\nexcept IOError:\n print(\"File 'annotations.csv' is no exist\")\n\n", "sub_path": "dataset_preprocess/edit_anotations.py", "file_name": "edit_anotations.py", "file_ext": "py", "file_size_in_byte": 1248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 17, "usage_type": "call"}, {"api_name": 
"xml.etree.ElementTree", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "471335668", "text": "\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\nfrom profileapp.forms import LoginForm, RegisterForm, UserForm, UserProfileForm\nfrom profileapp.models import UserProfile\nfrom utils.helpers import paginate\nfrom cities.models import Country, Region\n\n\ndef index(request):\n return render(request, 'profileapp/index.html')\n\n\ndef about(request):\n return render(request, 'profileapp/about.html')\n\n\ndef regions(request, id):\n country = Country.objects.get(id=id)\n regions = Region.objects.filter(country=country).order_by('name')\n result = \"\"\n for region in regions:\n result += \"\" % (region.id, region.name,)\n return HttpResponse(result)\n\n\ndef user_login(request):\n errors = []\n nxt = request.GET.get('next', 'profileapp:index')\n form = LoginForm(request.POST or None)\n if form.is_valid():\n user = authenticate(\n username=form.cleaned_data.get('username'),\n password=form.cleaned_data.get('password')\n )\n if user is not None:\n login(request, user)\n return redirect(nxt)\n errors.append(\"Incorrect username or password\")\n args = {'form': form, 'error': errors}\n return render(request, 'profileapp/login.html', args)\n\n\ndef user_logout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('profileapp:index')\n\n\ndef user_register(request):\n form = RegisterForm(request.POST or None)\n if form.is_valid():\n form.save()\n user = authenticate(\n username=form.cleaned_data.get('username'),\n password=form.cleaned_data.get('password')\n )\n if user is not None:\n login(request, user)\n return redirect('profileapp:index')\n return redirect('profileapp:login')\n args = {'form': form}\n return render(request, 'profileapp/register.html', args)\n\n\n@login_required\ndef user(request, pk):\n user = User.objects.get(pk=pk)\n is_current = False\n if request.user == user:\n is_current = True\n args = {\n 'is_current': is_current, 'user': user\n }\n return render(request, 'profileapp/user.html', args)\n\n\n@login_required\ndef users(request):\n args = {\n 'users': paginate(\n User.objects.all().exclude(id=request.user.id),\n int(request.GET.get('page', 1))\n )\n }\n return render(request, 'profileapp/users.html', args)\n\n\n@login_required\ndef user_edit(request):\n form = UserForm(request.POST or None, instance=request.user)\n if form.is_valid():\n request.user.first_name = form.cleaned_data.get('first_name')\n request.user.last_name = form.cleaned_data.get('last_name')\n request.user.username = form.cleaned_data.get('username')\n request.user.email = form.cleaned_data.get('email')\n request.user.save()\n return redirect('profileapp:user', request.user.id)\n args = {'form': form}\n return render(request, 'profileapp/user_edit.html', args)\n\n\n@login_required\ndef profile_edit(request):\n try:\n profile = UserProfile.objects.get(user=request.user)\n except UserProfile.DoesNotExist:\n profile = None\n form = UserProfileForm(request.POST or None)\n if form.is_valid():\n gender = form.cleaned_data.get('gender')\n about = form.cleaned_data.get('about')\n phone = form.cleaned_data.get('phone')\n country = form.cleaned_data.get('country')\n Region = form.cleaned_data.get('Region')\n if profile:\n profile.gender = gender\n profile.about = about\n profile.phone = 
phone\n profile.country = country\n profile.Region = Region\n profile.user = request.user\n profile.save()\n else:\n UserProfile(\n gender=gender, phone=phone, country=country, Region=Region,\n about=about, user=request.user\n ).save()\n return redirect('profileapp:user', request.user.pk)\n if profile:\n form = UserProfileForm(instance=profile)\n args = {'profile': profile, 'form': form}\n return render(request, 'profileapp/profile_edit.html', args)\n\n\n@login_required\ndef user_delete(request):\n if request.user.is_authenticated:\n request.user.delete()\n logout(request)\n return redirect('profileapp:index')\n", "sub_path": "profileapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "cities.models.Country.objects.get", "line_number": 23, "usage_type": "call"}, {"api_name": "cities.models.Country.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cities.models.Country", "line_number": 23, "usage_type": "name"}, {"api_name": "cities.models.Region.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "cities.models.Region.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cities.models.Region", "line_number": 24, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "profileapp.forms.LoginForm", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "profileapp.forms.RegisterForm", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 72, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 79, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 70, "usage_type": "name"}, {"api_name": "utils.helpers.paginate", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 
86, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 82, "usage_type": "name"}, {"api_name": "profileapp.forms.UserForm", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 104, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 93, "usage_type": "name"}, {"api_name": "profileapp.models.UserProfile.objects.get", "line_number": 110, "usage_type": "call"}, {"api_name": "profileapp.models.UserProfile.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "profileapp.models.UserProfile", "line_number": 110, "usage_type": "name"}, {"api_name": "profileapp.models.UserProfile.DoesNotExist", "line_number": 111, "usage_type": "attribute"}, {"api_name": "profileapp.models.UserProfile", "line_number": 111, "usage_type": "name"}, {"api_name": "profileapp.forms.UserProfileForm", "line_number": 113, "usage_type": "call"}, {"api_name": "cities.models.Region", "line_number": 119, "usage_type": "name"}, {"api_name": "cities.models.Region", "line_number": 125, "usage_type": "name"}, {"api_name": "profileapp.models.UserProfile", "line_number": 129, "usage_type": "call"}, {"api_name": "cities.models.Region", "line_number": 130, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 133, "usage_type": "call"}, {"api_name": "profileapp.forms.UserProfileForm", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 137, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 107, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 144, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 145, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 140, "usage_type": "name"}]} +{"seq_id": "48953260", "text": "# 引入库\nimport requests\nimport time\nimport re #正则表达式\n# 写网站站点\nurl = \"http://www.jingcaiyuedu.com/novel/GLSmM4.html\"\n# 写入headers模拟浏览器上网,避免出现个别网站拒绝访问的情况\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0\",}\n# get发送请求\nresponse = requests.get(url,headers=headers)\n# 将网页编码方式转换为utf-8\nresponse.encoding = 'utf-8'\n# 网站源码\nhtml = response.text\n# re.findall获取小说的名字\ntitle = re.findall(r'',html)[0]\n# 打印小说的名字\nprint(title)\n# 获取每一章的信息(章节的url)\ndl = re.findall(r'
.*?
',html,re.S)[1]\naill = re.findall(r'href=\"(.*?)\">(.*?)<',dl)\n# 新建文件保存小说内容\nf = open(f\"{title}.txt\",'w',encoding=\"utf-8\")\n# 循环每一个章节,分别去下载\nfor i in aill:\n# 反爬\n time.sleep(1)\n# 章节地址和名\n book_url,book_name = i\n# 正确章节地址http://www.jingcaiyuedu.com/novel/GLSmM4/1.html\n# 拼接正确章节地址\n book_url = f\"http://www.jingcaiyuedu.com{book_url}\"\n# 获取章节\n book_response = requests.get(book_url,headers=headers)\n book_response.encoding = 'utf-8'\n book_html = book_response.text\n if len(re.findall(r'
(.*?)
',book_html,re.S)) == 0:\n print(book_name + 'NULL')\n continue\n# 提取章节内容\n book_content = re.findall(r'
(.*?)
',book_html,re.S)[0]\n# 清洗提取的数据\n book_content = book_content.replace(' ','')\n# 将其中内容的空格部分替换成空\n book_content = book_content.replace(' ','')\n# 将其中内容的 部分替换成空\n book_content = book_content.replace('
','')\n# 将其中内容的
部分替换成空\n book_content = book_content.replace('
','')\n# 将其中内容的
部分替换成空\n# 写入\n f.write(f\"{book_name}\\n\")\n print(book_name)\n f.write(f\"{book_content}\\n\")\n f.write(\"\\n\")\n print(book_url)\n", "sub_path": "ghost_soldier.py", "file_name": "ghost_soldier.py", "file_ext": "py", "file_size_in_byte": 2077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 16, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 20, "usage_type": "call"}, {"api_name": "re.S", "line_number": 20, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 37, "usage_type": "call"}, {"api_name": "re.S", "line_number": 37, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}, {"api_name": "re.S", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "147224176", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n#from pyvirtualdisplay import Display\nfrom bs4 import BeautifulSoup\nimport time\nimport datetime\nimport traceback\nfrom selenium.webdriver.chrome.options import Options\nfrom influx import select, insert \n\n\nurl = 'https://web.kma.go.kr/eng/weather/forecast/current_korea.jsp'\n\ndef correct_url(url): \n\tif not url.startswith(\"http://\") and not url.startswith(\"https://\"):\n\t\turl = \"http://\" + url\n\treturn url\n\ndef scrollDown(browser, numberOfScrollDowns):\n\tbody = browser.find_element_by_tag_name(\"body\")\n\twhile numberOfScrollDowns >=0:\n\t\tbody.send_keys(Keys.PAGE_DOWN)\n\t\tnumberOfScrollDowns -= 1\n\t\ttime.sleep(0.3)\n\treturn browser\n\ndef crawl_url(url, run_headless=False):\n\twhile (1):\n\t\ttry:\n\t\t\turl = correct_url(url)\n\t\t\tchrome_options = Options()\n\t\t\tchrome_options.add_argument(\"--headless\")\t\n\t\t\tchrome_options.add_argument('--no-sandbox')\n\t\t\tchrome_options.add_argument('--disable-dev-shm-usage')\n\t\t\tbrowser = webdriver.Chrome(chrome_options=chrome_options)\n\t\t\tbrowser.get(url)\n\t\t\t#time.sleep(1)\n\t\t\t\n\t\t\tcurr_time = str(datetime.datetime.now())\n\t\t\tprint (str(curr_time))\n\t\t\t\n\t\t\tcontent = browser.page_source\n\t\t\tsoup = BeautifulSoup(content)\n\t\t\t\n\t\t\tlocations = soup.findAll('tr')\n\t\t\tlocation_infos = []\n\n\t\t\tfor location in locations:\n\t\t\t\tinfos = location.findAll(\"td\")\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif (infos != []):\n\t\t\t\t\tstation, weather, visibility, \\\n\t\t\t\t\tcloud, temp, wind_dir, wind_speed, \\\n\t\t\t\t\thum, _, air_pressure \t\t\t\t\t\t= infos\n\n\t\t\t\t\tinsert(infos)\n\n\t\t\tbrowser.quit()\n\t\t\ttime.sleep(60)\t\t# Get data one minute / time\n\t\t\t\n\t\texcept:\n\t\t\ttraceback.print_exc()\n\t\nif __name__=='__main__':\n\tcrawl_url(url)\n\t\n\t\n", "sub_path": "crawl_influxdb/crawl_temperature_influx.py", "file_name": "crawl_temperature_influx.py", "file_ext": "py", "file_size_in_byte": 1665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "selenium.webdriver.common.keys.Keys.PAGE_DOWN", "line_number": 22, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 22, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 24, 
"usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 43, "usage_type": "call"}, {"api_name": "influx.insert", "line_number": 57, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "268466150", "text": "import torch\nimport math\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom transformers import RobertaTokenizer, RobertaModel\nfrom model.rnn import RNNEncoder, max_along_time, mean_along_time\nfrom model.modules import CharMatching, ContextMatching\n\nclass MMT_joint(nn.Module):\n def __init__(self, args, vocab, n_dim, image_dim, layers, dropout, num_choice=5):\n super().__init__()\n self.vocab = vocab\n V = len(vocab)\n D = n_dim\n self.hidden_dim = n_dim\n \n #video_encoder_layer = nn.TransformerEncoderLayer(d_model=300, nhead=6, dim_feedforward=1024, dropout=0.1, activation='gelu')\n #self.video_encoder = nn.TransformerEncoder(video_encoder_layer, num_layers=1)\n self.video_encoder = nn.GRU(image_dim + 21, 150, bidirectional=True, batch_first=True)\n\n multimodal_encoder_layer = nn.TransformerEncoderLayer(d_model=n_dim, nhead=6, dim_feedforward=1024, dropout=0.5, activation='gelu')\n self.transformer = nn.TransformerEncoder(multimodal_encoder_layer, num_layers=2)\n\n self.embedding = nn.Embedding(V, D)\n n_dim = args.n_dim\n image_dim = args.image_dim\n\n self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n self.language_model = RobertaModel.from_pretrained('roberta-base', return_dict=True) \n #for param in self.language_model.base_model.parameters():\n # param.requires_grad = False\n\n # Update config to finetune token type embeddings\n #self.language_model.config.type_vocab_size = 3 \n\n # Create a new Embeddings layer, with 2 possible segments IDs instead of 1\n #self.language_model.embeddings.token_type_embeddings = nn.Embedding(3, self.language_model.config.hidden_size)\n \n # Initialize it\n #self.language_model.embeddings.token_type_embeddings.weight.data.normal_(mean=0.0, std=self.language_model.config.initializer_range)\n\n '''\n # Freeze the first 10 layers\n modules = [self.language_model.encoder.layer[:10]]\n for module in modules:\n for param in module.parameters():\n param.requires_grad = False\n '''\n\n #self.cmat = ContextMatching(n_dim * 3) \n #self.lstm_raw = RNNEncoder(300, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.lstm_script = RNNEncoder(321, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.script_on = \"script\" in args.stream_type\n self.vbb_on = \"visual_bb\" in args.stream_type\n self.vmeta_on = \"visual_meta\" in args.stream_type\n #self.conv_pool = Conv1d(n_dim*4+1, n_dim*2)\n\n self.character = nn.Parameter(torch.randn(22, D, device=args.device, dtype=torch.float), requires_grad=True)\n self.norm1 = Norm(D)\n\n self.lang_proj = nn.Linear(768, 300)\n self.visual_proj = nn.Linear(2048, 300) \n \n #self.mh_video = nn.MultiheadAttention(300, 6) \n 
#self.context_gru = nn.GRU(300, 150, bidirectional=True, batch_first=True)\n self.cross1 = UtilityLayer(300)\n self.cross2 = UtilityLayer(300)\n self.cross3 = UtilityLayer(300)\n self.context_proj = nn.Linear(5*300,300)\n\n self.char_classifier = nn.Linear(300, 21)\n self.mask_classifier = nn.Linear(300, self.tokenizer.vocab_size)\n\n self.output = nn.Linear(300, 1)\n\n self.answer_rnn = nn.LSTM(300, 300, 1, batch_first=True, dropout=0)\n\n speaker_name = [ \n 'None', # index 0: unknown speaker \n 'Anna', 'Chairman', 'Deogi', 'Dokyung', 'Gitae',\n 'Haeyoung1', 'Haeyoung2', 'Heeran', 'Hun', 'Jeongsuk',\n 'Jinsang', 'Jiya', 'Kyungsu', 'Sangseok', 'Seohee', \n 'Soontack', 'Sukyung', 'Sungjin', 'Taejin', 'Yijoon'\n ]\n self.speaker_to_index = {name: index for index, name in enumerate(speaker_name)} \n self.index_to_speaker = {v: k for k, v in self.speaker_to_index.items()}\n\n if self.script_on:\n self.lstm_script = RNNEncoder(321, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.classifier_script = nn.Sequential(nn.Linear(n_dim*2, 1), nn.Softmax(dim=1))\n self.mhattn_script = CharMatching(4, D, D)\n\n if self.vmeta_on: \n self.lstm_vmeta = RNNEncoder(321, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.classifier_vmeta = nn.Sequential(nn.Linear(n_dim*2, 1), nn.Softmax(dim=1))\n self.mhattn_vmeta = CharMatching(4, D, D)\n\n if self.vbb_on:\n self.lstm_vbb = RNNEncoder(image_dim+21, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.vbb_fc = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(image_dim, n_dim),\n nn.Tanh(),\n )\n self.classifier_vbb = nn.Sequential(nn.Linear(n_dim*2, 1), nn.Softmax(dim=1))\n\n self.mhattn_vbb = CharMatching(4, D, D)\n\n\n def _to_one_hot(self, y, n_dims, mask, dtype=torch.cuda.FloatTensor):\n scatter_dim = len(y.size())\n y_tensor = y.type(torch.LongTensor).view(*y.size(), -1).cuda()\n y_tensor = y.view(*y.size(), -1).cuda()\n zeros = torch.zeros(*y.size(), n_dims).type(dtype).cuda()\n out = zeros.scatter(scatter_dim, y_tensor, 1)\n\n out_mask,_ = self.len_to_mask(mask, out.shape[1])\n out_mask = out_mask.unsqueeze(2).repeat(1, 1, n_dims)\n\n return out.masked_fill_(out_mask, 0)\n\n\n def load_embedding(self, pretrained_embedding):\n print('Load pretrained embedding ...')\n #self.embedding.weight.data.copy_(pretrained_embedding)\n self.embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))\n\n def len_to_mask(self, lengths, len_max):\n #len_max = lengths.max().item()\n mask = torch.arange(len_max, device=lengths.device,\n dtype=lengths.dtype).expand(len(lengths), len_max) >= lengths.unsqueeze(1)\n mask = torch.as_tensor(mask, dtype=torch.uint8, device=lengths.device)\n\n return mask, len_max\n\n def get_name(self, x, x_l):\n x_mask = x.masked_fill(x>20, 21)\n x_onehot = self._to_one_hot(x_mask, 22, x_l)\n x_sum = torch.sum(x_onehot[:,:,:21], dim=1)\n return x_sum > 0\n\n def forward(self, que, answers, **features):\n '''\n filtered_sub (B, max_sub_len)\n filtered_sub_len (B)\n filtered_speaker (B, max_sub_len)\n\n filtered_visual (B, max_v_len*3)\n filtered_visual_len (B)\n\n filtered_image (B, max_v_len, 512)\n filtered_image_len (12)\n\n que (B, max_que_len)\n que_len (B)\n\n answers (B, 5, max_ans_len)\n ans_len (B, 5)\n \n print(que.shape)\n print(answers.shape)\n for key, value in features.items():\n print(key, value.shape)\n \n\n '''\n batch_size = que.shape[0]\n\n text = features['text_masked']\n text_lengths = features['text_masked_l']\n token_type_ids = 
features['token_type_ids']\n        #labels = features['labels']\n\n        # -------------------------------- #\n        outputs = self.language_model(que)\n        e_q = outputs.last_hidden_state\n        e_q = self.lang_proj(e_q)\n        # -------------------------------- #\n        e_ans = []\n        for i in range(5):\n            outputs = self.language_model(answers[:,i,:])\n            embedded = outputs.last_hidden_state\n            embedded = self.lang_proj(embedded)\n            e_ans.append(embedded)\n        \n        if self.script_on:\n            s_len = features['filtered_sub_len']\n            spk = features['filtered_speaker']\n            spk_onehot = self._to_one_hot(spk, 21, mask=s_len)\n            e_s = torch.cat([self.lang_proj(self.language_model(features['filtered_sub']).last_hidden_state), spk_onehot], dim=2) # embed the subtitles the same way as the vmeta branch below\n            H_S, _ = self.lstm_script(e_s, s_len)\n\n        if self.vmeta_on:\n            vmeta = features['filtered_visual'].view(batch_size, -1, 3)\n            vmeta_len = features['filtered_visual_len'].double()*2/3\n\n            vp = vmeta[:,:,0]\n            vp = vp.unsqueeze(2).repeat(1,1,2).view(batch_size, -1)\n            vbe = vmeta[:,:,1:3].contiguous()\n            vbe = vbe.view(batch_size, -1)\n            #e_vbe = self.embedding(vbe)\n            e_vbe = self.language_model(vbe).last_hidden_state\n            e_vbe = self.lang_proj(e_vbe)\n            # -------------------------------- #\n            vp_onehot = self._to_one_hot(vp, 21, mask=vmeta_len)\n            e_vbe = torch.cat([e_vbe, vp_onehot], dim=2)\n            #vp_flag = [torch.matmul(vp_onehot, concat_qa[i].unsqueeze(2)) for i in range(5)]\n            #vp_flag = [(vp_flag[i] > 0).type(torch.cuda.FloatTensor) for i in range(5)]\n            M, _ = self.lstm_vmeta(e_vbe, vmeta_len)\n\n        if self.vbb_on:\n            e_vbb = features['filtered_person_full']\n            vbb_len = features['filtered_person_full_len']\n\n            vp = features['filtered_visual'].view(batch_size, -1, 3)[:,:,0]\n            vp = vp.unsqueeze(2).view(batch_size, -1)\n            vp_onehot = self._to_one_hot(vp, 21, mask=vbb_len)\n            e_vbb = torch.cat([e_vbb, vp_onehot], dim=2)\n            #vp_flag = [torch.matmul(vp_onehot, concat_qa[i].unsqueeze(2)) for i in range(5)]\n            #vp_flag = [(vp_flag[i] > 0).type(torch.cuda.FloatTensor) for i in range(5)]\n            H_B, _ = self.lstm_vbb(e_vbb, vbb_len)\n\n\n        #S = H_S\n        #M = H_M\n        #B = H_B\n        #Q = e_q\n        #Q = torch.stack([q_c[i] for i in range(5)], dim=1)\n        #F = features['images'].squeeze()\n        #video = features['filtered_image']\n        #per_person_features = self.visual_proj(features['per_person_features'])\n        #video = self.visual_proj(video)\n\n        attention_mask, _ = self.len_to_mask(text_lengths, text.shape[1])\n        #outputs = self.language_model(text, token_type_ids=token_type_ids, attention_mask=attention_mask)\n        text_length = text.size(1)\n        outputs = self.language_model(text, attention_mask=attention_mask)\n        text = outputs.last_hidden_state\n        text = self.lang_proj(text)\n\n        # encode video frames\n        video = features['filtered_person_full']\n        bb_lengths = features['filtered_person_full_len']\n        frame_person = features['filtered_visual'].view(batch_size, -1, 3)[:,:,0]\n        frame_person = frame_person.unsqueeze(2).view(batch_size, -1)\n        frame_person = self._to_one_hot(frame_person, 21, mask=bb_lengths)\n        video = torch.cat([video, frame_person], dim=-1)\n        video_length = video.size(1)\n        #video = self.visual_proj(video) \n        video, _ = self.video_encoder(video)\n        \n        #inpt = torch.cat([Q,sep,a,sep,e_script,sep,per_person_features], dim=1)\n        inpt = torch.cat([text,video,M], dim=1)\n        inpt = inpt.permute(1,0,2) # sequence first\n        out = self.transformer(inpt)\n        out = out.permute(1,0,2) # batch first\n        context = out[:,0,:]\n\n        # predict person contained in each bounding box\n        char = self.char_classifier(context.unsqueeze(dim=1).repeat(1, video_length, 1))\n        \n        # predict masked tokens\n        labels = self.mask_classifier(out[:,:text_length,:])\n\n        ### 
DISCRIMINATIVE DECODER\n\n        num_options = 5\n        hidden_dim = 300\n\n        # stack answers\n        e_ans = torch.stack(e_ans) \n\n        # run through lstm\n        e_ans = e_ans.reshape(batch_size * num_options, -1, hidden_dim)\n        answers, _ = self.answer_rnn(e_ans)\n        answers = answers[:,-1,:]\n        answers = answers.reshape(num_options, batch_size, hidden_dim)\n\n        # batch first\n        answers = answers.permute(1,0,2)\n\n        # shape the context so it is the same as the answers\n        context = context.unsqueeze(dim=1).repeat(1, num_options, 1)\n\n        answers = answers.contiguous().view(batch_size * num_options, hidden_dim)\n        context = context.contiguous().view(batch_size * num_options, hidden_dim)\n\n        # compute scores\n        scores = torch.sum(answers * context, 1)\n        scores = scores.view(batch_size, num_options)\n        \n        return scores, char, labels \n\n\n    def _generate_square_subsequent_mask(self, sz):\n        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n        return mask\n\n\n    def processor(self, context, context_l, qa_character, q_embed, q_l, a_embed, a_l, mhattn):\n        #print(context.size(), context_l, len(qa_character), q_embed.size(), q_l, len(a_embed), a_l)\n        u_q = self.cmat(context, context_l, q_embed, q_l)\n        u_a = torch.stack([self.cmat(context, context_l, a_embed[i], a_l[i]) for i in range(5)])\n        u_ch = torch.stack([mhattn(qa_character[i], context, context_l) for i in range(5)])\n        return u_q, u_a, u_ch\n\n    def stream_processor(self, classifier, mhattn, ctx_flag, ctx, ctx_l,\n                         qa_character, q_embed, q_l, a_embed, a_l):\n        \n        u_q = self.cmat(ctx, ctx_l, q_embed, q_l)\n        u_a = [self.cmat(ctx, ctx_l, a_embed[i], a_l[i]) for i in range(5)]\n        u_ch = [mhattn(qa_character[i], ctx, ctx_l) for i in range(5)]\n\n        concat_a = [torch.cat([ctx, u_q, u_a[i], u_ch[i], ctx_flag[i]], dim=-1) for i in range(5)] \n        \n        # ctx, u_ch[i], ctx_flag[i],\n        # exp_2 : ctx, u_a[i], u_q, ctx_flag[i], u_ch[i]\n        maxout = [self.conv_pool(concat_a[i], ctx_l) for i in range(5)]\n\n        answers = torch.stack(maxout, dim=1)\n        return answers\n\n    @classmethod\n    def resolve_args(cls, args, vocab):\n        return cls(args, vocab, args.n_dim, args.image_dim, args.layers, args.dropout)\n\nclass Conv1d(nn.Module):\n    def __init__(self, n_dim, out_dim):\n        super().__init__()\n        out_dim = int(out_dim/4)\n        self.conv_k1 = nn.Conv1d(n_dim, out_dim, kernel_size=1, stride=1)\n        self.conv_k2 = nn.Conv1d(n_dim, out_dim, kernel_size=2, stride=1)\n        self.conv_k3 = nn.Conv1d(n_dim, out_dim, kernel_size=3, stride=1)\n        self.conv_k4 = nn.Conv1d(n_dim, out_dim, kernel_size=4, stride=1)\n        #self.maxpool = nn.MaxPool1d(kernel_size = )\n\n    def forward(self, x, x_l):\n        # x : (B, T, 5*D)\n        x_pad = torch.zeros(x.shape[0],3,x.shape[2]).type(torch.cuda.FloatTensor)\n        x = torch.cat([x, x_pad], dim=1)\n        x1 = F.relu(self.conv_k1(x.transpose(1,2)))[:,:,:-3]\n        x2 = F.relu(self.conv_k2(x.transpose(1,2)))[:,:,:-2]\n        x3 = F.relu(self.conv_k3(x.transpose(1,2)))[:,:,:-1]\n        x4 = F.relu(self.conv_k4(x.transpose(1,2)))\n        out = torch.cat([x1, x2, x3, x4], dim=1)\n        out = out.transpose(1,2)\n        return max_along_time(out, x_l)\n\n\nclass Norm(nn.Module):\n    def __init__(self, d_model, eps = 1e-6):\n        super().__init__()\n\n        self.size = d_model\n        # create two learnable parameters to calibrate normalisation\n        self.alpha = nn.Parameter(torch.ones(self.size))\n        self.bias = nn.Parameter(torch.zeros(self.size))\n        self.eps = eps\n    def forward(self, x):\n        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=True) + self.eps) + 
self.bias\n        return norm\n\nclass UtilityBlock(nn.Module):\n    \"\"\"Efficient attention mechanism for many utilities block implemented for the visual dialog task (here: three utilities).\n    Args:\n        hidden_dim: dimension of the feature vector. Also the dimension of the final context vector provided to the decoder (required).\n        feedforward_dim: dimension of the hidden feedforward layer, implementation details from \"Attention is all you need\" (default=2048).\n        n_head: the number of heads in the multihead attention layers (default=8).\n        dropout: the dropout probability (default=0.1).\n    \"\"\"\n    def __init__(self, hidden_dim, feedforward_dim=2048, n_head=8, dropout=0.1):\n        super(UtilityBlock, self).__init__()\n        self.multihead_attn = nn.MultiheadAttention(hidden_dim, n_head) # dropout? separate attention modules?\n        self.linear = nn.Linear(2*hidden_dim, hidden_dim)\n        self.relu = nn.ReLU()\n        self.dropout = nn.Dropout(dropout)\n        self.norm = nn.LayerNorm([hidden_dim], elementwise_affine=False)\n\n    def forward(self, target, source_a, source_b):\n        \"\"\"Passes the inputs through the utility attention block. For a detailed description see the paper. Inputs are tensors for each utility. The output is the updated utility tensor.\n        Args:\n            target: the target utility. The output will be of the same shape as this target utility.\n            source_a: the first source utility to attend to.\n            source_b: the second source utility to attend to.\n        \"\"\"\n        # Permute to fit multihead attention input\n        target = target.permute(1,0,2)\n        source_a = source_a.permute(1,0,2)\n        source_b = source_b.permute(1,0,2)\n\n        # Apply multihead attention mechanism for target and multiple sources as described in the paper\n        #out_t, _ = self.multihead_attn(target, target, target) # self attention for target utility\n        out_a, _ = self.multihead_attn(target, source_a, source_a) # attention to source utility a\n        out_b, _ = self.multihead_attn(target, source_b, source_b) # attention to source utility b\n\n        # Permute back to batch-first\n        target = target.permute(1,0,2)\n        #out_t = out_t.permute(1,0,2)\n        out_a = out_a.permute(1,0,2)\n        out_b = out_b.permute(1,0,2)\n        \n        # Add & norm\n        out_a = self.norm(out_a + target)\n        out_b = self.norm(out_b + target)\n\n        #out = torch.cat((out_t, out_a, out_b), dim=2) # concatenate the resulting output tensors\n        out = torch.cat([out_a, out_b], dim=2) # concatenate the resulting output tensors\n        out = self.relu(self.linear(out)) \n        out = self.dropout(out)\n        out = self.norm(out + target) # add & norm (residual target)\n        return out\n\nclass UtilityLayer(nn.Module):\n    \"\"\"Efficient attention mechanism for many utilities layer implemented for the visual dialog task (here: three utilities). The layer consists of three parallel utility attention blocks.\n    Args:\n        hidden_dim: dimension of the feature vector. 
Also the dimension of the final context vector provided to the decoder (required).\n        feedforward_dim: dimension of the hidden feedforward layer, implementation details from \"Attention is all you need\" (default=2048).\n        n_head: the number of heads in the multihead attention layers (default=8).\n        dropout: the dropout probability (default=0.1).\n    \"\"\"\n    def __init__(self, hidden_dim, feedforward_dim=1024, n_head=5, dropout=0.1):\n        super(UtilityLayer, self).__init__()\n        self.utility_t = UtilityBlock(hidden_dim, feedforward_dim, n_head, dropout)\n        self.utility_v = UtilityBlock(hidden_dim, feedforward_dim, n_head, dropout)\n        self.utility_a = UtilityBlock(hidden_dim, feedforward_dim, n_head, dropout)\n        self.norm = nn.LayerNorm(hidden_dim)\n        trm_layer = nn.TransformerEncoderLayer(d_model=hidden_dim, nhead=n_head, dim_feedforward=feedforward_dim, dropout=dropout, activation='gelu')\n        self.trm_t = nn.TransformerEncoder(trm_layer, num_layers=1, norm=self.norm)\n        self.trm_v = nn.TransformerEncoder(trm_layer, num_layers=1, norm=self.norm)\n        self.trm_a = nn.TransformerEncoder(trm_layer, num_layers=1, norm=self.norm)\n\n    def forward(self, T, V, A):\n        \"\"\"Passes the input utilities through the utility attention layer. Inputs are passed through their respective blocks in parallel. The outputs are the three updated utility tensors.\n        Args:\n            T: the textual utility tensor\n            V: the visual utility tensor\n            A: the answer utility tensor\n        \"\"\"\n        T_out = self.utility_t(T, V, A)\n        T_out = self.trm_t(T_out)\n        V_out = self.utility_v(V, T, A)\n        V_out = self.trm_v(V_out)\n        A_out = self.utility_a(A, T, V)\n        A_out = self.trm_a(A_out)\n        return T_out, V_out, A_out\n", "sub_path": "code/model/MMT_joint.py", "file_name": "MMT_joint.py", "file_ext": "py", "file_size_in_byte": 20161, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "transformers.RobertaTokenizer.from_pretrained", "line_number": 30, "usage_type": "call"}, {"api_name": "transformers.RobertaTokenizer", "line_number": 30, "usage_type": "name"}, {"api_name": "transformers.RobertaModel.from_pretrained", "line_number": 31, "usage_type": "call"}, {"api_name": "transformers.RobertaModel", "line_number": 31, "usage_type": "name"}, {"api_name": "model.rnn.RNNEncoder", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Linear", 
"line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "model.rnn.RNNEncoder", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 92, "usage_type": "call"}, {"api_name": "model.modules.CharMatching", "line_number": 93, "usage_type": "call"}, {"api_name": "model.rnn.RNNEncoder", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 97, "usage_type": "call"}, {"api_name": "model.modules.CharMatching", "line_number": 98, "usage_type": "call"}, {"api_name": "model.rnn.RNNEncoder", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 107, "usage_type": "call"}, {"api_name": "model.modules.CharMatching", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 114, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 220, 
"usage_type": "call"}, {"api_name": "torch.cat", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.triu", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 306, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 307, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 317, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 323, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 330, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 330, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 334, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 334, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 335, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 335, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 336, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 337, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 342, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 342, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 343, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 344, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 344, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 345, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 345, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 346, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 346, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 347, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 347, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 348, "usage_type": "call"}, {"api_name": "model.rnn.max_along_time", "line_number": 350, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 353, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 353, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 359, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 360, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 360, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 360, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 366, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 366, "usage_type": "name"}, {"api_name": "torch.nn.MultiheadAttention", "line_number": 376, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 376, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 377, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 378, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 378, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 379, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 379, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 380, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 380, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 410, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 416, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 416, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 429, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 429, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 430, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 430, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 431, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 431, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 432, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 433, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 433, "usage_type": "name"}]} +{"seq_id": "229290340", "text": "from flask import jsonify\n\ndef fibGenerator():\n a, b = 0, 1\n yield 0\n while True:\n a, b = b, a + b\n yield a\n\n\ndef fibJSON(sequence):\n fiblist = []\n fib = fibGenerator()\n for n in range(int(sequence)):\n fiblist.append(str(next(fib)))\n return jsonify( {'sequence': ' '.join(fiblist)} )\n", "sub_path": "fibonacci.py", "file_name": "fibonacci.py", "file_ext": "py", "file_size_in_byte": 326, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.jsonify", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "486493477", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch_geometric.nn import global_add_pool\n\nimport pandas as pd\nfrom tqdm import tqdm\nfrom torch_geometric.data import DataLoader\nimport numpy as np\nimport random\n\nclass TripletLossRegression(nn.Module):\n \"\"\"\n anchor, positive, negative are node-level embeddings of a GNN before they are sent to a pooling layer,\n and hence are expected to be matrices.\n anchor_gt, positive_gt, and negative_gt are ground truth tensors that correspond to the ground-truth\n values of the anchor, positive, and negative respectively.\n \"\"\"\n\n def __init__(self, margin: float = 0.0, eps=1e-6):\n super(TripletLossRegression, self).__init__()\n self.margin = margin\n self.eps = eps\n\n def forward(self, anchor_batch, negative_batch, positive_batch,\n anchor: Tensor, negative: Tensor, positive: Tensor,\n anchor_gt: Tensor, negative_gt: Tensor, positive_gt: Tensor) -> Tensor:\n anchor = global_add_pool(anchor, anchor_batch)\n\n positive = global_add_pool(positive, positive_batch)\n\n negative = global_add_pool(negative, negative_batch)\n\n pos_distance = torch.linalg.norm(positive - anchor, dim=1)\n negative_distance = torch.linalg.norm(negative - anchor, dim=1)\n\n coeff = torch.div(torch.abs(negative_gt - anchor_gt) , (torch.abs(positive_gt - anchor_gt) + self.eps))\n loss = F.relu((pos_distance - coeff * negative_distance) + self.margin)\n return torch.mean(loss)\n\n\n\"\"\"\ndynamic triplet dataset based on error\n\"\"\"\n# 
def createTripletLoader(device, model, train_loader, dataset, args, errorThres = 5) -> (anchor_loader, pos_loader, neg_loader)\ndef createTripletLoader(device, model, train_loader, dataset, args, errorThres = 5):\n\n # 2. get losses for training dataset\n y_true, y_pred = [], []\n for step, batch in enumerate(tqdm(train_loader, desc=\"Sampling Triplets\")):\n # put batch to cuda\n batch = batch.to(device)\n\n # get prediction\n pred = model(batch).view(-1, )\n pred = pred.detach().cpu().tolist()\n y_pred.extend(pred)\n\n # get labels\n label = batch.y.detach().cpu().tolist()\n y_true.extend(label)\n # limit sampling to the first 100 batches\n if step == 100:\n break\n\n\n # 3. convert to dataframe\n trainDF = pd.DataFrame(zip(y_pred, y_true), columns=[\"y_pred\", \"y_true\"])\n trainDF[\"error\"] = (trainDF[\"y_pred\"] - trainDF[\"y_true\"]).apply(lambda x: abs(x))\n # bin y_pred\n trainDF[\"y_class\"] = trainDF[\"y_true\"].apply(lambda x: int(np.floor(x)))\n\n # 4. pick data with error threshold < k\n highErrorDF = trainDF[trainDF.error > errorThres]\n lowErrorDF = trainDF[trainDF.error < errorThres]\n # create [anchorID, posId, negID]\n triplets = []\n # get number of data\n ndata = len(y_pred)\n for i, row in tqdm(list(highErrorDF.iterrows())):\n i_class = row[\"y_class\"]\n\n # 4a. set i to be pos, find anchor and neg samples\n # set default to be random\n tripA = [random.randint(0, ndata - 1), random.randint(0, ndata - 1), random.randint(0, ndata - 1)]\n tripA[1] = i\n # find anchor by sampling from lowErrorDF of same class\n tripA[0] = lowErrorDF[lowErrorDF.y_class == i_class].sample(1).index.item()\n # find negative by sampling from lowErrorDF of other class\n tripA[2] = lowErrorDF[lowErrorDF.y_class != i_class].sample(1).index.item()\n triplets.append(tripA)\n\n # 4b. set i to be neg, find anchor and pos samples\n # set default to be random\n tripB = [random.randint(0, ndata - 1), random.randint(0, ndata - 1), random.randint(0, ndata - 1)]\n tripB[2] = i\n # find anchor by sampling from lowErrorDF of a different class\n tripB[0] = lowErrorDF[lowErrorDF.y_class != i_class].sample(1).index.item()\n # find positive by sampling from lowErrorDF of a different class\n tripB[1] = lowErrorDF[lowErrorDF.y_class != i_class].sample(1).index.item()\n triplets.append(tripB)\n\n if len(triplets) == 0:\n raise Exception(\"no triplets found\")\n \n # 5. create anchor, pos and neg IDs\n triplets = np.array(triplets)\n anchorIDs = list(triplets[:, 0])\n posIDs = list(triplets[:, 1])\n negIDs = list(triplets[:, 2])\n\n # 6. 
create triplet loaders\n anchor_loader = DataLoader(dataset[anchorIDs], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)\n positive_loader = DataLoader(dataset[posIDs], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)\n negative_loader = DataLoader(dataset[negIDs], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)\n\n return anchor_loader, positive_loader, negative_loader", "sub_path": "examples/lsc/pcqm4m/loss_functions/TripletLossRegression.py", "file_name": "TripletLossRegression.py", "file_ext": "py", "file_size_in_byte": 4771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 28, "usage_type": "name"}, {"api_name": "torch_geometric.nn.global_add_pool", "line_number": 29, "usage_type": "call"}, {"api_name": "torch_geometric.nn.global_add_pool", "line_number": 31, "usage_type": "call"}, {"api_name": "torch_geometric.nn.global_add_pool", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.linalg.norm", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.linalg", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.linalg.norm", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.linalg", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.div", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 72, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 81, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 86, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 108, "usage_type": "call"}, {"api_name": "torch_geometric.data.DataLoader", "line_number": 114, "usage_type": "call"}, {"api_name": "torch_geometric.data.DataLoader", "line_number": 115, "usage_type": "call"}, {"api_name": "torch_geometric.data.DataLoader", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "133833783", "text": "# python 3.7.9\n\nfrom datetime import datetime\n\n\n\n\n# trial-division primality check, used as a simple CPU benchmark\ndef prime(number):\n\n number += 1\n\n PrimeNumbers = []\n\n for prime in range(2, number):\n \n count = 2\n\n while True:\n\n if count == prime:\n PrimeNumbers.append(prime)\n break\n\n if prime % count == 0:\n break\n\n count += 1\n\n\n return PrimeNumbers\n\n\nCountNumber = 9\n\n\nfor result in range(CountNumber):\n\n start = datetime.now()\n prime(32768) # 32.768\n\n print(\n \"Test Count #{} | Took = {} seconds\".format\n (result + 1, (datetime.now()-start).total_seconds())\n )\n", "sub_path": "python/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 635, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": 
"datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "571035607", "text": "import json\nimport logging\n\nlog = logging.getLogger(__name__)\nsh = logging.StreamHandler()\nlog.addHandler(sh)\n\n\ndef test_users(api_as_admin):\n new_user_id = 'new@user.com'\n\n # List users\n r = api_as_admin.get('/users')\n assert r.ok\n\n # Get self\n r = api_as_admin.get('/users/self')\n assert r.ok\n\n # Try adding new user missing required attr\n payload = json.dumps({\n '_id': 'jane.doe@gmail.com',\n 'lastname': 'Doe',\n 'email': 'jane.doe@gmail.com',\n })\n r = api_as_admin.post('/users', data=payload)\n assert r.status_code == 400\n assert \"'firstname' is a required property\" in r.text\n\n # Add new user\n r = api_as_admin.get('/users/' + new_user_id)\n assert r.status_code == 404\n payload = json.dumps({\n '_id': new_user_id,\n 'firstname': 'New',\n 'lastname': 'User',\n })\n r = api_as_admin.post('/users', data=payload)\n assert r.ok\n r = api_as_admin.get('/users/' + new_user_id)\n assert r.ok\n\n # Modify existing user\n payload = json.dumps({\n 'firstname': 'Realname'\n })\n r = api_as_admin.put('/users/' + new_user_id, data=payload)\n assert r.ok\n\n # Cleanup\n r = api_as_admin.delete('/users/' + new_user_id)\n assert r.ok\n", "sub_path": "test/integration_tests/python/test_users.py", "file_name": "test_users.py", "file_ext": "py", "file_size_in_byte": 1252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 5, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "288579631", "text": "from flask import Flask, jsonify, request\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow \nfrom flask_jwt_extended import JWTManager, create_access_token, get_jwt_identity, jwt_required\n\n\n\n\napp = Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///\" + os.path.join(basedir, 'recipes.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['JWT_SECRET_KEY'] = 'super jwt key'\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\njwt = JWTManager(app)\n\n# Flask CLI \n@app.cli.command('create_database')\ndef create_database():\n db.create_all()\n\n print('Database tables created')\n\n@app.cli.command('seed_db')\ndef seed_database():\n user1 = User(email='admin@gmail.com', username='adminuser', is_admin=True)\n user2 = User(email='testuser@gmail.com', username='testuser', is_admin=False)\n recipe1 = Recipe(name='Roast_Chicken', protein='chicken', ingredients='chicken, onions, carrots, celery, garlic, oil, lemon, fresh herbs')\n recipe2 = Recipe(name='Creamy_Beef_Pasta', protein='beef', ingredients='beef, garlic, basil, oregano, salt, pepper, flour, tomato sauce, beef broth, pasta, heavy cream, cheddar cheese')\n db.session.add_all([user1, user2, recipe1, recipe2])\n db.session.commit()\n print('Database seeded')\n\n@app.cli.command('destroy_database')\ndef 
destroy_database():\n db.drop_all()\n print('Database tables destroyed')\n\n\n# Database Models\n\nclass User(db.Model):\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String, unique=True)\n username = db.Column(db.String, unique=True)\n is_admin = db.Column(db.Boolean)\n\nclass Recipe(db.Model):\n __tablename__ = 'recipes'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, unique=True)\n protein = db.Column(db.String)\n ingredients = db.Column(db.String)\n\n\n# Marshmallow Schema\n\nclass UserSchema(ma.SQLAlchemySchema):\n class Meta:\n model = User\n\n email = ma.auto_field()\n username = ma.auto_field()\n # links is declared below as a Hyperlinks field\n\n # Hyperlinks\n links = ma.Hyperlinks(\n {\n 'self': ma.URLFor(\"get_user_detail\", values=dict(username=\"<username>\")),\n \"collection\": ma.URLFor(\"get_all_users\"),\n }\n )\n\nclass RecipeSchema(ma.SQLAlchemySchema):\n class Meta:\n model = Recipe\n\n name = ma.auto_field()\n protein = ma.auto_field()\n ingredients = ma.auto_field()\n # links is declared below as a Hyperlinks field\n\n # Hyperlinks\n links = ma.Hyperlinks(\n {\n 'self': ma.URLFor(\"get_recipe_detail\", values=dict(recipe_name=\"<name>\")),\n \"collection\": ma.URLFor(\"get_all_recipes\"),\n }\n )\n\n# Define the ability to serialize objects\nuser_schema = UserSchema()\nrecipe_schema = RecipeSchema()\n\n# Define the ability to serialize a collection of objects\nusers_schema = UserSchema(many=True)\nrecipes_schema = RecipeSchema(many=True)\n\n\n\n# Main Code\n@app.route('/recipes/', methods=['GET'])\ndef get_all_recipes():\n \"\"\"\n GET: Returns a list of all recipes \n No authentication is required to access the recipes \n \"\"\"\n recipes = Recipe.query.all()\n return jsonify(recipes_schema.dump(recipes))\n\n@app.route('/recipes/<recipe_name>/', methods=['GET'])\ndef get_recipe_detail(recipe_name:str):\n \"\"\"\n GET: Returns recipe data for the given recipe\n \"\"\"\n recipe = Recipe.query.filter_by(name=recipe_name).first()\n return recipe_schema.dump(recipe)\n\n@app.route('/recipes/', methods=['POST'])\n@jwt_required()\ndef add_recipe():\n \"\"\"\n POST: Adds a new recipe to the database\n User needs to be logged in to access this route. 
Furthermore the user has to be an admin\n \"\"\"\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if user.is_admin:\n name = request.json.get('name')\n recipe = Recipe.query.filter_by(name=name).first()\n if recipe:\n return jsonify({\"message\": \"Recipe already exists!\"}), 409\n ingredients = request.json.get('ingredients')\n protein = request.json.get('protein')\n recipe = Recipe(name=name, ingredients=ingredients, protein=protein)\n db.session.add(recipe)\n db.session.commit()\n return jsonify({\"message\": \"New recipe added\"})\n \n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n@app.route('/recipes/<recipe_name>/', methods=['POST'])\n@jwt_required()\ndef put_recipe_detail(recipe_name:str):\n \"\"\"\n POST: Modifies recipe data for the given recipe\n User needs to be authenticated to access this endpoint\n Furthermore the user needs to be admin\n \"\"\"\n #current user is the username of the user\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if user.is_admin:\n recipe = Recipe.query.filter_by(name=recipe_name).first()\n if recipe: \n recipe.name = request.json.get('name')\n recipe.ingredients = request.json.get('ingredients')\n recipe.protein = request.json.get('protein')\n db.session.add(recipe)\n db.session.commit()\n return jsonify({\"message\": \"Recipe updated\"})\n return jsonify({\"message\": \"Recipe does not exist\"}), 404\n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n@app.route('/recipes/<recipe_name>/', methods=['DELETE'])\n@jwt_required()\ndef delete_recipe_detail(recipe_name:str):\n \"\"\"\n DELETE: Deletes the given recipe\n User needs to be authenticated to access this endpoint\n Furthermore the user needs to be admin\n\n \"\"\"\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if user.is_admin:\n recipe = Recipe.query.filter_by(name=recipe_name).first()\n db.session.delete(recipe) # assumes the recipe exists; an unknown name would raise here\n db.session.commit()\n return jsonify({\"message\": \"Recipe Deleted\"})\n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n@app.route('/users/', methods=['GET'])\n@jwt_required()\ndef get_all_users():\n \"\"\"\n GET: Returns a list of all users\n User needs to be logged in to access this endpoint \n Furthermore the user needs to be admin\n \"\"\"\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n\n if current_user.is_admin:\n users_list = User.query.all()\n return jsonify(users_schema.dump(users_list))\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n@app.route('/users/', methods=['POST'])\n@jwt_required()\ndef add_new_user():\n \"\"\"\n POST: Adds a new user to the database\n User needs to be logged in to access this endpoint \n Furthermore the user needs to be admin\n \"\"\"\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n if current_user.is_admin:\n email = request.json.get('email')\n user = User.query.filter_by(email=email).first()\n if user:\n return jsonify({\"message\": \"Email already registered!\"}), 409\n username = request.json.get('username')\n is_admin = bool(request.json.get('is_admin'))\n user = User(username=username, email=email, is_admin=is_admin)\n db.session.add(user)\n db.session.commit()\n return jsonify({\"message\": \"New user added\"})\n \n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n\n@app.route('/users/<username>/', methods=['GET'])\n@jwt_required()\ndef get_user_detail(username:str):\n \"\"\"\n GET: Returns user data for the given user\n 
User needs to be logged in to access this endpoint\n Furthermore a user can only access their own data. Admin user can access all user data\n \"\"\"\n user = User.query.filter_by(username=username).first()\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n if user:\n if current_user.username == user.username or current_user.is_admin:\n return user_schema.dump(user)\n return jsonify({\"message\": \"Unauthorised\"}), 403\n return jsonify({\"message\": \"Bad Request\"}), 400\n\n@app.route('/users/<username>/', methods=['POST'])\n@jwt_required()\ndef update_user_detail(username:str):\n \"\"\"\n POST: Modifies user data for the given user\n\n \"\"\"\n user = User.query.filter_by(username=username).first()\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n if current_user.is_admin:\n email = request.json.get('email')\n new_username = request.json.get('username')\n is_admin = request.json.get('is_admin')\n user = User.query.filter_by(username=username).first()\n if not user:\n return jsonify({\"message\": \"Bad Request\"}), 400\n \n user.username = new_username\n user.email = email\n if is_admin == \"1\":\n user.is_admin = True\n else:\n user.is_admin = False\n print(user.username, user.email, user.is_admin)\n db.session.add(user)\n db.session.commit()\n return jsonify({\"message\": \"User updated\"})\n return jsonify({\"message\": \"Unauthorised\"}), 403\n \n\n@app.route('/users/<username>/', methods=['DELETE'])\n@jwt_required()\ndef delete_user(username:str):\n \"\"\"\n DELETE: Deletes the given user\n Only admin can delete users\n\n \"\"\"\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n\n if current_user.is_admin:\n user = User.query.filter_by(username=username).first()\n if user:\n db.session.delete(user)\n db.session.commit()\n return jsonify({\"message\": \"User deleted\"})\n return jsonify({\"message\": \"Bad Request\"}), 400\n # not an admin: fall through to the unauthorised response\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n# Create a registration route for creating new users. If the email address is already\n# registered, return 409 (conflict)\n@app.route('/register', methods=['POST'])\ndef register():\n email = request.json.get('email')\n username = request.json.get('username')\n\n user = User.query.filter_by(email=email).first()\n if user:\n return jsonify({'message': 'Email address already registered!'}), 409\n new_user = User(email=email, username=username, is_admin=False)\n db.session.add(new_user)\n db.session.commit()\n return jsonify({'message': 'User created successfully'}), 201 \n\n\n# Create a route to authenticate your users and return JWTs. 
The\n# create_access_token() function is used to actually generate the JWT.\n# for simplicity's sake users only need to provide email and username to login\n@app.route(\"/login\", methods=[\"POST\"])\ndef login():\n submitted_email = request.json.get(\"email\")\n submitted_username = request.json.get(\"username\")\n user = User.query.filter_by(email=submitted_email).first()\n if user:\n if user.username == submitted_username:\n access_token = create_access_token(identity=submitted_username)\n return jsonify(access_token=access_token)\n \n return jsonify({\"message\": \"Bad username or email\"}), 401\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n app.run()\n\n\n\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 11083, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_marshmallow.Marshmallow", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_jwt_extended.JWTManager", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 114, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 146, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 125, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 158, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 163, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 163, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 164, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 165, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 165, 
"usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 171, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 150, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 183, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 191, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 175, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 205, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 206, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 194, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 216, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 218, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 218, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 221, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 230, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 209, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 248, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 235, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 260, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 260, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 260, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 261, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 261, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 262, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 262, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 276, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 277, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 251, 
"usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 288, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 296, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 298, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 305, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 305, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 305, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 306, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 306, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 306, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 310, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 314, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 322, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 322, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 323, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 323, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 323, "usage_type": "name"}, {"api_name": "flask_jwt_extended.create_access_token", "line_number": 327, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 328, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 330, "usage_type": "call"}]} +{"seq_id": "436673928", "text": "\"\"\"UCMS URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\napp_name='assistant'\nurlpatterns = [\n path('home/', views.home, name='home'),\n path('tbl/', views.tbl, name='tbl'),\n path('depart_tbl/', views.depart_tbl, name='depart_tbl'),\n path('subjects/', views.subjects, name='subjects'),\n path('subjects//', views.students, name='students'),\n path('subjects/details//', views.details, name='details'),\n path('results//', views.dgree, name='dgree'),\n path('addclm//', views.AddClm, name='addclm'),\n path('absence//', views.Absences, name='absence'),\n path('addabsence//', views.addAbsences, name='addabsence'),\n]\n", "sub_path": "assiatant/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "637875144", "text": "from django import forms\nfrom django.forms import ModelForm\nfrom .models import Topic, ChatMessage, Subscription\nimport bleach\nimport markdown\nfrom django.conf import settings\nfrom django.utils.html import escape\nimport re\nfrom django.core.validators import RegexValidator\n\nclass CreateRoomForm(forms.Form):\n name = forms.CharField(max_length=20, validators=[RegexValidator(r'^[a-z]+$', \"Only lower case letters without spaces are allowed\")])\n\nclass AdminTopicForm(forms.ModelForm):\n class Meta:\n model = Topic\n fields = ['name', 'title']\n\nclass AdminChatMessageForm(forms.ModelForm):\n class Meta:\n model = ChatMessage\n fields = ['topic', 'user', 'message', 'message_html']\n\n def clean(self):\n message = self.cleaned_data['message']\n\n message_html = escape(message)\n urlRegex = re.compile(\n u'(?isu)(\\\\b(?:https?://|www\\\\d{0,3}[.]|[a-z0-9.\\\\-]+[.][a-z]{2,4}/)[^\\\\s()<'\n u'>\\\\[\\\\]]+[^\\\\s`!()\\\\[\\\\]{};:\\'\\\".,<>?\\xab\\xbb\\u201c\\u201d\\u2018\\u2019])'\n )\n \n processed_urls = list()\n for obj in urlRegex.finditer(message_html):\n old_url = obj.group(0)\n if old_url in processed_urls:\n continue\n processed_urls.append(old_url)\n new_url = old_url\n if not old_url.startswith(('http://', 'https://')):\n new_url = 'http://' + new_url\n new_url = '<a href=\"' + new_url + '\">' + old_url + \"</a>\"\n message_html = message_html.replace(old_url, new_url)\n\n self.cleaned_data['message_html'] = message_html\n\n return self.cleaned_data\n\nclass AdminSubscriptionForm(forms.ModelForm):\n class Meta:\n model = Subscription\n fields = ['topic', 'user', 'deleted']", "sub_path": "mainapp/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.forms.Form", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": 
"django.forms.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "models.Topic", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "models.ChatMessage", "line_number": 21, "usage_type": "name"}, {"api_name": "django.utils.html.escape", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 49, "usage_type": "name"}, {"api_name": "models.Subscription", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "346809860", "text": "from flask import render_template, flash, redirect, url_for, abort\nfrom flask.ext.login import current_user, login_required\n\nfrom app import db\nfrom app.models import Group, Contact\nfrom . import main\nfrom .forms import SearchForm, NewGroupForm, EditGroupForm, NewContactForm, EditContactForm\n\n\n@main.route('/')\ndef index():\n return render_template('index.html')\n\n\n@main.route('/search', methods=['GET', 'POST'])\n@login_required\ndef search():\n form = SearchForm()\n if form.validate_on_submit():\n return redirect(url_for('.search_results', query=form.search.data))\n return render_template('search.html', form=form)\n\n\n@main.route('/search/results/')\n@login_required\ndef search_results(query):\n contacts = Contact.query.search(query).filter_by(user_id=current_user.get_id()).all()\n return render_template('search_results.html', query=query, contacts=contacts)\n\n\n@main.route('/new/group', methods=['GET', 'POST'])\n@login_required\ndef new_group():\n form = NewGroupForm()\n if form.validate_on_submit():\n group = Group(group_name=form.name.data, user_id=current_user.get_id())\n db.session.add(group)\n db.session.commit()\n flash('Group has been created.')\n return redirect(url_for('.list_groups'))\n return render_template('groups/new_group.html', form=form)\n\n\n@main.route('/list/groups')\n@login_required\ndef list_groups():\n groups = Group.query.filter_by(user_id=current_user.get_id()).order_by(Group.group_name).all()\n total_groups = Group.query.filter(Group.user_id == current_user.get_id()).count()\n return render_template('groups/list_groups.html', groups=groups, total_groups=total_groups)\n\n\n@main.route('/edit/group/', methods=['GET', 'POST'])\n@login_required\ndef edit_group(id):\n group = Group.query.get_or_404(id)\n if current_user != group.user:\n abort(403)\n form = EditGroupForm()\n if form.validate_on_submit():\n group.group_name = form.name.data\n db.session.add(group)\n db.session.commit()\n flash('Group name has been updated.')\n return redirect(url_for('.list_groups'))\n form.name.data = group.group_name\n return render_template('groups/edit_group.html', form=form)\n\n\n@main.route('/details/group/')\n@login_required\ndef group_details(id):\n group = Group.query.get_or_404(id)\n contacts = Contact.query.join(Group).filter(Contact.group_id == id).order_by(Contact.contact_name).all()\n if current_user != group.user:\n abort(403)\n return 
render_template('groups/group_details.html', group=group, contacts=contacts)\n\n\n@main.route('/delete/group/')\n@login_required\ndef delete_group(id):\n group = Group.query.get_or_404(id)\n if current_user != group.user:\n abort(403)\n db.session.delete(group)\n db.session.commit()\n flash('Group has been deleted.')\n return redirect(url_for('.list_groups'))\n\n\n@main.route('/new/contact', methods=['GET', 'POST'])\n@login_required\ndef new_contact():\n if Group.query.filter_by(user_id=current_user.get_id()).first() is None:\n flash('Create a group first before creating a new contact.')\n return redirect(url_for('.new_group'))\n form = NewContactForm()\n if form.validate_on_submit():\n contact = Contact(contact_name=form.name.data, contact_nickname=form.nickname.data,\n contact_company=form.company.data, contact_job_title=form.job_title.data,\n contact_email=form.email.data, contact_phone=form.phone.data,\n contact_address=form.address.data, contact_birthday=form.birthday.data,\n contact_notes=form.notes.data, group_id=form.group.data, user_id=current_user.get_id())\n db.session.add(contact)\n db.session.commit()\n flash('Contact has been created.')\n return redirect(url_for('.list_contacts'))\n return render_template('contacts/new_contact.html', form=form)\n\n\n@main.route('/list/contacts')\n@login_required\ndef list_contacts():\n contacts = Contact.query.join(Group).filter_by(user_id=current_user.get_id()).order_by(Contact.contact_name).all()\n total_contacts = Contact.query.filter(Contact.user_id == current_user.get_id()).count()\n return render_template('contacts/list_contacts.html', contacts=contacts, total_contacts=total_contacts)\n\n\n@main.route('/edit/contact/', methods=['GET', 'POST'])\n@login_required\ndef edit_contact(id):\n contact = Contact.query.get_or_404(id)\n if current_user != contact.user:\n abort(403)\n form = EditContactForm()\n if form.validate_on_submit():\n contact.contact_name = form.name.data\n contact.contact_nickname = form.nickname.data\n contact.contact_company = form.company.data\n contact.contact_job_title = form.job_title.data\n contact.contact_email = form.email.data\n contact.contact_phone = form.phone.data\n contact.contact_address = form.address.data\n contact.contact_birthday = form.birthday.data\n contact.contact_notes = form.notes.data\n contact.group_id = form.group.data\n db.session.add(contact)\n db.session.commit()\n flash('Contact has been updated.')\n return redirect(url_for('.list_contacts'))\n form.name.data = contact.contact_name\n form.nickname.data = contact.contact_nickname\n form.company.data = contact.contact_company\n form.job_title.data = contact.contact_job_title\n form.email.data = contact.contact_email\n form.phone.data = contact.contact_phone\n form.address.data = contact.contact_address\n form.birthday.data = contact.contact_birthday\n form.notes.data = contact.contact_notes\n form.group.data = contact.group_id\n return render_template('contacts/edit_contact.html', form=form)\n\n\n@main.route('/details/contact/')\n@login_required\ndef contact_details(id):\n contact = Contact.query.get_or_404(id)\n if current_user != contact.user:\n abort(403)\n return render_template('contacts/contact_details.html', contact=contact)\n\n\n@main.route('/delete/contact/')\n@login_required\ndef delete_contact(id):\n contact = Contact.query.get_or_404(id)\n if current_user != contact.user:\n abort(403)\n db.session.delete(contact)\n db.session.commit()\n flash('Contact has been deleted.')\n return redirect(url_for('.list_contacts'))\n", "sub_path": 
"app/main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6375, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "forms.SearchForm", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 16, "usage_type": "name"}, {"api_name": "app.models.Contact.query.search", "line_number": 27, "usage_type": "call"}, {"api_name": "app.models.Contact.query", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.models.Contact", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 25, "usage_type": "name"}, {"api_name": "forms.NewGroupForm", "line_number": 34, "usage_type": "call"}, {"api_name": "app.models.Group", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 36, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 37, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 37, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 37, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 38, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 38, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 32, "usage_type": "name"}, {"api_name": "app.models.Group.query.filter_by", "line_number": 47, "usage_type": "call"}, {"api_name": "app.models.Group.query", "line_number": 47, "usage_type": "attribute"}, {"api_name": "app.models.Group", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 47, "usage_type": "name"}, {"api_name": "app.models.Group.group_name", "line_number": 47, "usage_type": "attribute"}, {"api_name": "app.models.Group.query.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "app.models.Group.query", "line_number": 48, "usage_type": "attribute"}, {"api_name": "app.models.Group", "line_number": 48, "usage_type": "name"}, {"api_name": "app.models.Group.user_id", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 49, "usage_type": 
"call"}, {"api_name": "flask.ext.login.login_required", "line_number": 45, "usage_type": "name"}, {"api_name": "app.models.Group.query.get_or_404", "line_number": 55, "usage_type": "call"}, {"api_name": "app.models.Group.query", "line_number": 55, "usage_type": "attribute"}, {"api_name": "app.models.Group", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 57, "usage_type": "call"}, {"api_name": "forms.EditGroupForm", "line_number": 58, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 61, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 61, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 62, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 62, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 53, "usage_type": "name"}, {"api_name": "app.models.Group.query.get_or_404", "line_number": 72, "usage_type": "call"}, {"api_name": "app.models.Group.query", "line_number": 72, "usage_type": "attribute"}, {"api_name": "app.models.Group", "line_number": 72, "usage_type": "name"}, {"api_name": "app.models.Contact.query.join", "line_number": 73, "usage_type": "call"}, {"api_name": "app.models.Group", "line_number": 73, "usage_type": "argument"}, {"api_name": "app.models.Contact.query", "line_number": 73, "usage_type": "attribute"}, {"api_name": "app.models.Contact", "line_number": 73, "usage_type": "name"}, {"api_name": "app.models.Contact.group_id", "line_number": 73, "usage_type": "attribute"}, {"api_name": "app.models.Contact.contact_name", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 70, "usage_type": "name"}, {"api_name": "app.models.Group.query.get_or_404", "line_number": 82, "usage_type": "call"}, {"api_name": "app.models.Group.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "app.models.Group", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 84, "usage_type": "call"}, {"api_name": "app.db.session.delete", "line_number": 85, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 85, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 85, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 86, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 86, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 
88, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 80, "usage_type": "name"}, {"api_name": "app.models.Group.query.filter_by", "line_number": 94, "usage_type": "call"}, {"api_name": "app.models.Group.query", "line_number": 94, "usage_type": "attribute"}, {"api_name": "app.models.Group", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 96, "usage_type": "call"}, {"api_name": "forms.NewContactForm", "line_number": 97, "usage_type": "call"}, {"api_name": "app.models.Contact", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 103, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 104, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 104, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 104, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 105, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 105, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 92, "usage_type": "name"}, {"api_name": "app.models.Contact.query.join", "line_number": 114, "usage_type": "call"}, {"api_name": "app.models.Group", "line_number": 114, "usage_type": "argument"}, {"api_name": "app.models.Contact.query", "line_number": 114, "usage_type": "attribute"}, {"api_name": "app.models.Contact", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 114, "usage_type": "name"}, {"api_name": "app.models.Contact.contact_name", "line_number": 114, "usage_type": "attribute"}, {"api_name": "app.models.Contact.query.filter", "line_number": 115, "usage_type": "call"}, {"api_name": "app.models.Contact.query", "line_number": 115, "usage_type": "attribute"}, {"api_name": "app.models.Contact", "line_number": 115, "usage_type": "name"}, {"api_name": "app.models.Contact.user_id", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 112, "usage_type": "name"}, {"api_name": "app.models.Contact.query.get_or_404", "line_number": 122, "usage_type": "call"}, {"api_name": "app.models.Contact.query", "line_number": 122, "usage_type": "attribute"}, {"api_name": "app.models.Contact", "line_number": 122, "usage_type": "name"}, {"api_name": 
"flask.ext.login.current_user", "line_number": 123, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 124, "usage_type": "call"}, {"api_name": "forms.EditContactForm", "line_number": 125, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 137, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 137, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 137, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 138, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 138, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 120, "usage_type": "name"}, {"api_name": "app.models.Contact.query.get_or_404", "line_number": 157, "usage_type": "call"}, {"api_name": "app.models.Contact.query", "line_number": 157, "usage_type": "attribute"}, {"api_name": "app.models.Contact", "line_number": 157, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 159, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 160, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 155, "usage_type": "name"}, {"api_name": "app.models.Contact.query.get_or_404", "line_number": 166, "usage_type": "call"}, {"api_name": "app.models.Contact.query", "line_number": 166, "usage_type": "attribute"}, {"api_name": "app.models.Contact", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 168, "usage_type": "call"}, {"api_name": "app.db.session.delete", "line_number": 169, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 169, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 169, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 170, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 170, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 164, "usage_type": "name"}]} +{"seq_id": "506818285", "text": "# -*- coding: utf-8 -*-\n\n# coding: utf-8\n\n# # Mask R-CNN Demo\n#\n# A quick intro to using the pre-trained model to detect and segment objects.\n\n# In[1]:\n\n\nimport os\nimport sys\nimport random\nimport math\nimport numpy as np\nimport skimage.io\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nfrom IPython.core.display import JSON\n\nROOT_DIR = os.path.abspath(\"./\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\n# Import COCO config\n# sys.path.append(os.path.join(ROOT_DIR, \"./coco/\")) # To find 
local version\nimport coco\n\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n    utils.download_trained_weights(COCO_MODEL_PATH)\n\n\nclass InferenceConfig(coco.CocoConfig):\n    # Set batch size to 1 since we'll be running inference on\n    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n    GPU_COUNT = 1\n    IMAGES_PER_GPU = 1\n\n\nconfig = InferenceConfig()\nconfig.display()\n\n# ## Create Model and Load Trained Weights\n\n# In[3]:\n\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n               'bus', 'train', 'truck', 'boat', 'traffic light',\n               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n               'kite', 'baseball bat', 'baseball glove', 'skateboard',\n               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n               'teddy bear', 'hair drier', 'toothbrush']\n\nimport time\nfrom skimage.measure import find_contours\n\ndef handleImage(image):\n    # Load a random image from the images folder\n    # file_names = \"./image/test1.jpeg\"\n\n    # Store everything in data; it is converted to JSON at the end\n    data = {}\n    # Store the image size; the Android client scales the mask overlay according to it\n    data[\"shape\"] = [image.shape[0], image.shape[1]]\n    # One image may contain several people, and each person may have several contours, so use a list\n    data['contours'] = []\n    # Run detection\n    results = model.detect([image], verbose=1)\n\n    # Visualize results\n    r = results[0]\n\n    # The code below is adapted from the visualize.display_instances method\n    # Get the class ids\n    class_ids = r['class_ids']\n    masks = r['masks']\n    boxes = r['rois']\n    # Number of instances\n    N = boxes.shape[0]\n\n\n\n\n    # Convert contour data into coordinate pairs\n    def getContourData(contour):\n        contourData = []\n        for point in contour:\n            contourData.append([point[0], point[1]])\n        return contourData\n\n    for i in range(N):\n        class_id = class_ids[i]\n        label = class_names[class_id]\n        # Skip anything that is not a person\n        if (label != 'person'):\n            continue\n        # Bounding box\n        if not np.any(boxes[i]):\n            # Skip this instance. Has no bbox. 
Likely lost in image cropping.\n            continue\n\n        # Mask\n        mask = masks[:, :, i]\n\n        # Mask Polygon\n        # Pad to ensure proper polygons for masks that touch image edges.\n        padded_mask = np.zeros(\n            (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n        padded_mask[1:-1, 1:-1] = mask\n        contours = find_contours(padded_mask, 0.5)\n        # for verts in contours:\n        # Iterate over the contours\n        for contour in contours:\n            # Subtract the padding and flip (y, x) to (x, y)\n            contour = np.fliplr(contour) - 1\n            # Add the contour data to the collection.\n            contourData = getContourData(contour)\n            data['contours'].append(contourData)\n            # printImg(contour, contour)\n\n    return data\n\n\n\nimport cv2\nimport time\nimport demoForData\n\nimport numpy as np\n\n###################### Video loading #############################\ncap = cv2.VideoCapture(\"../app/src/main/res/raw/video.mp4\")\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\n# out = cv2.VideoWriter('E:\\\\Data_Set\\\\AODnet\\\\测试视频\\\\生成视频\\\\output11.avi', fourcc, 20, (1920, 1080))\n\n##################### Model loading #############################\n\n##################### Video processing #############################\nnum = 0\ncurrentTime = time.time()\n# 215 frames in total\nfile = open(\"video.txt\", \"a+\",encoding=\"utf-8\")\n\ncurrentFrame = 0\nwhile cap.isOpened():\n    preFrameTime = time.time()\n    currentFrame += 1\n    # get a frame\n    # frames below this index are skipped, i.e. processing starts from it\n    if currentFrame<1:\n        continue\n    rval, frame = cap.read()\n    # save a frame\n    if rval == True:\n        # print(type(rval))\n        # print(type(frame))\n        # Get the data for the current frame\n        try:\n            print(\"==== started processing frame \" + str(currentFrame))\n            try:\n                frameData = demoForData.handleImage(frame)\n                print(\"==== finished processing frame \" + str(currentFrame) + \" in \" + str(time.time() - preFrameTime))\n                file.write(\"\\n\"+str(currentFrame)+str(frameData))\n            except:\n                file.write(\"\\nerror\")\n\n                print(\"==== detection failed on frame \" + str(currentFrame) + \" after \" + str(time.time() - preFrameTime))\n\n        except Exception as e:\n            print(\"==== error while handling frame \" + str(currentFrame))\n            print(e)\n\n        print(\"==== elapsed \" + str(time.time() - currentTime) + \", 215 frames in total, processed \" + str(currentFrame) + \" frames so far\")\n        print(\"===============================================\")\n\n    else:\n        print(\"==== could not read frame \" + str(currentFrame))\n        break\nprint(\"====close\")\n\nfile.close()\n", "sub_path": "Py3_6/video.py", "file_name": "video.py", "file_ext": "py", "file_size_in_byte": 6666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "mrcnn.utils.download_trained_weights", "line_number": 44, "usage_type": "call"}, {"api_name": "mrcnn.utils", "line_number": 44, "usage_type": "name"}, {"api_name": "coco.CocoConfig", "line_number": 47, "usage_type": "attribute"}, {"api_name": "mrcnn.model.MaskRCNN", "line_number": 63, "usage_type": "call"}, {"api_name": "mrcnn.model", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.any", 
"line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 138, "usage_type": "attribute"}, {"api_name": "skimage.measure.find_contours", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 145, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 162, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 163, "usage_type": "call"}, {"api_name": "time.time", "line_number": 170, "usage_type": "call"}, {"api_name": "time.time", "line_number": 176, "usage_type": "call"}, {"api_name": "demoForData.handleImage", "line_number": 191, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "time.time", "line_number": 197, "usage_type": "call"}, {"api_name": "time.time", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "15585092", "text": "from postgreslib.database_connection import DBConnection\nfrom helpers.kafka import KafkaWriter, get_topic\nfrom config.config import JSON_RECORDS\nimport json\nimport time\nfrom json.decoder import JSONDecodeError\nimport os\n\nclass IngestionProducer(KafkaWriter):\n def __init__(self,bootstrap_servers,datasource,outfile = JSON_RECORDS):\n super().__init__(bootstrap_servers)\n self.datasource = datasource\n\n def get_records(self,table,number):\n self.db = DBConnection(self.datasource)\n print(\"running get records {}\".format(number))\n generator,header = self.db.stream_table(table)\n def format_record(record):\n return {str(h.name):str(v) for h,v in zip(header,record)}\n try:\n out = []\n for i , x in enumerate(generator):\n out.append(format_record(x))\n if i == number:\n reason = \"break\"\n break\n except Exception as e:\n reason = e\n finally:\n resp = input(\"stopped on {} write to file? 
(y/n) : \".format(reason))\n            write = resp == \"y\"\n            print(\"\\n chosen write {} \".format(write))\n            if write:\n                path = JSON_RECORDS\n                print(\"out len {}\".format(len(out)))\n                with open(path,\"w+\") as f:\n                    f.write(json.dumps(out))\n                print(\"wrote {} records to \\n{}\".format(len(out),path))\n            else:\n                print(\"not writing\")\n\n    def get_records_json(self):\n        path = JSON_RECORDS\n        with open(path,\"r\") as f:\n            data = json.loads(f.read())\n        return data\n\n    def ingest_data(self,table,number = False):\n        print(\"in ingest data method max {}\".format(number))\n        records = self.get_records_json()\n\n        print(\" got {} records to stream\".format(len(records)))\n        topic = get_topic(self.datasource,table)\n\n        print(\"streaming data from table {} to topic {}\".format(table,topic))\n        input(\"press enter to start producing\")\n        print(\"producing...\")\n        for i,record in enumerate(records):\n            self.produce(record, topic)\n            if number:\n                if i == number:\n                    break\n        self.produce_debug(\"completed producing {}\".format(table))\n\ndef cache_records(bootstrap_servers,db,table,number):\n    print(\"main table {}\".format(table))\n    producer = IngestionProducer(bootstrap_servers,db)\n    producer.get_records(table,number)\n", "sub_path": "producer/producer.py", "file_name": "producer.py", "file_ext": "py", "file_size_in_byte": 2515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "helpers.kafka.KafkaWriter", "line_number": 9, "usage_type": "name"}, {"api_name": "config.config.JSON_RECORDS", "line_number": 10, "usage_type": "name"}, {"api_name": "postgreslib.database_connection.DBConnection", "line_number": 15, "usage_type": "call"}, {"api_name": "config.config.JSON_RECORDS", "line_number": 34, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "config.config.JSON_RECORDS", "line_number": 43, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 45, "usage_type": "call"}, {"api_name": "helpers.kafka.get_topic", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "612916476", "text": "#!/usr/bin/python3\n\"\"\"List 10 commits (newest to oldest) of the repo \"rails\" by the user \"rails\"\n\"\"\"\n\nif __name__ == \"__main__\":\n    import requests\n    import sys\n\n    repo = sys.argv[1]\n    owner = sys.argv[2]\n\n    url = 'https://api.github.com/repos/{}/{}/commits'.format(owner, repo)\n    req = requests.get(url)\n    data = req.json()\n    i = 0\n    while len(data) > i and i < 10:\n        print(data[i].get('sha'), end=': ')\n        print(data[i].get('commit').get('author').get('name'))\n        i += 1\n", "sub_path": "0x11-python-network_1/100-github_commits.py", "file_name": "100-github_commits.py", "file_ext": "py", "file_size_in_byte": 506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "391596588", "text": "from django.test import TestCase\nfrom dojo.models import Product\n\n\nclass TaggitTests(TestCase):\n    fixtures = ['dojo_testdata.json']\n\n    def setUp(self, *args, **kwargs):\n        pass\n\n    def test_tags_prefetching(self):\n        print('\\nadding tags')\n        for product in Product.objects.all():\n            product.tags = self.add_tags(product.tags, ['product_' + str(product.id)])\n            product.save()\n            for eng in product.engagement_set.all():\n                
eng.tags = self.add_tags(eng.tags, ['eng_' + str(eng.id), 'product_' + str(product.id)])\n eng.save()\n for test in eng.test_set.all():\n test.tags = self.add_tags(test.tags, ['test_' + str(test.id), 'eng_' + str(eng.id), 'product_' + str(product.id)])\n test.save()\n\n # print('testing tags for correctness without prefetching')\n self.check_tags(Product.objects.all())\n\n # print('testing tags for correctness with prefetching')\n self.check_tags(Product.objects.all().prefetch_related('tagged_items__tag'))\n\n # print('testing tags for correctness with nested prefetching')\n self.check_tags(Product.objects.all().prefetch_related('tagged_items__tag', 'engagement_set__tagged_items__tag'))\n\n def add_tags(self, curr_tags, extra_tags):\n for tag in extra_tags:\n curr_tags.append(tag)\n return \", \".join(curr_tags)\n\n def check_tags(self, queryset):\n for product in queryset:\n # print(product.name + \": \" + str(product.tags))\n self.assertEqual(len(product.tags), 1)\n self.assertEqual(product.tags[0].name, 'product_' + str(product.id))\n for eng in product.engagement_set.all():\n # print(\" :\" + eng.name + \": \" + str(eng.tags))\n self.assertEqual(len(eng.tags), 2)\n self.assertEqual('product_' + str(product.id) in [tag.name for tag in product.tags], True)\n self.assertEqual('eng_' + str(eng.id) in [tag.name for tag in eng.tags], True)\n self.assertEqual('eng_' + str(eng.id + 1) in [tag.name for tag in eng.tags], False)\n for test in eng.test_set.all():\n # print(\" :\" + eng.name + \": \" + test.test_type.name + \": \" + str(test.tags))\n self.assertEqual(len(test.tags), 3)\n self.assertEqual('product_' + str(product.id) in [tag.name for tag in product.tags], True)\n self.assertEqual('eng_' + str(eng.id) in [tag.name for tag in eng.tags], True)\n self.assertEqual('eng_' + str(eng.id + 1) in [tag.name for tag in eng.tags], False)\n self.assertEqual('test_' + str(test.id) in [tag.name for tag in test.tags], True)\n self.assertEqual('test_' + str(test.id + 1) in [tag.name for tag in test.tags], False)\n", "sub_path": "dojo/unittests/test_taggit_tags.py", "file_name": "test_taggit_tags.py", "file_ext": "py", "file_size_in_byte": 2900, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.test.TestCase", "line_number": 5, "usage_type": "name"}, {"api_name": "dojo.models.Product.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "dojo.models.Product.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "dojo.models.Product", "line_number": 13, "usage_type": "name"}, {"api_name": "dojo.models.Product.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "dojo.models.Product.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "dojo.models.Product", "line_number": 24, "usage_type": "name"}, {"api_name": "dojo.models.Product.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "dojo.models.Product.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "dojo.models.Product", "line_number": 27, "usage_type": "name"}, {"api_name": "dojo.models.Product.objects.all", "line_number": 30, "usage_type": "call"}, {"api_name": "dojo.models.Product.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "dojo.models.Product", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "379608097", "text": "from __future__ import absolute_import, division, print_function\nimport lightgbm as lgb\nimport numpy as np\nimport 
os\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import f1_score\nfrom time import gmtime, strftime\n\nfrom config import logger, config\nfrom feature import get_train_test_features, get_train_test_features2, get_train_test_features3, get_train_test_features4, get_train_test_features0\n\ndef eval_f(y_pred, train_data):\n y_true = train_data.label\n y_pred = y_pred.reshape((config.n_class, -1)).T\n y_pred = np.argmax(y_pred, axis=1)\n score = f1_score(y_true, y_pred, average='weighted')\n return 'weighted-f1-score', score, True\n\n\ndef submit_result(submit, result):\n submit['recommend_mode'] = result\n submit.to_csv(config.trn_bag_submission_file, index=False)\n \ndef train_lgb(trn, y, tst):\n params = {'objective': 'multiclass', \n 'num_class': 12, \n 'seed': 2019, \n 'learning_rate': 0.05, \n 'num_threads': 8, \n 'num_leaves': 44, \n 'max_depth': 11, \n 'lambda_l1': 4.717461111446621, \n 'lambda_l2': 10.550885244591129, \n 'feature_fraction': 0.8235898660709667, \n 'bagging_fraction': 0.9018152298305773, \n 'bagging_freq': 3,\n 'verbose': -1}\n \n cat_cols = ['max_dist_mode', 'min_dist_mode', 'max_price_mode',\n 'min_price_mode', 'max_eta_mode', 'min_eta_mode',\n 'first_mode', 'weekday', 'hour']\n \n cat_cols = ['pid', 'max_dist_mode', 'min_dist_mode', 'max_price_mode',\n 'min_price_mode', 'max_eta_mode', 'min_eta_mode', 'first_mode', 'day_of_week', 'req_hour', 'weather']\n\n\n p = np.zeros_like(y)\n best_iteration = 250\n \n lgb_trn = lgb.Dataset(trn, y, categorical_feature=cat_cols, free_raw_data=False)\n prob_trn_tst = 0\n for seed in [0, 17, 23, 29]:\n params['seed'] = 2019 + seed\n print(params)\n clf = lgb.train(params, lgb_trn,\n valid_sets=[lgb_trn],\n num_boost_round=best_iteration,\n verbose_eval=50,\n feval=eval_f)\n \n prob_trn_tst += clf.predict(tst)\n \n prob_trn_tst /= 4.0\n\n np.savetxt(config.predict_trn_tst_bag_file, prob_trn_tst, delimiter=',')\n \n trn_tst = np.argmax(prob_trn_tst, axis=1)\n\n return trn_tst\n\nif __name__ == '__main__':\n\n trn, y, tst, sub = get_train_test_features0()\n\n config.set_algo_name('lgb5')\n config.set_feature_name('f0')\n p_tst = train_lgb(trn, y, tst)\n\n submit_result(sub, p_tst)", "sub_path": "src/train_predict_bag_v5.py", "file_name": "train_predict_bag_v5.py", "file_ext": "py", "file_size_in_byte": 2682, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "config.config.n_class", "line_number": 15, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 17, "usage_type": "call"}, {"api_name": "config.config.trn_bag_submission_file", "line_number": 23, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 48, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 51, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 66, "usage_type": "call"}, {"api_name": "config.config.predict_trn_tst_bag_file", "line_number": 66, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 68, "usage_type": "call"}, {"api_name": "feature.get_train_test_features0", "line_number": 74, 
"usage_type": "call"}, {"api_name": "config.config.set_algo_name", "line_number": 76, "usage_type": "call"}, {"api_name": "config.config", "line_number": 76, "usage_type": "name"}, {"api_name": "config.config.set_feature_name", "line_number": 77, "usage_type": "call"}, {"api_name": "config.config", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "160247440", "text": "import gym\nimport time\nimport random\nimport numpy as np\nenv = gym.make('FrozenLake-v0')\nenv.reset()\n\nnumberOfEpisodes = 1000\n#think of it as the levels of a game (progressively harder)\nnumberOfStepsPerEpisode = 10\n# Think of '10' as the progress it is allowed to reach. Number of steps per episode are the actions it can take.\n# it can't exceed 10 steps. If it doesnt reach the goal, the environment resets.\n\nfor episode in range(numberOfEpisodes):\n print(\"resetting..\")\n state = env.reset()\n time.sleep(3)\n # slows the environment down for 3 seconds; think of it as a level reset\n print(\"resetted(not sure if thats an english word lol)\")\n print(\"starting environment\")\n for steps in range(numberOfStepsPerEpisode):\n env.render()\n # render prints the environment (when you play)\n time.sleep(1)\n # slows down the action the agent is taking for 1 second.\n action = env.action_space.sample() #raise hand\n # a code to make a action into an environment - for now it is random and not implemented into anything\n new_state, reward, done, info = env.step(action)\n # the 4 variables are stated above. New state - new action. Reward - based on what the new state is; if it is\n # point worthy, this will be documented in code. Done - true/false. Info: generic of what occurs.\n if done==True:\n break\n", "sub_path": "FrozenLake.py", "file_name": "FrozenLake.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "gym.make", "line_number": 5, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "405671442", "text": "import base64\nfrom django.http import JsonResponse\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nfrom time import sleep\n\n\ndef status(status,payload):\n return JsonResponse({\"status\":status,\"payload\":payload})\n\n\ndef process_img(img):\n img = base64.b64decode(img)\n file = {'upload': img}\n r = requests.post('https://api.platerecognizer.com/v1/plate-reader/',\n files=file,\n headers={\"authorization\":\"Token fa43527529a25e1c6b2cd1670c6ccb74d6e1104e\"})\n\n return_dict = r.json()\n print(return_dict)\n if not return_dict['results']:\n return False\n license = return_dict['results'][0]['plate']\n state = return_dict['results'][0]['region']['code']\n if not state or not license:\n return False\n try:\n state = state[3:6]\n state = state.upper()\n except ValueError:\n # Not in US\n # All US State plates are of the form:\n # us-XX where XX is the lowercase state abbreviation\n sucess = False\n return False\n\n return {\"state\":state,\"license\":license}\n\n\ndef process(license, state):\n lister = []\n soup = None\n counter = 0\n url = f\"https://api.carsxe.com/platedecoder?key=zec39wzfq_yagecmcew_twtfw6cmx&plate={license}&state={state}&format=json\"\n # headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\",\n # \"Accept-Encoding\": \"gzip, deflate\",\n # \"Accept\": 
\"text/html,application/xhtml+xml,application/xml;q=0.9,/;q=0.8\", \"DNT\": \"1\",\n # \"Connection\": \"close\", \"Upgrade-Insecure-Requests\": \"1\"}\n r = requests.get(url)\n if r.status_code == 200:\n # if r.status_code == 200:\n # soup = BeautifulSoup(r.text,'html.parser')\n # print(soup.select(\"body tr b\"))\n # if soup.select(\"body tr b\"):\n # break\n # sleep(20)\n # counter += 20\n # for item in soup.select(\"body tr b\"):\n # item = str(item)\n # item = item.strip(\"\")\n # item = item.strip(\"\")\n # lister.append(item)\n json = r.json()\n if json['success']:\n # print({\"make\":json['CarMake'],\"model\":json[\"CarModel\"].split(\" \")[0],\"year\":int(json[\"RegistrationYear\"])})\n make = json['CarMake']\n model = json[\"CarModel\"].split(\" \")[0]\n if make.lower() == \"mazda\":\n model = model[len(model)-1]\n return {\"make\":make,\"model\":model,\"year\":int(json[\"RegistrationYear\"])}\n else:\n return False\n\n\ndef upload(img):\n url = f\"http://api.carsxe.com/whatcaristhat?key=0hsbdq9rl_o6thqm9v5_bv25wj6aa\"\n r = requests.post(url,headers = {'Content-type': 'text/plain'},data=\"https://upload.wikimedia.org/wikipedia/commons/thumb/6/6d/2006-2009_Honda_Civic_VTi_sedan_%282018-10-19%29_01.jpg/800px-2006-2009_Honda_Civic_VTi_sedan_%282018-10-19%29_01.jpg\")\n if r.status_code == 200:\n print(r.json())\n else:\n print(r.status_code)\n", "sub_path": "carz/boiler/api_helper.py", "file_name": "api_helper.py", "file_ext": "py", "file_size_in_byte": 3050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.http.JsonResponse", "line_number": 10, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 50, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "137808488", "text": "#!/usr/bin/python\n#\n# Copyright 2018-2021 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\n\nimport click\n\nfrom polyaxon_sdk import V1ComponentHub, V1ComponentVersion\nfrom polyaxon_sdk.rest import ApiException\nfrom urllib3.exceptions import HTTPError\n\nfrom polyaxon import settings\nfrom polyaxon.cli.dashboard import get_dashboard_url\nfrom polyaxon.cli.errors import handle_cli_error\nfrom polyaxon.cli.options import (\n OPTIONS_COMPONENT_HUB,\n OPTIONS_COMPONENT_VERSION,\n OPTIONS_OWNER,\n)\nfrom polyaxon.cli.utils import get_entity_details\nfrom polyaxon.client import PolyaxonClient\nfrom polyaxon.constants.globals import DEFAULT_HUB, NO_AUTH\nfrom polyaxon.env_vars.getters import get_component_info\nfrom polyaxon.exceptions import PolyaxonException\nfrom polyaxon.logger import clean_outputs\nfrom polyaxon.polyaxonfile import get_specification\nfrom polyaxon.schemas.cli.client_config import ClientConfig\nfrom polyaxon.utils.formatting import (\n Printer,\n dict_tabulate,\n 
dict_to_tabulate,\n get_meta_response,\n list_dicts_to_tabulate,\n)\nfrom polyaxon.utils.query_params import get_query_params\nfrom polyaxon.utils.validation import validate_tags\n\n\ndef get_current_or_public_client():\n if settings.CLI_CONFIG.is_ce:\n return PolyaxonClient(config=ClientConfig(), token=NO_AUTH)\n\n return PolyaxonClient()\n\n\ndef get_specification_details(specification):\n if specification.inputs:\n Printer.print_header(\"Component inputs:\")\n objects = list_dicts_to_tabulate([i.to_dict() for i in specification.inputs])\n dict_tabulate(objects, is_list_dict=True)\n\n if specification.outputs:\n Printer.print_header(\"Component outputs:\")\n objects = list_dicts_to_tabulate([o.to_dict() for o in specification.outputs])\n dict_tabulate(objects, is_list_dict=True)\n\n Printer.print_header(\"Content:\")\n click.echo(specification.to_dict())\n\n\ndef get_component_version_details(response):\n content = response.content\n response = dict_to_tabulate(\n response.to_dict(), humanize_values=True, exclude_attrs=[\"content\"]\n )\n\n Printer.print_header(\"Component info:\")\n dict_tabulate(response)\n\n if content:\n specification = get_specification(data=content)\n get_specification_details(specification)\n else:\n Printer.print_warning(\n \"This component version does not have any polyaxonfile content!\"\n )\n\n\ndef get_info(component: str = None, version: str = None, use_local_owner: bool = False):\n if not any([component, version]):\n Printer.print_error(\n \"A component or a component version is required.\", sys_exit=True\n )\n if all([component, version]):\n Printer.print_error(\n \"Only a component or a component version is required, not both.\",\n sys_exit=True,\n )\n\n if component:\n entity = component\n entity_name = \"component\"\n is_version = False\n else:\n entity = version\n entity_name = \"component version\"\n is_version = True\n\n try:\n owner, component_hub, component_version = get_component_info(\n entity, use_local_owner\n )\n return owner, component_hub, component_version, is_version\n except PolyaxonException as e:\n handle_cli_error(\n e,\n message=\"Could not resolve the {} from the value `{}`.\".format(\n entity_name, entity\n ),\n sys_exit=True,\n )\n\n\n@click.group()\n@clean_outputs\ndef hub():\n \"\"\"Commands for component hub.\"\"\"\n\n\n@hub.command()\n@click.option(\n \"--name\", type=str, help=\"The component hub name, e.g. 'kaniko' or 'acme/kaniko'.\"\n)\n@click.option(\"--description\", type=str, help=\"Description of the component.\")\n@click.option(\"--tags\", type=str, help=\"Tags of the component, comma separated values.\")\n@click.option(\n \"--public\", is_flag=True, help=\"Set the visibility of the component to public.\"\n)\n@clean_outputs\ndef create(name, description, tags, public):\n \"\"\"Create a new component.\n\n Example:\n\n \\b\n $ polyaxon hub create --name=kaniko --description=\"Tool to build container images\"\n\n \\b\n $ polyaxon hub create --name=owner/name --description=\"Component description\"\n \"\"\"\n if not name:\n Printer.print_error(\n \"Please provide a name to create a component hub.\",\n command_help=\"hub create\",\n sys_exit=True,\n )\n owner, hub_name, _, _ = get_info(name, None, True)\n\n tags = validate_tags(tags)\n\n if not owner or not hub_name:\n Printer.print_error(\n \"Please provide a valid component name with --name=owner/hub-name. 
\"\n )\n sys.exit(1)\n\n try:\n hub_config = V1ComponentHub(\n name=hub_name, description=description, tags=tags, is_public=public\n )\n polyaxon_client = PolyaxonClient()\n _hub = polyaxon_client.component_hub_v1.create_component_hub(owner, hub_config)\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e, message=\"Could not create component hub `{}`.\".format(hub_name)\n )\n sys.exit(1)\n\n Printer.print_success(\n \"Component hub `{}` was created successfully.\".format(_hub.name)\n )\n click.echo(\n \"You can view this component hub on Polyaxon UI: {}\".format(\n get_dashboard_url(subpath=\"{}/hub/{}\".format(owner, _hub.name))\n )\n )\n\n\n@hub.command()\n@click.option(\n \"-f\",\n \"--file\",\n \"polyaxonfile\",\n type=click.Path(exists=True),\n help=\"The component spec version to push.\",\n)\n@click.option(\n \"--name\",\n type=str,\n help=\"The component version name, e.g. 'kaniko' or 'kaniko:1.2' \"\n \"or 'acme/kaniko:latest' or 'acme/kaniko:dev'.\",\n)\n@click.option(\"--description\", type=str, help=\"Description of the version.\")\n@click.option(\"--tags\", type=str, help=\"Tags of the version, comma separated values.\")\n@clean_outputs\ndef push(polyaxonfile, name, description, tags):\n \"\"\"Push a new component version.\n If the name corresponds to an existing component version, it will be updated.\n\n Example:\n\n \\b\n $ polyaxon hub push -f polyaxonfile.yaml --name=kaniko:latest --description=\"Tool to build container images\"\n\n \\b\n $ polyaxon hub push -f polyaxonfile.yaml --name=owner/name:v1 --description=\"Component description\"\n \"\"\"\n if not name:\n Printer.print_error(\n \"Please provide a name to create a component version.\",\n command_help=\"hub push\",\n sys_exit=True,\n )\n owner, hub_name, version, is_version = get_info(None, name, True)\n tags = validate_tags(tags)\n\n if not polyaxonfile or not os.path.isfile(polyaxonfile):\n Printer.print_error(\n \"Please provide a path to a polyaxonfile to create a component version.\",\n command_help=\"hub push\",\n sys_exit=True,\n )\n try:\n plx_file = get_specification(data=polyaxonfile)\n except Exception as e:\n handle_cli_error(e, message=\"Polyaxonfile is not valid.\")\n sys.exit(1)\n\n if not owner or not hub_name or not version:\n Printer.print_error(\n \"Please provide a valid component version with --name=owner/hub-name:version. \"\n )\n sys.exit(1)\n\n polyaxon_client = PolyaxonClient()\n try:\n polyaxon_client.component_hub_v1.get_component_version(owner, hub_name, version)\n to_update = True\n except (ApiException, HTTPError):\n to_update = False\n\n if to_update:\n if not click.confirm(\n \"A component version {}/{}:{} already exists. 
\"\n \"Do you want to push force this version?\".format(owner, hub_name, version)\n ):\n click.echo(\"Existing without pushing component version.\")\n sys.exit(1)\n\n try:\n hub_config = V1ComponentVersion(\n name=version,\n description=description,\n tags=tags,\n content=plx_file.to_dict(dump=True),\n )\n if to_update:\n _version = polyaxon_client.component_hub_v1.update_component_version(\n owner,\n hub_name,\n version,\n hub_config,\n )\n else:\n _version = polyaxon_client.component_hub_v1.create_component_version(\n owner,\n hub_name,\n hub_config,\n )\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e, message=\"Could not create component version `{}`.\".format(hub_name)\n )\n sys.exit(1)\n\n Printer.print_success(\n \"Component version `{}` was created successfully.\".format(_version.name)\n )\n click.echo(\n \"You can view this component version on Polyaxon UI: {}\".format(\n get_dashboard_url(\n subpath=\"{}/hub/{}/versions/{}\".format(owner, hub_name, _version.name)\n )\n )\n )\n\n\n@hub.command()\n@click.option(*OPTIONS_OWNER[\"args\"], **OPTIONS_OWNER[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(\n \"--query\",\n \"-q\",\n type=str,\n help=\"To filter the component hub/versions based on this query spec.\",\n)\n@click.option(\n \"--sort\",\n \"-s\",\n type=str,\n help=\"To order the component hub/versions based on the sort spec.\",\n)\n@click.option(\"--limit\", type=int, help=\"To limit the list of component hub/versions.\")\n@click.option(\n \"--offset\", type=int, help=\"To offset the list of component hub/versions.\"\n)\n@clean_outputs\ndef ls(owner, component, query, sort, limit, offset):\n \"\"\"List component hub/versions by owner or owner/component.\"\"\"\n if owner and component:\n Printer.print_error(\n \"Only an owner or a component is required, not both.\", sys_exit=True\n )\n if component:\n owner, component_hub, component_version, is_version = get_info(component, None)\n else:\n owner = owner or DEFAULT_HUB\n component_hub = None\n if not owner:\n Printer.print_error(\n \"Please provide a valid owner --owner/-o or a component --component/-c.\"\n )\n sys.exit(1)\n\n def list_versions():\n component_info = \" \".format(owner, component_hub)\n try:\n polyaxon_client = get_current_or_public_client()\n params = get_query_params(\n limit=limit, offset=offset, query=query, sort=sort\n )\n response = polyaxon_client.component_hub_v1.list_component_versions(\n owner, component_hub, **params\n )\n except (ApiException, HTTPError) as e:\n message = \"Could not get list of component version.\"\n handle_cli_error(e, message=message)\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header(\"Versions for {}\".format(component_info))\n Printer.print_header(\"Navigation:\")\n dict_tabulate(meta)\n else:\n Printer.print_header(\"No version found for {}\".format(component_info))\n\n objects = list_dicts_to_tabulate(\n [o.to_dict() for o in response.results],\n humanize_values=True,\n exclude_attrs=[\n \"uuid\",\n \"readme\",\n \"description\",\n \"owner\",\n \"owner\",\n \"role\",\n \"settings\",\n \"content\",\n \"live_state\",\n ],\n )\n if objects:\n Printer.print_header(\"Component versions:\")\n dict_tabulate(objects, is_list_dict=True)\n\n def list_components():\n try:\n polyaxon_client = get_current_or_public_client()\n params = get_query_params(\n limit=limit, offset=offset, query=query, sort=sort\n )\n response = 
polyaxon_client.component_hub_v1.list_component_hubs(\n                owner, **params\n            )\n        except (ApiException, HTTPError) as e:\n            message = \"Could not get the list of components.\"\n            handle_cli_error(e, message=message)\n            sys.exit(1)\n\n        meta = get_meta_response(response)\n        if meta:\n            Printer.print_header(\"Components for owner {}\".format(owner))\n            Printer.print_header(\"Navigation:\")\n            dict_tabulate(meta)\n        else:\n            Printer.print_header(\"No component hub found for owner {}\".format(owner))\n\n        objects = list_dicts_to_tabulate(\n            [o.to_dict() for o in response.results],\n            humanize_values=True,\n            exclude_attrs=[\n                \"uuid\",\n                \"readme\",\n                \"description\",\n                \"owner\",\n                \"role\",\n                \"settings\",\n                \"live_state\",\n            ],\n        )\n        if objects:\n            Printer.print_header(\"Components:\")\n            dict_tabulate(objects, is_list_dict=True)\n\n    if component:\n        list_versions()\n    else:\n        list_components()\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@clean_outputs\ndef get(component, version):\n    \"\"\"Get info for a component hub by name, or owner/hub_name,\n    or component version by name, name:tag, owner/name:tag.\n\n    Examples:\n\n    To get a default component hub:\n\n    \\b\n    $ polyaxon hub get -h tensorboard\n\n    To get by specific owner/name\n\n    \\b\n    $ polyaxon hub get -p owner/my-component\n    \"\"\"\n    owner, component_hub, component_version, is_version = get_info(component, version)\n\n    try:\n        polyaxon_client = get_current_or_public_client()\n        if is_version:\n            response = polyaxon_client.component_hub_v1.get_component_version(\n                owner, component_hub, component_version\n            )\n            get_component_version_details(response)\n        else:\n            response = polyaxon_client.component_hub_v1.get_component_hub(\n                owner, component_hub\n            )\n            response.owner = owner\n            get_entity_details(response, \"Component hub\")\n    except (ApiException, HTTPError) as e:\n        handle_cli_error(\n            e,\n            message=\"Could not get `{}`.\".format(\n                component_version if is_version else component_hub\n            ),\n            sys_exit=True,\n        )\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@clean_outputs\ndef delete(component, version):\n    \"\"\"Delete a component hub or a component version.\"\"\"\n    owner, component_hub, component_version, is_version = get_info(\n        component, version, True\n    )\n    full_entity = (\n        \"{}/{}:{}\".format(owner, component_hub, component_version)\n        if is_version\n        else \"{}/{}\".format(owner, component_hub)\n    )\n\n    if not click.confirm(\n        \"Are you sure you want to delete component {} `{}`\".format(\n            \"version\" if is_version else \"hub\", full_entity\n        )\n    ):\n        click.echo(\"Exiting without deleting component hub.\")\n        sys.exit(1)\n\n    try:\n        polyaxon_client = PolyaxonClient()\n        if is_version:\n            polyaxon_client.component_hub_v1.delete_component_version(\n                owner, component_hub, component_version\n            )\n        else:\n            polyaxon_client.component_hub_v1.delete_component_hub(owner, component_hub)\n        Printer.print_success(\n            \"Component {} `{}` was deleted successfully\".format(\n                \"version\" if is_version else \"hub\", full_entity\n            )\n        )\n    except (ApiException, HTTPError) as e:\n        handle_cli_error(\n            e,\n            message=\"Could not delete component {} `{}`.\".format(\n                \"version\" if is_version else \"hub\", full_entity\n            ),\n        )\n        
sys.exit(1)\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@click.option(\n \"--name\",\n type=str,\n help=\"Name of the component hub, must be unique for the same user.\",\n)\n@click.option(\"--description\", type=str, help=\"Description of the component hub.\")\n@click.option(\n \"--tags\", type=str, help=\"Tags of the run, comma separated values (optional).\"\n)\n@click.option(\n \"--private\",\n type=bool,\n help=\"Set the visibility of the component hub to private/public.\",\n)\n@clean_outputs\ndef update(component, version, name, description, tags, private):\n \"\"\"Update component hub.\n\n Uses /docs/core/cli/#caching\n\n Example:\n\n \\b\n $ polyaxon hub update foobar --description=\"Image Classification with DL using TensorFlow\"\n\n \\b\n $ polyaxon hub update mike1/foobar --description=\"Image Classification with DL using TensorFlow\"\n\n \\b\n $ polyaxon hub update --tags=\"foo, bar\"\n \"\"\"\n owner, component_hub, component_version, is_version = get_info(\n component, version, True\n )\n full_entity = (\n \"{}/{}:{}\".format(owner, component_hub, component_version)\n if is_version\n else \"{}/{}\".format(owner, component_hub)\n )\n\n update_dict = {}\n if name:\n update_dict[\"name\"] = name\n\n if description:\n update_dict[\"description\"] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict[\"tags\"] = tags\n\n if private is not None:\n update_dict[\"is_public\"] = not private\n\n if not update_dict:\n Printer.print_warning(\n \"No argument was provided to update the component {}.\".format(\n \"version\" if is_version else \"hub\"\n )\n )\n sys.exit(1)\n\n try:\n polyaxon_client = PolyaxonClient()\n if is_version:\n response = polyaxon_client.component_hub_v1.patch_component_version(\n owner, component_hub, component_version, body=update_dict\n )\n Printer.print_success(\"Component version updated.\")\n get_component_version_details(response)\n else:\n response = polyaxon_client.component_hub_v1.patch_component_hub(\n owner, component_hub, body=update_dict\n )\n Printer.print_success(\"Component updated.\")\n get_entity_details(response, \"Component hub\")\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e,\n message=\"Could not update component {} `{}`.\".format(\n \"version\" if is_version else \"hub\", full_entity\n ),\n )\n sys.exit(1)\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@click.option(\n \"--yes\",\n \"-y\",\n is_flag=True,\n default=False,\n help=\"Automatic yes to prompts. 
\"\n 'Assume \"yes\" as answer to all prompts and run non-interactively.',\n)\n@click.option(\n \"--url\",\n is_flag=True,\n default=False,\n help=\"Print the url of the dashboard for this component hub.\",\n)\n@clean_outputs\ndef dashboard(component, version, yes, url):\n \"\"\"Open this operation's dashboard details in browser.\"\"\"\n owner, component_hub, component_version, is_version = get_info(component, version)\n subpath = (\n \"{}/hub/{}/versions?version={}\".format(owner, component_hub, component_version)\n if is_version\n else \"{}/hub/{}\".format(owner, component_hub)\n )\n\n hub_url = get_dashboard_url(subpath=subpath, use_cloud=settings.CLI_CONFIG.is_ce)\n if url:\n Printer.print_header(\"The dashboard is available at: {}\".format(hub_url))\n sys.exit(0)\n if not yes:\n click.confirm(\n \"Dashboard page will now open in your browser. Continue?\",\n abort=True,\n default=True,\n )\n click.launch(hub_url)\n", "sub_path": "core/polyaxon/cli/hub.py", "file_name": "hub.py", "file_ext": "py", "file_size_in_byte": 20627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "polyaxon.settings.CLI_CONFIG", "line_number": 53, "usage_type": "attribute"}, {"api_name": "polyaxon.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "polyaxon.client.PolyaxonClient", "line_number": 54, "usage_type": "call"}, {"api_name": "polyaxon.schemas.cli.client_config.ClientConfig", "line_number": 54, "usage_type": "call"}, {"api_name": "polyaxon.constants.globals.NO_AUTH", "line_number": 54, "usage_type": "name"}, {"api_name": "polyaxon.client.PolyaxonClient", "line_number": 56, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 61, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 61, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.list_dicts_to_tabulate", "line_number": 62, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.dict_tabulate", "line_number": 63, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 66, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 66, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.list_dicts_to_tabulate", "line_number": 67, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.dict_tabulate", "line_number": 68, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 70, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 70, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 71, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.dict_to_tabulate", "line_number": 76, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 80, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 80, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.dict_tabulate", "line_number": 81, "usage_type": "call"}, {"api_name": "polyaxon.polyaxonfile.get_specification", "line_number": 84, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_warning", "line_number": 87, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 87, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 94, 
"usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 94, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 98, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 98, "usage_type": "name"}, {"api_name": "polyaxon.env_vars.getters.get_component_info", "line_number": 113, "usage_type": "call"}, {"api_name": "polyaxon.exceptions.PolyaxonException", "line_number": 117, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 118, "usage_type": "call"}, {"api_name": "click.group", "line_number": 127, "usage_type": "call"}, {"api_name": "polyaxon.logger.clean_outputs", "line_number": 128, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 155, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 155, "usage_type": "name"}, {"api_name": "polyaxon.utils.validation.validate_tags", "line_number": 162, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 165, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 165, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 168, "usage_type": "call"}, {"api_name": "polyaxon_sdk.V1ComponentHub", "line_number": 171, "usage_type": "call"}, {"api_name": "polyaxon.client.PolyaxonClient", "line_number": 174, "usage_type": "call"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 176, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 176, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 177, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 180, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_success", "line_number": 182, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 182, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 185, "usage_type": "call"}, {"api_name": "polyaxon.cli.dashboard.get_dashboard_url", "line_number": 187, "usage_type": "call"}, {"api_name": "click.option", "line_number": 134, "usage_type": "call"}, {"api_name": "click.option", "line_number": 137, "usage_type": "call"}, {"api_name": "click.option", "line_number": 138, "usage_type": "call"}, {"api_name": "click.option", "line_number": 139, "usage_type": "call"}, {"api_name": "polyaxon.logger.clean_outputs", "line_number": 142, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 222, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 222, "usage_type": "name"}, {"api_name": "polyaxon.utils.validation.validate_tags", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 231, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 231, "usage_type": "name"}, {"api_name": "polyaxon.polyaxonfile.get_specification", "line_number": 237, "usage_type": "call"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 239, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 240, "usage_type": "call"}, {"api_name": 
"polyaxon.utils.formatting.Printer.print_error", "line_number": 243, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 243, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 246, "usage_type": "call"}, {"api_name": "polyaxon.client.PolyaxonClient", "line_number": 248, "usage_type": "call"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 252, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 252, "usage_type": "name"}, {"api_name": "click.confirm", "line_number": 256, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 260, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 261, "usage_type": "call"}, {"api_name": "polyaxon_sdk.V1ComponentVersion", "line_number": 264, "usage_type": "call"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 283, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 283, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 284, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 287, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_success", "line_number": 289, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 289, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 292, "usage_type": "call"}, {"api_name": "polyaxon.cli.dashboard.get_dashboard_url", "line_number": 294, "usage_type": "call"}, {"api_name": "click.option", "line_number": 193, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 197, "usage_type": "call"}, {"api_name": "click.option", "line_number": 200, "usage_type": "call"}, {"api_name": "click.option", "line_number": 206, "usage_type": "call"}, {"api_name": "click.option", "line_number": 207, "usage_type": "call"}, {"api_name": "polyaxon.logger.clean_outputs", "line_number": 208, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 324, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 324, "usage_type": "name"}, {"api_name": "polyaxon.constants.globals.DEFAULT_HUB", "line_number": 330, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_error", "line_number": 333, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 333, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 336, "usage_type": "call"}, {"api_name": "polyaxon.utils.query_params.get_query_params", "line_number": 342, "usage_type": "call"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 348, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 348, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 350, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 351, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.get_meta_response", "line_number": 353, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 355, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 355, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 356, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 356, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.dict_tabulate", 
"line_number": 357, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 359, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 359, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.list_dicts_to_tabulate", "line_number": 361, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 377, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 377, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.dict_tabulate", "line_number": 378, "usage_type": "call"}, {"api_name": "polyaxon.utils.query_params.get_query_params", "line_number": 383, "usage_type": "call"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 389, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 389, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 391, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 392, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.get_meta_response", "line_number": 394, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 396, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 396, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 397, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 397, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.dict_tabulate", "line_number": 398, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 400, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 400, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.list_dicts_to_tabulate", "line_number": 402, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 416, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 416, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.dict_tabulate", "line_number": 417, "usage_type": "call"}, {"api_name": "click.option", "line_number": 302, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_OWNER", "line_number": 302, "usage_type": "name"}, {"api_name": "click.option", "line_number": 303, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_HUB", "line_number": 303, "usage_type": "name"}, {"api_name": "click.option", "line_number": 304, "usage_type": "call"}, {"api_name": "click.option", "line_number": 310, "usage_type": "call"}, {"api_name": "click.option", "line_number": 316, "usage_type": "call"}, {"api_name": "click.option", "line_number": 317, "usage_type": "call"}, {"api_name": "polyaxon.logger.clean_outputs", "line_number": 320, "usage_type": "name"}, {"api_name": "polyaxon.cli.utils.get_entity_details", "line_number": 459, "usage_type": "call"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 460, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 460, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 461, "usage_type": "call"}, {"api_name": "click.option", "line_number": 426, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_HUB", "line_number": 426, "usage_type": 
"name"}, {"api_name": "click.option", "line_number": 427, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_VERSION", "line_number": 427, "usage_type": "name"}, {"api_name": "polyaxon.logger.clean_outputs", "line_number": 428, "usage_type": "name"}, {"api_name": "click.confirm", "line_number": 485, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 490, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 491, "usage_type": "call"}, {"api_name": "polyaxon.client.PolyaxonClient", "line_number": 494, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_success", "line_number": 501, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 501, "usage_type": "name"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 506, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 506, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 507, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 513, "usage_type": "call"}, {"api_name": "click.option", "line_number": 471, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_HUB", "line_number": 471, "usage_type": "name"}, {"api_name": "click.option", "line_number": 472, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_VERSION", "line_number": 472, "usage_type": "name"}, {"api_name": "polyaxon.logger.clean_outputs", "line_number": 473, "usage_type": "name"}, {"api_name": "polyaxon.utils.validation.validate_tags", "line_number": 566, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_warning", "line_number": 574, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 574, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 579, "usage_type": "call"}, {"api_name": "polyaxon.client.PolyaxonClient", "line_number": 582, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer.print_success", "line_number": 587, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 587, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_success", "line_number": 593, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 593, "usage_type": "name"}, {"api_name": "polyaxon.cli.utils.get_entity_details", "line_number": 594, "usage_type": "call"}, {"api_name": "polyaxon_sdk.rest.ApiException", "line_number": 595, "usage_type": "name"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 595, "usage_type": "name"}, {"api_name": "polyaxon.cli.errors.handle_cli_error", "line_number": 596, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 602, "usage_type": "call"}, {"api_name": "click.option", "line_number": 517, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_HUB", "line_number": 517, "usage_type": "name"}, {"api_name": "click.option", "line_number": 518, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_VERSION", "line_number": 518, "usage_type": "name"}, {"api_name": "click.option", "line_number": 519, "usage_type": "call"}, {"api_name": "click.option", "line_number": 524, "usage_type": "call"}, {"api_name": "click.option", "line_number": 525, "usage_type": "call"}, {"api_name": "click.option", "line_number": 528, "usage_type": "call"}, {"api_name": "polyaxon.logger.clean_outputs", 
"line_number": 533, "usage_type": "name"}, {"api_name": "polyaxon.cli.dashboard.get_dashboard_url", "line_number": 632, "usage_type": "call"}, {"api_name": "polyaxon.settings.CLI_CONFIG", "line_number": 632, "usage_type": "attribute"}, {"api_name": "polyaxon.settings", "line_number": 632, "usage_type": "name"}, {"api_name": "polyaxon.utils.formatting.Printer.print_header", "line_number": 634, "usage_type": "call"}, {"api_name": "polyaxon.utils.formatting.Printer", "line_number": 634, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 635, "usage_type": "call"}, {"api_name": "click.confirm", "line_number": 637, "usage_type": "call"}, {"api_name": "click.launch", "line_number": 642, "usage_type": "call"}, {"api_name": "click.option", "line_number": 606, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_HUB", "line_number": 606, "usage_type": "name"}, {"api_name": "click.option", "line_number": 607, "usage_type": "call"}, {"api_name": "polyaxon.cli.options.OPTIONS_COMPONENT_VERSION", "line_number": 607, "usage_type": "name"}, {"api_name": "click.option", "line_number": 608, "usage_type": "call"}, {"api_name": "click.option", "line_number": 616, "usage_type": "call"}, {"api_name": "polyaxon.logger.clean_outputs", "line_number": 622, "usage_type": "name"}]} +{"seq_id": "34155975", "text": "from __future__ import with_statement\nimport sys\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\nimport random\nimport Pyro4.core\nfrom workitem import Workitem\nimport os\nimport socket\nfrom time import sleep, time\nfrom functools import partial\n\nPyro4.config.SERIALIZER = 'pickle'\nPyro4.config.SERIALIZERS_ACCEPTED.add('pickle')\n\nCLIENT_NAME = 'C%d@%s:%s' % (os.getpid(), socket.gethostname(), str(time()))\nWAIT_DISPATCHER_TIMEOUT = 4\n\ndef repeater(function, args=None, kwargs=None, exceptions=Exception,\n timeout=0):\n args = args or ()\n kwargs = kwargs or {}\n\n while True:\n try:\n result = function(*args, **kwargs)\n return result\n except exceptions as e:\n print(\"Error:\", e)\n\n if timeout > 0:\n sleep(timeout)\n\ndispatcherRepeater = partial(\n repeater,\n exceptions=(Pyro4.errors.CommunicationError),\n timeout=WAIT_DISPATCHER_TIMEOUT)\n\ndef readNumbers(path):\n print('\\nReading numbers')\n with open(path) as f:\n lines = f.read().splitlines()\n numbers = [int(e) for e in lines]\n return numbers\n\n\ndef placeWork(dispatcher, numbers):\n print('\\nPlacing work items into dispatcher queue')\n for i in range(len(numbers)):\n item = Workitem(i + 1, CLIENT_NAME, numbers[i])\n dispatcherRepeater(dispatcher.putWork, [item])\n\n\ndef collectResults(dispatcher, item_count):\n print('\\nGetting results from dispatcher queue')\n results = {}\n while len(results) < item_count:\n try:\n item = dispatcherRepeater(dispatcher.getResult, [CLIENT_NAME])\n print('Got result: %s (from %s)' % (item, item.processedBy))\n results[item.data] = item.result\n except queue.Empty:\n result_queue_size = dispatcherRepeater(\n dispatcher.resultQueueSize, [CLIENT_NAME])\n print('Not all results available yet (got %d out of %d). 
Work queue size: %d' %\n (len(results), item_count, item_count - result_queue_size))\n\n dispatcherRepeater(dispatcher.clientExit, [CLIENT_NAME])\n return results\n\n\ndef writeResults(results, path):\n print('\\nWriting results')\n with open(path, 'w') as f:\n for (number, factorials) in results.items():\n f.write(str(number) + ': ' + ', '.join(map(str, factorials)) + '\\n')\n\n\ndef main():\n disp_address = str(sys.argv[1])\n numbers_path = str(sys.argv[2])\n results_path = str(sys.argv[3])\n\n numbers = readNumbers(numbers_path)\n\n with Pyro4.core.Proxy('PYRO:dispatcher@' + disp_address) as dispatcher:\n dispatcherRepeater(dispatcher.clientRegister, [CLIENT_NAME])\n placeWork(dispatcher, numbers)\n results = collectResults(dispatcher, len(numbers))\n\n writeResults(results, results_path)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "05/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2844, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "Pyro4.core.config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "Pyro4.core", "line_number": 15, "usage_type": "name"}, {"api_name": "Pyro4.core.config.SERIALIZERS_ACCEPTED.add", "line_number": 16, "usage_type": "call"}, {"api_name": "Pyro4.core.config", "line_number": 16, "usage_type": "attribute"}, {"api_name": "Pyro4.core", "line_number": 16, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 18, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 36, "usage_type": "call"}, {"api_name": "Pyro4.core.errors", "line_number": 38, "usage_type": "attribute"}, {"api_name": "Pyro4.core", "line_number": 38, "usage_type": "name"}, {"api_name": "workitem.Workitem", "line_number": 52, "usage_type": "call"}, {"api_name": "Queue.Empty", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 82, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 83, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 84, "usage_type": "attribute"}, {"api_name": "Pyro4.core.core.Proxy", "line_number": 88, "usage_type": "call"}, {"api_name": "Pyro4.core.core", "line_number": 88, "usage_type": "attribute"}, {"api_name": "Pyro4.core", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "144625495", "text": "'''\nMARS Blender Tools - a Blender Add-On to work with MARS robot models\n\nFile mtexport.py\n\nCreated on 13 Feb 2014\n\n@author: Kai von Szadkowski\n\nCopy this add-on to your Blender add-on folder and activate it\nin your preferences to gain instant (virtual) world domination.\nYou may use the provided install shell script.\n\nNOTE: If you edit this script, please make sure not to use any imports\nnot supported by Blender's standard Python distribution. 
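The Pyro4 client record just completed above (seq_id 34155975) wraps every dispatcher proxy call in repeater(), a generic retry loop, and uses functools.partial to pre-bind the exception class and back-off so that call sites stay short. A minimal, self-contained sketch of that same pattern; the name retry_io and the open() call are illustrative, not taken from the record:

    import time
    from functools import partial

    def repeater(function, args=None, kwargs=None, exceptions=Exception, timeout=0):
        # Keep calling `function` until it returns without raising one of
        # `exceptions`, sleeping `timeout` seconds between attempts.
        args = args or ()
        kwargs = kwargs or {}
        while True:
            try:
                return function(*args, **kwargs)
            except exceptions as e:
                print("Error:", e)
                if timeout > 0:
                    time.sleep(timeout)

    # Bind the retry policy once, then reuse it for every call:
    retry_io = partial(repeater, exceptions=(IOError,), timeout=2)
    # retry_io(open, ["data.txt"])  # retried every 2 s until open() succeeds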
This is a script\nintended to be usable on its own and thus should not use external dependencies,\nespecially none of the other modules of the MARStools package.\n'''\n\nimport bpy\nimport mathutils\nimport os\nfrom datetime import datetime\nimport yaml\nimport struct\nfrom bpy.types import Operator\nfrom bpy.props import StringProperty, BoolProperty, IntProperty\nfrom marstools.mtutility import *\nimport marstools.mtdefs as mtdefs\nimport marstools.mtmarssceneexport as mtmse\nimport marstools.mtinertia as mtinertia\nimport marstools.mtrobotdictionary as mtrobotdictionary\n\ndef register():\n print(\"Registering mtexport...\")\n\ndef unregister():\n print(\"Unregistering mtexport...\")\n\nindent = ' '\nurdfHeader = '\\n'\nurdfFooter = indent+'\\n'\n\ndef exportBobj(path, obj):\n bpy.ops.object.select_all(action='DESELECT')\n obj.select = True#\n bpy.context.scene.objects.active = obj\n #TODO: make this exception-handled\n totverts = totuvco = totno = 1\n\n globalNormals = {}\n\n # ignore dupli children\n if obj.parent and obj.parent.dupli_type in {'VERTS', 'FACES'}:\n # XXX\n print(obj.name, 'is a dupli child - ignoring')\n return\n\n mesh = obj.to_mesh(bpy.context.scene, True, 'PREVIEW')\n #mesh.transform(obj.matrix_world)\n\n faceuv = len(mesh.uv_textures)\n if faceuv:\n uv_layer = mesh.uv_textures.active.data[:]\n\n if bpy.app.version[0] * 100 + bpy.app.version[1] >= 265:\n face_index_pairs = [(face, index) for index, face in enumerate(mesh.tessfaces)]\n else:\n face_index_pairs = [(face, index) for index, face in enumerate(mesh.faces)]\n\n mesh.calc_normals()\n\n me_verts = mesh.vertices[:]\n\n out = open(os.path.join(path, obj.name) + '.bobj', \"wb\")\n\n for v in mesh.vertices:\n out.write(struct.pack('ifff', 1, v.co[0], v.co[1], v.co[2]))\n\n if faceuv:\n uv = uvkey = uv_dict = f_index = uv_index = None\n\n uv_face_mapping = [[0, 0, 0, 0]] * len(face_index_pairs) # a bit of a waste for tri's :/\n\n uv_dict = {} # could use a set() here\n if bpy.app.version[1] >= 65:\n uv_layer = mesh.tessface_uv_textures.active.data[:]\n else:\n uv_layer = mesh.uv_textures.active.data\n for f, f_index in face_index_pairs:\n for uv_index, uv in enumerate(uv_layer[f_index].uv):\n uvkey = round(uv[0], 6), round(uv[1], 6)\n try:\n uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]\n except:\n uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)\n out.write(struct.pack('iff', 2, uv[0], uv[1]))\n\n del uv, uvkey, uv_dict, f_index, uv_index\n\n for f, f_index in face_index_pairs:\n if f.use_smooth:\n for v_idx in f.vertices:\n v = me_verts[v_idx]\n noKey = roundVector(v.normal, 6)\n if noKey not in globalNormals:\n globalNormals[noKey] = totno\n totno += 1\n out.write(struct.pack('ifff', 3, noKey[0], noKey[1], noKey[2]))\n else:\n # Hard, 1 normal from the face.\n noKey = roundVector(f.normal, 6)\n if noKey not in globalNormals:\n globalNormals[noKey] = totno\n totno += 1\n out.write(struct.pack('ifff', 3, noKey[0], noKey[1], noKey[2]))\n\n for f, f_index in face_index_pairs:\n f_smooth = f.use_smooth\n # write smooth info for face?\n\n f_v_orig = [(vi, me_verts[v_idx]) for vi, v_idx in enumerate(f.vertices)]\n\n if len(f_v_orig) == 3:\n f_v_iter = (f_v_orig, )\n else:\n f_v_iter = (f_v_orig[0], f_v_orig[1], f_v_orig[2]), (f_v_orig[0], f_v_orig[2], f_v_orig[3])\n\n for f_v in f_v_iter:\n da = struct.pack('i', 4)\n out.write(da)\n\n if faceuv:\n if f_smooth: # Smoothed, use vertex normals\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, totuvco + 
uv_face_mapping[f_index][vi], globalNormals[roundVector(v.normal, 6)]))\n else: # No smoothing, face normals\n no = globalNormals[roundVector(f.normal, 6)]\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, totuvco + uv_face_mapping[f_index][vi], no))\n else: # No UV's\n if f_smooth: # Smoothed, use vertex normals\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, 0, globalNormals[roundVector(v.normal, 6)]))\n else: # No smoothing, face normals\n no = globalNormals[roundVector(f.normal, 6)]\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, 0, no))\n out.close()\n\ndef exportObj(path, obj):\n objname = obj.name\n obj.name = 'tmp_export_666' #surely no one will ever name an object like so\n tmpobject = createPrimitive(objname, 'box', (2.0, 2.0, 2.0))\n tmpobject.data = obj.data #copy the mesh here\n outpath = os.path.join(path, objname) + '.obj'\n bpy.ops.export_scene.obj(filepath=outpath, use_selection=True, use_normals=True)\n bpy.ops.object.select_all(action='DESELECT')\n tmpobject.select = True\n bpy.ops.object.delete()\n obj.name = objname\n\n #This is the old implementation which did not work properly (08.08.2014)\n #bpy.ops.object.select_all(action='DESELECT')\n #obj.select = True\n #outpath = os.path.join(path, obj.name) + '.obj'\n #world_matrix = obj.matrix_world.copy()\n ##inverse_local_rotation = obj.matrix_local.to_euler().to_matrix().inverted()\n ##world_scale = world_matrix.to_scale() TODO: implement scale\n ## we move the object to the world origin and revert its local rotation\n ##print(inverse_local_rotation, mathutils.Matrix.Translation((0, 0, 0)))\n ##obj.matrix_world = inverse_local_rotation.to_4x4() * mathutils.Matrix.Identity(4)\n #obj.matrix_world = mathutils.Matrix.Identity(4)\n #bpy.ops.export_scene.obj(filepath=outpath, axis_forward='-Z',\n # axis_up='Y', use_selection=True, use_normals=True)\n #obj.matrix_world = world_matrix\n\ndef exportModelToYAML(model, filepath):\n print(\"MARStools YAML export: Writing model data to\", filepath )\n with open(filepath, 'w') as outputfile:\n outputfile.write('#YAML dump of robot model \"'+model['modelname']+'\", '+datetime.now().strftime(\"%Y%m%d_%H:%M\")+\"\\n\\n\")\n outputfile.write(yaml.dump(model))#, default_flow_style=False)) #last parameter prevents inline formatting for lists and dictionaries\n\ndef xmlline(ind, tag, names, values):\n line = []\n line.append(indent*ind+'<'+tag)\n for i in range(len(names)):\n line.append(' '+names[i]+'=\"'+str(values[i])+'\"')\n line.append('/>\\n')\n return ''.join(line)\n\ndef l2str(items, start=-1, end=-1):\n line = []\n i = start if start >= 0 else 0\n maxi = end if end >= 0 else len(items)\n while i < maxi:\n line.append(str(items[i])+' ')\n i += 1\n return ''.join(line)[0:-1]\n\ndef writeURDFGeometry(output, element):\n output.append(indent*4+'\\n')\n if element['geometryType'] == 'box':\n output.append(xmlline(5, 'box', ['size'], [l2str(element['size'])]))\n elif element['geometryType'] == \"cylinder\":\n output.append(xmlline(5, 'cylinder', ['radius', 'length'], [element['radius'], element['height']]))\n elif element['geometryType'] == \"sphere\":\n output.append(xmlline(5, 'sphere', ['radius'], [element['radius']]))\n elif element['geometryType'] in ['capsule', 'mesh']: #capsules are not supported in URDF and are emulated using meshes\n output.append(xmlline(5, 'mesh', ['filename', 'scale'], [element['filename'], '1.0 1.0 1.0']))#TODO correct this after implementing scale properly\n 
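The exportBobj routine in this mtexport.py record serializes a mesh as a stream of struct.pack records: tag 1 is a vertex ('ifff'), tag 2 a UV pair ('iff'), tag 3 a normal ('ifff'), and tag 4 a face header followed by one 'iii' triple (vertex, uv, normal indices) per corner, with quads pre-split into two triangles. A small reader for that layout; read_bobj is a hypothetical helper, not part of the package, and it assumes the writer's native byte order with 4-byte ints and floats (so the combined packs carry no padding and tag and payload can be read separately):

    import struct

    def read_bobj(path):
        # Decode the record stream written by exportBobj above.
        verts, uvs, normals, faces = [], [], [], []
        with open(path, "rb") as f:
            isize = struct.calcsize("i")
            while True:
                head = f.read(isize)
                if len(head) < isize:
                    break
                (tag,) = struct.unpack("i", head)
                if tag in (1, 3):                      # vertex / normal: 3 floats
                    vals = struct.unpack("fff", f.read(struct.calcsize("fff")))
                    (verts if tag == 1 else normals).append(vals)
                elif tag == 2:                         # uv: 2 floats
                    uvs.append(struct.unpack("ff", f.read(struct.calcsize("ff"))))
                elif tag == 4:                         # face: 3 corner index triples
                    faces.append([struct.unpack("iii", f.read(struct.calcsize("iii")))
                                  for _ in range(3)])
        return verts, uvs, normals, faces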
output.append(indent*4+'\\n')\n\ndef exportModelToURDF(model, filepath):\n output = []\n output.append(urdfHeader)\n output.append(indent+'\\n\\n')\n #export link information\n for l in model['links'].keys():\n link = model['links'][l]\n output.append(indent*2+'\\n')\n if link['inertial'] != {} and 'mass' in link['inertial'] and 'inertia' in link['inertial']:\n output.append(indent*3+'\\n')\n if 'pose' in link['inertial']:\n output.append(xmlline(4, 'origin', ['xyz', 'rpy'], [l2str(link['inertial']['pose']['translation']), l2str(link['inertial']['pose']['rotation_euler'])]))\n output.append(xmlline(4, 'mass', ['value'], [str(link['inertial']['mass'])]))\n output.append(xmlline(4, 'inertia', ['ixx', 'ixy', 'ixz', 'iyy', 'iyz', 'izz'], ' '.join([str(i) for i in link['inertial']['inertia']])))\n output.append(indent*3+'\\n')\n #visual object\n if link['visual']:\n for v in link['visual']:\n vis = link['visual'][v]\n output.append(indent*3+'\\n')\n output.append(xmlline(4, 'origin', ['xyz', 'rpy'], [l2str(vis['pose']['translation']), l2str(vis['pose']['rotation_euler'])]))\n writeURDFGeometry(output, vis['geometry'])\n if 'material' in vis:\n if model['materials'][vis['material']]['users'] == 0: #FIXME: change back to 1 when implemented in urdfloader\n mat = model['materials'][vis['material']]\n output.append(indent*4+'\\n')\n color = mat['diffuseFront']\n output.append(indent*5+'\\n')\n if 'texturename' in mat:\n output.append(indent*5+'\\n')\n output.append(indent*4+'\\n')\n else:\n output.append(indent*4+'\\n')\n output.append(indent*3+'\\n')\n #collision object\n if link['collision']:\n for c in link['collision']:\n col = link['collision'][c]\n output.append(indent*3+'\\n')\n output.append(xmlline(4, 'origin', ['xyz', 'rpy'], [l2str(col['pose']['translation']), l2str(col['pose']['rotation_euler'])]))\n writeURDFGeometry(output, col['geometry'])\n output.append(indent*3+'\\n')\n output.append(indent*2+'\\n\\n')\n #export joint information\n for j in model['joints']:\n joint = model['joints'][j]\n output.append(indent*2+'\\n')\n child = model['links'][joint[\"child\"]]\n output.append(xmlline(3, 'origin', ['xyz', 'rpy'], [l2str(child['pose']['translation']), l2str(child['pose']['rotation_euler'])]))\n output.append(indent*3+'\\n')\n output.append(indent*3+'\\n')\n if 'axis' in joint:\n output.append(indent*3+'\\n')\n if 'limits' in joint:\n output.append(xmlline(3, 'limit', ['lower', 'upper', 'velocity', 'effort'], [str(joint['limits'][0]), str(joint['limits'][1]), joint['maxvelocity'], joint['maxeffort']]))\n output.append(indent*2+'\\n\\n')\n #export material information\n for m in model['materials']:\n if model['materials'][m]['users'] > 0: #FIXME: change back to 1 when implemented in urdfloader\n output.append(indent*2+'\\n')\n color = model['materials'][m]['diffuseFront']\n transparency = model['materials'][m]['transparency'] if 'transparency' in model['materials'][m] else 0.0\n output.append(indent*3+'\\n')\n if 'texturename' in model['materials'][m]:\n output.append(indent*3+'\\n')\n output.append(indent*2+'\\n\\n')\n #finish the export\n output.append(urdfFooter)\n with open(filepath, 'w') as outputfile:\n outputfile.write(''.join(output))\n # problem of different joint transformations needed for fixed joints\n print(\"MARStools URDF export: Writing model data to\", filepath )\n\ndef exportModelToSMURF(model, path):\n export = {'semantics': model['groups'] != {} or model['chains'] != {},\n 'state': False,#model['state'] != {}, #TODO: handle state\n 'materials': model['materials'] != 
{},\n 'sensors': model['sensors'] != {},\n 'motors': model['motors'] != {},\n 'controllers': model['controllers'] != {},\n 'simulation': True#model['simulation'] != {} #TODO: make this a nice test\n }\n\n\n #create all filenames\n smurf_filename = model['modelname'] + \".smurf\"\n urdf_filename = model['modelname'] + \".urdf\"\n filenames = {'semantics': model['modelname'] + \"_semantics.yml\",\n 'state': model['modelname'] + \"_state.yml\",\n 'materials': model['modelname'] + \"_materials.yml\",\n 'sensors': model['modelname'] + \"_sensors.yml\",\n 'motors': model['modelname'] + \"_motors.yml\",\n 'controllers': model['modelname'] + \"_controllers.yml\",\n 'simulation': model['modelname'] + \"_simulation.yml\"\n }\n\n infostring = ' definition SMURF file for \"'+model['modelname']+'\", '+model[\"date\"]+\"\\n\\n\"\n\n #write model information\n print('Writing SMURF information to', smurf_filename)\n modeldata = {}\n modeldata[\"date\"] = model[\"date\"]\n modeldata[\"files\"] = [urdf_filename] + [filenames[f] for f in filenames if export[f]]\n with open(path + smurf_filename, 'w') as op:\n op.write('#main SMURF file of model \"'+model['modelname']+'\"\\n\\n')\n op.write(\"modelname: \"+model['modelname']+\"\\n\")\n op.write(yaml.dump(modeldata, default_flow_style=False))\n\n #write urdf\n exportModelToURDF(model, path + urdf_filename)\n\n #write semantics (SRDF information in YML format)\n if export['semantics']:\n with open(path + filenames['semantics'], 'w') as op:\n op.write('#semantics'+infostring)\n op.write(\"modelname: \"+model['modelname']+'\\n')\n semantics = {}\n if model['groups'] != {}:\n semantics['groups'] = model['groups']\n if model['chains'] != {}:\n semantics['chains'] = model['chains']\n op.write(yaml.dump(semantics, default_flow_style=False))\n\n #write state (state information of all joints, sensor & motor activity etc.) 
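exportModelToSMURF above fans the model out into one URDF plus optional YAML side files, and the master .smurf file is only a small index: a comment header, a modelname line, and a yaml.dump of the date and file list. Reconstructing that payload stand-alone — the model name "myrobot", the date string, and the file list are made up for illustration:

    import yaml

    modeldata = {
        "date": "20140813_12:00",
        "files": ["myrobot.urdf", "myrobot_materials.yml"],
    }
    # Mirrors what the exporter writes into myrobot.smurf:
    print('#main SMURF file of model "myrobot"\n')
    print("modelname: myrobot")
    print(yaml.dump(modeldata, default_flow_style=False), end="")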
#TODO: implement everything but joints\n if export['state']:\n states = []\n #gather all states\n for jointname in model['joints']:\n joint = model['joints'][jointname]\n if 'state' in joint: #this should always be the case, but testing doesn't hurt\n tmpstate = joint['state'].copy()\n tmpstate['name'] = jointname\n states.append(joint['state'])\n with open(path + filenames['state'], 'w') as op:\n op.write('#state'+infostring)\n op.write(\"modelname: \"+model['modelname']+'\\n')\n op.write(yaml.dump(states))#, default_flow_style=False))\n\n #write materials, sensors, motors & controllers\n for data in ['materials', 'sensors', 'motors', 'controllers']:\n if export[data]:\n with open(path + filenames[data], 'w') as op:\n op.write('#' + data +infostring)\n op.write(yaml.dump({data: list(model[data].values())}, default_flow_style=False))\n\n #write simulation\n if export['simulation']:\n nodes = {'visual': {}, 'collision': {}}\n for link in model['links']:\n for objtype in ['visual', 'collision']:\n for objname in model['links'][link][objtype]:\n props = model['links'][link][objtype][objname]\n #for prop in ['name']: #TODO: filter these properties and purge redundant ones\n # del(props[prop])\n nodes[objtype][objname] = props\n with open(path + filenames['simulation'], 'w') as op:\n op.write('#simulation'+infostring)\n if model['simulation'] != {}:\n op.write(\"modelname: \"+model['modelname']+'\\n')\n #TODO: handle simulation-specific data\n op.write(yaml.dump(list(model['simulation'].values()), default_flow_style=False))\n op.write(\"\\nvisual:\\n\")\n op.write(yaml.dump(list(nodes['visual'].values())))\n op.write(\"\\ncollision:\\n\")\n op.write(yaml.dump(list(nodes['collision'].values())))\n\ndef exportSceneToSMURF(path):\n \"\"\"Exports all robots in a scene to separate SMURF folders.\"\"\"\n pass\n\ndef exportModelToMARS(model, path):\n \"\"\"Exports selected robot as a MARS scene\"\"\"\n mtmse.exportModelToMARS(model, path)\n\ndef securepath(path): #TODO: this is totally not error-handled!\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.expanduser(path)\n\nclass ExportModelOperator(Operator):\n \"\"\"ExportModelOperator\"\"\"\n bl_idname = \"object.mt_export_robot\"\n bl_label = \"Export the selected model(s)\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n export()\n return {'FINISHED'}\n\ndef export():\n #TODO: check if all selected objects are on visible layers (option bpy.ops.object.select_all()?)\n if bpy.data.worlds[0].relativePath:\n outpath = securepath(os.path.expanduser(os.path.join(bpy.path.abspath(\"//\"), bpy.data.worlds[0].path)))\n else:\n outpath = securepath(os.path.expanduser(bpy.data.worlds[0].path))\n yaml = bpy.data.worlds[0].exportYAML\n urdf = bpy.data.worlds[0].exportURDF\n smurf = bpy.data.worlds[0].exportSMURF\n mars = bpy.data.worlds[0].exportMARSscene\n meshexp = bpy.data.worlds[0].exportMesh\n objexp = bpy.data.worlds[0].useObj\n bobjexp = bpy.data.worlds[0].useBobj\n objectlist = bpy.context.selected_objects\n\n if yaml or urdf or smurf or mars:\n robot = mtrobotdictionary.buildRobotDictionary()\n if yaml:\n exportModelToYAML(robot, outpath + robot[\"modelname\"] + \"_dict.yml\")\n if mars:\n exportModelToMARS(robot, outpath + robot[\"modelname\"] + \"_mars.scene\")\n if smurf:\n exportModelToSMURF(robot, outpath)\n elif urdf:\n exportModelToURDF(robot, outpath + robot[\"modelname\"] + \".urdf\")\n selectObjects(objectlist, True)\n if meshexp:\n show_progress = bpy.app.version[0] * 100 + bpy.app.version[1] >= 
269;\n if show_progress:\n wm = bpy.context.window_manager\n total = float(len(objectlist))\n wm.progress_begin(0, total)\n i = 1\n for obj in bpy.context.selected_objects:\n if ((obj.MARStype == 'visual' or\n obj.MARStype == 'collision') and obj['geometryType'] == 'mesh'):\n if objexp:\n exportObj(outpath, obj)\n if bobjexp:\n exportBobj(outpath, obj)\n if show_progress:\n wm.progress_update(i)\n i += 1\n if show_progress:\n wm.progress_end()\n\n", "sub_path": "scripts/blender/marstools/mtexport.py", "file_name": "mtexport.py", "file_ext": "py", "file_size_in_byte": 20268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "bpy.ops.object.select_all", "line_number": 45, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 45, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 59, "usage_type": "attribute"}, {"api_name": "bpy.app", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "struct.pack", "line_number": 78, "usage_type": "call"}, {"api_name": "bpy.app", "line_number": 86, "usage_type": "attribute"}, {"api_name": "struct.pack", "line_number": 97, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 109, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 116, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 130, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 136, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 140, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 144, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "bpy.ops.export_scene.obj", "line_number": 157, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 157, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.select_all", "line_number": 158, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 158, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.delete", "line_number": 160, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 160, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 181, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 182, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 319, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 334, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 349, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 356, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 373, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 375, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 377, "usage_type": "call"}, {"api_name": "marstools.mtmarssceneexport.exportModelToMARS", "line_number": 385, "usage_type": "call"}, {"api_name": "marstools.mtmarssceneexport", "line_number": 385, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 388, "usage_type": "call"}, {"api_name": "os.path", "line_number": 388, 
"usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 389, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 390, "usage_type": "call"}, {"api_name": "os.path", "line_number": 390, "usage_type": "attribute"}, {"api_name": "bpy.types.Operator", "line_number": 392, "usage_type": "name"}, {"api_name": "bpy.data", "line_number": 404, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 405, "usage_type": "call"}, {"api_name": "bpy.path.abspath", "line_number": 405, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 405, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path", "line_number": 407, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 407, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 408, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 409, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 410, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 411, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 412, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 413, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 414, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 415, "usage_type": "attribute"}, {"api_name": "marstools.mtrobotdictionary.buildRobotDictionary", "line_number": 418, "usage_type": "call"}, {"api_name": "marstools.mtrobotdictionary", "line_number": 418, "usage_type": "name"}, {"api_name": "bpy.app", "line_number": 429, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 431, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 435, "usage_type": "attribute"}]} +{"seq_id": "514001531", "text": "from rest_framework.views import exception_handler\n\nfrom .constants import CUSTOM_MESSAGES\n\n\ndef custom_exception_handler(exc, context):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exc, context)\n\n # set the custom message if found one\n if response is not None:\n keys_data = response.data.keys()\n for key in keys_data:\n if key in CUSTOM_MESSAGES.keys():\n response.data.update({key: CUSTOM_MESSAGES[key]})\n return response\n", "sub_path": "AlbumApi/utils/custom_exception_handler.py", "file_name": "custom_exception_handler.py", "file_ext": "py", "file_size_in_byte": 562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "rest_framework.views.exception_handler", "line_number": 9, "usage_type": "call"}, {"api_name": "constants.CUSTOM_MESSAGES.keys", "line_number": 15, "usage_type": "call"}, {"api_name": "constants.CUSTOM_MESSAGES", "line_number": 15, "usage_type": "name"}, {"api_name": "constants.CUSTOM_MESSAGES", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "399071677", "text": "import logging as logger\nimport os\n\nimport requests\n\nfrom django.conf import settings\nfrom kolibri.content.utils.annotation import update_channel_metadata_cache\nfrom kolibri.tasks.management.commands.base import AsyncCommand\n\n\nlogging = 
logger.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n\n def add_arguments(self, parser):\n parser.add_argument(\"channel_id\", type=str)\n\n def handle_async(self, *args, **options):\n channel_id = options[\"channel_id\"]\n logging.info(\"Downloading data for channel id {}\".format(channel_id))\n\n url = os.path.join(\n settings.CENTRAL_CONTENT_DOWNLOAD_DOMAIN,\n \"content\",\n \"databases\",\n \"{}.sqlite3\".format(channel_id),\n )\n\n dest = os.path.join(\n settings.CONTENT_DATABASE_DIR,\n \"{}.sqlite3\".format(channel_id),\n )\n\n logging.debug(\"URL to fetch: {}\".format(url))\n logging.debug(\"Destination: {}\".format(dest))\n\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n dbsize = int(r.headers['content-length'])\n\n with self.start_progress(total=dbsize) as progress_update:\n with open(dest, \"wb\") as f:\n for content in r.iter_content(1024):\n f.write(content)\n contentlength = len(content)\n progress_update(contentlength)\n\n update_channel_metadata_cache()\n", "sub_path": "kolibri/content/management/commands/importchannel.py", "file_name": "importchannel.py", "file_ext": "py", "file_size_in_byte": 1439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "kolibri.tasks.management.commands.base.AsyncCommand", "line_number": 14, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.conf.settings.CENTRAL_CONTENT_DOWNLOAD_DOMAIN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.conf.settings.CONTENT_DATABASE_DIR", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "kolibri.content.utils.annotation.update_channel_metadata_cache", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "162885898", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/xpython/pyvm2.py\n# Compiled at: 2020-05-08 06:20:17\n\"\"\"A pure-Python Python bytecode interpreter.\"\"\"\nfrom __future__ import print_function, division\nimport linecache, logging, operator, sys, six\nfrom six.moves import reprlib\nfrom xdis import PYTHON3, PYTHON_VERSION, IS_PYPY, op_has_argument\nfrom xdis.util import code2num, CO_NEWLOCALS\nfrom xdis.op_imports import get_opcode_module\nfrom xpython.pyobj import Frame, Block, traceback_from_frame\nfrom xpython.byteop import get_byteop\nPY2 = not PYTHON3\nlog = logging.getLogger(__name__)\nif PYTHON3:\n byteint = lambda b: b\nelse:\n byteint = ord\nrepr_obj = reprlib.Repr()\nrepr_obj.maxother = 120\nrepper = repr_obj.repr\n\nclass VMError(Exception):\n \"\"\"For raising errors in the operation of the VM.\"\"\"\n pass\n\n\nclass 
VMRuntimeError(Exception):\n \"\"\"RuntimeError in operation of the VM.\"\"\"\n pass\n\n\nclass VirtualMachine(object):\n\n def __init__(self, python_version=PYTHON_VERSION, vmtest_testing=False, is_pypy=IS_PYPY):\n self.frames = []\n self.frame = None\n self.return_value = None\n self.last_exception = None\n self.last_traceback_limit = None\n self.last_traceback = None\n self.version = python_version\n self.is_pypy = is_pypy\n self.vmtest_testing = vmtest_testing\n self.last_exception = None\n self.fn2native = {}\n self.in_exception_processing = False\n self.VMError = VMError\n int_vers = int(python_version * 10)\n version_info = (int_vers // 10, int_vers % 10)\n self.opc = get_opcode_module(version_info)\n self.byteop = get_byteop(self, python_version, is_pypy)\n return\n\n def top(self):\n \"\"\"Return the value at the top of the stack, with no changes.\"\"\"\n return self.frame.stack[(-1)]\n\n def pop(self, i=0):\n \"\"\"Pop a value from the stack.\n\n Default to the top of the stack, but `i` can be a count from the top\n instead.\n\n \"\"\"\n return self.frame.stack.pop(-1 - i)\n\n def push(self, *vals):\n \"\"\"Push values onto the value stack.\"\"\"\n self.frame.stack.extend(vals)\n\n def popn(self, n):\n \"\"\"Pop a number of values from the value stack.\n\n A list of `n` values is returned, the deepest value first.\n\n \"\"\"\n if n:\n ret = self.frame.stack[-n:]\n self.frame.stack[(-n):] = []\n return ret\n else:\n return []\n\n def peek(self, n):\n \"\"\"Get a value `n` entries down in the stack, without changing the stack.\"\"\"\n return self.frame.stack[(-n)]\n\n def push_block(self, type, handler=None, level=None):\n if level is None:\n level = len(self.frame.stack)\n self.frame.block_stack.append(Block(type, handler, level))\n return\n\n def pop_block(self):\n return self.frame.block_stack.pop()\n\n def top_block(self):\n return self.frame.block_stack[(-1)]\n\n def jump(self, jump):\n \"\"\"Move the bytecode pointer to `jump`, so it will execute next.\"\"\"\n self.frame.f_lasti = jump\n\n def make_frame(self, code, callargs={}, f_globals=None, f_locals=None):\n log.debug('make_frame: code=%r, callargs=%s, f_globals=%r, f_locals=%r', code, repper(callargs), (\n type(f_globals), id(f_globals)), (\n type(f_locals), id(f_locals)))\n if f_globals is not None:\n f_globals = f_globals\n if f_locals is None:\n f_locals = f_globals\n elif self.frames:\n f_globals = self.frame.f_globals\n if f_locals is None:\n f_locals = {}\n else:\n f_globals = f_locals = {'__builtins__': __builtins__, \n '__name__': '__main__', \n '__doc__': None, \n '__package__': None}\n if code.co_flags & CO_NEWLOCALS:\n f_locals = {'__locals__': {}}\n f_locals.update(callargs)\n frame = Frame(code, f_globals, f_locals, self.frame)\n log.debug('%r', frame)\n return frame\n\n def push_frame(self, frame):\n self.frames.append(frame)\n self.frame = frame\n\n def pop_frame(self):\n self.frames.pop()\n if self.frames:\n self.frame = self.frames[(-1)]\n else:\n self.frame = None\n return\n\n def print_frames(self):\n \"\"\"Print the call stack for debugging. 
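The value-stack helpers near the top of this pyvm2.py record all slice the tail of frame.stack; popn is the subtle one, because it must return the n popped values deepest-first and because stack[-0:] would alias the whole list — hence the explicit `if n:` guard. The behaviour in isolation, with a plain list standing in for the frame stack:

    def popn(stack, n):
        # Return the top n values, deepest first, and remove them in place —
        # mirrors VirtualMachine.popn, which slices frame.stack the same way.
        if n:
            ret = stack[-n:]
            stack[-n:] = []
            return ret
        return []

    stack = [1, 2, 3, 4, 5]
    assert popn(stack, 2) == [4, 5]   # deepest of the popped pair comes first
    assert stack == [1, 2, 3]
    assert popn(stack, 0) == []       # n == 0 must not drain the whole list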
Note that the\n format exactly the same as in traceback.print_tb()\n \"\"\"\n for f in self.frames:\n filename = f.f_code.co_filename\n lineno = f.line_number()\n print(' File \"%s\", line %d, in %s' % (filename, lineno, f.f_code.co_name))\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, f.f_globals)\n if line:\n print(' ' + line.strip())\n\n def resume_frame(self, frame):\n frame.f_back = self.frame\n log.debug('resume_frame: %r', frame)\n val = self.run_frame(frame)\n frame.f_back = None\n return val\n\n def run_code(self, code, f_globals=None, f_locals=None):\n \"\"\"run code using f_globals and f_locals in our VM\"\"\"\n frame = self.make_frame(code, f_globals=f_globals, f_locals=f_locals)\n try:\n val = self.run_frame(frame)\n except Exception:\n if self.vmtest_testing:\n raise\n if self.last_traceback:\n self.last_traceback.print_tb()\n print('%s' % self.last_exception[0].__name__, end='')\n exc_value = self.last_exception[1]\n tail = ': %s' % ('\\n').join(self.last_exception[1].args) if self.last_exception[1].args else ''\n print(tail)\n raise VMRuntimeError\n\n if self.frames:\n raise VMError('Frames left over!')\n if self.frame and self.frame.stack:\n raise VMError('Data left on stack! %r' % self.frame.stack)\n return val\n\n def instruction_info(self, byteName, arguments, opoffset):\n frame = self.frame\n code = frame.f_code\n return '%d: %s %s\\n\\t%s in %s:%s' % (\n opoffset,\n byteName,\n arguments,\n code.co_name,\n code.co_filename,\n frame.f_lineno)\n\n def unwind_block(self, block):\n if block.type == 'except-handler':\n offset = 3\n else:\n offset = 0\n while len(self.frame.stack) > block.level + offset:\n self.pop()\n\n if block.type == 'except-handler':\n tb, value, exctype = self.popn(3)\n self.last_exception = (exctype, value, tb)\n\n def parse_byte_and_args(self):\n \"\"\" Parse 1 - 3 bytes of bytecode into\n an instruction and optionally arguments.\"\"\"\n f = self.frame\n f_code = f.f_code\n co_code = f_code.co_code\n extended_arg = 0\n while True:\n opoffset = f.f_lasti\n line_number = self.linestarts.get(opoffset, None)\n byteCode = byteint(co_code[opoffset])\n byteName = self.opc.opname[byteCode]\n f.f_lasti += 1\n arg = None\n arguments = []\n if op_has_argument(byteCode, self.opc):\n if PYTHON_VERSION >= 3.6:\n intArg = code2num(co_code, f.f_lasti) | extended_arg\n f.f_lasti += 1\n if byteCode == self.opc.EXTENDED_ARG:\n extended_arg = intArg << 8\n continue\n else:\n extended_arg = 0\n else:\n intArg = code2num(co_code, f.f_lasti) + code2num(co_code, f.f_lasti + 1) * 256 + extended_arg\n f.f_lasti += 2\n if byteCode == self.opc.EXTENDED_ARG:\n extended_arg = intArg * 65536\n continue\n else:\n extended_arg = 0\n if byteCode in self.opc.CONST_OPS:\n arg = f_code.co_consts[intArg]\n elif byteCode in self.opc.FREE_OPS:\n if intArg < len(f_code.co_cellvars):\n arg = f_code.co_cellvars[intArg]\n else:\n var_idx = intArg - len(f.f_code.co_cellvars)\n arg = f_code.co_freevars[var_idx]\n elif byteCode in self.opc.NAME_OPS:\n arg = f_code.co_names[intArg]\n elif byteCode in self.opc.JREL_OPS:\n arg = f.f_lasti + intArg\n elif byteCode in self.opc.JABS_OPS:\n arg = intArg\n elif byteCode in self.opc.LOCAL_OPS:\n arg = f_code.co_varnames[intArg]\n else:\n arg = intArg\n arguments = [\n arg]\n elif PYTHON_VERSION >= 3.6:\n f.f_lasti += 1\n break\n\n return (\n byteName, arguments, opoffset, line_number)\n\n def log(self, byteName, arguments, opoffset, line_number):\n \"\"\" Log arguments, block stack, and data stack for each opcode.\"\"\"\n 
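parse_byte_and_args above covers both historical argument encodings: before 3.6, an argument is two little-endian bytes plus any pending EXTENDED_ARG scaled by 65536; from 3.6 on, instructions are two-byte wordcodes and each EXTENDED_ARG shifts the pending value left by 8 bits before being OR-ed into the next argument. A worked check of both accumulations (the raw byte values are chosen purely for illustration):

    # Pre-3.6: EXTENDED_ARG contributes its own 16-bit argument * 65536.
    ext = (0x00 + 0x01 * 256) * 65536      # EXTENDED_ARG with arg bytes 00 01
    arg = 0x34 + 0x12 * 256 + ext          # following opcode with arg bytes 34 12
    assert arg == 0x01001234

    # 3.6+ wordcode: each EXTENDED_ARG prefixes 8 more high bits.
    ext = 0x12 << 8                        # EXTENDED_ARG with byte 0x12
    arg = 0x34 | ext                       # following opcode's single byte 0x34
    assert arg == 0x1234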
if line_number is not None:\n op = 'Line %4d, ' % line_number\n else:\n op = ' '\n op += '%3d: %s' % (opoffset, byteName)\n if arguments:\n op += ' %r' % (arguments[0],)\n indent = ' ' * (len(self.frames) - 1)\n stack_rep = repper(self.frame.stack)\n block_stack_rep = repper(self.frame.block_stack)\n log.debug(' %sframe.stack: %s' % (indent, stack_rep))\n log.debug(' %sblocks : %s' % (indent, block_stack_rep))\n log.info('%s%s' % (indent, op))\n return\n\n def dispatch(self, byteName, arguments, opoffset):\n \"\"\" Dispatch by bytename to the corresponding methods.\n Exceptions are caught and set on the virtual machine.\"\"\"\n why = None\n self.in_exception_processing = False\n try:\n if byteName.startswith('UNARY_'):\n self.unaryOperator(byteName[6:])\n elif byteName.startswith('BINARY_'):\n self.binaryOperator(byteName[7:])\n elif byteName.startswith('INPLACE_'):\n self.inplaceOperator(byteName[8:])\n elif 'SLICE+' in byteName:\n self.sliceOperator(byteName)\n else:\n if hasattr(self.byteop, byteName):\n bytecode_fn = getattr(self.byteop, byteName, None)\n if not bytecode_fn:\n raise VMError('Unknown bytecode type: %s\\n\\t%s' % (\n self.instruction_info(byteName, arguments, opoffset),\n byteName))\n why = bytecode_fn(*arguments)\n except:\n self.last_exception = sys.exc_info()\n if not self.in_exception_processing:\n if self.last_exception[0] != SystemExit:\n log.info('exception in the execution of instruction:\\n\\t%s' % self.instruction_info(byteName, arguments, opoffset))\n self.last_traceback = traceback_from_frame(self.frame)\n self.in_exception_processing = True\n why = 'exception'\n\n return why\n\n def manage_block_stack(self, why):\n \"\"\" Manage a frame's block stack.\n Manipulate the block stack and data stack for looping,\n exception handling, or returning.\"\"\"\n assert why != 'yield'\n block = self.frame.block_stack[(-1)]\n if block.type == 'loop' and why == 'continue':\n self.jump(self.return_value)\n why = None\n return why\n else:\n self.pop_block()\n self.unwind_block(block)\n if block.type == 'loop' and why == 'break':\n why = None\n self.jump(block.handler)\n return why\n if self.version < 3.0:\n if block.type == 'finally' or block.type == 'setup-except' and why == 'exception' or block.type == 'with':\n if why == 'exception':\n exctype, value, tb = self.last_exception\n self.push(tb, value, exctype)\n else:\n if why in ('return', 'continue'):\n self.push(self.return_value)\n self.push(why)\n why = None\n self.jump(block.handler)\n return why\n else:\n if why == 'exception' and block.type in ('setup-except', 'finally'):\n self.push_block('except-handler')\n exctype, value, tb = self.last_exception\n self.push(tb, value, exctype)\n self.push(tb, value, exctype)\n why = None\n self.jump(block.handler)\n return why\n if block.type == 'finally':\n if why in ('return', 'continue'):\n self.push(self.return_value)\n self.push(why)\n why = None\n self.jump(block.handler)\n return why\n return why\n\n def run_frame(self, frame):\n \"\"\"Run a frame until it returns (somehow).\n\n Exceptions are raised, the return value is returned.\n\n \"\"\"\n self.push_frame(frame)\n self.f_code = self.frame.f_code\n self.linestarts = dict(self.opc.findlinestarts(self.f_code, dup_lines=True))\n opoffset = 0\n while True:\n byteName, arguments, opoffset, line_number = self.parse_byte_and_args()\n if log.isEnabledFor(logging.INFO):\n self.log(byteName, arguments, opoffset, line_number)\n why = self.dispatch(byteName, arguments, opoffset)\n if why == 'exception':\n if not 
self.in_exception_processing:\n if self.last_exception[0] != SystemExit:\n log.info('exception in the execution of instruction:\\n\\t%s' % self.instruction_info(byteName, arguments, opoffset))\n self.last_traceback = traceback_from_frame(self.frame)\n self.in_exception_processing = True\n if why == 'reraise':\n why = 'exception'\n if why != 'yield':\n while why and frame.block_stack:\n why = self.manage_block_stack(why)\n\n if why:\n break\n\n self.pop_frame()\n if why == 'exception':\n if self.last_exception and self.last_exception[0]:\n six.reraise(*self.last_exception)\n else:\n raise VMError('Borked exception recording')\n self.in_exception_processing = False\n return self.return_value\n\n UNARY_OPERATORS = {'POSITIVE': operator.pos, \n 'NEGATIVE': operator.neg, \n 'NOT': operator.not_, \n 'CONVERT': repr, \n 'INVERT': operator.invert}\n\n def unaryOperator(self, op):\n x = self.pop()\n self.push(self.UNARY_OPERATORS[op](x))\n\n BINARY_OPERATORS = {'POWER': pow, \n 'MULTIPLY': operator.mul, \n 'DIVIDE': getattr(operator, 'div', lambda x, y: None), \n 'FLOOR_DIVIDE': operator.floordiv, \n 'TRUE_DIVIDE': operator.truediv, \n 'MODULO': operator.mod, \n 'ADD': operator.add, \n 'SUBTRACT': operator.sub, \n 'SUBSCR': operator.getitem, \n 'LSHIFT': operator.lshift, \n 'RSHIFT': operator.rshift, \n 'AND': operator.and_, \n 'XOR': operator.xor, \n 'OR': operator.or_}\n if PYTHON_VERSION >= 3.5:\n BINARY_OPERATORS['MATRIX_MULTIPLY'] = operator.matmul\n\n def binaryOperator(self, op):\n x, y = self.popn(2)\n self.push(self.BINARY_OPERATORS[op](x, y))\n\n def inplaceOperator(self, op):\n x, y = self.popn(2)\n if op == 'POWER':\n x **= y\n elif op == 'MULTIPLY':\n x *= y\n elif op in ('DIVIDE', 'FLOOR_DIVIDE'):\n x //= y\n elif op == 'TRUE_DIVIDE':\n x /= y\n elif op == 'MODULO':\n x %= y\n elif op == 'ADD':\n x += y\n elif op == 'SUBTRACT':\n x -= y\n elif op == 'LSHIFT':\n x <<= y\n elif op == 'RSHIFT':\n x >>= y\n elif op == 'AND':\n x &= y\n elif op == 'XOR':\n x ^= y\n elif op == 'OR':\n x |= y\n elif op == 'MATRIX_MULTIPLY':\n operator.imatmul(x, y)\n else:\n raise VMError('Unknown in-place operator: %r' % op)\n self.push(x)\n\n def sliceOperator(self, op):\n start = 0\n end = None\n op, count = op[:-2], int(op[(-1)])\n if count == 1:\n start = self.pop()\n elif count == 2:\n end = self.pop()\n elif count == 3:\n end = self.pop()\n start = self.pop()\n l = self.pop()\n if end is None:\n end = len(l)\n if op.startswith('STORE_'):\n l[start:end] = self.pop()\n elif op.startswith('DELETE_'):\n del l[start:end]\n else:\n self.push(l[start:end])\n return", "sub_path": "pycfiles/x_python-1.1.0-py2.7/pyvm2.py", "file_name": "pyvm2.py", "file_ext": "py", "file_size_in_byte": 17448, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "xdis.PYTHON3", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "xdis.PYTHON3", "line_number": 18, "usage_type": "name"}, {"api_name": "six.moves.reprlib.Repr", "line_number": 22, "usage_type": "call"}, {"api_name": "six.moves.reprlib", "line_number": 22, "usage_type": "name"}, {"api_name": "xdis.PYTHON_VERSION", "line_number": 38, "usage_type": "name"}, {"api_name": "xdis.IS_PYPY", "line_number": 38, "usage_type": "name"}, {"api_name": "xdis.op_imports.get_opcode_module", "line_number": 54, "usage_type": "call"}, {"api_name": "xpython.byteop.get_byteop", "line_number": 55, "usage_type": "call"}, {"api_name": 
"xpython.pyobj.Block", "line_number": 95, "usage_type": "call"}, {"api_name": "xdis.util.CO_NEWLOCALS", "line_number": 125, "usage_type": "name"}, {"api_name": "xpython.pyobj.Frame", "line_number": 128, "usage_type": "call"}, {"api_name": "linecache.checkcache", "line_number": 152, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 153, "usage_type": "call"}, {"api_name": "xdis.op_has_argument", "line_number": 224, "usage_type": "call"}, {"api_name": "xdis.PYTHON_VERSION", "line_number": 225, "usage_type": "name"}, {"api_name": "xdis.util.code2num", "line_number": 226, "usage_type": "call"}, {"api_name": "xdis.util.code2num", "line_number": 234, "usage_type": "call"}, {"api_name": "xdis.PYTHON_VERSION", "line_number": 261, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 308, "usage_type": "call"}, {"api_name": "xpython.pyobj.traceback_from_frame", "line_number": 312, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 377, "usage_type": "attribute"}, {"api_name": "xpython.pyobj.traceback_from_frame", "line_number": 384, "usage_type": "call"}, {"api_name": "six.reraise", "line_number": 398, "usage_type": "call"}, {"api_name": "operator.pos", "line_number": 404, "usage_type": "attribute"}, {"api_name": "operator.neg", "line_number": 405, "usage_type": "attribute"}, {"api_name": "operator.not_", "line_number": 406, "usage_type": "attribute"}, {"api_name": "operator.invert", "line_number": 408, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 415, "usage_type": "attribute"}, {"api_name": "operator.floordiv", "line_number": 417, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 418, "usage_type": "attribute"}, {"api_name": "operator.mod", "line_number": 419, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 420, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 421, "usage_type": "attribute"}, {"api_name": "operator.getitem", "line_number": 422, "usage_type": "attribute"}, {"api_name": "operator.lshift", "line_number": 423, "usage_type": "attribute"}, {"api_name": "operator.rshift", "line_number": 424, "usage_type": "attribute"}, {"api_name": "operator.and_", "line_number": 425, "usage_type": "attribute"}, {"api_name": "operator.xor", "line_number": 426, "usage_type": "attribute"}, {"api_name": "operator.or_", "line_number": 427, "usage_type": "attribute"}, {"api_name": "xdis.PYTHON_VERSION", "line_number": 428, "usage_type": "name"}, {"api_name": "operator.matmul", "line_number": 429, "usage_type": "attribute"}, {"api_name": "operator.imatmul", "line_number": 462, "usage_type": "call"}]} +{"seq_id": "58302382", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0004_auto_20160129_1946'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tag',\n name='priority',\n field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(10)], verbose_name='priority'),\n ),\n ]\n", "sub_path": "apps/blog/migrations/0005_tag_priority.py", "file_name": "0005_tag_priority.py", "file_ext": "py", "file_size_in_byte": 522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": 
"django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.core.validators.MaxValueValidator", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.core", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "34398542", "text": "from sklearn.metrics import confusion_matrix, cohen_kappa_score\n\ndef quadratic_weighted_kappa(c_matrix):\n num = 0.\n denom = 0.\n\n for i in range(c_matrix.shape[0]):\n for j in range(c_matrix.shape[1]):\n n = c_matrix.shape[0]\n wij = ((i-j) ** 2.)\n oji = c_matrix[i,j]\n eij = c_matrix[i, :].sum() * c_matrix[:,j].sum() / c_matrix.sum()\n\n num += wij * oji\n denom += wij * eij\n\n return 1. - num / denom\n\n\ny_true = [1,2,3,4,3]\ny_pred = [2,2,4,4,5]\n\nc_matrix = confusion_matrix(y_true, y_pred, labels=[1,2,3,4,5])\n\nkappa = quadratic_weighted_kappa(c_matrix)\n#print(kappa)\n#0.6153846153846154\n\nkappa = cohen_kappa_score(y_true, y_pred, weights='quadratic')\n#print(kappa)\n#0.6153846153846154", "sub_path": "evalMetrics/quadraticWeightedKappa.py", "file_name": "quadraticWeightedKappa.py", "file_ext": "py", "file_size_in_byte": 718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sklearn.metrics.confusion_matrix", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.metrics.cohen_kappa_score", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "316785615", "text": "import pandas as pd\nfrom kronall import *\nfrom pauli_matrix import *\nfrom mapping import *\nfrom plot import *\nimport time\n\n\n# In[91]:\n\n\n\n# In[150]:\nstart=time.time()\nnumber=12\neig1=[]\neig2=[]\nmatrix1=[]\nmatrix2=[]\nmatrix3=[]\ncount1=[]\nsign=[1]*number\nfor index1 in range(int(number/2)+1):\n\tif index1>0:\n\t\tsign[index1-1]=-1\n\tmatrice=diag_pauli(cr_ham([i1,z,z],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\t\tprint(1)\n\tmatrix1.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,x,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\t\tprint(2)\n\tmatrix2.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,i,x],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\t\tprint(3)\n\tmatrix3.append(matrix)\n\tcount1.append(count(2,number,sign))\n\nt=1\n\nu1=np.linspace(0,2,60)\n\nfor index in np.arange(0,60):\n\tu=u1[index]\n\teig3=[]\n\tfor index1 in range(int(number/2)+1):\n\t\tmatrix=0.5*u*matrix1[index1]-t*matrix2[index1]-t*matrix3[index1]\n\t\tcount2=count1[index1]\n\t\teig=1000\n\t\ti1=1\n\t\twhile i1==1:\n\t\t\ti_eig=np.argmin(matrix-eig*count2)\n\t\t\tco=count2[i_eig] \n\t\t\teig=matrix[i_eig]/co\n\t\t\tif min(matrix-eig*count2)>=-0.01:\n\t\t\t\tbreak\n\t\teig3.append(eig)\n\teig1.append(min(eig3)+0.5*u)\n\tprint(eig+0.5*u)\t\n\teigvalue,eigvector=np.linalg.eig(0.5*u*np.kron(z,z)-t*np.kron(x,i)-t*np.kron(i,x))\n\teig2.append(min(eigvalue)+0.5*u)\n\nfrom matplotlib import pyplot as 
plt\nplt.plot(np.linspace(0.0,2.0,60),eig1,'ro',label='simulated')\nplt.plot(np.linspace(0.0,2.0,60),eig2,label='exact')\nplt.xlabel('U')\nplt.ylabel('Energy (a.u.)')\nplt.legend(loc='best',prop={'size':10})\nplt.savefig('plot.png')\nend=time.time()\nprint(float(end-start))\n", "sub_path": "mapping_new/hei.py", "file_name": "hei.py", "file_ext": "py", "file_size_in_byte": 1735, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "time.time", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "time.time", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "185500466", "text": "from Apriori import *\nimport os, os.path\nimport cherrypy\nimport rpy2.robjects as ro\nimport rpy2.robjects.packages as rpackages\nfrom rpy2.robjects.packages import importr\nimport mysql.connector\nfrom Banco import *\n\n\nclass AprioriApp(object):\n\t@cherrypy.expose\n\tdef index(self):\n\t\treturn open('view.html')\n\t\n\tdef compras(self):\n\t\treturn open('verifica_vencendor.html')\n\n@cherrypy.expose\nclass AprioriCNPJ(object):\n\t@cherrypy.tools.json_out()\n\tdef GET(self, cnpj):\n\t\tcnpjs = Banco().searchCNPJS(cnpj)\n\t\tregras = Apriori().extractRules(cnpjs)\n\t\tcnpjs = Banco().extractCNPJs(regras)\n\t\t\n\t\treturn Banco().formatCNPJS(regras, cnpj)\n\n@cherrypy.expose\nclass CompraPorCNPJ(object):\n\t@cherrypy.tools.json_out()\n\tdef GET(self, idcompra):\n\t\tcompras_cnpj = Banco().searchCompras(idcompra)\n\t\treturn idcompra\n\n\nif __name__ == '__main__':\n\tconf = {\n\t\t'/': {\n\t\t\t'tools.sessions.on': True,\n\t\t\t'tools.staticdir.debug': True,\n\t\t\t'tools.staticdir.root': os.path.dirname(os.path.abspath(__file__))\n\t\t},\n\t\t'/cnpj': {\n\t\t\t'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n\t\t\t'tools.response_headers.on': True,\n\t\t\t'tools.response_headers.headers': [('Content-Type', 'application/json')],\n\t\t},\n\t\t'/compras': {\n\t\t\t'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n\t\t\t'tools.response_headers.on': True,\n\t\t\t'tools.response_headers.headers': [('Content-Type', 'application/json')],\n\t\t},\n\t\t'/css': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'css/')\n }\n\t}\n\twebapp = AprioriApp()\n\twebapp.cnpj = AprioriCNPJ()\n\twebapp.compras = CompraPorCNPJ()\n\tcherrypy.quickstart(webapp, '/', conf)\n", "sub_path": "fornecedores_rass/final.py", "file_name": "final.py", "file_ext": "py", "file_size_in_byte": 1657, "program_lang": "python", "lang": "en", "doc_type": "code", 
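The quadratic_weighted_kappa record above (seq_id 34398542) spells the metric out with explicit loops; in formula form, with $O$ the confusion matrix and $E$ the expected matrix built from $O$'s marginals, it computes

    \kappa \;=\; 1 - \frac{\sum_{i,j} w_{ij}\, O_{ij}}{\sum_{i,j} w_{ij}\, E_{ij}},
    \qquad
    w_{ij} = (i-j)^2,
    \qquad
    E_{ij} = \frac{\bigl(\sum_k O_{ik}\bigr)\bigl(\sum_k O_{kj}\bigr)}{\sum_{k,l} O_{kl}}.

The usual $(N-1)^2$ normalisation of the weights cancels between numerator and denominator, which is why the code can drop it and still agree with sklearn's cohen_kappa_score(..., weights='quadratic') on the sample labels, as the record's commented-out prints show.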
"dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cherrypy.expose", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cherrypy.tools.json_out", "line_number": 21, "usage_type": "call"}, {"api_name": "cherrypy.tools", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cherrypy.expose", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cherrypy.tools.json_out", "line_number": 31, "usage_type": "call"}, {"api_name": "cherrypy.tools", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cherrypy.expose", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 42, "usage_type": "call"}, {"api_name": "cherrypy.dispatch.MethodDispatcher", "line_number": 45, "usage_type": "call"}, {"api_name": "cherrypy.dispatch", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cherrypy.dispatch.MethodDispatcher", "line_number": 50, "usage_type": "call"}, {"api_name": "cherrypy.dispatch", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 56, "usage_type": "call"}, {"api_name": "cherrypy.quickstart", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "119167486", "text": "\"\"\"\ncomplex-valued convolutional filter W = A+iB\ncomplex vector h = x + iy\n\nW ∗h = (A ∗ x − B ∗ y) + i(B ∗ x+ A ∗ y)\n\"\"\"\nimport matplotlib.pyplot as plt\nimport librosa\nimport scipy.signal\n\nimport torch\nimport torch.nn as nn\n\nfrom model.complex_nn import CConv2d, CConvTranspose2d, CBatchNorm2d\nfrom model.ISTFT import ISTFT\nfrom data.STFT import STFT\n# from data.conv_stft import *\n\nfrom utils.utils import display_feature\n\n\nclass EncoderBlock(nn.Module):\n\n def __init__(self, in_channels=1, out_channels=45, kernel_size=(7, 5), stride=(2, 2),\n padding=(0, 0), bias=False):\n super(EncoderBlock, self).__init__()\n\n self.cConv = CConv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=bias)\n self.cBN = CBatchNorm2d(out_channels)\n self.leaky_relu = nn.LeakyReLU(inplace=True, negative_slope=0.1)\n\n def forward(self, x):\n cConv = self.cConv(x)\n cBN = self.cBN(cConv)\n output = self.leaky_relu(cBN)\n\n return output\n\n\nclass DecoderBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding=(0, 0), output_padding=(0, 0),\n last=False, bias=False):\n\n super(DecoderBlock, self).__init__()\n self.last = last\n\n self.Trans_cConv = CConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, output_padding=output_padding,bias=bias)\n self.cBN = CBatchNorm2d(num_features=out_channels)\n self.leaky_relu = nn.LeakyReLU(inplace=True, negative_slope=0.1)\n\n def forward(self, x):\n\n Trans_cConv = self.Trans_cConv(x)\n # Paper) last_decoder_layer 에서는 BN과 Activation 을 사용하지 않음\n if self.last:\n output = Trans_cConv\n # mask_real = Trans_cConv[..., 0]\n # mask_imag = Trans_cConv[..., 1]\n #\n # mask_mag = (mask_real ** 2 + mask_imag ** 2)**0.5\n # real_phase = mask_real / (mask_mag + 1e-8)\n # imag_phase = mask_imag / (mask_mag + 1e-8)\n #\n # mask_phase = torch.atan2(imag_phase, real_phase)\n # mask_mag = 
torch.tanh(mask_mag)\n #\n # return mask_mag, mask_phase\n\n # display_feature(Trans_cConv[..., 0], \"Decoder_8_real\")\n # display_feature(Trans_cConv[..., 1], \"Decoder_8_imag\")\n # mask_phase = Trans_cConv / (torch.abs(Trans_cConv) + 1e-8)\n # print(\"mask_ph: \", mask_phase[0])\n # mask_mag = torch.tanh(torch.abs(Trans_cConv))\n # print(\"mask_mag: \", mask_mag[0])\n # output = mask_phase * mask_mag # [batch, channel, 1539, 214, 2 ]\n # real = output[..., 0]\n # imag = output[..., 1]\n # mag = torch.abs(torch.sqrt(real ** 2 + imag ** 2))\n # phase = torch.atan2(imag, real)\n\n # real_db = librosa.amplitude_to_db(real.cpu().detach().numpy())\n # imag_db = librosa.amplitude_to_db(imag.cpu().detach().numpy())\n # phase_db = librosa.amplitude_to_db(phase.cpu().detach().numpy())\n # mag_db = librosa.amplitude_to_db(mag.cpu().detach().numpy())\n\n #display_spectrogram(real_db, \"mask_Real\")\n #display_spectrogram(imag_db, \"mask_Imag\")\n #display_spectrogram(mag_db, \"mask_mag\")\n #display_spectrogram(phase_db, \"mask_phase\")\n\n else:\n normed = self.cBN(Trans_cConv)\n output = self.leaky_relu(normed)\n\n return output\n\nclass DCUNet16(nn.Module):\n\n def __init__(self, args, n_fft=64, hop_length=16):\n super(DCUNet16, self).__init__()\n\n # ISTFT hyperparam\n self.args = args\n self.n_fft = n_fft\n self.hop_length = hop_length\n # self.stft = STFT(fft_length=n_fft, hop_length=hop_length, normalized=True)\n self.istft = ISTFT(n_fft=n_fft, hop_length=hop_length)\n # self.stft = ConvSTFT(400, 100, 512, 'hanning', 'complex', fix=True).cuda(args.gpu)\n # self.istft = ConviSTFT(400, 100, 512, 'hanning', 'complex', fix=True).cuda(args.gpu)\n\n # Encoder(downsampling)\n self.downsample0 = EncoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=1, out_channels=32)\n self.downsample1 = EncoderBlock(kernel_size=(7, 5), stride=(2, 1), padding=(3, 2), in_channels=32, out_channels=32)\n self.downsample2 = EncoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=32, out_channels=64)\n self.downsample3 = EncoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample4 = EncoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample5 = EncoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample6 = EncoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample7 = EncoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64, out_channels=64)\n\n # Decoder(Upsampling)\n self.upsample0 = DecoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64,\n out_channels=64)\n self.upsample1 = DecoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=128,\n out_channels=64)\n self.upsample2 = DecoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=128,\n out_channels=64)\n self.upsample3 = DecoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=128,\n out_channels=64, output_padding=(0, 1))\n self.upsample4 = DecoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=128,\n out_channels=64)\n self.upsample5 = DecoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=128,\n out_channels=32)\n self.upsample6 = DecoderBlock(kernel_size=(7, 5), stride=(2, 1), padding=(3, 2), in_channels=64,\n out_channels=32, output_padding=(1, 0))\n 
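# Added note: each upsample stage mirrors the kernel size and stride of its\n        # encoder counterpart, and output_padding resolves the output-size\n        # ambiguity of strided transposed convolutions so the skip concats line up.\n        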
self.upsample7 = DecoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=64, out_channels=1,\n                                      bias=True, last=True, output_padding=(0, 1))\n\n    def forward(self, input, is_istft=True):\n        # print(\"input:\", x.size())\n        # print(noisy_stft.size())\n        real = input[..., 0]\n        imag = input[..., 1]\n        # inputs = torch.stack([real, imag], dim=-1).unsqueeze(1)\n        #\n        spec_mag = torch.sqrt(real ** 2 + imag ** 2 + 1e-8)\n        spec_phase = torch.atan2(imag, real)\n        # print(\"spec\", spec_mag.size())\n        # inp = inputs\n        # downsampling/encoding\n        # print(\" --[Encoder]-- \")\n        # print(\" Input(spec): \", x.size())\n        # display_feature(x[..., 0], \"input_real\")\n        # display_feature(x[..., 1], \"input_imag\")\n        d0 = self.downsample0(input)\n        # display_feature(d0[..., 0], \"Encoder_1_real\")\n        # display_feature(d0[..., 1], \"Encoder_1_imag\")\n        # print(\" d0: \", d0.size())\n        d1 = self.downsample1(d0)\n        # display_feature(d1[..., 0], \"Encoder_2_real\")\n        # display_feature(d1[..., 1], \"Encoder_2_imag\")\n        # print(\" d1: \", d1.size())\n        d2 = self.downsample2(d1)\n        # display_feature(d2[..., 0], \"Encoder_3_real\")\n        # display_feature(d2[..., 1], \"Encoder_3_imag\")\n        # print(\" d2: \", d2.size())\n        d3 = self.downsample3(d2)\n        # display_feature(d3[..., 0], \"Encoder_4_real\")\n        # display_feature(d3[..., 1], \"Encoder_4_imag\")\n        # print(\" d3: \", d3.size())\n        d4 = self.downsample4(d3)\n        # display_feature(d4[..., 0], \"Encoder_5_real\")\n        # display_feature(d4[..., 1], \"Encoder_5_imag\")\n        # print(\" d4: \", d4.size())\n        d5 = self.downsample5(d4)\n        # display_feature(d5[..., 0], \"Encoder_6_real\")\n        # display_feature(d5[..., 1], \"Encoder_6_imag\")\n        # print(\" d5: \", d5.size())\n        d6 = self.downsample6(d5)\n        # display_feature(d6[..., 0], \"Encoder_7_real\")\n        # display_feature(d6[..., 1], \"Encoder_7_imag\")\n        # print(\" d6: \", d6.size())\n        d7 = self.downsample7(d6)\n        # display_feature(d7[..., 0], \"Encoder_8_real\")\n        # display_feature(d7[..., 1], \"Encoder_8_imag\")\n        # print(\" d7: \", d7.size())\n\n        # print(\" --[Decoder]-- \")\n        # bridge: the first decoder stage gets no skip connection\n        u0 = self.upsample0(d7)\n        # display_feature(u0[..., 0], \"Decoder_1_real\")\n        # display_feature(u0[..., 1], \"Decoder_1_imag\")\n\n        # skip-connection\n        c0 = torch.cat((u0, d6), dim=1)\n        # print(\" u0: \", u0.size())\n        # print(d6.size())\n        # print(\" concat(u0,d6): \", d6.size())\n\n        u1 = self.upsample1(c0)\n        # display_feature(u1[..., 0], \"Decoder_2_real\")\n        # display_feature(u1[..., 1], \"Decoder_2_imag\")\n        c1 = torch.cat((u1, d5), dim=1)\n        # print(\" u1: \", u1.size())\n        # print(\" concat(u1,d5): \", c1.size())\n\n        u2 = self.upsample2(c1)\n        # display_feature(u2[..., 0], \"Decoder_3_real\")\n        # display_feature(u2[..., 1], \"Decoder_3_imag\")\n        c2 = torch.cat((u2, d4), dim=1)\n        # print(\" u2: \", u2.size())\n        # print(\" concat(u2,d4): \", c2.size())\n\n        u3 = self.upsample3(c2)\n        # display_feature(u3[..., 0], \"Decoder_4_real\")\n        # display_feature(u3[..., 1], \"Decoder_4_imag\")\n        c3 = torch.cat((u3, d3), dim=1)\n        # print(\" u3: \", u3.size())\n        # print(\" concat(u3,d3): \", c3.size())\n\n        u4 = self.upsample4(c3)\n        # display_feature(u4[..., 0], \"Decoder_5_real\")\n        # display_feature(u4[..., 1], \"Decoder_5_imag\")\n        c4 = torch.cat((u4, d2), dim=1)\n        # print(\" u4: \", u4.size())\n        # print(\" concat(u4,d2): \", c4.size())\n\n        u5 = self.upsample5(c4)\n        # display_feature(u5[..., 0], \"Decoder_6_real\")\n        # display_feature(u5[..., 1], \"Decoder_6_imag\")\n        # print(\" u5: \", u5.size())\n        # print(d1.size())\n        c5 = 
torch.cat((u5, d1), dim=1)\n        # print(\" concat(u5,d1): \", c5.size())\n\n        u6 = self.upsample6(c5)\n        # display_feature(u6[..., 0], \"Decoder_7_real\")\n        # display_feature(u6[..., 1], \"Decoder_7_imag\")\n        # print(\" d0 \", d0.size())\n        # print(\" u6: \", u6.size())\n        c6 = torch.cat((u6, d0), dim=1)\n\n        # print(\" concat(u6,d0): \", c6.size())\n\n        # u7 = self.upsample7(c6)\n\n        # mask_mag, mask_phase = self.upsample7(c6)\n        mask = self.upsample7(c6)\n        # print(mask.size())\n\n        mask_real = mask[..., 0]\n        mask_imag = mask[..., 1]\n        mask_mag = (mask_real ** 2 + mask_imag ** 2) ** 0.5\n        real_phase = mask_real / (mask_mag + 1e-8)\n        imag_phase = mask_imag / (mask_mag + 1e-8)\n\n        mask_phase = torch.atan2(imag_phase, real_phase)\n        mask_mag = torch.tanh(mask_mag)\n        est_mag = mask_mag * spec_mag  # apply the bounded magnitude mask\n        est_phase = spec_phase + mask_phase  # todo: is simply adding the phase correct?\n\n        real = est_mag * torch.cos(est_phase)  # todo: double-check this polar-to-rectangular formula\n        imag = est_mag * torch.sin(est_phase)\n        spec = torch.stack([real, imag], dim=-1)\n        if is_istft:\n            # print(est.size())\n            output = self.istft(spec)\n            output = torch.clamp_(output, -1, 1)\n        else:\n            # without the iSTFT, return the masked complex spectrogram itself\n            output = spec\n\n        return output\n\n\ndef display_spectrogram(x, title):\n    plt.figure(figsize=(15, 10))\n    plt.pcolormesh(x[0][0], cmap='hot')  # here the input has an extra batch dimension, hence x[0][0]\n    plt.colorbar(format=\"%+2.f dB\")\n    plt.title(title)\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    a = torch.randn(2, 1, 1539, 214, 2)\n\n    b = DCUNet16(args=\"aa\", n_fft=3076, hop_length=772)\n    print(b(a).size())", "sub_path": "model/DCUNet.py", "file_name": "DCUNet.py", "file_ext": "py", "file_size_in_byte": 12212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.nn.Module", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "model.complex_nn.CConv2d", "line_number": 28, "usage_type": "call"}, {"api_name": "model.complex_nn.CBatchNorm2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "model.complex_nn.CConvTranspose2d", "line_number": 49, "usage_type": "call"}, {"api_name": "model.complex_nn.CBatchNorm2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "model.ISTFT.ISTFT", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.atan2", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.atan2", "line_number": 260, "usage_type": 
"call"}, {"api_name": "torch.tanh", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.cos", "line_number": 265, "usage_type": "call"}, {"api_name": "torch.sin", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.clamp_", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 285, "usage_type": "call"}]} +{"seq_id": "482903076", "text": "#爬虫初始化函数\ndef init():\n import requests\n from bs4 import BeautifulSoup\n import json\n import re\n\n#163门户网站抓取函数\ndef MainPage(url):\n Result = []\n res = BeautifulSoup((requests.get(url)).text,'html.parser')\n for Res in res.select('.cm_fb'):\n Result.append(Res.select('a')[0].text)\n Result.append(Res.select('a')[0]['href'])\n return Result\n\n#163网站内文抓取函数\ndef News(newsurl):\n result = {}\n news = BeautifulSoup((requests.get(newsurl)).text,'html.parser')\n result['title'] = news.select('h1')[0].text\n result['time'] = news.select('.post_time_source')[0].contents[0].rstrip('\\u3000来源: ')\n result['article'] = news.select('.post_text')[0].text\n return result\n\ninit()\n", "sub_path": "Python/Spider/文章提取.py", "file_name": "文章���取.py", "file_ext": "py", "file_size_in_byte": 760, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "53452252", "text": "# Train and Evaluate the Predictor\nfrom preprocessing import *\nfrom sklearn import linear_model\nimport xgboost as xgb\nimport numpy as np\nfrom sklearn.metrics import precision_score, recall_score, accuracy_score\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nimport shap\n\ndataset = data_preprocessing(threshold=2)\ndataset[\"avg_agi\"].replace('', np.nan, inplace=True)\ndataset.dropna(subset=[\"avg_agi\"], inplace=True)\ntrain, test, valid = train_test_validation(dataset)\n\n# Baseline Model\nmodel = linear_model.LogisticRegression(C = 0.005, class_weight='balanced')\ntrain_data, train_label = data_label(train)\nvalid_data, valid_label = data_label(valid)\ntest_data, test_label = data_label(test)\nmodel.fit(train_data, train_label)\n# Baseline accuracy\nprint(\"Baseline accuracy: \", model.score(test_data, test_label))\n\n# XGBoost Model\nD_train = xgb.DMatrix(train_data, label=train_label)\nD_valid = xgb.DMatrix(valid_data, label=valid_label)\nparam = {\n # 'eta': 0.3, \n # 'max_depth': 3, \n # 
'objective': 'multi:softprob', \n    # 'num_class': 2,\n    'objective': 'binary:logistic',\n    'tree_method': 'hist',\n    'eval_metric': 'auc',\n    'eta': 0.3,\n    'gamma': 0,\n    'min_child_weight': 0.01,\n    'max_depth': 6,\n    'max_delta_step': 1,\n    'subsample': 0.85,\n    'colsample_bytree': 0.45,\n    'colsample_bylevel': 0.7,\n    'colsample_bynode': 1.0,\n    'lambda': 5,\n    'alpha': 0.2\n    } \nsteps = 100  # The number of training iterations\nmodel = xgb.train(param, D_train, steps)\npreds = model.predict(D_valid)\nbest_preds = np.asarray([1 if p >= 0.5 else 0 for p in preds])\n# XGBoost Metrics\nprint(\"Precision = {}\".format(precision_score(valid_label, best_preds, average='macro')))\nprint(\"Recall = {}\".format(recall_score(valid_label, best_preds, average='macro')))\nprint(\"Accuracy = {}\".format(accuracy_score(valid_label, best_preds)))\n\n# ROC Curve\nfpr, tpr, _ = roc_curve(valid_label, best_preds)\nroc_auc = auc(fpr, tpr)\nplt.figure()\nplt.plot(fpr, tpr, color='darkorange',\\\nlw=2, label='ROC curve (area = %0.2f)' % roc_auc)\nplt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\nplt.xlim([-0.02, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC curve')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n# Shap Feature Importance analysis\nshap.initjs()\nexplainer = shap.TreeExplainer(model)\nshap_values = explainer.shap_values(train_data)\nshap.summary_plot(shap_values, train_data)", "sub_path": "Publish_Code/train_evaluate_predictor.py", "file_name": "train_evaluate_predictor.py", "file_ext": "py", "file_size_in_byte": 2486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.nan", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 17, "usage_type": "name"}, {"api_name": "xgboost.DMatrix", "line_number": 26, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 27, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 66, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "shap.initjs", "line_number": 73, "usage_type": "call"}, {"api_name": "shap.TreeExplainer", "line_number": 74, "usage_type": "call"}, {"api_name": "shap.summary_plot", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "144488854", "text": "# -*- coding: utf-8 -*- \n# @Time : 2019/10/17 20:02 \n# @Author : hzz \n# @File : test_trainmodel.py \n# @Software: PyCharm\nimport time\nimport unittest\nfrom common.loggers import Logger\nfrom common.myconfig import Myconfig\nfrom aicenter.modelplant import Modelpage\nfrom aicenter.ai_loginpage import Login\nfrom aicenter_testcase import test_predictionmodel\n\nconfig = Myconfig()\nlog = Logger('trainmodel').getlog()\n\n\nclass TestTrainModel(unittest.TestCase):\n def setUp(self):\n \"\"\"\n\n \"\"\"\n self.driver = Login().ai_login()\n time.sleep(1)\n\n def tearDown(self):\n \"\"\"\n\n \"\"\"\n self.driver.quit()\n\n def test_1(self):\n turn_trainmodel(self.driver)\n log.info('训练模型-新建工程')\n log.info('创建工程')\n project_name = '自动化创建的工程'+time.strftime('%y%m%d%H%M%S', time.localtime())\n project_result = trainmodel_creatproject(self.driver, project_name)\n time.sleep(1)\n self.assertEqual(project_name, project_result)\n log.info('创建模型')\n model_name = '自动化创建的模型'+time.strftime('%y%m%d%H%M%S', time.localtime())\n model_result = trainmodel_creatmodel(self.driver, model_name)\n time.sleep(1)\n self.assertEqual('新增模型成功', model_result)\n log.info('训练模型')\n model_status = trainmodel_train(self.driver, 'test_data_demo_6', 'KEY', 'LBALE')\n self.assertEqual('成功', model_status)\n\n def test_2(self):\n log.info('训练模型-使用原有工程')\n time.sleep(1)\n select_project(self.driver, '自动化创建的工程')\n log.info('创建模型')\n model_result = trainmodel_creatmodel(self.driver,\n '自动化创建的模型'+time.strftime('%y%m%d%H%M%S', time.localtime()))\n self.assertEqual('新增模型成功', model_result)\n log.info('训练模型')\n model_status = trainmodel_train(self.driver, 'sampsampleYBCSB20191101160512')\n self.assertEqual('成功', model_status)\n\n def test_delete_model(self):\n log.info('删除模型')\n time.sleep(1)\n\n def test_3(self):\n log.info('训练模型')\n time.sleep(1)\n select_project(self.driver, '自动化创建的工程')\n log.info('创建模型')\n model_result = trainmodel_creatmodel(self.driver,\n '自动化创建的模型'+time.strftime('%y%m%d%H%M%S', time.localtime()))\n self.assertEqual('新增模型成功', model_result)\n log.info('训练模型')\n model_status = trainmodel_train(self.driver, 'sampsampleYBCSB20191101160512', 1)\n self.assertEqual('成功', model_status)\n test_predictionmodel.predictionmodel(self.driver)\n\n\ndef turn_trainmodel(driver):\n model = Modelpage(driver)\n model.click_modelplant()\n model.click_model_tool()\n time.sleep(1)\n\n\ndef trainmodel_creatproject(driver, project_name):\n \"\"\"\n\n :return:\n \"\"\"\n model = Modelpage(driver)\n model.click_creat_project()\n model.input_project_name(project_name)\n 
model.input_project_desc('---自动化创建的工程---')\n model.click_project_primary()\n # model.click_first_project()\n time.sleep(3)\n return model.get_first_project_name()\n\n\ndef trainmodel_creatmodel(driver, model_name=None):\n \"\"\"\n\n :param model_name:\n :param driver:\n :return:\n \"\"\"\n if model_name:\n model_name = '自动化创建的模型' + time.strftime('%y%m%d%H%M%S', time.localtime())\n model = Modelpage(driver)\n model.click_creat_model()\n model.input_model_name(model_name)\n model.select_model_type('二分类')\n model.input_model_desc('---自动化创建的模型---')\n model.click_model_primary()\n return model.get_addmoel_succeed()\n\n\ndef trainmodel_train(driver, tablename, id_col, tab_col, forecast=None):\n \"\"\"\n :param tab_col:\n :param id_col:\n :param forecast:\n :param tablename:\n :param driver:\n :return:\n \"\"\"\n model = Modelpage(driver)\n try:\n if not model.judgepage():\n trainmodel_creatmodel(driver)\n model.click_sanmpledb()\n time.sleep(1)\n model.input_tablename(tablename)\n model.click_next_button()\n model.select_id(id_col)\n model.select_tag(tab_col)\n time.sleep(1)\n model.click_next_button()\n time.sleep(1)\n model.click_next_button()\n time.sleep(1)\n model.select_feature_cross('否')\n model.select_feature_filtrate('否')\n model.click_trainmodel()\n model.wait_train()\n time.sleep(1)\n model_status = model.get_model_status()\n if forecast:\n model.click_forecast()\n return model_status\n except Exception as e:\n log.error('训练模型失败%s', e)\n\n\ndef delete_model(driver):\n model = Modelpage(driver)\n try:\n # model.click_first_model()\n model.click_first_more()\n model.click_delete_project()\n time.sleep(1)\n except Exception as e:\n log.error('删除模型失败%s', e)\n\n\ndef select_model(driver, model_name):\n model = Modelpage(driver)\n try:\n model.by_name(model_name)\n except Exception as e:\n log.error('选择模型是失败%s', e)\n\n\ndef select_project(driver, project_name):\n model = Modelpage(driver)\n try:\n model.by_name(project_name)\n except Exception as e:\n log.error('选择项目失败%s', e)\n\n\n\n\n\n", "sub_path": "src/python/aicenter_testcase/test_trainmodel.py", "file_name": "test_trainmodel.py", "file_ext": "py", "file_size_in_byte": 5521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "common.myconfig.Myconfig", "line_number": 14, "usage_type": "call"}, {"api_name": "common.loggers.Logger", "line_number": 15, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 18, "usage_type": "attribute"}, {"api_name": "aicenter.ai_loginpage.Login", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 36, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 41, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 55, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 71, "usage_type": "call"}, {"api_name": 
"time.localtime", "line_number": 71, "usage_type": "call"}, {"api_name": "aicenter_testcase.test_predictionmodel.predictionmodel", "line_number": 76, "usage_type": "call"}, {"api_name": "aicenter_testcase.test_predictionmodel", "line_number": 76, "usage_type": "name"}, {"api_name": "aicenter.modelplant.Modelpage", "line_number": 80, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "aicenter.modelplant.Modelpage", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 109, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 109, "usage_type": "call"}, {"api_name": "aicenter.modelplant.Modelpage", "line_number": 110, "usage_type": "call"}, {"api_name": "aicenter.modelplant.Modelpage", "line_number": 128, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 133, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 138, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 142, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 147, "usage_type": "call"}, {"api_name": "aicenter.modelplant.Modelpage", "line_number": 157, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}, {"api_name": "aicenter.modelplant.Modelpage", "line_number": 168, "usage_type": "call"}, {"api_name": "aicenter.modelplant.Modelpage", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "564423425", "text": "# -*- coding: utf-8 -*-\nfrom argh.decorators import arg\n\nimport lain_sdk.mydocker as docker\nfrom lain_cli.utils import check_phase, get_domain, lain_yaml, ClusterConfig\nfrom lain_sdk.util import error, info\n\n\n@arg('phase', help=\"lain cluster phase id, can be added by lain config save\")\n@arg('-r', '--registry', help='registry url')\ndef tag(phase, registry=None):\n \"\"\"\n Tag release and meta images\n \"\"\"\n\n check_phase(phase)\n params = dict(name=phase)\n if registry is not None:\n params['registry'] = registry\n cluster_config = ClusterConfig(**params)\n info(\"Taging meta and relese image ...\")\n yml = lain_yaml(ignore_prepare=True)\n meta_version = yml.meta_version\n if meta_version is None:\n error(\"please git commit.\")\n return None\n meta_tag = \"%s:meta-%s\" % (yml.appname, meta_version)\n release_tag = \"%s:release-%s\" % (yml.appname, meta_version)\n phase_meta_tag = docker.gen_image_name(yml.appname, 'meta', meta_version, cluster_config.registry)\n phase_release_tag = docker.gen_image_name(yml.appname, 'release', meta_version, cluster_config.registry)\n meta_code = docker.tag(meta_tag, phase_meta_tag)\n release_code = docker.tag(release_tag, phase_release_tag)\n if meta_code or release_code:\n error(\"Error lain tag.\")\n else:\n info(\"Done lain tag.\")\n", "sub_path": "lain_cli/tag.py", "file_name": "tag.py", "file_ext": "py", "file_size_in_byte": 1342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "lain_cli.utils.check_phase", "line_number": 16, "usage_type": "call"}, {"api_name": "lain_cli.utils.ClusterConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "lain_sdk.util.info", "line_number": 21, "usage_type": "call"}, {"api_name": "lain_cli.utils.lain_yaml", "line_number": 22, "usage_type": "call"}, {"api_name": "lain_sdk.util.error", "line_number": 25, "usage_type": 
"call"}, {"api_name": "lain_sdk.mydocker.gen_image_name", "line_number": 29, "usage_type": "call"}, {"api_name": "lain_sdk.mydocker", "line_number": 29, "usage_type": "name"}, {"api_name": "lain_sdk.mydocker.gen_image_name", "line_number": 30, "usage_type": "call"}, {"api_name": "lain_sdk.mydocker", "line_number": 30, "usage_type": "name"}, {"api_name": "lain_sdk.mydocker.tag", "line_number": 31, "usage_type": "call"}, {"api_name": "lain_sdk.mydocker", "line_number": 31, "usage_type": "name"}, {"api_name": "lain_sdk.mydocker.tag", "line_number": 32, "usage_type": "call"}, {"api_name": "lain_sdk.mydocker", "line_number": 32, "usage_type": "name"}, {"api_name": "lain_sdk.util.error", "line_number": 34, "usage_type": "call"}, {"api_name": "lain_sdk.util.info", "line_number": 36, "usage_type": "call"}, {"api_name": "argh.decorators.arg", "line_number": 9, "usage_type": "call"}, {"api_name": "argh.decorators.arg", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "628468131", "text": "import numpy as np\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nimport time\n\nclass Config(object):\n \"\"\"配置参数\"\"\"\n\n def __init__(self, dataset, embedding):\n self.model_name = 'SMPCNN'\n self.train_path = dataset + '../../python/data/SMP2019/data/txt/train_x0.txt' # 训练集\n self.dev_path = dataset + '../../python/data/SMP2019/data/txt/dev_x0.txt' # 验证集\n self.test_path = dataset + '../../python/data/SMP2019/data/txt/test_x.txt' # 测试集\n self.class_list = [x.strip() for x in open(\n dataset + '/data/class.txt', encoding='utf-8').readlines()] # 类别名单\n self.vocab_path = dataset + '/data/vocab.pkl' # 词表\n self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt' # 模型训练结果\n self.log_path = dataset + '/log/' + self.model_name\n self.embedding_pretrained = torch.tensor(\n np.load(dataset + '/data/' + embedding)[\"embeddings\"].astype('float32')) \\\n if embedding != 'random' else None # 预训练词向量\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备\n\n self.dropout = 0.5 # 随机失活\n self.require_improvement = 1000 # 若超过1000batch效果还没提升,则提前结束训练\n self.num_classes = len(self.class_list) # 类别数\n self.n_vocab = 0 # 词表大小,在运行时赋值\n self.num_epochs = 20 # epoch数\n self.batch_size = 128 # mini-batch大小\n self.pad_size = 32 # 每句话处理成的长度(短填长切)\n self.learning_rate = 1e-3 # 学习率\n self.embed = self.embedding_pretrained.size(1) \\\n if self.embedding_pretrained is not None else 300 # 字向量维度\n self.hidden_size = 256 # 隐藏层大小\n self.n_gram_vocab = 250499\n\nfilepath = \"../THUCNews/data/vocab.pkl\"\ninf = pickle.load(open(filepath,'rb'),encoding='iso-8859-1')\nprint(inf)\nprint(inf.get(''))\n# embedding_SougouNews = '../THUCNews/data/embedding_SougouNews.npz'\n# data_embedding_SougouNews = np.load(embedding_SougouNews)\n# print(data_embedding_SougouNews['embeddings'][0])", "sub_path": "models/SMP.py", "file_name": "SMP.py", "file_ext": "py", "file_size_in_byte": 2234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.tensor", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "42292640", 
"text": "from matplotlib import pyplot as plt\nimport random\nimport matplotlib\n\nfont = {'family': \"Microsoft Yahei\", 'size': '10'}\nmatplotlib.rc('font', **font)\n\nplt.figure(figsize=(20, 8))\nx = range(120)\ny = [random.uniform(20, 35) for i in range(120)]\n\nplt.plot(x, y, color='b', linestyle='-', linewidth='3')\n_x_ticks = [\"10点{}分\".format(i) for i in range(60)]\n_x_ticks += [\"11点{}分\".format(i - 60) for i in range(60, 120)]\nplt.xticks(list(x)[::5], _x_ticks[::5], rotation=45)\nplt.xlabel(\"时间\")\nplt.ylabel(\"温度 单位(C)\")\nplt.title(\"10点到12点温度变化情况\")\n# plt.savefig(\"./折线图1.svg\")\nplt.show()\n", "sub_path": "matplotlib折线图2.py", "file_name": "matplotlib折线图2.py", "file_ext": "py", "file_size_in_byte": 621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "matplotlib.rc", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "501188055", "text": "# Copyright 2016 Jon Wayne Parrott\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport platform\nimport shutil\n\nfrom nox.command import Command\nfrom nox.logger import logger\n\n\nclass ProcessEnv(object):\n \"\"\"A environment with a 'bin' directory and a set of 'env' vars.\"\"\"\n\n def __init__(self, bin=None, env=None):\n self._bin = bin\n self.env = os.environ.copy()\n\n if env is not None:\n self.env.update(env)\n\n if self.bin:\n self.env['PATH'] = ':'.join([self.bin, self.env.get('PATH')])\n\n @property\n def bin(self):\n return self._bin\n\n def run(self, args, in_venv=True):\n \"\"\"Runs a command. 
By default, the command runs within the\n environment.\"\"\"\n return Command(\n args=args,\n env=self.env if in_venv else None,\n silent=True,\n path=self.bin if in_venv else None).run()\n\n\nclass VirtualEnv(ProcessEnv):\n \"\"\"Virtualenv management class.\"\"\"\n\n def __init__(self, location, interpreter=None, reuse_existing=False):\n self.location = os.path.abspath(location)\n self.interpreter = interpreter\n self.reuse_existing = reuse_existing\n super(VirtualEnv, self).__init__()\n\n def _clean_location(self):\n \"\"\"Deletes any existing virtualenv\"\"\"\n if os.path.exists(self.location):\n if self.reuse_existing:\n return False\n else:\n shutil.rmtree(self.location)\n\n return True\n\n @property\n def bin(self):\n \"\"\"Returns the location of the virtualenv's bin folder.\"\"\"\n if platform.system() == 'Windows':\n return os.path.join(self.location, 'Scripts')\n else:\n return os.path.join(self.location, 'bin')\n\n def create(self):\n \"\"\"Create the virtualenv.\"\"\"\n if not self._clean_location():\n logger.debug('Re-using existing virtualenv.')\n return False\n\n cmd = ['virtualenv', self.location]\n\n if self.interpreter:\n cmd.extend(['-p', self.interpreter])\n\n self.run(cmd, in_venv=False)\n\n return True\n\n def install(self, *args):\n self.run(('pip', 'install', '--upgrade') + args)\n", "sub_path": "nox/virtualenv.py", "file_name": "virtualenv.py", "file_ext": "py", "file_size_in_byte": 2726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.environ.copy", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "nox.command.Command", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 65, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "nox.logger.logger.debug", "line_number": 80, "usage_type": "call"}, {"api_name": "nox.logger.logger", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "368814559", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 2 15:44:59 2018\n\n@author: alekriley\n\"\"\"\n\nimport numpy as np\nimport seaborn as sne\nimport matplotlib.pyplot as plt\nimport scipy.stats as sts\nimport tensorflow as tf\n\ndef mini_batch(batch_size,array_size):\n indices = np.arange(array_size)\n np.random.shuffle(indices)\n i = 0\n while i+batch_size < array_size:\n yield indices[i:i+batch_size]\n i += batch_size\n if not i == array_size: yield indices[i:array_size]\n\nclass VAE:\n def __init__(self,sess,n_features,n_hidden,latent_size,lr=0.005):\n self.sess = sess\n self.batch = tf.placeholder(tf.float32,[None,n_features])\n self.latent_size = latent_size\n \n self.eweights = tf.Variable(tf.truncated_normal([n_features,n_hidden],stddev=tf.sqrt(1./n_hidden)))\n self.ebias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n 
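# Added note: the decoder below reuses e1weights and eweights with the\n        # transpose flag of tf.matmul set, i.e. this autoencoder is weight-tied.\n        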
self.encode = tf.nn.relu(tf.matmul(self.batch,self.eweights) + self.ebias)\n \n self.e1weights = tf.Variable(tf.truncated_normal([n_hidden,n_hidden],stddev=tf.sqrt(1./n_hidden)))\n self.e1bias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n self.encode1 = tf.nn.relu(tf.matmul(self.encode,self.e1weights) + self.e1bias)\n \n self.lweights = tf.Variable(tf.truncated_normal([n_hidden,self.latent_size*2],stddev=tf.sqrt(0.5/latent_size)))\n self.lbias = tf.Variable(tf.ones([1,self.latent_size*2])*0.01)\n self.latent = tf.matmul(self.encode1,self.lweights) + self.lbias\n \n self.z = tf.random_normal([tf.shape(self.batch)[0],self.latent_size])*tf.exp(self.latent[:,self.latent_size:])+self.latent[:,:self.latent_size]\n \n self.dweights = tf.Variable(tf.truncated_normal([self.latent_size,n_hidden],stddev=tf.sqrt(1./n_hidden)))\n self.dbias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n self.decode = tf.nn.relu(tf.matmul(self.z,self.dweights) + self.dbias)\n \n self.d1bias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n self.decode1 = tf.nn.relu(tf.matmul(self.decode,self.e1weights,False,True) + self.d1bias)\n \n self.rbias = tf.Variable(tf.ones([1,n_features])*0.01)\n self.reconstruct = tf.matmul(self.decode1,self.eweights,False,True) + self.rbias\n \n self.rloss = tf.reduce_sum(tf.squared_difference(self.reconstruct,self.batch),1)\n self.kloss = tf.reduce_sum(0.5*(tf.square(self.latent[:,:self.latent_size])+tf.exp(self.latent[:,self.latent_size:])-\\\n self.latent[:,self.latent_size:]-1),1)\n self.loss = tf.reduce_mean(self.rloss+self.kloss)\n self.train = tf.train.AdamOptimizer(lr).minimize(self.loss)\n \n def learn(self,batch):\n return self.sess.run([self.loss,self.train],{self.batch : batch})\n def reconstruction(self,batch):\n return self.sess.run([self.reconstruct,self.latent[:,:self.latent_size],self.latent[:,self.latent_size:]],{self.batch : batch})\n def generate(self,sample_size):\n return self.sess.run(self.reconstruct,{self.z : np.random.randn(sample_size,self.latent_size)})\n\nnp.random.seed(0)\n\nn_samples = 5000\nmeans = np.random.randn(8).reshape(2,4)*7\ndata = np.vstack([sts.multivariate_normal(means[:,j]).rvs(n_samples//4) for j in range(means.shape[1])])\n\nnp.random.seed(None)\n\nsne.set_style('darkgrid')\n\ntf.reset_default_graph()\nsess = tf.Session()\nvae = VAE(sess,data.shape[1],5,2,0.005)\nsess.run(tf.global_variables_initializer())\n\nloss = []\n\nepochs = 500\nbatch_size = 64\nfor epoch in range(epochs):\n print('Epoch {}'.format(epoch))\n for batch in mini_batch(batch_size,n_samples//2):\n loss.append(vae.learn(data[::2,:][batch])[0])\n \nreconstruction,mean,var = vae.reconstruction(data); var=np.exp(var)\nsample = vae.generate(1000)\n \nfig = plt.figure(figsize=(13,7))\ngs = plt.GridSpec(3,2,fig)\nax1 = plt.subplot(gs[0,:])\nsne.scatterplot(data[:,0],data[:,1],color=np.repeat(['red','green','yellow','blue'],n_samples//4),ax=ax1)\nax2 = plt.subplot(gs[1,0])\nsne.scatterplot(reconstruction[:,0],reconstruction[:,1],color=np.repeat(['red','green','yellow','blue'],n_samples//4),ax=ax2)\nax3 = plt.subplot(gs[1,1])\nsne.scatterplot(sample[:,0],sample[:,1],ax=ax3)\nax4 = plt.subplot(gs[2,:])\nax4.plot(range(len(loss)),loss,lw=0.5)\nplt.show()\n", "sub_path": "vae.py", "file_name": "vae.py", "file_ext": "py", "file_size_in_byte": 4304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.arange", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 17, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.squared_difference", 
"line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 71, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 71, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "seaborn.set_style", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.GridSpec", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "44840144", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\ndef imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor. 
Provided by Udacity as part of this project.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n \n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.numpy().transpose((1, 2, 0))\n \n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n \n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n \n ax.imshow(image)\n \n return ax", "sub_path": "imshow.py", "file_name": "imshow.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "222211571", "text": "from cv2 import CascadeClassifier, cvtColor, COLOR_BGR2GRAY\nfrom facenet.src.align.detect_face import create_mtcnn, detect_face\n\n\nclass HaarcascadeDetector:\n def __init__(self, model_path, min_size):\n self._detector = CascadeClassifier(model_path)\n self._min_size = min_size\n\n def detect(self, img):\n img = cvtColor(img, COLOR_BGR2GRAY)\n faces = self._detector.detectMultiScale(\n image=img,\n scaleFactor=1.4,\n minNeighbors=5,\n minSize=(self._min_size, self._min_size)\n )\n\n updated_faces = [\n (x, y, x + w, y + h) for x, y, w, h in faces\n ]\n\n return updated_faces\n\n\nclass MTCNNDetector:\n _THRESHOLDS = [0.6, 0.7, 0.7]\n _FACTOR = 0.709\n\n def __init__(self, sess, model_path, min_size):\n self._pnet, self._rnet, self._onet = create_mtcnn(sess, model_path)\n self._min_size = min_size\n\n def detect(self, img):\n faces, _ = detect_face(\n img,\n self._min_size,\n self._pnet, self._rnet, self._onet,\n self._THRESHOLDS,\n self._FACTOR\n )\n\n return faces\n", "sub_path": "Recogniton-AI/Face-Recognition/src/myfacenet/detector.py", "file_name": "detector.py", "file_ext": "py", "file_size_in_byte": 1160, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 11, "usage_type": "argument"}, {"api_name": "facenet.src.align.detect_face.create_mtcnn", "line_number": 31, "usage_type": "call"}, {"api_name": "facenet.src.align.detect_face.detect_face", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "592298300", "text": "\"\"\"Rename label to text in options table\n\nRevision ID: d1f37ec5225a\nRevises: 8804ebf2ff21\nCreate Date: 2020-05-19 23:00:20.009535\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd1f37ec5225a'\ndown_revision = '8804ebf2ff21'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('options', sa.Column('text', sa.String(), nullable=True))\n op.drop_column('options', 'label')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('options', sa.Column('label', sa.VARCHAR(), autoincrement=False, nullable=True))\n    op.drop_column('options', 'text')\n    # ### end Alembic commands ###\n", "sub_path": "backend/app/alembic/versions/d1f37ec5225a_rename_label_to_text_in_options_table.py", "file_name": "d1f37ec5225a_rename_label_to_text_in_options_table.py", "file_ext": "py", "file_size_in_byte": 811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "542491286", "text": "#! /usr/bin/env python\nfrom __future__ import print_function\nimport sys\nfrom collections import Counter\n\nc = Counter()\nlen_dict = {}\n\nline_counter = 0\nfor line in sys.stdin:\n    line_counter += 1\n    if line_counter % 10000 == 0:\n        print(\"%d lines processed\" % (line_counter), file=sys.stderr)\n    tokens = line.split()\n    c.update(tokens)  # count token frequencies for the report at the end\n    l = len(tokens)\n    if l not in len_dict:\n        len_dict[l] = 1\n    else:\n        len_dict[l] += 1\n\nmax_len = sorted(len_dict.keys())[-1]\n\nless_or_equal_to_i = 0\nfor i in xrange(max_len + 1):\n    if i in len_dict:\n        less_or_equal_to_i += len_dict[i]\n    ratio = 100.0 * less_or_equal_to_i / line_counter\n    print(\"less or equal to %d : %d, %2.2f%%\" % (i, less_or_equal_to_i, ratio), file=sys.stderr)\n\nfor key,f in sorted(c.items(), key=lambda x: x[1], reverse=True):\n    print(key+\" \"+ str(f))\n", "sub_path": "data_stats.py", "file_name": "data_stats.py", "file_ext": "py", "file_size_in_byte": 841, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "collections.Counter", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "141526055", "text": "#!/usr/bin/env python3\n\"\"\"\nGiven a sorted dictionary (array of words) of an alien language, find the order of characters in the language.\n\nEXAMPLES:\n\n    Input: words = [\"baa\", \"abcd\", \"abca\", \"cab\", \"cad\"]\n    Output: \"bdac\"\n\n    Input: words = [\"z\", \"x\"]\n    Output: \"zx\"\n\n    Input: words = [\"z\"]\n    Output: \"z\"\n\n    Input: words = [\"z\", \"x\", \"z\"]\n    Output: \"\"\n    Reason: Circular relationship.\n\n    Input: words = [\"abc\", \"ab\"]\n    Output: \"\"\n    Reason: The 2nd word 'ab' is a prefix of the 1st. This is not valid.\n\n\nNOTE:\n    - You may assume all letters are in lowercase.\n    - If the order is invalid, return an empty string.\n    - There may be multiple valid orders of letters; returning any one of them is fine.\n\nAPPROACHES:\n    1. 
Extract dependency rules from the input.\n 2. Put dependency rules into a graph with letters as nodes and\n dependencies as edges. \n 3. Track the number of incoming edges (in-degree) of each node.\n 4. Topologically sort the graph nodes, starting with those without\n any incoming edges.\n \nTECHNIQUES:\n - Topological sorting.\n\nREFERENCE\n - https://www.geeksforgeeks.org/given-sorted-dictionary-find-precedence-characters/\n - https://www.geeksforgeeks.org/topological-sorting/\n - https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm\n\n\"\"\"\n\nfrom typing import List\nfrom collections import defaultdict, Counter, deque\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.edges = set()\n self.in_degree = 0\n\n\nclass Solution:\n\n def alienOrder_v1(self, words: List[str]) -> str:\n \"\"\"Use a Node structure to store the graph data.\"\"\"\n # Step 0. Create nodes\n nodes = dict()\n for c in set([c for w in words for c in w]):\n nodes[c] = Node(c)\n\n # Step 1. Build the edges (dependency)\n for w1, w2 in zip(words, words[1:]):\n for c1, c2 in zip(w1, w2):\n if c1 != c2:\n n1 = nodes[c1]\n if c2 not in n1.edges:\n n1.edges.add(c2)\n nodes[c2].in_degree += 1\n break\n else:\n if len(w2) < len(w1):\n return ''\n\n # Step 2. Topology Sorting\n results = list()\n queue = [n for n in nodes.values() if n.in_degree == 0]\n while queue:\n n = queue.pop(0)\n results.append(n.val)\n for c in n.edges:\n n2 = nodes[c]\n n2.in_degree -= 1\n if n2.in_degree == 0:\n queue.append(n2)\n\n # If not all letters are in output, that means there was a cycle and so\n # no valid ordering. Return \"\" as per the problem description.\n if len(results) < len(nodes):\n return \"\"\n\n return ''.join(results)\n\n def alienOrder_v2(self, words: List[str]) -> str:\n \"\"\"Use two structures (dict and Counter) to store the information.\"\"\"\n # Step 0: create data structures and set the in_degree of each unique letter to 0.\n adj_list = defaultdict(set)\n in_degree = Counter({c: 0 for word in words for c in word})\n\n # Step 1: We need to populate adj_list and in_degree.\n # For each pair of adjacent words...\n for w1, w2 in zip(words, words[1:]):\n for c, d in zip(w1, w2):\n if c != d:\n if d not in adj_list[c]:\n adj_list[c].add(d)\n in_degree[d] += 1\n break\n\n # Check that second word isn't a prefix of first word. E.g. 'holly' and 'ho'\n # This is an invalid case and returns ''\n else:\n if len(w2) < len(w1):\n return \"\"\n\n # Step 2: We need to repeatedly pick off nodes with an indegree of 0.\n output = []\n queue = deque([c for c in in_degree if in_degree[c] == 0])\n while queue:\n c = queue.popleft()\n output.append(c)\n for d in adj_list[c]:\n in_degree[d] -= 1\n if in_degree[d] == 0:\n queue.append(d)\n\n # If not all letters are in output, that means there was a cycle and so\n # no valid ordering. Return \"\" as per the problem description.\n if len(output) < len(in_degree):\n return \"\"\n\n # Otherwise, convert the ordering we found into a string and return it.\n return \"\".join(output)\n\n\n# ---------------------------\n# Main & Helper Functions\n# ---------------------------\ndef main():\n \"\"\"Main function\"\"\"\n\n # Test data\n test_data = [\n [[\"baa\", \"abcd\", \"abca\", \"cab\", \"cad\"], \"bdac\"],\n [[\"wrt\", \"wrf\", \"er\", \"ett\", \"rftt\"], \"wertf\"],\n [[\"z\", \"x\"], \"zx\"],\n [[\"z\", \"x\", \"z\"], \"\"], # circular\n [[\"za\", \"zb\", \"ca\", \"cb\"], \"abzc\"],\n [[\"abc\", \"ab\"], \"\"], # 2nd word is a prefix of 1st. 
Thus, invalid\n [[\"z\", \"z\"], \"z\"],\n [[\"zy\", \"zx\"], \"zyx\"],\n [[\"ab\", \"adc\"], \"abcd\"],\n [[\"ri\", \"xz\", \"qxf\", \"jhsguaw\", \"dztqrbwbm\",\n \"dhdqfb\", \"jdv\", \"fcgfsilnb\", \"ooby\"], \"\"],\n ]\n\n sol = Solution()\n for words, expected in test_data:\n print(\"# Input = {}\".format(words))\n out1 = sol.alienOrder_v1(words)\n out2 = sol.alienOrder_v2(words)\n print(\" v1 = '{}' : {}\".format(\n out1, 'ok' if len(out1) == len(expected) else 'ERROR'))\n print(\" v2 = '{}' : {}\".format(\n out2, 'ok' if len(out2) == len(expected) else 'ERROR'))\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "python3/trees_and_graphs/alien_dictionary.py", "file_name": "alien_dictionary.py", "file_ext": "py", "file_size_in_byte": 5541, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 100, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 103, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 104, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "608125546", "text": "'''\nCreated on Apr 25, 2016\n\n:author: iitow\n'''\nfrom modules.environment import EnvManager\nfrom modules.log import message\nimport types\nimport sys\n\n\nclass DecoMeta(type):\n '''\n This is a meta class for decorating all classes\n '''\n def __new__(cls, name, bases, attrs):\n '''\n Allows for grabbing class info for parsing\n '''\n for attr_name, attr_value in attrs.iteritems():\n if isinstance(attr_value, types.FunctionType):\n attrs[attr_name] = cls.deco(attr_value)\n return super(DecoMeta, cls).__new__(cls, name, bases, attrs)\n\n @classmethod\n def deco(cls, func):\n '''\n We use this to append defaults actions here\n '''\n def wrapper(*args, **kwargs):\n '''\n This is a decorator for adding global key,value pairs\n '''\n # filter updates\n filter_args = []\n filter_kwargs = {}\n keywords = {}\n # filter parameters\n for arg in args:\n if isinstance(arg, str):\n filter_args.append(EnvManager()._sanitize(arg))\n else:\n filter_args.append(arg)\n # filter defaults\n for key, value in kwargs.iteritems():\n if isinstance(value, str):\n # create environment variable\n if 'set_env' == key:\n keywords[key] = EnvManager()._sanitize(value)\n else:\n if '$' in key:\n key = EnvManager()._sanitize(key)\n filter_kwargs[key] = EnvManager()._sanitize(value)\n else:\n filter_kwargs[key] = value\n result = func(*filter_args, **filter_kwargs)\n if keywords.get('set_env'):\n EnvManager().set(keywords.get('set_env'),\n result,\n reset=True)\n return result\n sys.stdout.flush()\n return wrapper\n\n\nclass Plugin(object):\n ''' This is the base class for a plugin\n '''\n __metaclass__ = DecoMeta\n\n def __init__(self, action_manager):\n '''\n Plugin constructor\n '''\n self.action_manager = action_manager\n self.verbose = self.action_manager.verbose\n self.debug = self.action_manager.debug\n self.EnvManager = self.action_manager.EnvManager\n", "sub_path": "src/goephor/core/plugins/pluginable.py", "file_name": "pluginable.py", "file_ext": "py", "file_size_in_byte": 2471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "types.FunctionType", "line_number": 21, "usage_type": "attribute"}, {"api_name": "modules.environment.EnvManager", "line_number": 41, 
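Both solution variants in the `alien_dictionary.py` record above boil down to Kahn's algorithm; a stripped-down, self-contained sketch of just that sorting step (the toy graph and function name here are illustrative, not from the record):

```python
from collections import deque

def kahn_topsort(adj, in_degree):
    """Return a topological order of the letters, or '' if there is a cycle."""
    queue = deque(n for n in in_degree if in_degree[n] == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in adj.get(node, ()):
            in_degree[nxt] -= 1
            if in_degree[nxt] == 0:
                queue.append(nxt)
    # A leftover node means a cycle, i.e. no valid letter order.
    return "".join(order) if len(order) == len(in_degree) else ""

# 'b' before 'd', 'd' before 'a', 'a' before 'c'  ->  "bdac"
print(kahn_topsort({"b": {"d"}, "d": {"a"}, "a": {"c"}},
                   {"b": 0, "d": 1, "a": 1, "c": 1}))
```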
"usage_type": "call"}, {"api_name": "modules.environment.EnvManager", "line_number": 49, "usage_type": "call"}, {"api_name": "modules.environment.EnvManager", "line_number": 52, "usage_type": "call"}, {"api_name": "modules.environment.EnvManager", "line_number": 53, "usage_type": "call"}, {"api_name": "modules.environment.EnvManager", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "423824642", "text": "from . import BaseHelper\nimport json\n\n\nclass DjangoHelper(BaseHelper):\n def __init__(self, request):\n self.request = request\n\n def get_current_path(self):\n return self.request.get_full_path()\n\n def get_params(self):\n return self.request.GET.dict()\n\n def get_body(self):\n return self.request.body\n\n def redirect(self, url):\n from django.shortcuts import redirect\n return redirect(url)\n\n @staticmethod\n def cache_get(key):\n try:\n with open('wework_cache.txt', 'r') as f:\n data = f.read()\n data = {} if len(data) == 0 else json.loads(data)\n try:\n return data[key]\n except KeyError:\n return None\n except FileNotFoundError:\n return None\n\n @staticmethod\n def cache_set(key, value, **kwargs):\n filename = 'wework_cache.txt'\n try:\n with open(filename, 'r') as f:\n data = f.read()\n data = {} if len(data) == 0 else json.loads(data)\n except FileNotFoundError:\n data = {}\n\n with open(filename, 'w+') as f:\n data[key] = value\n f.write(json.dumps(data))\n", "sub_path": "wework/helpers/official.py", "file_name": "official.py", "file_ext": "py", "file_size_in_byte": 1246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "522632601", "text": "# import config\n\n# config.a = 10\n# config.b = \"alphabet\"\n\n# Python program to test\n# internet speed\n\n# import speedtest\n\n\n# st = speedtest.Speedtest()\n\n# option = int(input('''What speed do you want to test:\n\n# 1) Download Speed\n\n# 2) Upload Speed\n\n# 3) Ping\n\n# Your Choice: '''))\n\n\n# if option == 1:\n\n# \tprint(st.download())\n\n# elif option == 2:\n\n# \tprint(st.upload())\n\n# elif option == 3:\n\n# \tservernames =[]\n\n# \tst.get_servers(servernames)\n\n# \tprint(st.results.ping)\n\n# else:\n\n# \tprint(\"Please enter the correct choice !\")\n\n\n# import datetime\n# import time\n# d = datetime.datetime.now()\n\n# print(d)\n\n\n# now = datetime.datetime.now()\n\n# t = now.strftime(\"%Y/%m/%d\")\n\n# print(t)\n\n# date = datetime.datetime(2003,8,1,12,4,5)\n# for i in range(5): \n# date += datetime.timedelta(days=1)\n# print(date) \n\nfrom datetime import datetime,timedelta\nimport time\n\ndef last_day(d, day_name):\n days_of_week = ['sunday','monday','tuesday','wednesday',\n 'thursday','friday','saturday']\n target_day = days_of_week.index(day_name.lower())\n delta_day = target_day - d.isoweekday()\n if delta_day >= 0: delta_day -= 7 # go back 7 days\n return d + timedelta(days=delta_day)\n\n\nprint(last_day(1,'sunday'))", "sub_path": "update.py", "file_name": "update.py", "file_ext": "py", "file_size_in_byte": 1228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "17", "api": [{"api_name": "datetime.timedelta", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "391016137", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\n\n# Read filename from command line or use default\n#(https://levelup.gitconnected.com/the-easy-guide-to-python-command-line-arguments-96b4607baea1)\nparser = argparse.ArgumentParser(description='Optional filename input')\nparser.add_argument(\"-fname1\", default=\"sim.txt\", help=\"filename\")\nparser.add_argument(\"-fname2\", default=\"rup.txt\", help=\"filename\")\nargs = parser.parse_args()\nfname1 = args.fname1\nfname2 = args.fname2\n\n# Import data and print overview\ndat=np.loadtxt(fname = fname1)\nhmin=np.loadtxt(fname = fname2)\n\n# Plot time-evolution of film\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n#\"\"\"\n#plt.plot(dat[:,0],dat[:,1],label=r'$\\tilde{t} = 0$', linewidth=3)\n#plt.plot(dat[:,0],dat[:,2],label=r'$\\tilde{t} \\simeq 0.50 \\tilde{t}_r$', linewidth=3)\n#plt.plot(dat[:,0],dat[:,3],label=r'$\\tilde{t} \\simeq 0.95 \\tilde{t}_r$', linewidth=3)\n#plt.plot(dat[:,0],dat[:,4],label=r'$\\tilde{t} \\simeq \\tilde{t}_r$', linewidth=3)\nplt.plot(dat[:,0],dat[:,1],label=r'$\\tilde{t} = 0$', linewidth=3)\nplt.plot(dat[:,0],dat[:,2],label=r'$\\tilde{t} = T/100$', linewidth=3)\nplt.plot(dat[:,0],dat[:,3],label=r'$\\tilde{t} = T/10$', linewidth=3)\nplt.plot(dat[:,0],dat[:,4],label=r'$\\tilde{t} = T$', linewidth=3)\nplt.xlim(-4.44,4.44)\n#plt.ylim(0,1.2)\nplt.ylim(0,2)\nplt.ylabel(r'Dimensionless Height $\\tilde{h}$',fontsize=16)\nplt.xlabel(r'Periodic Width $\\tilde{x}$',fontsize=16)\nxtics = [r'$\\frac{-\\pi}{\\tilde{k}_m}$',r'0',r'$\\frac{\\pi}{\\tilde{k}_m}$']\nplt.xticks(np.arange(-4.44,8.88,4.44),xtics,fontsize=14)\nplt.yticks(fontsize=14)\n#plt.legend(loc='lower right', fontsize=16)\nplt.legend(loc='upper center', fontsize=16)\nplt.show()\n#\"\"\"\n\n# Plot time-evolution of minimum height\n\"\"\"\nx = np.linspace(0, 18, 1000)\n#x = np.linspace(0, 2.035, 1000)\n#plt.plot(hmin[:,1],hmin[:,2], \\\n# linewidth=3,label=r'$-\\ln[(\\tilde{t}_r-\\tilde{t})^{1/5}]$')\n#plt.plot(hmin[:,0],hmin[:,2], \\\n# linewidth=3,label=r'$-\\ln(\\tilde{t}_r-\\tilde{t})$')\nplt.plot(hmin[:,1],hmin[:,2], 'r2', markersize = 10, \\\n linewidth=3,label=r'$-\\ln[(\\tilde{t}_r-\\tilde{t})^{1/5}]$')\nplt.plot(hmin[:,0],hmin[:,2], 'b1', markersize = 10, \\\n linewidth=3,label=r'$-\\ln(\\tilde{t}_r-\\tilde{t})$')\nz = np.polyfit(hmin[:,1],hmin[:,2], 1)\np = np.poly1d(z)\npylab.plot(hmin[:,1],p(hmin[:,1]),\"--\")\nz = np.polyfit(hmin[:,0],hmin[:,2], 1)\np = np.poly1d(z)\npylab.plot(hmin[:,0],p(hmin[:,0]),\"--\")\n#plt.plot(x,5.6*(x-1.8),linewidth=3,label=r'Slope=5.6')\n#plt.plot(x,1*(x-7.9),linewidth=3,label=r'Slope=1')\n\n#plt.plot(x,-np.log(x**(1/5)),linewidth=3,label=r'Slope=1')\nplt.xlim(2,18)\nplt.ylim(5,12)\nplt.ylabel(r'$-\\ln(\\tilde{h}_{min})$',fontsize=16)\n#plt.xlabel(r'$-\\ln[(\\tilde{t}_r-\\tilde{t})^{1/5}]$',fontsize=16)\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nplt.legend(loc='upper center', fontsize=16)\nplt.show()\n#\"\"\"\n", "sub_path": "fluid/visualise.py", "file_name": "visualise.py", "file_ext": "py", "file_size_in_byte": 2866, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 18, "usage_type": "call"}, {"api_name": 
"numpy.loadtxt", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "414716615", "text": "import pymysql\nimport redis\n\n# 连接MySQL\nCONN_MYSQL = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='mysql', db='hlj', charset='utf8')\nCURSOR_MYSQL = CONN_MYSQL.cursor()\n# 连接Redis\nPOOL = redis.ConnectionPool(host='127.0.0.1', port=6379)\nCONN_REDIS = redis.Redis(connection_pool=POOL)\n\nRedisSetName = \"hlj:name\"\n\ntry:\n while True:\n name = CONN_REDIS.spop(RedisSetName)\n if name:\n name = name.decode(\"utf-8\")\n print(\"-> \", name)\n sql = \"INSERT INTO name (name) VALUES ('%s')\" % name\n CURSOR_MYSQL.execute(sql)\n CONN_MYSQL.commit()\n else:\n print(\"No data -> stop!\")\n break\nexcept Exception as e:\n raise e\nfinally:\n CURSOR_MYSQL.close()\n CONN_MYSQL.close()\n print(\"数据库连接已经关闭\")\n", "sub_path": "MixNotes/other/转移redis数据到mysql.py", "file_name": "转移redis数据到mysql.py", "file_ext": "py", "file_size_in_byte": 829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pymysql.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "redis.ConnectionPool", "line_number": 8, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 9, "usage_type": 
"call"}]} +{"seq_id": "327776292", "text": "import tempfile\r\nimport pyutilib.th as unittest\r\nimport sys\r\nimport os.path\r\nimport os\r\nimport glob\r\nimport pandas as pd\r\nimport shutil\r\nimport datetime\r\nfrom datetime import datetime\r\nimport shutil\r\nimport mape_maker\r\ndir_sep = '/'\r\nfrom mape_maker import __main__ as mapemain\r\ndir_sep = \"/\"\r\np = str(mape_maker.__path__)\r\nl = p.find(\"'\")\r\nr = p.find(\"'\", l+1)\r\nmape_maker_path = p[l+1:r]\r\nfile_path = mape_maker_path + dir_sep + \"samples\"\r\n# whether to skip the last two tests\r\nquick_test = False\r\n# whether to run only one example\r\nskip_all_but_one = False\r\n\r\nclass TestUM(unittest.TestCase):\r\n\r\n def _basic_dict(self):\r\n basedict = {\"input_file\": \"\",\r\n \"target_mape\": None,\r\n \"simulated_timeseries\": \"forecasts\",\r\n \"base-process\": \"ARMA\",\r\n \"a\": 4,\r\n \"output_dir\": None,\r\n \"number_simulations\": 1,\r\n \"input_start_dt\": None,\r\n \"input_end_dt\": None,\r\n \"simulation_start_dt\": None,\r\n \"simulation_end_dt\": None,\r\n \"title\": None,\r\n \"seed\": None,\r\n \"load_pickle\": False,\r\n \"curvature\": None,\r\n \"time_limit\": 3600,\r\n \"curvature_target\": None,\r\n \"mip_gap\": 0.3,\r\n \"solver\": \"gurobi\",\r\n \"latex_output\": False,\r\n \"show\": True,\r\n \"verbosity\": 2,\r\n \"verbosity_output\": None\r\n }\r\n return basedict\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n # make a temp dir\r\n self.temp_dir = tempfile.mkdtemp()\r\n sys.path.insert(1, self.temp_dir)\r\n # change to the temp directory\r\n os.chdir(self.temp_dir)\r\n self.cwd = os.getcwd()\r\n print(\"temporary directory:\", self.cwd)\r\n # path to the RTS wind data\r\n\r\n self.wind_data = file_path + dir_sep + \"based_rts_gmlc\" + \\\r\n dir_sep + \"Wind_rts_gmlc_based\" + dir_sep + \\\r\n \"processed_file.csv\"\r\n\r\n def test_commmand(self):\r\n \"\"\"\r\n here is the command :\r\n python -m mape_maker \"mape_maker/samples/based_rts_gmlc/Wind_rts_gmlc_based/processed_file.csv\" -st \"actuals\" -s 1234 -n 5 -bp \"ARMA\" -o \"wind_forecasts_actuals\" -is \"2020-2-1 00:00:00\" -ie \"2020-5-1 00:00:00\" -sd \"2020-2-2 00:00:00\" -ed \"2020-3-2 00:00:00\"\r\n :return:\r\n \"\"\"\r\n print(\"Running \", str(self.id()).split('.')[2])\r\n parm_dict = self._basic_dict()\r\n parm_dict[\"input_file\"] = self.wind_data\r\n parm_dict[\"simulated_timeseries\"] = \"actuals\"\r\n parm_dict[\"number_simulations\"] = 5\r\n parm_dict[\"base-process\"] = \"ARMA\"\r\n parm_dict[\"output_dir\"] = \"wind_forecasts_actuals\"\r\n parm_dict[\"seed\"] = 1234\r\n parm_dict[\"simulation_start_dt\"] = datetime(year=2020, month=2, day=2, hour=0, minute=0, second=0)\r\n parm_dict[\"simulation_end_dt\"] = datetime(year=2020, month=3, day=2, hour=0, minute=0, second=0)\r\n parm_dict[\"input_start_dt\"] = datetime(year=2020, month=2, day=1, hour=0, minute=0, second=0)\r\n parm_dict[\"input_end_dt\"] = datetime(year=2020, month=5, day=1, hour=0, minute=0, second=0)\r\n parm_list = list(parm_dict.values())\r\n mapemain.main_func(*parm_list)\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()", "sub_path": "test/x_rts_wind_test.py", "file_name": "x_rts_wind_test.py", "file_ext": "py", "file_size_in_byte": 3497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "mape_maker.__path__", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pyutilib.th.TestCase", "line_number": 26, "usage_type": "attribute"}, {"api_name": 
"pyutilib.th", "line_number": 26, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "call"}, {"api_name": "mape_maker.__main__.main_func", "line_number": 89, "usage_type": "call"}, {"api_name": "mape_maker.__main__", "line_number": 89, "usage_type": "name"}, {"api_name": "pyutilib.th.main", "line_number": 92, "usage_type": "call"}, {"api_name": "pyutilib.th", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "553624870", "text": "from urlparse import urljoin\n\nimport logging\nimport requests\n\n\nlog = logging.getLogger('query')\n\nclass GDCIndexClient(object):\n\n def __init__(self, uri):\n self.uri = uri\n self.metadata = dict()\n\n def get_related_files(self, uuid):\n # type: str -> List[str]\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['related_files']\n return []\n\n def get_annotations(self, uuid):\n # type: str -> List[str]\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['annotations']\n return []\n\n def get_md5sum(self, uuid):\n # type: str -> str\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['md5sum']\n\n def get_filesize(self, uuid):\n # type: str -> long\n if uuid in self.metadata.keys():\n return long(self.metadata[uuid]['file_size'])\n\n def get_access(self, uuid):\n # type: str -> long\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['access']\n\n def _get_metadata(self, uuids):\n # type: List[str] -> Dict[str]str\n \"\"\" Capture the metadata of all the UUIDs while making\n as little open connections as possible.\n\n self.metadata = {\n str file_id: {\n str access\n str file_size\n str md5sum\n List[str] annotations\n List[str] related files\n }\n }\n \"\"\"\n\n metadata_query = {\n 'fields': 'file_id,file_size,md5sum,annotations.annotation_id,' \\\n 'metadata_files.file_id,index_files.file_id,access',\n 'filters': '{\"op\":\"and\",\"content\":['\n '{\"op\":\"in\",\"content\":{'\n '\"field\":\"files.file_id\",\"value\":'\n '[\"' + '\",\"'.join(uuids) + '\"]}}]}',\n 'from': '0',\n 'size': str(len(uuids)), # one big request\n }\n\n active_meta_url = urljoin(self.uri, 'v0/files')\n legacy_meta_url = urljoin(self.uri, 'v0/legacy/files')\n\n active_json_resp = dict()\n legacy_json_resp = dict()\n\n # using a POST request lets us avoid the MAX URL character length limit\n r_active = requests.post(active_meta_url, json=metadata_query, verify=False)\n r_legacy = requests.post(legacy_meta_url, json=metadata_query, verify=False)\n\n if r_active.status_code == requests.codes.ok:\n active_json_resp = r_active.json()\n\n if r_legacy.status_code == requests.codes.ok:\n legacy_json_resp = r_legacy.json()\n\n r_active.close()\n r_legacy.close()\n\n if not active_json_resp.get('data') and not legacy_json_resp.get('data'):\n log.debug('Unable to retrieve file metadata information. 
'\n 'continuing downloading as if they were large files')\n return self.metadata\n\n active_hits = active_json_resp['data']['hits']\n legacy_hits = legacy_json_resp['data']['hits']\n\n for h in active_hits + legacy_hits:\n related_returns = h.get('index_files', []) + h.get('metadata_files', [])\n related_files = [ r['file_id'] for r in related_returns ]\n\n annotations = [ a['annotation_id'] for a in h.get('annotations', []) ]\n\n # set the metadata as a class data member so that it can be\n # references as much as needed without needing to calculate\n # everything over again\n if h['id'] not in self.metadata.keys():\n # don't want to overwrite\n self.metadata[h['id']] = {\n 'access': h['access'],\n 'file_size': h['file_size'],\n 'md5sum': h['md5sum'],\n 'annotations': annotations,\n 'related_files': related_files,\n }\n\n return self.metadata\n\n\n def separate_small_files(self,\n ids, # type: Set[str]\n chunk_size, # type: int\n related_files=False, # type: bool\n annotations=False, # type: bool\n ):\n # type: (...) -> (List[str], List[List[str]])\n \"\"\" Separate big and small files\n\n Separate the small files from the larger files in\n order to combine them into single grouped downloads. This will reduce\n the number of open connections needed to be made for many small files.\n\n On top of that, separate the small files by open and controlled access\n so that if a controlled grouping failed, you can handle it as the same\n edge case.\n \"\"\"\n\n bigs = []\n smalls_open = []\n smalls_control = []\n potential_smalls = set()\n\n # go through all the UUIDs and pick out the ones with\n # relate and annotation files so they can be handled by parcel\n log.debug('Grouping ids by size')\n\n self._get_metadata(ids)\n for uuid in ids:\n if uuid not in self.metadata.keys():\n bigs.append(uuid)\n continue\n\n rf = self.get_related_files(uuid)\n af = self.get_annotations(uuid)\n\n # check for related files\n if related_files and rf and uuid not in bigs:\n bigs.append(uuid)\n\n # check for annotation files\n if annotations and af and uuid not in bigs:\n bigs.append(uuid)\n\n # if uuid has no related or annotation files\n # then proceed to the small file sorting with them\n if not af and not rf:\n potential_smalls |= set([uuid])\n\n # the following line is to trigger the first if statement\n # to start the process off properly\n bundle_open_size = chunk_size + 1\n bundle_control_size = chunk_size + 1\n\n i_open = -1\n i_control = -1\n\n for uuid in potential_smalls:\n # grouping of file exceeds chunk_size, create a new grouping\n if bundle_open_size > chunk_size:\n smalls_open.append([])\n i_open += 1\n bundle_open_size = 0\n\n if bundle_control_size > chunk_size:\n smalls_control.append([])\n i_control += 1\n bundle_control_size = 0\n\n # individual file is more than chunk_size, big file download\n if self.get_filesize(uuid) > chunk_size:\n bigs.append(uuid)\n\n # file size is less than chunk_size then group and tarfile it\n else:\n if self.get_access(uuid) == 'open':\n smalls_open[i_open].append(uuid)\n bundle_open_size += self.get_filesize(uuid)\n\n elif self.get_access(uuid) == 'controlled':\n smalls_control[i_control].append(uuid)\n bundle_control_size += self.get_filesize(uuid)\n\n # they are still small files to be downloaded in a group\n smalls = smalls_open + smalls_control\n\n # for logging/reporting purposes\n total_count = len(bigs) + sum([ len(s) for s in smalls ])\n if len(potential_smalls) > total_count:\n log.warning('There are less files to download than originally given')\n 
log.warning('Number of files originally given: {0}'\\\n .format(len(potential_smalls)))\n\n log.debug('{0} total number of files to download'.format(total_count))\n log.debug('{0} groupings of files'.format(len(smalls)))\n\n smalls = [ s for s in smalls if s != [] ]\n\n return bigs, smalls\n", "sub_path": "gdc_client/query/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 7730, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 69, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 76, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 77, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 79, "usage_type": "attribute"}, {"api_name": "requests.codes", "line_number": 82, "usage_type": "attribute"}]} +{"seq_id": "358854166", "text": "# -*- coding: utf-8 -*-\n__author__ = 'Brice Olivier'\n\nimport matplotlib.pyplot as plt\nimport mne\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nfrom sea.config import OUTPUT_PATH\nimport uuid\n\n\nPHASE_NAMES = ['Fast Forward', 'Normal Reading', 'Information Search', 'Slow Confirmation']\nPHASE_NAMES_SHORT = ['FF', 'NR', 'IS', 'SC']\n\n\nclass MeltedMODWTDataFrame(pd.DataFrame):\n \"\"\"\n TODO:\n * subsample df to select channels (with missing channel interpolation), subjects\n * topomaps per stg\n * corr per stg\n \"\"\"\n\n _metadata = ['channel_info']\n\n def __init__(self, *args, **kwargs):\n channel_info = kwargs.pop('channel_info', None)\n super(MeltedMODWTDataFrame, self).__init__(*args, **kwargs)\n self.channel_info = channel_info\n\n @property\n def _constructor(self):\n return MeltedMODWTDataFrame\n\n @staticmethod\n def concat(melted_modwt_dataframes):\n assert all([type(melted_modwt_dataframe) == MeltedMODWTDataFrame\n for melted_modwt_dataframe in melted_modwt_dataframes])\n melted_modwt_dataframe = pd.concat(melted_modwt_dataframes)\n melted_modwt_dataframe.channel_info = melted_modwt_dataframe[0].channel_info\n return melted_modwt_dataframe\n\n def plot_var_heatmap(self, last_x_scales=None, robust=False, normalize_power_spectrum=False):\n assert all([col in self.columns for col in ['PHASE', 'CHANNEL', 'SCALE']])\n # nb_phases = self['PHASE'].astype(int).max()\n scale_names = ['sc1', 'sc2', r'$\\gamma$ +', r'$\\gamma$ -', r'$\\beta$', r'$\\alpha$', r'$\\theta$']\n nb_scales = len(self['SCALE'].unique())\n last_x_scales = nb_scales if (last_x_scales is None) or (last_x_scales > nb_scales) else last_x_scales\n df = self.groupby(['PHASE', 'CHANNEL', 'SCALE']).var().reset_index()\n df = df[df['SCALE'].isin(range(nb_scales - last_x_scales, nb_scales))]\n values = df.VALUE\n if normalize_power_spectrum:\n #values /= (df.SCALE.astype(float) + 1)\n values /= 2**(df.SCALE.astype(float))\n if robust:\n vmin = values.quantile(q=0.10)\n vmax = values.quantile(q=0.90)\n else:\n vmin = values.min()\n vmax = values.max()\n fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)\n cbar_ax = fig.add_axes([.91, .3, .03, .4])\n for i, ax in enumerate(axes.flat):\n if i in df['PHASE'].unique():\n v = df[df['PHASE'] == i].pivot(index='SCALE', columns='CHANNEL', values='VALUE')\n sns.heatmap(v, ax=ax, vmin=vmin, vmax=vmax, cbar=i == 0, cbar_ax=None if i else cbar_ax,\n 
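The `_get_metadata` method in the gdc_client record above splices the UUID list into the `filters` string by hand. A hedged sketch of building the same payload with `json.dumps`, which sidesteps quoting mistakes; the endpoint fields are copied from the record, the UUIDs are illustrative, and the filters value is kept as a JSON string because that is the shape the record sends:

```python
import json

def build_metadata_query(uuids):
    """Build the files-endpoint query as a plain dict."""
    filters = {
        "op": "and",
        "content": [{
            "op": "in",
            "content": {"field": "files.file_id", "value": list(uuids)},
        }],
    }
    return {
        "fields": "file_id,file_size,md5sum,annotations.annotation_id,"
                  "metadata_files.file_id,index_files.file_id,access",
        "filters": json.dumps(filters),  # serialized, as in the record
        "from": "0",
        "size": str(len(uuids)),
    }

print(build_metadata_query(["aaa-111", "bbb-222"])["filters"])
```

Posting this dict with `requests.post(url, json=query)` keeps the record's approach of avoiding the maximum URL length while removing the hand-rolled quoting.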
yticklabels=scale_names[-last_x_scales:])\n ax.set_title(PHASE_NAMES[i])\n ax.set_xlabel('')\n ax.set_ylabel('')\n # fig.tight_layout() # seaborn.heatmap ax is tight_layout() incompatible\n plt.show()\n\n def plot_corr_heatmap(self, last_x_scales=None):\n assert all([col in self.columns for col in ['PHASE', 'CHANNEL', 'SCALE']])\n nb_scales = len(self['SCALE'].unique())\n scale_names = ['sc1', 'sc2', r'$\\gamma$ +', r'$\\gamma$ -', r'$\\beta$', r'$\\alpha$', r'$\\theta$']\n last_x_scales = nb_scales if (last_x_scales is None) or (last_x_scales > nb_scales) else last_x_scales\n channel_names = self['CHANNEL'].unique().tolist()\n df = self.groupby(['SCALE', 'CHANNEL', 'PHASE'])['VALUE'].apply(lambda x: [elem for elem in x]).reset_index()\n for scale in range(nb_scales - last_x_scales, nb_scales):\n fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)\n cbar_ax = fig.add_axes([.91, .3, .03, .4])\n for i, ax in enumerate(axes.flat):\n if i in df.loc[df['SCALE'] == i, 'PHASE'].unique():\n sub_gb = df.loc[(df.SCALE == scale) & (df.PHASE == i)]\n corr_mat = np.corrcoef([sub_gb.loc[j, 'VALUE'] for j in sub_gb.index])\n sns.heatmap(corr_mat, ax=ax, xticklabels=channel_names, yticklabels=channel_names,\n vmin=0, vmax=1, cbar=i == 0, cbar_ax=None if i else cbar_ax)\n ax.set_title(PHASE_NAMES[i])\n ax.set_xlabel('')\n ax.set_ylabel('')\n fig.suptitle(scale_names[scale])\n plt.show()\n\n def plot_topomap(self, groupby=None, robust=False, last_x_scales=None,\n is_file_output=False, normalize_power_spectrum=False):\n self['TEXT_TYPE'] = self['TEXT'].apply(lambda x: x.split('-')[1][0])\n if groupby is not None:\n assert all([col in self.columns for col in groupby])\n nb_scales = len(self['SCALE'].unique()) if 'SCALE' in groupby else 1\n scale_names = ['sc1', 'sc2', r'$\\gamma$ +', r'$\\gamma$ -', r'$\\beta$', r'$\\alpha$', r'$\\theta$']\n last_x_scales = nb_scales if (last_x_scales is None) or (last_x_scales > nb_scales) else last_x_scales\n nb_phases = self['PHASE'].astype(int).max() + 1 if 'PHASE' in groupby else 1\n subject_names = self['SUBJECT'].unique()\n nb_subjects = len(subject_names) if 'SUBJECT' in groupby else 1\n text_types = self['TEXT_TYPE'].unique()\n nb_text_types = len(text_types) if 'TEXT_TYPE' in groupby else 1\n self['SCALE'] = self['SCALE'].astype(float)\n gb = self[self['SCALE'].isin(range(nb_scales - last_x_scales, nb_scales))].groupby(\n groupby).var().reset_index()\n values = gb.VALUE\n if normalize_power_spectrum:\n #values /= (gb.SCALE.astype(float) + 1)\n values /= 2**(gb.SCALE.astype(float))\n if robust:\n vmin = values.quantile(q=0.10)\n vmax = values.quantile(q=0.90)\n else:\n vmin = values.min()\n vmax = values.max()\n\n for text_type_id in range(nb_text_types):\n if nb_text_types == 1:\n text_type = text_types\n else:\n text_type = text_types[text_type_id]\n for subject_id in range(nb_subjects):\n if nb_subjects == 1:\n subject_name = subject_names\n else:\n subject_name = subject_names[subject_id]\n fig, axes = plt.subplots(nrows=nb_phases, ncols=last_x_scales, sharex=True, sharey=True)\n for i, ax in enumerate(axes.flat):\n scale_id = nb_scales - 1 - i % (last_x_scales)\n phase_id = int(i / (last_x_scales))\n if phase_id in self['PHASE'].unique():\n gb_values = np.array(self[(self['SCALE'] == scale_id) &\n (self['PHASE'] == phase_id) &\n (self['TEXT_TYPE'].isin(text_type)) &\n (self['SUBJECT'].isin(subject_name))\n ].groupby(['CHANNEL']).var().VALUE)\n if normalize_power_spectrum:\n #gb_values = gb_values / (scale_id + 1)\n gb_values = gb_values / 
(2**scale_id)\n mne.viz.plot_topomap(gb_values, self.channel_info, axes=ax,\n vmin=vmin, vmax=vmax, show=False)\n if phase_id == self['PHASE'].astype(int).max():\n ax.set_xlabel(scale_names[scale_id])\n if scale_id == nb_scales - 1 % (last_x_scales):\n ax.set_ylabel(PHASE_NAMES_SHORT[phase_id])\n plot_title = ''\n if nb_text_types == 1:\n plot_title += 'all text types'\n else:\n plot_title += 'text type %s' % text_type\n if nb_subjects == 1:\n plot_title += ', all subjects'\n else:\n plot_title += ', subject %s' % subject_name\n #fig.text(0.5, 0.98, plot_title, ha='center')\n #fig.text(0.5, 0.01, 'scale', ha='center')\n #fig.text(0.01, 0.5, 'phase', va='center', rotation='vertical')\n fig.tight_layout(rect=[0, 0, .9, 1])\n if is_file_output:\n file_path = uuid.uuid4().hex + '.png'\n if not os.path.exists(OUTPUT_PATH):\n os.makedirs(OUTPUT_PATH)\n file_path = os.path.join(OUTPUT_PATH, file_path)\n plt.savefig(file_path)\n print('topomap - %s, saved to %s' % (plot_title, file_path))\n else:\n mne.viz.utils.plt_show()\n\n", "sub_path": "sea/melted_modwt_dataframe.py", "file_name": "melted_modwt_dataframe.py", "file_ext": "py", "file_size_in_byte": 8782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 89, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}, {"api_name": "mne.viz.plot_topomap", "line_number": 148, "usage_type": "call"}, {"api_name": "mne.viz", "line_number": 148, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 169, "usage_type": "call"}, {"api_name": "sea.config.OUTPUT_PATH", "line_number": 169, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 170, "usage_type": "call"}, {"api_name": "sea.config.OUTPUT_PATH", "line_number": 170, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "sea.config.OUTPUT_PATH", "line_number": 171, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": 
"mne.viz.utils.plt_show", "line_number": 175, "usage_type": "call"}, {"api_name": "mne.viz", "line_number": 175, "usage_type": "attribute"}]} +{"seq_id": "602615229", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 14 16:33:29 2016\r\n\r\n@author: takaiguchi\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom sklearn.manifold import TSNE\r\nfrom sklearn.cluster import KMeans\r\nimport os\r\nfrom PIL import Image\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn import decomposition\r\n\r\n#X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])\r\n#model = TSNE(n_components=2, random_state=0)\r\n#np.set_printoptions(suppress=True)\r\n#Y = model.fit_transform(X)\r\n#y_pred = KMeans(n_clusters=2).fit_predict(Y)\r\npath = 'C:/Users/takaiguchi/Documents/Fun with Data Science/Kaggle/State Farm Distracted Driver/train/'\r\nfolders = os.listdir(path)\r\n\r\nfrms = 100\r\ndata = np.empty((len(folders)*frms,640*480))\r\nfor i in range(len(folders)):\r\n folder = folders[i]\r\n files = os.listdir(path+folder)\r\n for j in range(frms):\r\n file = files[i]\r\n img = Image.open(path+folder+'/'+file)\r\n data[(i-1)*frms+j,:] = np.asarray(img.getdata())[:,1]\r\nmodel = TSNE(n_components = 2, random_state=0, perplexity=15.0)\r\nnp.set_printoptions(suppress=True)\r\n\r\nX_pca = decomposition.TruncatedSVD(n_components=100).fit_transform(data)\r\n\r\nTSNEres = model.fit_transform(X_pca)\r\n\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111);\r\n\r\ncolors = [\"#000000\",\"#199999\",\"#333332\",\"#4CCCCB\",\"#666664\",\"#7FFFFD\",\"#999996\",\"#B3332F\",\"#CCCCC8\",\"#E66661\"]\r\nfor i in range(10):\r\n ax1.scatter(TSNEres[i*frms:(i+1)*frms,0],TSNEres[i*frms:(i+1)*frms,1],c=colors[i])\r\n \r\nplt.show()\r\n", "sub_path": "tsne+kmeans.py", "file_name": "tsne+kmeans.py", "file_ext": "py", "file_size_in_byte": 1476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.listdir", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "225467715", "text": "import multiprocessing as mp\nfrom datetime import datetime\n\nfrom astar import manhattan_distance, hamming_distance\nfrom statistics import start_run, write_file, initialize\n\n\ndef compute(data):\n # Put the call to the A* in here.\n goal_state = (0, 1, 2, 3, 4, 5, 6, 7, 8)\n return start_run(start=data[0], weights=data[1], heuristics=[manhattan_distance, hamming_distance], goal=goal_state)\n\n\ndef process_array(array_with_weights, runs):\n p = mp.Pool()\n res = p.map(compute, 
array_with_weights)\n write_file(res, runs)\n\n\nif __name__ == \"__main__\":\n number_of_examples = 1\n\n start_time = datetime.now()\n print(f\"Start: {start_time}\")\n\n work_to_do = initialize(number_of_examples)\n process_array(work_to_do, number_of_examples)\n\n end_time = datetime.now()\n print(f\"End: {end_time}\")\n print(f\"Duration: {end_time - start_time}\")\n", "sub_path": "Exercise1/worker.py", "file_name": "worker.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "statistics.start_run", "line_number": 11, "usage_type": "call"}, {"api_name": "astar.manhattan_distance", "line_number": 11, "usage_type": "name"}, {"api_name": "astar.hamming_distance", "line_number": 11, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 15, "usage_type": "call"}, {"api_name": "statistics.write_file", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "statistics.initialize", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "117448128", "text": "from channels.generic import BaseConsumer\nimport re\nfrom channels import Group\nfrom uuid import uuid4\nfrom pprint import pprint\nimport time\nfrom bot.models import Log\nimport socket\n\nclass ChatClient(BaseConsumer):\n\n method_mapping = {\n 'create': 'create',\n }\n\n sockets = {}\n\n # Creates a new stream client\n def create(self, message, **kwargs) :\n msg = message.content\n network = msg.get('network')\n port = int(msg.get('port'))\n user = msg.get('user')\n usersocket = self.sockets.get(user)\n if not usersocket :\n self.sockets.update({user:socket.socket()})\n usersocket = self.sockets.get(user)\n usersocket.connect((network, port))\n while True :\n try :\n data = [self.parse(line.strip(), message, usersocket) for line in usersocket.recv(1024).decode('utf-8').split(\"\\n\") if line]\n if not data :\n usersocket.close()\n del(self.sockets[user])\n break\n except KeyboardInterrupt :\n self.send(\"QUIT :BAD BYE!\", usersocket)\n\n def parse(self, line, message, usersocket) :\n print(\"< %s\" % line)\n user = message.get('user')\n if 'NOTICE' in line :\n self.send(\"NICK %s\" % (user), usersocket)\n self.send(\"USER %s %s %s :%s\" % (user, user, user, user), usersocket)\n if 'PING' in line :\n self.send(\"PONG %s\" % line.split(\":\")[1], usersocket)\n self.send(\"JOIN #topsecret\" , usersocket)\n if 'PRIVMSG' in line :\n nick = line.split('!')[0][1:]\n msg = ':'.join(line.split(':')[2:])\n channel = line.split()[2]\n log = Log(\n protocol = 'IRC',\n user = user,\n nickname = nick,\n message = msg,\n channel = channel,\n network = message.get('network')\n )\n log.save()\n\n\n # Sends a message over given stream client\n def send(self, line, usersocket):\n print(\"> %s\" % line)\n line += \"\\r\\n\"\n usersocket.send(line.encode('utf-8'))\n", "sub_path": "bot/oldclient.py", "file_name": "oldclient.py", "file_ext": "py", "file_size_in_byte": 2183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "channels.generic.BaseConsumer", "line_number": 10, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 
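`worker.py` above creates `mp.Pool()` without ever closing or joining it. A small sketch of the same map-over-inputs pattern with the pool as a context manager, plus the `__main__` guard that multiprocessing needs on spawn-based platforms; the `compute` body here is a placeholder for the real A* call:

```python
import multiprocessing as mp

def compute(item):
    # Placeholder for the real A* invocation.
    start, weights = item
    return sum(weights) * start

def process_array(items):
    # map() blocks until all results are in; the with-block then
    # tears the pool down instead of leaking worker processes.
    with mp.Pool() as pool:
        return pool.map(compute, items)

if __name__ == "__main__":
    work = [(1, [0.5, 0.5]), (2, [0.3, 0.7])]
    print(process_array(work))
```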
26, "usage_type": "call"}, {"api_name": "bot.models.Log", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "411842852", "text": "import torch\nfrom torchvision.models import mobilenet\n\nMODEL_FILE_NAME = \"mobilenet_v2.pt\"\n\n\ndef create_model(out_dir=\"./\"):\n model = mobilenet.mobilenet_v2(pretrained=True)\n model.eval()\n traced_model = torch.jit.trace(model, torch.randn(1, 3, 224, 224))\n traced_model.save(out_dir + MODEL_FILE_NAME)\n\n\nif __name__ == \"__main__\":\n create_model()\n print(f\"{MODEL_FILE_NAME} model file is created.\")\n", "sub_path": "model/create_mobilenet_v2_model.py", "file_name": "create_mobilenet_v2_model.py", "file_ext": "py", "file_size_in_byte": 417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torchvision.models.mobilenet.mobilenet_v2", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.models.mobilenet", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.jit.trace", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.jit", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "433443025", "text": "from interference.clusters.processor import Processor\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple\n\nimport numpy\n\nfrom scipy.spatial.distance import cdist\n\nimport numpy as np\n\nfrom enum import Enum\n\n\nclass Cluster:\n def __init__(self, tag: str, center: numpy.ndarray, index: int) -> None:\n self.center = center\n self.radius = 0\n self.tags = [tag]\n self.index = index\n\n def add_radius(self, tag: str, embedding: numpy.ndarray) -> None:\n self.tags.append(tag)\n\n def _adapt(self, distance: float, embedding: numpy.ndarray):\n direction = embedding - self.center\n self.radius = distance / 2\n self.center : numpy.ndarray = embedding - (direction / np.linalg.norm(direction)) * self.radius\n\n def add_threshold(self, distance: float, tag: str, embedding: numpy.ndarray) -> None:\n self.add_radius(tag, embedding)\n self._adapt(distance, embedding)\n\n def update_radius(self, tag: str, embedding: numpy.ndarray) -> None:\n pass\n\n def update_threshold(self, distance: float, tag: str, embedding: numpy.ndarray) -> None:\n self.update_radius(tag, embedding)\n self._adapt(distance, embedding)\n\n def remove(self, tag: str) -> None:\n self.tags.remove(tag)\n\n\nclass SearchResultType(Enum):\n RADIUS = 1\n THRESHOLD = 2\n OUTSIDE = 3\n\n\nclass ECM(Processor):\n\n def __init__(self, distance_threshold: float) -> None:\n self.clusters: Dict[int, Cluster] = {}\n self.distance_threshold = distance_threshold\n self.tag_to_cluster: Dict[str, int] = {}\n self.cluster_index = 0\n\n self.cached_cluster_keys: List[int] = []\n self.cached_cluster_centers: List[numpy.ndarray] = []\n self.cached_cluster_radiuses: List[float] = []\n\n def update(self, tag: str, embedding: numpy.ndarray) -> None:\n result, (searched_index, searched_distance) = self._search_index_and_distance(embedding)\n old_index = self.get_cluster_by_tag(tag)\n old_cluster = self.clusters[old_index]\n\n if result == SearchResultType.OUTSIDE:\n self._remove_from_cluster(old_cluster, tag)\n\n cluster = self._create_cluster(tag, embedding)\n index = cluster.index\n\n elif result == SearchResultType.RADIUS:\n if searched_index == old_index:\n old_cluster.update_radius(tag, embedding)\n\n index = searched_index\n else:\n self._remove_from_cluster(old_cluster, tag)\n\n new_cluster = 
self.clusters[searched_index]\n new_cluster.add_radius(tag, embedding)\n\n index = searched_index\n\n # elif result == SearchResultType.THRESHOLD:\n else:\n if searched_index == old_index:\n old_cluster.update_threshold(searched_distance, tag, embedding)\n\n index = searched_index\n else:\n self._remove_from_cluster(old_cluster, tag)\n\n new_cluster = self.clusters[searched_index]\n new_cluster.add_threshold(searched_distance, tag, embedding)\n\n index = searched_index\n\n self.tag_to_cluster[tag] = index\n self._invalidate_cached()\n\n def _remove_from_cluster(self, cluster: Cluster, tag: str) -> None:\n cluster.remove(tag)\n if len(cluster.tags) == 0:\n del self.clusters[cluster.index]\n\n def _create_cluster(self, tag: str, embedding: numpy.ndarray) -> Cluster:\n cluster = Cluster(tag, embedding, self.cluster_index)\n self.clusters[self.cluster_index] = cluster\n self.cluster_index += 1\n return cluster\n\n def remove(self, tag: str) -> None:\n index = self.get_cluster_by_tag(tag)\n cluster = self.clusters[index]\n\n del self.tag_to_cluster[tag]\n\n self._remove_from_cluster(cluster, tag)\n self._invalidate_cached()\n\n def get_cluster_by_tag(self, tag: str) -> int:\n return self.tag_to_cluster[tag]\n\n def get_tags_in_cluster(self, cluster_id: int) -> Sequence[str]:\n return self.clusters[cluster_id].tags\n\n def get_cluster_ids(self) -> Sequence[int]:\n return list(self.clusters.keys())\n\n def process(self, tag: str, embedding: numpy.ndarray) -> None:\n if len(self.clusters) == 0:\n cluster = self._create_cluster(tag, embedding)\n\n else:\n search_result, (index, distance) = self._search_index_and_distance(embedding)\n\n if search_result == SearchResultType.RADIUS:\n cluster = self.clusters[index]\n cluster.add_radius(tag, embedding)\n\n elif search_result == SearchResultType.THRESHOLD:\n cluster = self.clusters[index]\n cluster.add_threshold(distance, tag, embedding)\n\n # search_result == SearchResultType.OUTSIDE\n else:\n cluster = self._create_cluster(tag, embedding)\n\n self.tag_to_cluster[tag] = cluster.index\n self._invalidate_cached()\n\n def _invalidate_cached(self):\n self.cached_cluster_keys = []\n self.cached_cluster_centers = []\n self.cached_cluster_radiuses = []\n\n def _ensure_cached(self):\n if not self.cached_cluster_keys and len(self.clusters) > 0:\n self.cached_cluster_keys = []\n self.cached_cluster_centers = []\n self.cached_cluster_radiuses = []\n for index, cluster in self.clusters.items():\n self.cached_cluster_keys.append(index)\n self.cached_cluster_centers.append(cluster.center)\n self.cached_cluster_radiuses.append(cluster.radius)\n\n\n def _search_index_and_distance(self, embedding: numpy.ndarray) -> \\\n Tuple[SearchResultType, Tuple[int, float]]:\n\n self._ensure_cached()\n\n distances = cdist(\n np.array([embedding]),\n np.array(self.cached_cluster_centers),\n 'euclidean'\n )[0]\n\n diffs = distances - self.cached_cluster_radiuses\n\n possible_indexes = np.where(diffs <= 0)[0]\n\n possible = distances[possible_indexes]\n\n min_index: Optional[int] = None if possible.size == 0 else possible_indexes[possible.argmin()]\n\n if min_index is not None:\n return SearchResultType.RADIUS, (self.cached_cluster_keys[min_index], distances[min_index])\n\n distances_plus_radiuses = distances + self.cached_cluster_radiuses\n lowest_distance_and_radius_index = np.argmin(distances_plus_radiuses)\n lowest_distance_and_radius: float = distances_plus_radiuses[lowest_distance_and_radius_index]\n\n actual_index = 
self.cached_cluster_keys[lowest_distance_and_radius_index]\n\n if lowest_distance_and_radius > 2 * self.distance_threshold:\n return SearchResultType.OUTSIDE, (actual_index, lowest_distance_and_radius)\n\n else:\n return SearchResultType.THRESHOLD, (actual_index, lowest_distance_and_radius)\n\n def describe(self) -> Dict[str, Any]:\n \"\"\"\n This describes this clustering algorithm's parameters\n \"\"\"\n\n return {\n \"name\": \"ECM\",\n \"parameters\": {\n \"distance threshold\": self.distance_threshold\n }\n }\n\n def safe_file_name(self) -> str:\n return f\"ECM = distance_threshold={self.distance_threshold}\"\n\n def predict(self, embedding: numpy.ndarray) -> int:\n search_result, (index, _) = self._search_index_and_distance(embedding)\n\n # FIXME: What should predict do in this case?\n if search_result == SearchResultType.OUTSIDE:\n return index\n\n elif search_result == SearchResultType.THRESHOLD:\n return index\n\n #elif search_result == SearchResultType.RADIUS:\n else:\n return index", "sub_path": "interference/clusters/ecm.py", "file_name": "ecm.py", "file_ext": "py", "file_size_in_byte": 7820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.ndarray", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 35, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 43, "usage_type": "name"}, {"api_name": "interference.clusters.processor.Processor", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 58, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 107, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 125, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 169, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 182, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 186, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 192, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 170, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 203, 
"usage_type": "name"}, {"api_name": "typing.Any", "line_number": 203, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 218, "usage_type": "attribute"}]} +{"seq_id": "341919760", "text": "import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport torch.nn.init as weight_init\nfrom torch.autograd import Variable\n\nchannels = 10\n\nclass Encoder(nn.Module):\n\tdef __init__(self, latent_size=2048, time_latent_size=64, hidden_latent_size=1024):\n\t\tsuper(Encoder, self).__init__()\n\t\tself.latent_size = latent_size\n\t\tself.leaky = nn.LeakyReLU(0.2, inplace=True)\n\n\t\tself.conv0 = nn.Conv2d(channels, 32, 5, stride=1, padding=(2, 2))\n\t\tself.batch0 = nn.BatchNorm2d(32)\n\t\tself.maxpool0 = nn.MaxPool2d(2, return_indices=True)\n\t\t# 32 x 64 x 64\n\t\t# # 32 x 32 x 32\n\n\t\tself.conv1 = nn.Conv2d(32, 64, 5, stride=1, padding=(2, 2))\n\t\tself.batch1 = nn.BatchNorm2d(64)\n\t\tself.maxpool1 = nn.MaxPool2d(2, return_indices=True)\n\t\t# 64 x 32 x 32\n\t\t## 64 x 16 x 16\n\n\t\tself.conv2 = nn.Conv2d(64, 128, 1, stride=1)\n\t\tself.batch2 = nn.BatchNorm2d(128)\n\t\tself.maxpool2 = nn.MaxPool2d(2, return_indices=True)\n\t\t# 128 x 16 x 16\n\t\t# #128 x 8 x 8\n\n\t\t#self.hidden_units = 128*16*16\n\t\tself.hidden_units = 128*8*8\n\n\t\tself.linear0 = nn.Linear(self.hidden_units, hidden_latent_size)\n\t\tself.linear1 = nn.Linear(hidden_latent_size, latent_size)\n\n\t\tself.tlinear0 = nn.Linear(1, time_latent_size)\n\t\tself.tlinear1 = nn.Linear(time_latent_size, time_latent_size)\n\t\tself.tlinear2 = nn.Linear(time_latent_size, time_latent_size)\n\n\n\tdef forward(self, x, t):\n\t\tx = self.leaky(self.conv0(x))\n\t\tx = self.batch0(x)\n\t\tx, mpi0 = self.maxpool0(x)\n\t\t#print(x.shape)\n\n\t\tx = self.leaky(self.conv1(x))\n\t\tx = self.batch1(x)\n\t\tx, mpi1 = self.maxpool1(x)\n\t\t#print(x.shape)\n\n\t\tx = self.leaky(self.conv2(x))\n\t\tx = self.batch2(x)\n\t\tx, mpi2 = self.maxpool2(x)\n\t\t#print(x.shape)\n\n\t\tx = x.view(-1, self.hidden_units)\n\t\tx = self.leaky(self.linear0(x))\n\t\tx = self.linear1(x)\n\t\t#print(x.shape)\n\n\t\tt = t.view(-1, 1)\n\t\tt = self.leaky(self.tlinear0(t))\n\t\tt = self.leaky(self.tlinear1(t))\n\t\tt = self.tlinear2(t)\n\n\t\t#print(x.shape, t.shape)\n\t\tout = torch.cat((x, t), 1)\n\t\treturn out, [mpi0, mpi1, mpi2]\n\n\nclass Decoder(nn.Module):\n\tdef __init__(self, latent_size=2048, time_latent_size=64, hidden_latent_size=1024):\n\t\tsuper(Decoder, self).__init__()\n\t\tself.fc_size = latent_size + time_latent_size\n\n\t\tself.linear3 = nn.Linear(self.fc_size, hidden_latent_size)\n\t\t#self.linear4 = nn.Linear(hidden_latent_size, 128*16*16)\n\t\tself.linear4 = nn.Linear(hidden_latent_size, 128*8*8)\n\n\t\tself.unpool0 = nn.MaxUnpool2d(2)\n\t\tself.deconv0 = nn.ConvTranspose2d(128, 64, 1, stride=1)\n\t\tself.batch0 = nn.BatchNorm2d(64)\n\n\t\tself.unpool1 = nn.MaxUnpool2d(2)\n\t\tself.deconv1 = nn.ConvTranspose2d(64, 32, 5, stride=1, padding=(2, 2))\n\t\tself.batch1 = nn.BatchNorm2d(32)\n\n\t\tself.unpool2 = nn.MaxUnpool2d(2)\n\t\tself.deconv2 = nn.ConvTranspose2d(32, 1, 5, stride=1, padding=(2, 2))\n\n\n\tdef forward(self, x, mpis):\n\t\tx = F.relu(self.linear3(x))\n\t\tx = F.relu(self.linear4(x))\n\n\t\t#x = x.view(-1, 128, 16, 16)\n\t\tx = x.view(-1, 128, 8, 8)\n\t\tx = self.unpool0(x, mpis[2])\n\t\tx = F.relu(self.deconv0(x))\n\t\tx = self.batch0(x)\n\n\t\tx = self.unpool1(x, mpis[1])\n\t\tx = F.relu(self.deconv1(x))\n\t\tx = self.batch1(x)\n\n\t\tx = self.unpool2(x, 
mpis[0])\n\t\tx = self.deconv2(x)\n\n\t\treturn x\n\n\n\nclass EncoderDecoder(nn.Module):\n\tdef __init__(self, args):\n\t\tsuper(EncoderDecoder, self).__init__()\n\t\tlatent_size = args.latent_size\n\t\ttime_latent_size = args.time_latent\n\t\thidden_latent = args.hidden_latent\n\t\tself.encoder = Encoder(latent_size, time_latent_size, hidden_latent)\n\t\tself.decoder = Decoder(latent_size, time_latent_size, hidden_latent)\n\n\tdef forward(self, x, t):\n\t\tx, mpis = self.encoder(x, t)\n\t\tx = self.decoder(x, mpis)\n\n\t\treturn x\n", "sub_path": "model_stacked.py", "file_name": "model_stacked.py", "file_ext": "py", "file_size_in_byte": 3582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, 
{"api_name": "torch.nn.MaxUnpool2d", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.MaxUnpool2d", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.MaxUnpool2d", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "439990639", "text": "from Tkinter import *\nfrom scipy import stats\nimport numpy as np\nimport random, time\n# import matplotlib.pyplot as plt\n\n# Particle filter algorithm - finding position of robot in a 2 dimensional space using noisy sensors\n# ===================================================== THEORY =========================================================================================\n# First we need to model the problem as Hidden Markov Model (HMM)\n# Notation: S - state space which can be discrete, continuous, defined on a range (smin, smax), t - time,\n# X - probability distribution of state, Y - probability distribution of observations, y(t) - observation at time t\n# B(X) - sampling distribution of X\n# 1) State space, X which produces a sequence of hidden (unobserved) state x(t) i.e. true location of the robot\n# 2) Transition model - P(x(t) | x(t-1)) i.e. what is the probability of robot being 1 step to the right at the next time step?\n# 3) Sequence of observations - readings Y(t) from noisy sensor and P(Y | X) - what is my sensor error?\n# What are we solving here, inference problem? P(X(t) | Y(t=0,1,2,3....t-1))\n#\n# SOLUTION\n# Elapse time step - compute P(X(t) | y(1:t-1)) i.e. 
what is my probability distribution of X given the history of observations?\n# For every possible value of x in state space, P(x(t) | y(1:t-1)) = Summation over x(t-1) of P(x(t-1) | y(1:t-1)) * P(x(t) | x(t-1))\n# Note the recurrence relation here, the answer for the current time step depends on the answer for the previous time step, so we can use dynamic programming here.\n# \n# Observe step - Compute P(X(t) | y(1:t))\n# P(x(t) | y(1:t)) = P(x(t) | y(1:t-1)) * P(y(t) | x(t))\n#\n#\n# ============================================ MOTIVATION FOR PARTICLE FILTER ==========================================================================\n# Time complexity of the elapse time step is |S|^2 because we have to perform the summation for every state to arrive at a distribution. \n# Thus the motivation for particle filters -> approximate solution to the above\n# We use N particles (samples) to represent P(X)\n# P(x) is approximated by the fraction of particles with value x; if N << |S|, we have many states with P(x) = 0 by the pigeonhole principle\n# Start with a prior distribution of where the robot is at time t = 0, if no clue at all, just use a uniform distribution\n# 1) Elapse time step - each particle is moved by sampling its next position from the transition model\n# x' = sample(P(X' | x))\n# We approximate the new distribution using samples (particles) and thus the reduction in complexity\n# 2) Observe step - downweight samples based on the evidence.\n# w(x) = P(y|x)\n# B(X) = P(y|X) * B'(X)\n# Normalize all the particles so sum of B(X) = 1\n# If we iterate through these 2 steps, over time some of these particles are going to vanish to 0,\n# which means we are getting a coarser approximation of the true distribution. Thus step 3.\n# 3) Resampling - Rather than tracking weighted samples, we resample.\n# N times, we choose from the weighted sample distribution. Draw with replacement.\n# Notice that we are sampling from the sampling distribution, which is a reduced state space, thus the reduction in complexity.\n# weighted particles -> distribution -> unweighted particles\n# Iterate till convergence.\n# \n# So what is being filtered out and when? 
\n# 1) Elapse time step, when we sample under the transition dynamics of the world; as N << |S| most states will end up with 0 or low probability\n# 2) Resampling - we are drawing from a sample distribution which has a reduced state space\n\n# Constants\n\n# State space is CANVAS_WIDTH * CANVAS_HEIGHT\nCANVAS_WIDTH = 400\nCANVAS_HEIGHT = 400\nPARTICLE_RADIUS = 3\nNUM_OF_PARTICLES = 1000\nROBOT_RADIUS = 5\nROBOT_POS = (CANVAS_HEIGHT/2, CANVAS_WIDTH/2)\nSAMPLE_SIZE = 50\nSENSOR_SIGMA = 10\nSENSOR_COVARIANCE = np.array([[SENSOR_SIGMA, 0], [0, SENSOR_SIGMA]])\nROBOT_SIGMA = 50\nSENSOR_MEAN = np.array([20,20])\n\n# Global data structures\nINIT_STATE_TABLE = None\nTRANSITION_TABLE = None\nOBS_ERROR_TABLE = None\nPARTICLE_LOCATION = {}\nPARTICLE_WEIGHT = {}\n\nERROR_TOLERANCE = 0.00001\nUPDATE_INTERVAL = 400\nPARTICLE_DELAY = 400\n\n# Initialize prior state distribution, distribution = [\"uniform\", \"gaussian\"]\ndef init_state(distribution, **vargs):\n\tglobal INIT_STATE_TABLE\n\tif distribution == \"uniform\":\n\t\tprob = 1.0 / (CANVAS_HEIGHT * CANVAS_WIDTH)\n\t\t# np.ndarray.fill() mutates in place and returns None, so build the H x W table of priors with np.full\n\t\tINIT_STATE_TABLE = np.full((CANVAS_HEIGHT, CANVAS_WIDTH), prob)\n\t\tinit_particles(distribution, **vargs)\n\telif distribution == \"gaussian\":\n\t\traise Exception(\"Not implemented\")\n\telse:\n\t\traise Exception(\"Invalid distribution - use one of uniform, gaussian\")\n\n# Construct the transition model probabilities, model = [\"random\", \"gaussian\", \"gaussian-with-drift\", \"stationary\"]\ndef init_transition_model(model, **vargs):\n\tglobal TRANSITION_TABLE\n\tif model == \"random\":\n\t\tTRANSITION_TABLE = np.random.rand(CANVAS_HEIGHT, CANVAS_WIDTH, CANVAS_HEIGHT, CANVAS_WIDTH)\n\t\tfor x in range(TRANSITION_TABLE.shape[0]):\n\t\t\tfor y in range(TRANSITION_TABLE.shape[1]):\n\t\t\t\tnorm = np.sum(TRANSITION_TABLE[x,y])\n\t\t\t\tTRANSITION_TABLE[x,y,:,:] /= norm\n\telif model == \"gaussian\":\n\t\ttry:\n\t\t\tcov = vargs[\"covariance\"]\n\t\texcept KeyError:\n\t\t\traise Exception(\"Please specify covariance matrix (standard deviation)\")\n\t\tTRANSITION_TABLE = np.zeros((2 * CANVAS_HEIGHT, 2 * CANVAS_WIDTH))\n\t\tif cov is None:\n\t\t\t# Default\n\t\t\tcov = np.array([[ROBOT_SIGMA,0],[0,ROBOT_SIGMA]])\n\t\tcoords_x, coords_y = np.mgrid[0:2 * CANVAS_HEIGHT, 0:2 * CANVAS_WIDTH]\n\t\tcoords = np.dstack((coords_x, coords_y))\n\t\t# Generate a multivariate truncated gaussian with mean (x,y) and bounded by (0,CANVAS_HEIGHT) \n\t\t# in the x direction, bounded by (0, CANVAS_WIDTH) in the y direction, with covariance matrix cov\n\t\t# rescale by a, b\n\t\t# Note this is a hack, this is not a truncated multivariate normal distribution. 
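\n\t\t# Added sketch (not the original code): a genuinely truncated draw could\n\t\t# instead use scipy.stats.truncnorm per axis, assuming independent x/y\n\t\t# with mean m and scale s:\n\t\t#   a, b = (0 - m) / s, (2 * CANVAS_HEIGHT - m) / s\n\t\t#   x_sample = stats.truncnorm.rvs(a, b, loc=m, scale=s)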
\n\t\tmean = np.array([CANVAS_HEIGHT, CANVAS_WIDTH])\n\t\trv = stats.multivariate_normal(mean, cov)\n\t\tTRANSITION_TABLE[:,:] = rv.pdf(coords)\n\t\tnorm = np.sum(TRANSITION_TABLE)\n\t\tTRANSITION_TABLE /= norm\n\t\t# Stored as a cumulative sum (CDF) so elapse_time_step can sample it with bin_search\n\t\tTRANSITION_TABLE = np.cumsum(TRANSITION_TABLE).reshape((2 * CANVAS_HEIGHT, 2 * CANVAS_WIDTH))\n\t\t# plt.contourf(coords_x, coords_y, rv.pdf(coords))\n\t\t# plt.show()\n\t\t# assert(abs(np.sum(TRANSITION_TABLE) - 1.0) < ERROR_TOLERANCE)\n\telif model == \"gaussian-with-drift\":\n\t\traise Exception(\"Not implemented\")\n\telif model == \"stationary\":\n\t\traise Exception(\"Not implemented\")\n\telse:\n\t\traise Exception(\"Invalid model - use one of random, gaussian, gaussian-with-drift, stationary\")\n\n# Construct the table of P(y|x), distribution = [\"random\", \"gaussian\"]\ndef init_obs_given_state(distribution, **vargs):\n\tglobal OBS_ERROR_TABLE\n\tif distribution == \"random\":\n\t\traise Exception(\"not implemented\")\n\telif distribution == \"gaussian\":\n\t\t# Typical scenario, sensor gives a reading +- some degree of accuracy. So Y = X + error, error ~ N(SENSOR_MEAN, SENSOR_COVARIANCE)\n\t\ttry:\n\t\t\tcov = vargs[\"covariance\"]\n\t\texcept KeyError:\n\t\t\traise Exception(\"Please specify covariance matrix (standard deviation)\")\n\t\tOBS_ERROR_TABLE = np.zeros((2 * CANVAS_HEIGHT, 2 * CANVAS_WIDTH))\n\t\tif cov is None:\n\t\t\t# Default\n\t\t\tcov = np.array([[SENSOR_SIGMA,0],[0,SENSOR_SIGMA]])\n\t\tcoords_x, coords_y = np.mgrid[0:2 * CANVAS_HEIGHT, 0:2 * CANVAS_WIDTH]\n\t\tcoords = np.dstack((coords_x, coords_y))\n\t\t# Generate a multivariate truncated gaussian with mean (x,y) and bounded by (0,2*CANVAS_HEIGHT) \n\t\t# in the x direction, bounded by (0, 2*CANVAS_WIDTH) in the y direction, with covariance matrix cov\n\t\t# rescale by a, b\n\t\t# Note this is a hack, this is not a truncated multivariate normal distribution. 
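\n\t\t# Added note: weight_particles() below reads P(y | x) from this table by\n\t\t# re-centring it on a particle's location x, roughly:\n\t\t#   prob = get_transition_table_slice(OBS_ERROR_TABLE, x)[y[0], y[1]]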
\n\t\ttry:\n\t\t\tmean = vargs[\"sensor_mean\"]\n\t\texcept KeyError:\n\t\t\t# print(\"sensor_mean is not given, using default\")\n\t\t\tmean = np.array([CANVAS_WIDTH + SENSOR_MEAN[0], CANVAS_HEIGHT + SENSOR_MEAN[1]])\n\t\trv = stats.multivariate_normal(mean, cov)\n\t\tOBS_ERROR_TABLE[:,:] = rv.pdf(coords)\n\t\tnorm = np.sum(OBS_ERROR_TABLE)\n\t\tOBS_ERROR_TABLE /= norm\n\t\t# plt.contourf(coords_x, coords_y, rv.pdf(coords))\n\t\t# plt.show()\n\t\tassert(abs(np.sum(OBS_ERROR_TABLE) - 1.0) < ERROR_TOLERANCE)\n\telse:\n\t\traise Exception(\"Invalid model - use one of random, gaussian\")\n\tpass\n\ndef init_particles(distribution, **vargs):\n\tif distribution == \"uniform\":\n\t\tx_samples = np.random.randint(low=0, high=CANVAS_HEIGHT, size=NUM_OF_PARTICLES)\n\t\ty_samples = np.random.randint(low=0, high=CANVAS_WIDTH, size=NUM_OF_PARTICLES)\n\t\tsamples = np.dstack((x_samples, y_samples))\n\t\tfor i in range(0, samples.shape[1]):\n\t\t\tPARTICLE_LOCATION[i] = samples[0,i]\n\t\t\tPARTICLE_WEIGHT[i] = 1.0\n\telse:\n\t\traise Exception(\"Invalid distribution - must be one of gaussian, uniform\")\n\ndef elapse_time_step(distribution):\n\tglobal PARTICLE_LOCATION\n\tif distribution == \"gaussian\":\n\t\tfor (idx, coords) in PARTICLE_LOCATION.iteritems():\n\t\t\ttransition_model_given_x = get_transition_table_slice(TRANSITION_TABLE, coords).flatten()\n\t\t\trand = random.random()\n\t\t\t# Another method is to sample directly from the closed form, TODO\n\t\t\traw_idx = bin_search(transition_model_given_x, 0, transition_model_given_x.size-1, rand, 0)\n\t\t\t# Unflatten the index; this assumes a square canvas (CANVAS_WIDTH == CANVAS_HEIGHT)\n\t\t\tx = raw_idx // CANVAS_HEIGHT\n\t\t\ty = raw_idx % CANVAS_WIDTH\n\t\t\t# start = time.time()\n\t\t\tPARTICLE_LOCATION[idx] = [x,y]\n\t\t\t# end = time.time()\n\t\t\t# print(str((end - start) * 1000) + \" ms\")\n\n# Observation is np.array([x,y])\ndef weight_particles(observation):\n\tglobal PARTICLE_WEIGHT\n\ttotal = 0\n\tfor (idx, location) in PARTICLE_LOCATION.iteritems():\n\t\tactual_x = location[0]\n\t\tactual_y = location[1]\n\t\tobs_error_table_slice = get_transition_table_slice(OBS_ERROR_TABLE, location)\n\t\tprob = obs_error_table_slice[observation[0], observation[1]]\n\t\ttotal += prob\n\t\tPARTICLE_WEIGHT[idx] *= prob\n\t# Normalize so sum of weights = 1\n\tfor idx, weight in PARTICLE_WEIGHT.iteritems():\n\t\tPARTICLE_WEIGHT[idx] /= total\n\n# Sample particles with probability according to their weights\ndef resample():\n\t# Stochastic Universal Sampling - O(N) \n\t# Build the wheel\n\ttotal = sum(v for v in PARTICLE_WEIGHT.values())\n\twheel = [0]\n\tfor (idx, weight) in PARTICLE_WEIGHT.iteritems():\n\t\twheel.append(wheel[-1] + weight / total)\n\t\t# Take this chance to reset the weight\n\t\tPARTICLE_WEIGHT[idx] = 1.0\n\t\n\trand = random.random()\n\tstep_size = 1.0 / NUM_OF_PARTICLES\n\tnew_particles = [] # Stores index of particle selected\n\tnew_particles.append(bin_search(wheel, 0, len(wheel)-1, rand, 0))\n\twhile len(new_particles) < NUM_OF_PARTICLES: # Sampling with replacement\n\t\trand += step_size\n\t\tif rand > 1:\n\t\t\trand %= 1\n\t\tnew_particles.append(bin_search(wheel, 0, len(wheel)-1, rand, 0))\n\n\tglobal PARTICLE_LOCATION\n\ttemp_particle_location = {}\n\tcount = 0\n\tfor i in new_particles:\n\t\tprev_location = PARTICLE_LOCATION[i-1] # Because interval 0 was added to the wheel, which shifts every subsequent particle index up by 1\n\t\ttemp_particle_location[count] = prev_location\n\t\tcount += 1\n\tPARTICLE_LOCATION = temp_particle_location\n\n# Binary search for floats\ndef bin_search(wheel, start, end, num, 
num_discarded_start_of_list):\n\tif (end - start <= 0): return 1\n\tmid = (end - start)//2 + start\n\ttry:\n\t\tif wheel[mid] < num and num <= wheel[mid+1]:\n\t\t\treturn mid - start + num_discarded_start_of_list + 1\n\t\telif (end - start + 1) == 2:\n\t\t\tif num <= wheel[mid]:\n\t\t\t\treturn 1\n\t\t\telif wheel[end] > num:\n\t\t\t\treturn end\n\t\t\telse:\n\t\t\t\treturn end # Don't know what else to return\n\t\telif wheel[mid] >= num:\n\t\t\treturn bin_search(wheel, start, mid, num, num_discarded_start_of_list)\n\t\telse:\n\t\t\treturn bin_search(wheel, mid+1, end, num, num_discarded_start_of_list + (mid - start + 1))\n\texcept Exception as e:\n\t\t# pass\n\t\tprint(wheel, num)\n\n# Moves robot according to predefined motion dynamics\ndef move_robot():\n\tglobal ROBOT_POS\n\ttransition_model_given_x = get_transition_table_slice(TRANSITION_TABLE, ROBOT_POS)\n\trand = random.random()\n\tfor x in range(0, transition_model_given_x.shape[0]):\n\t\tfor y in range(0, transition_model_given_x.shape[1]):\n\t\t\tif rand < transition_model_given_x[x,y]:\n\t\t\t\t# update location of robot\n\t\t\t\tROBOT_POS = (x,y)\n\t\t\t\tbreak\n\t\telse:\n\t\t\tcontinue\n\t\tbreak\n\ndef get_transition_table_slice(tbl, position):\n\tstart_x = CANVAS_WIDTH - position[0]\n\tend_x = CANVAS_WIDTH + (CANVAS_WIDTH - position[0])\n\tstart_y = CANVAS_HEIGHT - position[1]\n\tend_y = CANVAS_HEIGHT + (CANVAS_HEIGHT - position[1])\n\treturn tbl[start_x:end_x, start_y:end_y]\n\n# Given current location of robot, what is my observation\ndef get_observation():\n\t# Y = X + some gaussian noise\n\t# This is not a truncated normal bounded by the canvas, thus there is non-zero\n\t# probability extending to -inf, +inf in both dimensions, we need to account for that.\n\tnoises = np.random.multivariate_normal(SENSOR_MEAN, SENSOR_COVARIANCE, 1)\n\tnew_x = ROBOT_POS[0] + int(round(noises[0,0]))\n\tnew_y = ROBOT_POS[1] + int(round(noises[0,1]))\n\tnew_x = min(new_x, CANVAS_WIDTH-1)\n\tnew_x = max(new_x, 0)\n\tnew_y = min(new_y, CANVAS_HEIGHT-1)\n\tnew_y = max(new_y, 0)\n\treturn [new_x, new_y]\n\ndef particle_filter():\n\tstart = time.time()\n\telapse_time_step(\"gaussian\")\n\tobs = get_observation()\n\tweight_particles(obs)\n\tresample()\n\tend = time.time()\n\tprint(str((end - start) * 1000) + \" ms\")\n\nclass Application(Canvas):\n\tdef update_clock(self):\n\t\tnow = time.strftime(\"%H:%M:%S\")\n\t\tself.label.configure(text=now)\n\t\tself.root.after(UPDATE_INTERVAL, self.update_clock)\n\t\tmove_robot()\n\t\tself.update_robot()\n\t\tparticle_filter()\n\n\tdef update_robot(self):\n\t\tx1, y1 = (ROBOT_POS[0] - PARTICLE_RADIUS), (ROBOT_POS[1] - PARTICLE_RADIUS)\n\t\tx2, y2 = (ROBOT_POS[0] + PARTICLE_RADIUS), (ROBOT_POS[1] + PARTICLE_RADIUS)\n\t\tself.c.delete('robot')\n\t\tself.c.create_oval(x1, y1, x2, y2, fill=\"red\", tag='robot')\n\n\tdef update_particles(self, event=None):\n\t\tself.c.delete('particles')\n\t\tfor (idx, coord) in PARTICLE_LOCATION.iteritems():\n\t\t\tx1, y1 = (coord[0] - PARTICLE_RADIUS), (coord[1] - PARTICLE_RADIUS)\n\t\t\tx2, y2 = (coord[0] + PARTICLE_RADIUS), (coord[1] + PARTICLE_RADIUS)\n\t\t\tself.c.create_oval(x1, y1, x2, y2, fill=\"green\", tag='particles')\n\t\tself.root.after(UPDATE_INTERVAL, self.update_particles)\n\n\tdef create_grid(self):\n\t\twidth = self.c.winfo_width()\n\t\theight = self.c.winfo_height()\n\t\tself.c.delete('grid_line')\n\n\t\tfor i in range(0, width, 10):\n\t\t\tself.c.create_line([(i,0), (i,height)], tag='grid_line')\n\t\t\tself.c.create_line([(0,i), (width,i)], 
tag='grid_line')\n\n\tdef __init__(self, master=None):\n\t\tself.update_count = 0\n\t\tself.c = Canvas(master, height=CANVAS_HEIGHT, width=CANVAS_WIDTH, bg='white')\n\t\tself.c.pack()\n\t\tself.root = master\n\t\tself.label = Label(text=\"\")\n\t\tself.label.pack()\n\t\tself.update_particles()\n\t\tself.update_robot()\n\t\tself.create_grid()\n\t\tself.update_clock()\n\t\ttime.sleep(PARTICLE_DELAY / 1000.0)\n\t\tself.update_particles()\n\t\t# self.c.bind('', self.update_particles)\n\ndef main():\n\tinit_state(\"uniform\")\n\tinit_transition_model(\"gaussian\", covariance=None)\n\tinit_obs_given_state(\"gaussian\", covariance=None)\n\tprint(\"Initialization complete\")\n\troot = Tk()\n\tapp = Application(master=root)\n\troot.mainloop()\n\t# root.destroy()\n\nif __name__ == \"__main__\":\n\tmain()", "sub_path": "demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 15074, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.dstack", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 115, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 145, "usage_type": "attribute"}, {"api_name": "numpy.dstack", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 155, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 156, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.dstack", "line_number": 171, "usage_type": "call"}, {"api_name": "random.random", "line_number": 183, "usage_type": "call"}, {"api_name": "random.random", "line_number": 219, "usage_type": "call"}, {"api_name": "random.random", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.random.multivariate_normal", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 287, "usage_type": "attribute"}, {"api_name": 
"time.time", "line_number": 297, "usage_type": "call"}, {"api_name": "time.time", "line_number": 302, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 307, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 348, "usage_type": "call"}]} +{"seq_id": "466147952", "text": "\"\"\"\nTo complete this assignment, you should use this API endpoint that has a static\nsubset of the Google Data:\nhttp://python-data.dr-chuck.net/geojson\n\nThis API uses the same parameters (sensor and address) as the Google API. This\nAPI also has no rate limit so you can test as often as you like. If you visit\nthe URL with no parameters, you get a list of all of the address values which\ncan be used with this API.\n\nTo call the API, you need to provide a sensor=false parameter and the address\nthat you are requesting as the address= parameter that is properly URL encoded\nusing the urllib.urlencode() fuction as shown in\nhttp://www.pythonlearn.com/code/geojson.py\n\nTEST DATA / SAMPLE EXECUTION\nYou can test to see if your program is working with a location of \"South Federal\nUniversity\" which will have a place_id of \"ChIJJ8oO7_B_bIcR2AlhC8nKlok\".\n\"\"\"\n\n\"\"\"\nJSON Response\n{\n \"results\": [\n {\n \"access_points\": [],\n \"address_components\": [\n {\n \"long_name\": \"#300\",\n \"short_name\": \"#300\",\n \"types\": [\n \"subpremise\"\n ]\n },\n {\n \"long_name\": \"4001\",\n \"short_name\": \"4001\",\n \"types\": [\n \"street_number\"\n ]\n },\n {\n \"long_name\": \"700 East\",\n \"short_name\": \"700 E\",\n \"types\": [\n \"route\"\n ]\n },\n {\n \"long_name\": \"Salt Lake City\",\n \"short_name\": \"Salt Lake City\",\n \"types\": [\n \"locality\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Salt Lake County\",\n \"short_name\": \"Salt Lake County\",\n \"types\": [\n \"administrative_area_level_2\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Utah\",\n \"short_name\": \"UT\",\n \"types\": [\n \"administrative_area_level_1\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"United States\",\n \"short_name\": \"US\",\n \"types\": [\n \"country\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"84107\",\n \"short_name\": \"84107\",\n \"types\": [\n \"postal_code\"\n ]\n }\n ],\n \"formatted_address\": \"4001 700 E #300, Salt Lake City, UT 84107, USA\",\n \"geometry\": {\n \"location\": {\n \"lat\": 40.6849285,\n \"lng\": -111.8700525\n },\n \"location_type\": \"ROOFTOP\",\n \"viewport\": {\n \"northeast\": {\n \"lat\": 40.68627748029149,\n \"lng\": -111.8687035197085\n },\n \"southwest\": {\n \"lat\": 40.6835795197085,\n \"lng\": -111.8714014802915\n }\n }\n },\n \"place_id\": \"ChIJBVZvCm6KUocRoh4bYfH-h2M\",\n \"plus_code\": {\n \"compound_code\": \"M4MH+XX Salt Lake City, Utah, United States\",\n \"global_code\": \"85GCM4MH+XX\"\n },\n \"types\": [\n \"establishment\",\n \"point_of_interest\",\n \"university\"\n ]\n }\n ],\n \"status\": \"OK\"\n}\n\"\"\"\n\nimport urllib.request as ur\nimport urllib.parse as up\nimport json\n\nservice_url = \"http://py4e-data.dr-chuck.net/json?\"\n\naddress_input = input(\"Enter location: \")\nparams = {\"sensor\": \"false\", \"address\": address_input, \"key\": 42}\nurl = service_url + up.urlencode(params)\n\nprint(\"Receiving:\", url)\ndata = ur.urlopen(url).read()\nprint(\"Retrieved\", len(data), \"characters\")\njson_obj = json.loads(data)\nplace_id = json_obj[\"results\"][0][\"place_id\"]\nprint(\"Place ID:\", place_id)\n", "sub_path": "Access Web Data/Using_GeoJSON_API.py", "file_name": "Using_GeoJSON_API.py", "file_ext": 
"py", "file_size_in_byte": 3566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "urllib.parse.urlencode", "line_number": 131, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 131, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 134, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 134, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "314401682", "text": "'''\nfunction_pipe.py\n\nCopyright 2012-2017 Research Affiliates\n\nAuthors: Christopher Ariza, Max Moroz\n\nCommon usage:\nimport function_pipe as fpn\n'''\n\nimport functools\nimport inspect\nimport collections\nimport types\nimport sys\n\n\n#-------------------------------------------------------------------------------\n# FunctionNode utilities\n\ndef compose(*funcs):\n '''\n Given a list of functions, execute them from right to left, passing\n the returned value of the right f to the left f. Store the reduced function in a FunctionNode\n '''\n # call right first, then left of each pair; each reduction retruns a function\n reducer = functools.reduce(lambda f, g:\n lambda *args, **kaargs: f(g(*args, **kaargs)), funcs)\n # args are reversed to show execution from right to left\n return FunctionNode(reducer, doc_function=compose, doc_args=reversed(funcs))\n\ndef _wrap_unary(func):\n '''Decorator for operator overloads. Given a higher order function that takes one args, wrap it in a FunctionNode function and provide documentation labels.\n '''\n def unary(lhs):\n # wrapped function will prepare correct class, even if a constant\n cls = PipeNode if isinstance(lhs, PipeNode) else FunctionNode\n return cls(func(lhs),\n doc_function=func,\n doc_args=(lhs,)\n )\n return unary\n\ndef _wrap_binary(func):\n '''Decorator for operators. Given a higher order function that takes two args, wrap it in a FunctionNode function and provide documentation labels.\n '''\n def binary(lhs, rhs):\n # wrapped function will prepare correct class, even if a constant\n cls = PipeNode if isinstance(lhs, PipeNode) else FunctionNode\n return cls(func(lhs, rhs),\n doc_function=func,\n doc_args=(lhs, rhs)\n )\n return binary\n\n\ndef _repr(f, doc_args=True):\n '''Provide a string representation of the FN, recursively representing defined arguments.\n '''\n def get_function_name(f):\n '''Get a string representation of the callable, or its code if it is a lambda. 
In some cases, `f` may not be a function, so just return a string.\n '''\n f_type = type(f)\n if f_type is not types.FunctionType or not hasattr(f, '__name__'):\n # functool partial types do not have __name__ attrs, and are not FunctionTypes\n return str(f)\n if f.__name__ == '<lambda>':\n # split on all white space, and rejoin with single space\n return ' '.join(inspect.getsource(f).split())\n return f.__name__\n\n # find FunctionNode; using hasattr because of testing context issues\n if hasattr(f, '_doc_function'):\n if f._doc_function:\n doc_f = get_function_name(f._doc_function)\n if doc_args:\n args = kwargs = ''\n if f._doc_args:\n args = (str(_repr(v)) for v in f._doc_args)\n if f._doc_kwargs:\n kwargs = (k + '=' + str(_repr(v)) for k, v\n in f._doc_kwargs.items())\n if not args and not kwargs:\n return doc_f\n return doc_f + '(' + ','.join(list(args) + list(kwargs)) + ')'\n return doc_f\n else: # we don't know its structure, use _function\n return get_function_name(f._function)\n return get_function_name(f)\n\n\n\nclass FunctionNode:\n '''A wrapper for a callable that can reside in an expression of numerous FunctionNodes, or be modified with unary or binary operators.\n '''\n __slots__ = (\n '_function',\n '_doc_function',\n '_doc_args',\n '_doc_kwargs',\n )\n\n #---------------------------------------------------------------------------\n def __init__(self,\n function,\n *,\n doc_function=None,\n doc_args=None,\n doc_kwargs=None\n ):\n '''\n Args:\n function: a callable\n doc_function: the function to display; will be set to `function` if not provided\n '''\n # if a function node, re-wrap\n if isinstance(function, FunctionNode):\n for attr in self.__slots__:\n setattr(self, attr, getattr(function, attr))\n else:\n if callable(function):\n self._function = function\n else:\n # if not a callable, we upgrade a constant, non function value to be a function that returns that value\n self._function = lambda *args, **kwargs: function\n\n # if not supplied, doc_function is set to function\n self._doc_function = doc_function if doc_function else self._function\n self._doc_args = doc_args\n self._doc_kwargs = doc_kwargs\n\n @property\n def unwrap(self):\n '''The doc_function should be set to the core function being wrapped, no matter the level of wrapping.\n '''\n # if the stored function is using pipe_kwarg_bind, need to go lower\n doc_func = self\n while hasattr(doc_func, '_doc_function'):\n doc_func = getattr(doc_func, '_doc_function')\n return doc_func\n\n def __call__(self, *args, **kwargs):\n '''Call the wrapped function.\n '''\n return self._function(*args, **kwargs)\n\n def __str__(self):\n return '<FN: {}>'.format(_repr(self))\n\n __repr__ = __str__\n\n #__name__ = ''.format(_repr(self, doc_args=False))\n #__name__ = __str__\n\n def partial(self, *args, **kwargs):\n '''Return a new FunctionNode with a partialed function with args and kwargs\n '''\n fn = FunctionNode(functools.partial(self._function, *args, **kwargs))\n for attr in self.__slots__:\n if not getattr(fn, attr):\n setattr(fn, attr, getattr(self, attr))\n return fn\n\n #---------------------------------------------------------------------------\n # all unary operators return a function; the _wrap_unary decorator then wraps this function in a FunctionNode\n\n @_wrap_unary\n def __neg__(self):\n return lambda *args, **kwargs: self(*args, **kwargs) * -1\n\n @_wrap_unary\n def __invert__(self):\n '''This is generally expected to be a Boolean inversion, such as ~ (not) applied to a numpy array or pd.Series.\n '''\n return lambda *args, 
**kwargs: self(*args, **kwargs).__invert__()\n\n @_wrap_unary\n def __abs__(self):\n '''Absolute value; most common usage is on NumPy or Pandas objects, and thus here we use np.abs.\n '''\n import numpy as np\n return lambda *args, **kwargs: np.abs(self(*args, **kwargs))\n\n #---------------------------------------------------------------------------\n # all binary operators return a function; the _wrap_binary decorator then wraps this function in a FunctionNode definition and supplies appropriate doc args. Note both left and right sides are wrapped in FNs to permit operations on constants\n\n @_wrap_binary\n def __add__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) +\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __sub__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) -\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __mul__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) *\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __truediv__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) /\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __pow__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) **\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __radd__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) +\n rhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __rsub__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) -\n rhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __rmul__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) *\n rhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __rtruediv__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) /\n rhs.__class__(rhs)(*args, **kwargs))\n\n # comparison operators, expected to return booleans\n @_wrap_binary\n def __eq__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) ==\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __lt__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) <\n lhs.__class__(rhs)(*args, **kwargs))\n @_wrap_binary\n def __le__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) <=\n lhs.__class__(rhs)(*args, **kwargs))\n @_wrap_binary\n def __gt__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) >\n lhs.__class__(rhs)(*args, **kwargs))\n @_wrap_binary\n def __ge__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) >=\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __ne__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) !=\n lhs.__class__(rhs)(*args, **kwargs))\n\n #---------------------------------------------------------------------------\n # composition operators\n\n def __rshift__(lhs, rhs):\n '''Composition; return a function that will call LHS first, then RHS\n '''\n return compose(rhs, lhs)\n\n def __rrshift__(rhs, lhs):\n '''Composition; return a function that will call LHS first, then RHS\n '''\n return compose(rhs, lhs)\n\n def __lshift__(lhs, rhs):\n '''Composition; return a function that will call RHS first, then LHS\n '''\n return compose(lhs, rhs)\n\n def __rlshift__(rhs, lhs):\n '''Composition; 
return a function that will call RHS first, then LHS\n '''\n return compose(lhs, rhs)\n\n def __or__(lhs, rhs):\n '''Only implemented for PipeNode.\n '''\n raise NotImplementedError()\n\n def __ror__(rhs, lhs):\n '''Only implemented for PipeNode.\n '''\n raise NotImplementedError()\n\n\n#-------------------------------------------------------------------------------\n# PipeNode and utilities\n\n# PipeNode kwargs\nPREDECESSOR_RETURN = 'predecessor_return'\nPREDECESSOR_PN = 'predecessor_pn'\nPN_INPUT = 'pn_input'\nPN_INPUT_SET = {PN_INPUT}\nPIPE_NODE_KWARGS = {PREDECESSOR_RETURN, PREDECESSOR_PN, PN_INPUT}\n\n\nclass PipeNode(FunctionNode):\n '''The multi-call structure of PipeNodes moves a FunctionNode between three states.\n '''\n\n # states\n FACTORY = 'FACTORY'\n EXPRESSION = 'EXPRESSION'\n PROCESS = 'PROCESS'\n\n __slots__ = FunctionNode.__slots__ + (\n '_call_state',\n '_predecessor'\n )\n\n #---------------------------------------------------------------------------\n def __init__(self,\n function,\n *,\n doc_function=None,\n doc_args=None,\n doc_kwargs=None,\n call_state=None,\n predecessor=None\n ):\n super().__init__(function=function,\n doc_function=doc_function,\n doc_args=doc_args,\n doc_kwargs=doc_kwargs\n )\n self._call_state = call_state\n self._predecessor = predecessor\n\n def __str__(self):\n return '<PN: {}>'.format(_repr(self))\n\n def partial(*args, **kwargs):\n '''PipeNode calling is dictated by the PipeNode protocol; partial-like behavior in expressions should be achieved with functions decorated with the pipe_node_factory decorator.\n '''\n raise NotImplementedError()\n\n #---------------------------------------------------------------------------\n # pipe node properties\n\n @property\n def call_state(self):\n return self._call_state\n\n @property\n def predecessor(self):\n return self._predecessor\n\n #---------------------------------------------------------------------------\n # composition operators\n\n def __rshift__(lhs, rhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __rrshift__(rhs, lhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __lshift__(lhs, rhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __rlshift__(rhs, lhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __or__(lhs, rhs):\n '''Call RHS with LHS as an argument; left is passed as kwarg PREDECESSOR_PN. This calls the RHS immediately and does not return an FN unless prepared as a PipeNode\n '''\n return rhs(**{PREDECESSOR_PN:lhs})\n\n def __ror__(rhs, lhs):\n return rhs(**{PREDECESSOR_PN:lhs})\n\n\n #---------------------------------------------------------------------------\n def __getitem__(self, pn_input):\n '''Call self with the passed PipeNodeInput.\n '''\n pni = pn_input if pn_input else PipeNodeInput()\n return self(**{PN_INPUT:pni})\n\n\n#-------------------------------------------------------------------------------\n# decorator utilities\n\ndef _broadcast(factory_args,\n factory_kwargs,\n processing_args,\n processing_kwargs):\n '''Factory args/kwargs are those given to pipe_node_factory at the expression level. Processing args/kwargs are those given as the initial input, and used to call all processing functions. 
After calling factory args with processing args, the result is used as core_callable args\n '''\n core_callable_args = [arg(*processing_args, **processing_kwargs)\n if isinstance(arg, PipeNode) else arg\n for arg in factory_args]\n\n core_callable_kwargs = {kw: arg(*processing_args, **processing_kwargs)\n if isinstance(arg, PipeNode) else arg\n for kw, arg in factory_kwargs.items()}\n\n return core_callable_args, core_callable_kwargs\n\n\ndef core_logger(core_callable):\n '''A decorator to provide output on the execution of each core callable call. Alternative decorators can be used to partial pipe_node_factory and pipe_node.\n '''\n def wrapped(*args, **kwargs):\n prefix = '|'\n print(prefix, str(core_callable), file=sys.stderr)\n post = core_callable(*args, **kwargs)\n return post\n return wrapped\n\n#-------------------------------------------------------------------------------\n# decorators\n\n\ndef pipe_kwarg_bind(*key_positions):\n '''Using FN labels as arguments, define what positional arguments of the wrapped function will receive from the common FN kwargs.\n '''\n def decorator(f):\n def wrapped(*args, **kwargs):\n # extract args from kwargs based on order of key_positions\n target_args = []\n for k in key_positions:\n target_args.append(kwargs.pop(k))\n target_kwargs = {k:v for k, v in kwargs.items()\n if k not in PIPE_NODE_KWARGS}\n return f(*target_args, *args, **target_kwargs)\n return PipeNode(wrapped, doc_function=f)\n return decorator\n\n\ndef pipe_node_factory(core_callable,\n core_decorator=core_logger):\n '''This is a decorator.\n\n Upgrade keyword-only arguments from a function that needs expression level args.\n '''\n decorated_core_callable = core_decorator(core_callable)\n\n def factory_f(*f_args, **f_kwargs):\n '''This is the function returned by the decorator, used to create the FunctionNode that resides in expressions after being called with arguments.\n\n f_args and f_kwargs are passed to the core_callable; if f_args or f_kwargs are FunctionNode instances, they will be called with the processing args and kwargs (including PN_INPUT), either from process_f or (if innermost) from expression args.\n '''\n def expression_f(*e_args, **e_kwargs):\n '''This is the FunctionNode that resides in expressions prior to `|` operator evaluation. When called with `|`, the predecessor is passed in e_kwargs as PREDECESSOR_PN. 
In this usage the e_args will always be empty.\n\n When in the innermost position, expression_f is never called with `|` but with the PN_INPUT; this situation is identified and the core_callable is called immediately.\n\n e_args will only be used as an innermost call.\n '''\n # identify innermost condition as when the expression level kwargs consist only of PN_INPUT\n if set(e_kwargs.keys()) == PN_INPUT_SET:\n # as this is innermost, processing args (i.e., PipeNodeInput) are given here at the expression level (as no Pipe operator has been used to call the innermost)\n core_callable_args, core_callable_kwargs = _broadcast(\n factory_args=f_args,\n factory_kwargs=f_kwargs,\n processing_args=e_args, # not p_args\n processing_kwargs=e_kwargs) # not p_kwargs\n\n # pack PipeNode protocol kwargs; when used as innermost, a core_callable can only expect to have a PN_INPUT\n core_callable_kwargs[PN_INPUT] = e_kwargs[PN_INPUT]\n\n return decorated_core_callable(*core_callable_args,\n **core_callable_kwargs)\n\n predecessor_pn = e_kwargs.get(PREDECESSOR_PN)\n\n def process_f(*p_args, **p_kwargs):\n # call the predecessor PipeNode (here a process_f) with these processing args; these are always the args given as the initial input to the innermost function, generally a PipeNodeInput\n predecessor_return = predecessor_pn(*p_args, **p_kwargs)\n\n core_callable_args, core_callable_kwargs = _broadcast(\n factory_args=f_args,\n factory_kwargs=f_kwargs,\n processing_args=p_args,\n processing_kwargs=p_kwargs)\n\n # pack PipeNode protocol kwargs\n core_callable_kwargs[PN_INPUT] = p_kwargs[PN_INPUT]\n core_callable_kwargs[PREDECESSOR_PN] = predecessor_pn\n core_callable_kwargs[PREDECESSOR_RETURN] = predecessor_return\n\n return decorated_core_callable(*core_callable_args,\n **core_callable_kwargs)\n\n # we must return a PipeNode here, as this is the final thing returned and might be passed on to another series func\n return PipeNode(process_f,\n doc_function=core_callable,\n #doc_args=e_args,\n #doc_kwargs=e_kwargs, # TODO: does not work\n call_state=PipeNode.PROCESS,\n predecessor=predecessor_pn)\n return PipeNode(expression_f,\n doc_function=core_callable,\n doc_args=f_args,\n doc_kwargs=f_kwargs,\n call_state=PipeNode.EXPRESSION)\n # return a function node so as to make doc_function available in test\n return PipeNode(factory_f,\n doc_function=core_callable,\n call_state=PipeNode.FACTORY)\n\n\ndef pipe_node(core_callable, core_decorator=core_logger):\n '''Decorate a function that takes no expression-level args.\n '''\n # create a factory and call it once with no args to get an expression-level function\n return pipe_node_factory(core_callable,\n core_decorator=core_decorator)()\n\n\n#-------------------------------------------------------------------------------\nclass PipeNodeInput:\n '''PipeNode input to support store and recall; subclassable to expose other attributes and parameters.\n '''\n\n def __init__(self):\n self._store = collections.OrderedDict()\n\n def store(self, key, value):\n if key in self._store:\n raise KeyError('cannot store the same key', key)\n self._store[key] = value\n\n def recall(self, key):\n return self._store[key]\n\n def store_items(self):\n return self._store.items()\n\n\n#-------------------------------------------------------------------------------\n# utility PipeNodes\n\n@pipe_node_factory\ndef store(label, **kwargs):\n kwargs[PN_INPUT].store(label, kwargs[PREDECESSOR_RETURN])\n return kwargs[PREDECESSOR_RETURN]\n\n@pipe_node_factory\ndef recall(label, **kwargs):\n return 
kwargs[PN_INPUT].recall(label)\n\n@pipe_node_factory\ndef call(*args, **kwargs):\n '''Call the PipeNode arguments with the PipeNodeInput as necessary (which happens in the broadcast routine in handling *args)\n '''\n return args[-1] # the last result is returned\n\n", "sub_path": "function_pipe.py", "file_name": "function_pipe.py", "file_ext": "py", "file_size_in_byte": 21523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "functools.reduce", "line_number": 28, "usage_type": "call"}, {"api_name": "types.FunctionType", "line_number": 65, "usage_type": "attribute"}, {"api_name": "inspect.getsource", "line_number": 70, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 183, "usage_type": "call"}, {"api_name": "{'np': 'numpy'}.__slots__", "line_number": 330, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 437, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 549, "usage_type": "call"}]} +{"seq_id": "372369657", "text": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport av\nimport torch as th\nfrom torch.utils.data import Dataset\nimport pandas as pd\nimport os\nimport numpy as np\nimport random\nimport ffmpeg\nimport time\nimport re\nimport csv\nimport pickle\nfrom joblib import Parallel, delayed\nimport subprocess\n\ndef valid_video(root_dir, vid_idx, video_id):\n vid_path = os.path.join(root_dir, video_id + '.mp4')\n try:\n probe = ffmpeg.probe(vid_path)\n video_stream = next((\n stream for stream in probe['streams'] if stream['codec_type'] == 'video'), \n None\n )\n if video_stream and float(video_stream['duration']) > 4.1:\n print(f\"{vid_idx}: True\", flush=True)\n return True\n else:\n print(f\"{vid_idx}: False (duration short)\", flush=True)\n return False\n except Exception:\n print(f\"{vid_idx}: False\", flush=True)\n return False\n\n\ndef filter_videos(root_dir, vid_paths):\n all_indices = Parallel(n_jobs=30)(delayed(valid_video)(root_dir, vid_idx, vid_paths[vid_idx][0]) for vid_idx in range(0, len(vid_paths)))\n valid_indices = [i for i, val in enumerate(all_indices) if val]\n return valid_indices\n\n\nclass HT100M_Dataset(Dataset):\n \"\"\"HowTo100M Video-Text loader.\"\"\"\n\n def __init__(\n self,\n csv_file='data/howto.csv',\n video_root='/datasets01/HowTo100M/022520/videos',\n caption_root='/private/home/mandelapatrick/data/howto100m_csv',\n token_to_word_path='data/dict.npy',\n min_time=4.0,\n fps=16,\n num_frames=16,\n size=224,\n crop_only=False,\n center_crop=True,\n benchmark=False,\n max_words=20,\n num_candidates=1,\n random_left_right_flip=False,\n num_clips=2\n ):\n \"\"\"\n Args:\n \"\"\"\n print(\"Loading HT100M dataset\")\n assert isinstance(size, int)\n\n # Get csv file\n csv_file = os.path.join(os.path.dirname(__file__), csv_file)\n if not os.path.exists(csv_file):\n i = 0\n file_list = []\n for file_name in os.listdir(video_root):\n if i % 1000 == 0:\n print(i, file_name)\n file_list.append(file_name)\n i += 1\n \n with open(csv_file, 'w', newline='') as outcsv:\n fieldnames = ['video_id']\n writer = csv.DictWriter(outcsv, fieldnames=fieldnames)\n writer.writeheader()\n for idx, vid_id in enumerate(file_list):\n # use the loop's own counter for progress (i is stale here)\n if idx % 1000 == 0:\n print(idx, flush=True)\n writer.writerow({'video_id': vid_id.split('.')[0]})\n \n # Get video paths\n with open(csv_file, newline='') as f:\n reader = csv.reader(f)\n 
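# Added note: csv.reader also yields the 'video_id' header row here; entries\n # that do not probe as playable videos are dropped below via self.valid_indices.\n 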
self._path_to_videos = list(reader)\n\n # Get valid indices\n vid_valid_file = os.path.join(os.path.dirname(__file__), 'data/howto_valid_filtered_audio.pkl')\n if not os.path.exists(vid_valid_file):\n self.valid_indices = filter_videos(video_root, self._path_to_videos)\n with open(vid_valid_file, 'wb') as handle:\n pickle.dump(\n self.valid_indices, \n handle, \n protocol=pickle.HIGHEST_PROTOCOL\n )\n else:\n with open(vid_valid_file, 'rb') as handle:\n self.valid_indices = pickle.load(handle)\n\n self.video_root = video_root\n self.caption_root = caption_root\n self.min_time = min_time\n self.size = size\n self.num_frames = num_frames\n self.fps = fps\n self.num_sec = self.num_frames / float(self.fps)\n self.crop_only = crop_only\n self.center_crop = center_crop\n self.benchmark = benchmark\n self.max_words = max_words\n token_to_word = np.load(os.path.join(os.path.dirname(__file__), token_to_word_path))\n self.word_to_token = {}\n for i, t in enumerate(token_to_word):\n self.word_to_token[t] = i + 1\n self.num_candidates = num_candidates\n self.random_flip = random_left_right_flip\n self.num_clips = num_clips\n self._num_retries = 10\n self.num_reverse_clips = 2\n\n def __len__(self):\n return len(self.valid_indices)\n\n def _get_video_ffmpeg(self, video_path, start, end):\n start_seek = random.randint(start, int(max(start, end - self.num_sec)))\n cmd = (\n ffmpeg\n .input(video_path, ss=start_seek, t=self.num_sec + 0.1)\n .filter('fps', fps=self.fps)\n )\n if self.center_crop:\n aw, ah = 0.5, 0.5\n else:\n aw, ah = random.uniform(0, 1), random.uniform(0, 1)\n if self.crop_only:\n cmd = (\n cmd.crop('(iw - {})*{}'.format(self.size, aw),\n '(ih - {})*{}'.format(self.size, ah),\n str(self.size), str(self.size))\n )\n else:\n cmd = (\n cmd.crop('(iw - min(iw,ih))*{}'.format(aw),\n '(ih - min(iw,ih))*{}'.format(ah),\n 'min(iw,ih)',\n 'min(iw,ih)')\n .filter('scale', self.size, self.size)\n )\n if self.random_flip and random.uniform(0, 1) > 0.5:\n cmd = cmd.hflip()\n out, _ = (\n cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')\n .run(capture_stdout=True, quiet=True)\n )\n video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3])\n video = th.from_numpy(video)\n video = video.permute(3, 0, 1, 2)\n if video.shape[1] < self.num_frames:\n zeros = th.zeros((3, self.num_frames - video.shape[1], self.size, self.size), dtype=th.uint8)\n video = th.cat((video, zeros), axis=1)\n # return video[:, :self.num_frames]\n video = video.float()\n video = video / 255.0\n return video[:, :self.num_frames], start_seek\n\n def _split_text(self, sentence):\n w = re.findall(r\"[\\w']+\", str(sentence))\n return w\n\n def _words_to_token(self, words):\n words = [self.word_to_token[word] for word in words if word in self.word_to_token]\n if words:\n we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words)\n return we\n else:\n return th.zeros(self.max_words, dtype=th.long)\n\n def _zero_pad_tensor_token(self, tensor, size):\n if len(tensor) >= size:\n return tensor[:size]\n else:\n zero = th.zeros(size - len(tensor)).long()\n return th.cat((tensor, zero), dim=0)\n\n def words_to_ids(self, x):\n return self._words_to_token(self._split_text(x))\n\n def _find_nearest_candidates(self, caption, ind):\n start, end = ind, ind\n diff = caption['end'][end] - caption['start'][start]\n n_candidate = 1\n while n_candidate < self.num_candidates:\n if start == 0:\n return 0\n elif end == len(caption) - 1:\n return start - (self.num_candidates - n_candidate)\n elif caption['end'][end] - 
caption['start'][start - 1] < caption['end'][end + 1] - caption['start'][start]:\n start -= 1\n else:\n end += 1\n n_candidate += 1\n return start\n\n def _get_text(self, caption):\n cap = pd.read_csv(caption)\n ind = random.randint(0, len(cap) - 1)\n if self.num_candidates == 1:\n words = self.words_to_ids(cap['text'].values[ind])\n else:\n words = th.zeros(self.num_candidates, self.max_words, dtype=th.long)\n cap_start = self._find_nearest_candidates(cap, ind)\n for i in range(self.num_candidates):\n words[i] = self.words_to_ids(cap['text'].values[max(0, min(len(cap['text']) - 1, cap_start + i))])\n start, end = cap['start'].values[ind], cap['end'].values[ind]\n #TODO: May need to be improved for edge cases. \n if end - start < self.min_time:\n diff = self.min_time - end + start\n start = max(0, start - diff / 2)\n end = start + self.min_time \n return words, int(start), int(end) \n\n def __getitem__(self, idx):\n \n for i_try in range(self._num_retries):\n \n # Get video id and path\n index_capped = self.valid_indices[idx]\n video_id = self._path_to_videos[index_capped][0]\n video_path = os.path.join(self.video_root, video_id + '.mp4')\n video_list = []\n text_list = []\n audio_list = []\n \n while len(video_list) < self.num_clips:\n # Get caption\n text, start, end = self._get_text(os.path.join(self.caption_root, video_id + '.csv'))\n\n # Decode video\n video = None\n try:\n video, start_sec = self._get_video_ffmpeg(video_path, start, end)\n except Exception as e:\n print(f\"Failed to load video from {video_path} with error {e}\")\n if video is None:\n # let's try another video\n if i_try > self._num_retries // 2:\n idx = random.randint(0, len(self.valid_indices) - 1)\n break\n \n video_list.append(video)\n text_list.append(text)\n\n if len(video_list) == self.num_clips:\n break\n\n if i_try == self._num_retries - 1:\n raise RuntimeError(\n \"Failed to fetch video after {} retries.\".format(\n self._num_retries\n )\n )\n\n # Add reversal option\n for i in range(self.num_clips):\n # Clone frames and spec\n frames = video_list[i].clone()\n text = text_list[i].clone()\n\n for r_ix in range(self.num_reverse_clips):\n # Reverse audio and video\n if r_ix % 2 == 1:\n frames = frames.flip(1) # C T H W \n text = text.flip(0) # T\n \n video_list.append(frames)\n text_list.append(text)\n \n if self.num_reverse_clips == 2:\n video_list = [video_list[i] for i in [0, 2, 1, 3]]\n text_list = [text_list[i] for i in [0, 2, 1, 3]]\n\n if self.num_clips > 1:\n video = th.cat(video_list, dim=0)\n text = th.cat(text_list, dim=0)\n else:\n video = video_list[0]\n text = text_list[0]\n\n label = 0\n vid_idx = index_capped\t\n\n return video, text, label, vid_idx, index_capped", "sub_path": "datasets/HT100MDataset.py", "file_name": "HT100MDataset.py", "file_ext": "py", "file_size_in_byte": 10905, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "ffmpeg.probe", "line_number": 22, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 39, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.dirname", 
"line_number": 72, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 76, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 84, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 101, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 121, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 135, "usage_type": "call"}, {"api_name": "ffmpeg.input", "line_number": 137, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 144, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 165, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 170, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 186, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 215, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 220, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 294, "usage_type": "call"}]} +{"seq_id": "601568287", "text": "import sys\nimport socket\nimport json\nfrom time import sleep\n\nimport machine\nfrom machine import Timer\nfrom network import WLAN\nimport _thread\n\nimport LIS2HH12\nimport LTR329ALS01\nimport MPL3115A2\nimport SI7006A20\nfrom pycom import rgbled, heartbeat\n\ncolor = 
0x000000\n\nclass PySense:\n def __init__(self):\n self._accelerometer = LIS2HH12.LIS2HH12()\n self._lightSensor = LTR329ALS01.LTR329ALS01()\n self._tempPressureAlt = MPL3115A2.MPL3115A2()\n self._tempHumidity = SI7006A20.SI7006A20()\n\n def getRoll(self):\n return self._accelerometer.roll()\n\n def getPitch(self):\n return self._accelerometer.pitch()\n\n def getAcceleration(self):\n return self._accelerometer.acceleration()\n\n def getLuminosity(self):\n return self._lightSensor.light()\n\n def getTemperature1(self):\n return self._tempPressureAlt.temperature()\n\n def getTemperature2(self):\n return self._tempHumidity.temperature()\n\n def getHumidity(self):\n return self._tempHumidity.humidity()\n\n def getPressure(self):\n return self._tempPressureAlt.pressure()\n\n def getAltitude(self):\n return self._tempPressureAlt.altitude()\n\n def getData(self):\n acceleration = self.getAcceleration()\n\n data = {\n \"gyro\": {\n \"pitch\": self.getPitch(),\n \"roll\": self.getRoll(),\n \"x\": acceleration[0],\n \"y\": acceleration[1],\n \"z\": acceleration[2]\n },\n \"light\": self.getLuminosity(),\n \"temperature1\": self.getTemperature1(),\n \"temperature2\": self.getTemperature2(),\n \"humidity\": self.getHumidity()\n }\n\n if self.inPressureMode():\n data[\"pressure\"] = self.getPressure()\n elif self.inAltitudeMode():\n data[\"altitude\"] = self.getAltitude()\n\n return data\n\n def inPressureMode(self):\n return self._tempPressureAlt.mode == MPL3115A2.PRESSURE\n\n def inAltitudeMode(self):\n return self._tempPressureAlt.mode == MPL3115A2.ALTITUDE\n\nclass PyServer:\n def __init__(self, port):\n self._port = port\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.bind(('', port))\n self._sensor = PySense()\n\n def listen(self, wlan: WlanThreaded):\n global color\n print('Listening for sockets.')\n self._socket.settimeout(5)\n self._socket.listen(1)\n while True:\n try:\n color = 0x000030\n print('Waiting for client connection')\n accepting = True\n while accepting:\n try:\n (clientSocket, address) = self._socket.accept()\n color = 0x003000\n print('Client connected from <%s>' % (str(address)))\n accepting = False\n except OSError as e:\n if e.errno != 11:\n raise e\n elif not wlan.is_connected():\n print('Disconnected from WLAN network')\n raise e\n while True:\n data = json.dumps(self._sensor.getData()) + '\\n'\n print(data)\n clientSocket.send(data)\n sleep(0.5)\n except KeyboardInterrupt:\n self._socket.close()\n break\n except OSError as e:\n self._socket.close()\n break\n except socket.error as e:\n self._socket.close()\n break\n\nclass WlanThreaded:\n TIMEOUT = 5000\n def __init__(self, ssid:str='', key:str=''):\n self._ssid = ssid\n self._key = key\n self._wlan = WLAN(mode=WLAN.STA)\n self._running = False\n _thread.start_new_thread(self._connect, ())\n\n def is_connected(self):\n return self._wlan.isconnected()\n\n def print_wlan(self):\n print(self._wlan.ifconfig())\n\n def stop(self):\n self._running = False\n\n def _connect(self):\n global color\n self._running = True\n try:\n while self._running:\n color = 0x300000\n print('Connecting to <%s>' % (self._ssid))\n self._wlan.scan()\n timer = Timer.Chrono()\n timer.start()\n self._wlan.connect(ssid=self._ssid, auth=(WLAN.WPA2, self._key), timeout=WlanThreaded.TIMEOUT)\n while not self._wlan.isconnected():\n if not self._running:\n break\n duration = timer.read_ms()\n if duration > WlanThreaded.TIMEOUT:\n break\n sleep(0.1)\n if self._running and self._wlan.isconnected():\n self.print_wlan()\n 
self._start_server()\n except KeyboardInterrupt:\n pass\n self._running = False\n color = 0xff0000\n print('Disconnecting from <%s>' % (self._ssid))\n while True:\n try:\n self._wlan.disconnect()\n break\n except KeyboardInterrupt:\n pass\n color = 0x000000\n print('Shutting down')\n\n def _start_server(self):\n global color\n color = 0x300030\n print('Starting server')\n pyServer = PyServer(10000)\n pyServer.listen(self)\n color = 0x300030\n print('Stopped server')\n\nheartbeat(False)\nwlan = WlanThreaded('brw-pi', 'brentreinaertwout')\ntry:\n while True:\n rgbled(0x000000)\n sleep(0.5)\n rgbled(color)\n sleep(0.1)\nexcept KeyboardInterrupt:\n wlan.stop()\n sys.exit(0)\n", "sub_path": "master/networking-and-interfacing-iot-platforms/practica/3/1.2-wifi-rpi/pycom/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "LIS2HH12.LIS2HH12", "line_number": 21, "usage_type": "call"}, {"api_name": "LTR329ALS01.LTR329ALS01", "line_number": 22, "usage_type": "call"}, {"api_name": "MPL3115A2.MPL3115A2", "line_number": 23, "usage_type": "call"}, {"api_name": "SI7006A20.SI7006A20", "line_number": 24, "usage_type": "call"}, {"api_name": "MPL3115A2.PRESSURE", "line_number": 78, "usage_type": "attribute"}, {"api_name": "MPL3115A2.ALTITUDE", "line_number": 81, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 86, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 86, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 86, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 113, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 116, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 123, "usage_type": "attribute"}, {"api_name": "network.WLAN", "line_number": 132, "usage_type": "call"}, {"api_name": "network.WLAN.STA", "line_number": 132, "usage_type": "attribute"}, {"api_name": "_thread.start_new_thread", "line_number": 134, "usage_type": "call"}, {"api_name": "machine.Timer.Chrono", "line_number": 153, "usage_type": "call"}, {"api_name": "machine.Timer", "line_number": 153, "usage_type": "name"}, {"api_name": "network.WLAN.WPA2", "line_number": 155, "usage_type": "attribute"}, {"api_name": "network.WLAN", "line_number": 155, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}, {"api_name": "pycom.heartbeat", "line_number": 189, "usage_type": "call"}, {"api_name": "pycom.rgbled", "line_number": 193, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 194, "usage_type": "call"}, {"api_name": "pycom.rgbled", "line_number": 195, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 196, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 199, "usage_type": "call"}]} +{"seq_id": "293128523", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name=\"index\"),\n\turl(r'^show/(?P\\w+)$', views.show),\n\turl(r'^createproduct$', views.createProduct, name=\"create_product\"),\n\turl(r'^createcategory$', views.createCategory, name=\"create_category\"),\n\t# url(r'^upload_image$', views.upload_pic, name=\"upload_pic\"),\n\turl(r'^item_description/(?P\\w+)$', views.item_description, name=\"item_description\"),\n]\n", "sub_path": "apps/products/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "575997199", "text": "import argparse\nimport subprocess\nimport sys\n\nfrom . file_io import load_env, save_env, env_file\n\n\ndef install():\n parser = argparse.ArgumentParser(\n description='Install packages in the active environment')\n parser.add_argument('--pip', action='store_true',\n help=\"Install packages from PyPI with pip\")\n parser.add_argument('package_spec', nargs='+')\n parser.add_argument('-c', nargs='?', dest='channel',\n help=\"Conda channel for packages\")\n \n args = parser.parse_args(sys.argv[2:])\n\n try:\n env = load_env()\n except FileNotFoundError:\n print(f\"This environment isn't managed by Abode, use conda instead.\")\n return None\n\n if args.pip:\n for each in env['dependencies']:\n try:\n pip_packages = each['pip']\n except TypeError:\n continue\n # Found the pip dependencies\n pip_packages.extend(args.package_spec)\n else:\n env['dependencies'].extend(args.package_spec)\n\n if args.channel:\n env['channels'].insert(0, args.channel)\n\n save_env(env)\n\n subprocess.run(['conda', 'env', 'update', '-f', env_file(env['name'])])", "sub_path": "abode/install.py", "file_name": "install.py", "file_ext": "py", "file_size_in_byte": 1221, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "file_io.load_env", "line_number": 20, "usage_type": "call"}, {"api_name": "file_io.save_env", "line_number": 39, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 41, "usage_type": "call"}, {"api_name": "file_io.env_file", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "242172306", "text": "\"\"\"\nProgram predpostavlja obstojeco, urejeno bazo s podatki za dolocen prodajni segment. 
Omogoca klcanje podatkov iz baze in vrne excel datoteko s strukturiranim zapisom prodajnega programa\n\n\"\"\"\nfrom pathlib import Path\nfrom re import sub, split\nfrom itertools import count\nfrom csv import writer, reader, QUOTE_MINIMAL\nfrom collections import defaultdict\nimport sqlite3\nfrom openpyxl import Workbook, styles\n\nimport pomozne_funcV2 as pfun\nimport pomozne_func_sqlite as pfuns\n\n# region VpisExcel\n\nsegment = 'chiller'\nskupine = None\nizvedbe = None\nvelikosti = None\nvelikosti_cevni = None\nprevodi = dict()\n\ndef poisci_skupine():\n global skupine\n b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n baza = b.cursor()\n baza.execute('''SELECT DISTINCT \"{}\" \n FROM \"GENERAL\" \n ORDER BY NumID'''.format('Group'))\n skupine = [i[0] for i in baza.fetchall()]\n b.close()\n return skupine\n\ndef poisci_izvedbe(skupina):\n global izvedbe\n b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n baza = b.cursor()\n baza.execute('''SELECT DISTINCT \"{}\" \n FROM \"GENERAL\"\n WHERE \"Group\"=\"{}\"\n ORDER BY NumID'''.format('Version', skupina))\n izvedbe = [i[0] for i in baza.fetchall()]\n b.close()\n return izvedbe\n\ndef poisci_velikosti(skupina, izvedba):\n global velikosti\n b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n baza = b.cursor()\n baza.execute('''SELECT DISTINCT \"{}\" \n FROM \"GENERAL\"\n WHERE \"Group\"=\"{}\" AND Version=\"{}\"\n ORDER BY NumID'''.format('Size', skupina, izvedba))\n velikosti = [i[0] for i in baza.fetchall() if i[-1] != 'T']\n b.close()\n return velikosti\n\ndef poisci_velikosti_cevni(skupina, izvedba):\n global velikosti_cevni\n b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n baza = b.cursor()\n baza.execute('''SELECT DISTINCT \"{}\" \n FROM \"GENERAL\"\n WHERE \"Group\"=\"{}\" AND Version={}\n ORDER BY NumID'''.format('Size', skupina, izvedba))\n velikosti_cevni = [i[0] for i in baza.fetchall() if i[-1] == 'T']\n b.close()\n return velikosti_cevni\n\nclass ExDatoteka:\n st_opis = 0\n st_list = 0\n st_zvezek = 0\n zvezek = Workbook()\n ex_skupina = 'Neznano'\n ex_izvedbe = set()\n\n def __init__(self, ex_skupina):\n ExDatoteka.ex_skupina = ex_skupina\n ExDatoteka.st_opis = 0\n ExDatoteka.st_zvezek += 1\n ExDatoteka.zvezek = Workbook()\n\n @classmethod\n def ex_shrani(self):\n ExDatoteka.zvezek.save(self.ex_skupina + '.xlsx')\n \n @classmethod\n def ex_zbrisi_sheet(self):\n std = ExDatoteka.zvezek.get_sheet_by_name('Sheet')\n ExDatoteka.zvezek.remove_sheet(std)\n\nclass ExStran(ExDatoteka):\n\n ex_opis = '***Splošni opis enote***'\n # postavke = [('Hladilna moč', 'kW'), ('EER (EN14511 metoda)', ''), \n # ('ESEER (EN14511 metoda)', ''), ('SEER (Reg. EU 2016/2281)', ''), ('Električna moč', 'kW'), ('El. priključek', ''), \n # ('Zvočni tlak (SPL)', 'dB(A)'), ('Zvočna moč (PWL)', 'dB(A)'),\n # ('Število hladilnih krogov', ''), ('Število kompresorjev', ''), ('Dolžina', 'mm'), ('Širina', 'mm'),('Višina', 'mm'), ('Teža', 'kg')]\n \n postavke = [('Hladilna moč', 'kW'), ('EER (EN14511 metoda)', ''), \n ('ESEER (EN14511 metoda)', ''), ('SEER (Reg. EU 2016/2281)', ''),\n ('El. 
priključek', ''), \n ('Zvočni tlak (SPL)', 'dB(A)'), ('Zvočna moč (PWL)', 'dB(A)'),\n ('Število hladilnih krogov', ''), ('Število kompresorjev', ''), ('Dolžina', 'mm'), ('Širina', 'mm'),('Višina', 'mm'), ('Teža', 'kg')]\n\n def __init__(self, ex_izvedba):\n self.ex_izvedba = ex_izvedba\n \n if not ex_izvedba in ExDatoteka.zvezek.sheetnames:\n temp_ime_lista = '_'.join(\n [ExDatoteka.ex_skupina, ex_izvedba])\n ExDatoteka.zvezek.create_sheet(temp_ime_lista)\n self.ex_stran = ExDatoteka.zvezek[temp_ime_lista]\n\n ExDatoteka.ex_izvedbe.add(ex_izvedba)\n ExDatoteka.st_list += 1\n ExDatoteka.st_opis = 0\n \n def temp_naslovna_vr(self):\n naslovna = ['Zap. št.', 'Prodajni program', 'Količina',\n 'Cena/kos', 'Prodajna cena']\n for stolpec, vrednost in enumerate(naslovna, 1):\n _ = self.ex_stran.cell(column=stolpec, row=1, value=vrednost)\n\n def temp_dimenzioniraj(self):\n self.ex_stran.column_dimensions['A'].width = 5\n self.ex_stran.column_dimensions['B'].width = 60\n self.ex_stran.column_dimensions['C'].width = 10\n self.ex_stran.column_dimensions['D'].width = 15\n self.ex_stran.column_dimensions['E'].width = 15\n double = styles.Side(border_style=\"double\", color=\"111111\")\n for c in self.ex_stran[1]:\n c.fill = styles.PatternFill(\"solid\", fgColor='ffff99')\n c.alignment = styles.Alignment(wrap_text=True)\n c.border = styles.Border(bottom=double)\n for v in self.ex_stran.iter_rows():\n v[1].alignment = styles.Alignment(wrap_text=True)\n if v[0].value:\n for c in v:\n c.font = styles.Font(bold=True)\n elif v[2].value:\n for c in v[3:]:\n c.number_format = '0.00'\n\n def ex_zapisi_podatke(self, objekt):\n i = objekt.t_dol * (ExDatoteka.st_opis-1) + 3\n self.ex_stran.cell(column=1, row=i, \n value=str(ExDatoteka.st_opis) + '.')\n self.ex_stran.cell(column=2, row=i, \n value=' '.join(['Hladilni agregat Climaveneta', ExDatoteka.ex_skupina + '/', self.ex_izvedba, objekt.velikost]))\n self.ex_stran.cell(column=2, row=i+1, value=objekt.ex_opis)\n self.ex_stran.cell(column=2, row=i+2, \n value=' '.join(\n ['PROIZVAJALEC:', \n 'Mitsubishi Electric Hydronics & IT Cooling Systems S.p.A, Italija']))\n self.ex_stran.cell(column=2, row=i+3, \n value='UVOZNIK: REAM d.o.o., Trzin')\n self.ex_stran.cell(column=2, row=i+5, value='TEHNIČNI OPIS:')\n for j in range(len(objekt.tehnicni_podatki)):\n if objekt.tehnicni_podatki[j]:\n _ = '{}: {} {}'.format(ExStran.postavke[j][0], \n objekt.tehnicni_podatki[j], ExStran.postavke[j][1])\n self.ex_stran.cell(column=2, row=i+j+6, value=_)\n vr = i+len(objekt.tehnicni_podatki)+5\n self.ex_stran.cell(column=3, row=vr, value=1)\n self.ex_stran.cell(column=4, row=vr, value=0.00)\n self.ex_stran.cell(column=5, row=vr, \n value=r'=$C{}*$D{}'.format(vr, vr))\n return None\n\nclass ExOpis(ExStran):\n\n def __init__(self, ob_stran, velikost):\n ExDatoteka.st_opis += 1\n self.velikost = velikost\n self.ex_productID = ' '.join(\n filter(None, \n [ExDatoteka.ex_skupina, ob_stran.ex_izvedba, self.velikost]))\n self.tehnicni_podatki = []\n self.t_dol = 0\n\n def ex_sestavi_naziv(self):\n return ' '.join(['Hladilni agragat Climaveneta', self.ex_productID])\n \n def ex_dimenzije(self):\n b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n baza = b.cursor()\n baza.execute('''SELECT \"A\", \"B\", \"H\" \n FROM GENERAL\n WHERE productID=?''', (self.ex_productID,))\n _ = [i for i in baza.fetchall()[0]]\n dimenzije = '{} x {} x {}'.format(*_)\n b.close()\n return dimenzije\n\n def ex_teh_opis(self):\n '''\n TEHNIČNI PODATKI:\n Hladilna moč:\n EER (EN14511 metoda):\n ESEER (EN14511 metoda):\n 
SEER (Reg. EU 2016/2281):\n El.moč:\n El. priključek:\n Zvočni tlak SPL:\n Zvočna moč PWL:\n Število hladilnih krogov:\n Število kompresorjev:\n Dolžina:\n Širina:\n Višina:\n Teža:\n '''\n\n b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n baza = b.cursor()\n baza.execute('''SELECT \"Cooling capacity\" \n FROM COOLING_EUROVENT\n WHERE productID=?''', (self.ex_productID,))\n self.tehnicni_podatki.append(baza.fetchall()[0][0])\n baza.execute('''SELECT \"EER\", \"ESEER\" \n FROM COOLING_EUROVENT\n WHERE productID=?''', (self.ex_productID,))\n for i in baza.fetchall()[0]:\n try:\n ba, de = i.split(',')\n self.tehnicni_podatki.append(ba+','+de[:2])\n except:\n if i:\n self.tehnicni_podatki.append(i)\n else:\n self.tehnicni_podatki.append('-')\n baza.execute('''SELECT \"SEER\"\n FROM SEASONAL_EFF_COOLING\n WHERE productID=?''', (self.ex_productID,))\n i = baza.fetchall()[0][0].split(',')\n try:\n ba, de = i\n self.tehnicni_podatki.append(ba+','+de[:2])\n except:\n self.tehnicni_podatki.append('-')\n baza.execute('''SELECT \"Total power input\" \n FROM COOLING_GROSS\n WHERE productID=?''', (self.ex_productID,))\n # self.tehnicni_podatki.append(baza.fetchall()[0][0])\n baza.execute('''SELECT \"Power supply\"\n FROM GENERAL\n WHERE productID=?''', (self.ex_productID,))\n if '400' in baza.fetchall()[0][0]:\n self.tehnicni_podatki.append('400V/ 3F/ 50Hz')\n else:\n self.tehnicni_podatki.append('230V/ 1F/ 50Hz')\n baza.execute('''SELECT \"Sound Pressure\", \n \"Sound power level in cooling\", \"No. Circuits\", \"Compressors nr.\", \"A\", \"B\", \"H\", \"Operating weight\"\n FROM GENERAL\n WHERE productID=?''', (self.ex_productID,))\n self.tehnicni_podatki.extend([i for i in baza.fetchall()[0]])\n b.close()\n self.t_dol = len(list(filter(None,self.tehnicni_podatki))) + 8\n print(self.tehnicni_podatki)\n return self.tehnicni_podatki\n\n\ndef main2():\n poisci_skupine()\n for sk in skupine:\n dat = ExDatoteka(sk)\n # datoteka se nanasa na skupini agregatov\n for iz in poisci_izvedbe(sk):\n stran = ExStran(iz)\n # dat.zamenjaj_list()\n # dodaj stil strani\n for vel in poisci_velikosti(sk, iz):\n opis = ExOpis(stran, vel)\n opis.ex_teh_opis()\n stran.ex_zapisi_podatke(opis)\n # print(opis.tehnicni_podatki)\n stran.temp_naslovna_vr()\n stran.temp_dimenzioniraj()\n dat.ex_zbrisi_sheet()\n dat.ex_shrani()\n# main2()\n \n# endregion", "sub_path": "CLIMAVENETA/V2/02_beri_bazo_chiller.py", "file_name": "02_beri_bazo_chiller.py", "file_ext": "py", "file_size_in_byte": 10825, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sqlite3.connect", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 62, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 76, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 84, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 134, "usage_type": "call"}, {"api_name": "openpyxl.styles", "line_number": 134, "usage_type": "name"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 136, "usage_type": "call"}, {"api_name": "openpyxl.styles", "line_number": 136, "usage_type": "name"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 137, "usage_type": "call"}, {"api_name": "openpyxl.styles", "line_number": 137, "usage_type": "name"}, {"api_name": 
"openpyxl.styles.Border", "line_number": 138, "usage_type": "call"}, {"api_name": "openpyxl.styles", "line_number": 138, "usage_type": "name"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 140, "usage_type": "call"}, {"api_name": "openpyxl.styles", "line_number": 140, "usage_type": "name"}, {"api_name": "openpyxl.styles.Font", "line_number": 143, "usage_type": "call"}, {"api_name": "openpyxl.styles", "line_number": 143, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 189, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "493343556", "text": "from __future__ import print_function\n\nimport sys\nimport re\nfrom pyspark import SparkContext\nfrom csv import reader\nfrom csv import writer\nfrom StringIO import StringIO\n\n#filter if there is no id key code\n#filter if offense description is '' or key code is ''\n#filter if start,end date and time are ''\n#filter if borough or precint is ''\nnull_indices = (0,6,8,14,7,9,19,20,21,22,23)\nint_indices = (0,6,8,14,19,20)\nfloat_indices = (21,22)\ndef mark_null(line):\n for i in null_indices:\n if line[i] == '':\n line[i] = \"null\"\n return line\n\ndef mark_invalid_int(line):\n for i in int_indices:\n if line[i] == \"null\":\n pass\n else:\n reg = \"^[-]?\\d+$\"\n if re.match(reg, line[i]) is None:\n line[i] = \"invalid\"\n return line\n\ndef mark_invalid_float(line):\n for i in float_indices:\n if line[i] == \"null\":\n pass\n else:\n reg = \"^[-]?\\d+?\\.\\d+?$\"\n if re.match(reg, line[i]) is None:\n line[i] = \"invalid\"\n return line\n\ndef mark_invalid_coord(line):\n if line[23] == \"null\":\n return line\n reg = \"^(\\([-+]?\\d{1,2}[.]\\d+),\\s*([-+]?\\d{1,3}[.]\\d+\\))$\"\n res = re.match(reg, line[23])\n if res is None:\n line[23] = \"invalid\"\n return line\n\ndef mark_invalid(line):\n line = mark_null(line)\n line = mark_invalid_int(line)\n line = mark_invalid_float(line)\n return mark_invalid_coord(line)\n\n\n# def filter_file(line):\n# for index in indices:\n# if line[index] == '':\n# return False\n# return True\n# #return (line[1] == '' and line[2] == '') or (line[3] == '' and line[4] == '')\n\ndef repack(line):\n res = StringIO(\"\")\n writer(res).writerow(line)\n return res.getvalue().strip()\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: bigram \", file=sys.stderr)\n exit(-1)\n sc = SparkContext()\n lines = sc.textFile(sys.argv[1], 1)\n\n lines = lines.mapPartitions(lambda line: reader(line))\\\n .map(lambda line: mark_invalid(line))\\\n .map(lambda line: repack(line))\\\n .saveAsTextFile(\"filter_lines\")\n sc.stop()\n", "sub_path": "clean/mark_null_invalid_columns.py", "file_name": "mark_null_invalid_columns.py", "file_ext": "py", "file_size_in_byte": 2200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "re.match", "line_number": 29, "usage_type": "call"}, {"api_name": "re.match", "line_number": 39, "usage_type": "call"}, {"api_name": "re.match", "line_number": 47, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 67, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 72, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pyspark.SparkContext", "line_number": 75, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": 
"csv.reader", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "262500679", "text": "#!/usr/bin/python3\n\"\"\"This is the file storage class for AirBnB\"\"\"\nimport json\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy import create_engine, MetaData\nfrom models.base_model import Base\nimport os\n\n\nclass DBStorage():\n \"\"\"manager of mysql database\"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(\n os.environ.get(\"HBNB_MYSQL_USER\"),\n os.environ.get(\"HBNB_MYSQL_PWD\"),\n os.environ.get(\"HBNB_MYSQL_HOST\"),\n os.environ.get(\"HBNB_MYSQL_DB\")),\n pool_pre_ping=True)\n if (os.environ.get(\"HBNB_MYSQL_USER\") == \"test\"):\n Base.metadata.drop_all(bind=self.__engine)\n\n def all(self, cls=None):\n \"\"\"\n all\n \"\"\"\n current = []\n objects = {}\n my_tables = {'cities': 'City', 'states': 'State', 'users': 'User',\n 'amenities': 'Amenity', 'places': 'Place',\n 'reviews': 'Review'}\n if cls:\n if type(cls) == str:\n current = self.__session.query(eval(cls)).all()\n else:\n current = self.__session.query(cls).all()\n else:\n tables = self.__engine.table_names()\n for table in tables:\n current.append(self.__session.query(\n eval(my_tables[table])).all())\n for obj in current:\n if type(obj) == list:\n for o in obj:\n key = \"{}.{}\".format(o.__class__.__name__, o.id)\n objects[key] = o\n else:\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n objects[key] = obj\n return objects\n\n def new(self, obj):\n \"\"\"sets __object to given obj\n Args:\n obj: given object\n \"\"\"\n\n self.__session.add(obj)\n self.save()\n\n def save(self):\n \"\"\"serialize the file path to JSON file path\n \"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\" doc \"\"\"\n if obj:\n self.__session.delete(obj)\n self.save()\n\n def reload(self):\n \"\"\"serialize the file path to JSON file path\n \"\"\"\n Base.metadata.create_all(bind=self.__engine)\n Session = scoped_session(sessionmaker(bind=self.__engine,\n expire_on_commit=False))\n self.__session = Session()\n\n def close(self):\n \"\"\" doc \"\"\"\n self.__session.close()\n", "sub_path": "models/engine/db_storage.py", "file_name": "db_storage.py", "file_ext": "py", "file_size_in_byte": 2877, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 30, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.base_model.Base.metadata.drop_all", "line_number": 
31, "usage_type": "call"}, {"api_name": "models.base_model.Base.metadata", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.base_model.Base", "line_number": 31, "usage_type": "name"}, {"api_name": "models.base_model.Base.metadata.create_all", "line_number": 85, "usage_type": "call"}, {"api_name": "models.base_model.Base.metadata", "line_number": 85, "usage_type": "attribute"}, {"api_name": "models.base_model.Base", "line_number": 85, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.scoped_session", "line_number": 86, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "50035909", "text": "# built-in libraries\nimport collections\nimport functools\nimport itertools\nimport json\nimport logging\n\n# external libraries\n# ...\n\n# internal libraries\n# ...\n\n# exports\n__all__ = (\"coroutine\", \"default\", \"object_hook\")\n\n# constants\nCLOUD = {} # image catelog\nSTONE = {} # type catelog\n\n\ndef coroutine(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n gen = func(*args, **kwargs)\n next(gen)\n return gen\n wrapper.__name__ = func.__name__\n wrapper.__dict__ = func.__dict__\n wrapper.__doc__ = func.__doc__\n return wrapper\n\n\ndef default(obj):\n \"\"\"Return a serializable version of `object`\"\"\"\n try:\n return next({key: type.default(obj)}\n for ((key, cls), type)\n in STONE.items()\n if isinstance(obj, cls))\n except StopIteration:\n raise TypeError\n\n\ndef object_hook(dct):\n \"\"\"Return value instead of the `dict`\"\"\"\n return next((type.object_hook(obj)\n for ((key, cls), type)\n in STONE.items()\n if key in dct), dct)\n\n\nclass Type(collections.namedtuple\n (\"Type\", (\"default\", \"object_hook\"))):\n\n def __new__(cls, key, type, default, object_hook):\n obj = super(Type, cls).__new__(cls, default, object_hook)\n STONE[key, type] = obj\n return obj\n\n\nclass Event(object):\n __slots__ = (\"cbs\",)\n\n def __init__(self, cbs=None):\n self.cbs = cbs or []\n\n\nclass Item(collections.namedtuple\n (\"Item\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, data, ctrl):\n ctrl = {key: Event()\n for key in ctrl}\n return super(Item, cls).__new__(cls, data, ctrl)\n \n\nclass Mask(collections.namedtuple\n (\"Mask\", (\"gets\", \"sets\"))):\n pass\n\n\nclass Mode(collections.namedtuple\n (\"Mode\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, ins, outs, reqs, pros):\n data = Mask(reqs, pros)\n ctrl = Mask(ins, outs)\n return super(Mode, cls).__new__(cls, data, ctrl)\n\n\nclass Node(collections.namedtuple\n (\"Node\", (\"init\", \"main\"))):\n\n def __new__(cls, evs, args, ins, reqs, outs, pros):\n init = Mode(evs, (), args, ())\n main = Mode(ins, outs, reqs, pros)\n return super(Node, cls).__new__(cls, init, main)\n\n\nclass Edge(collections.namedtuple\n (\"Edge\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, data=None, ctrl=None):\n data = data or {}\n ctrl = ctrl or {}\n return super(Edge, cls).__new__(cls, data, ctrl)\n \n\nclass Face(collections.namedtuple\n (\"Face\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, node, edge, item):\n data = iterdata(node, edge, item)\n ctrl = iterctrl(node, edge, item)\n return super(Face, cls).__new__(cls, data, ctrl)\n\n@coroutine\ndef iterdata(node, edge, item):\n pros = yield\n \n # XXX takes advantage of there currently being no `data` provided\n # ... 
while in `init` mode\n mode = node.init\n if len(mode.data.gets) > 0:\n reqs = (logging.debug(\"get data: %s\", key)\n or item.data.get(edge.data.get(key, key))\n for key in mode.data.gets)\n pros = yield reqs\n \n mode = node.main\n while True:\n if len(mode.data.gets) > 0:\n reqs = (logging.debug(\"get data: %s\", key)\n or item.data.get(edge.data.get(key, key))\n for key in mode.data.gets)\n pros = yield reqs\n \n if len(mode.data.sets) > 0:\n item.data.update({edge.data.get(key, key):\n logging.debug(\"set data: %s=%s\", key, pro)\n or pro\n for key, pro\n in zip(mode.data.sets, pros)\n if pro is not None}\n if pros is not None\n else {})\n pros = yield\n\n\n@coroutine\ndef iterctrl(node, edge, item):\n yield\n \n # XXX takes advantage of there currently being no `ctrl` output\n # ... while in `init` mode\n mode = node.init\n ins = (logging.debug(\"get ctrl: %s\", key)\n or item.ctrl.get(edge.ctrl.get(key, key))\n for key in mode.ctrl.gets)\n outs = yield ins # always called\n \n mode = node.main\n while True:\n if len(mode.ctrl.gets) > 0:\n ins = (logging.debug(\"get ctrl: %s\", key)\n or item.ctrl.get(edge.ctrl.get(key, key))\n for key in mode.ctrl.gets)\n outs = yield ins\n \n if len(mode.ctrl.sets) > 0:\n evs = (((logging.debug(\"set ctrl: %s=%s\", key, out)\n or item.ctrl.get(edge.ctrl.get(key, key)), out)\n for key, out in zip(mode.ctrl.sets, outs)\n if out is not None)\n if outs is not None\n else ())\n outs = yield evs\n\n \nclass Task(collections.namedtuple\n (\"Task\", (\"p\", \"gen\"))):\n pass\n\n\nclass Image(object):\n __slots__ = (\"tag\", \"nodes\", \"proc\")\n\n def __init__(self, tag, **nodes):\n self.tag = tag\n self.nodes = nodes\n\n def __call__(self, func):\n func = coroutine(func)\n \n @coroutine\n @functools.wraps(func)\n def wrapper(**args):\n yield\n try:\n logging.debug(\"exec %s init\", self.tag)\n gen = func(**args) # create generator\n evs = yield\n while True:\n logging.debug(\"exec %s main\", self.tag)\n yield gen.send(evs)\n except StopIteration:\n return\n finally:\n pass\n \n self.proc = wrapper\n CLOUD[self.tag] = self\n return wrapper\n\n\ndef run(task, model):\n img = CLOUD[task[\"tag\"]]\n faces = {arg: Face(node,\n Edge(**task[\"maps\"].get(arg, {})),\n model[task[\"keys\"][arg]])\n for (arg, node)\n in img.nodes.items()}\n gen = img.proc(**faces)\n obj = Task(task[\"p\"], gen)\n any(ev.cbs.append(obj)\n for face in faces.values()\n for ev in next(face.ctrl) or ())\n return obj\n\n\n", "sub_path": "ouroboros/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6271, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "functools.wraps", "line_number": 23, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 69, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 78, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 83, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 92, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 101, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 126, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 134, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 141, "usage_type": "call"}, 
{"api_name": "logging.debug", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 166, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 172, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 181, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 205, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "626635507", "text": "#!/usr/bin/env python3\nimport os\nimport json\nimport difflib\nfrom typing import Dict, List\n\ncwd = os.getcwd()\nmisc_file = cwd + '/json/character_misc.json'\n\nshortcuts = {\n 'cd' : 'fnddf',\n 'wr' : 'fff',\n 'hcf' : 'bdbddff',\n 'hcb' : 'fdfddbb',\n 'qcf' : 'ddff',\n 'qcb' : 'ddbb'\n}\n\ndef get_character(char_name : str):\n contents = None\n\n with open(misc_file) as char_misc_file:\n contents = char_misc_file.read()\n contents = json.loads(contents)\n\n if contents != None:\n char_details = filter_dictionary('name', char_name, contents)\n\n if char_details:\n return char_details\n else:\n names = dict_key_to_list('name', contents)\n\n guessed_char = difflib.get_close_matches(char_name, names, n=2, cutoff=0.4)\n\n if guessed_char:\n guessed_char = guessed_char[0]\n\n to_return_char = filter_dictionary('name', guessed_char, contents)\n\n if to_return_char:\n return to_return_char\n else:\n return None\n else:\n return None\n\n return None\n\ndef get_move(character_json : str, char_move : str) -> Dict[str, str]:\n char_move_list = None\n char_json = cwd + '/json/' + character_json\n \n with open(char_json, 'r', encoding=\"utf8\") as char_json_file:\n char_move_list = char_json_file.read()\n char_move_list = json.loads(char_move_list)\n\n if char_move_list != None:\n if char_move == \"ra\":\n to_return_move = filter_dictionary('Name', 'Rage Art', char_move_list)\n return to_return_move\n\n if char_move == \"rd\":\n to_return_move = filter_dictionary('Name', 'Rage Drive', char_move_list)\n return to_return_move\n\n char_move = replace_shortcuts(char_move)\n\n to_return_move = filter_dictionary('Command', char_move, char_move_list)\n\n if to_return_move:\n return to_return_move\n else:\n move_inputs = dict_key_to_list('Command', char_move_list)\n\n guessed_move = difflib.get_close_matches(char_move, move_inputs, n=2, cutoff=0.4)\n\n if guessed_move:\n guessed_move = guessed_move[0]\n\n to_return_move = filter_dictionary('Command', guessed_move, char_move_list)\n\n if to_return_move:\n return to_return_move\n else:\n return None\n else:\n return None\n\ndef filter_dictionary(to_compare_key : str, to_compare_value : str, dictionary : List[dict]) -> Dict[str, str]:\n to_return_item : dict = None\n value_split = to_compare_value.split(' ')\n\n # First check if move is equal to move list item, then if move is contained in move list item\n for item in dictionary:\n item_clean = item[to_compare_key].lower().strip().replace(' ', '')\n \n if len(value_split) == 1:\n value_clean = to_compare_value.lower().strip().replace(' ', '')\n\n if item_clean == value_clean:\n to_return_item = item\n break\n else:\n value_clean = [value.lower().strip().replace(' ','') for value in value_split]\n\n if all(value in item_clean for value in value_clean):\n to_return_item = item\n break\n\n if to_return_item != None:\n return to_return_item\n else:\n for item in dictionary:\n item_clean = item[to_compare_key].lower().strip().replace(' ', '')\n\n if len(value_split) 
== 1:\n value_clean = to_compare_value.lower().strip().replace(' ', '')\n\n if value_clean in item_clean:\n to_return_item = item\n break\n else:\n value_clean = [value.lower().strip().replace(' ','') for value in value_split]\n\n if all(value in item_clean for value in value_clean):\n to_return_item = item\n break\n \n return to_return_item\n\ndef replace_shortcuts(char_move : str) -> str:\n for key, item in shortcuts.items():\n if char_move == key or char_move.__contains__(key):\n return char_move.replace(key, item)\n return char_move\n\ndef dict_key_to_list(key : str, dictionary : Dict[str, str]) -> List[str]:\n to_return_list = []\n for item in dictionary:\n to_add_item = item[key].lower().strip().replace(' ', '')\n to_return_list.append(to_add_item)\n\n return to_return_list", "sub_path": "infofinder.py", "file_name": "infofinder.py", "file_ext": "py", "file_size_in_byte": 4547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 56, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 76, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 90, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 90, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "41922044", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n#-----------------------------------------------------------------------------\n# Name: Script phân loại file theo định dạng và dung lượng\n#\n# Purpose:\n#\n# Version: 1.1\n#\n# Author:\n#\n# Created: 06/02/2020\n# Updated: 06/02/2020\n#\n# Copyright: -\n#\n#-----------------------------------------------------------------------------\n\"\"\"\n\n#Import lib\nimport os, re, fnmatch, shutil\nfrom datetime import datetime\nimport pyunpack #pip install pyunpack\n\nclass AnalyzeFolder:\n\n def __init__(self, input_folder, output_folder, pattern, size_group, limit_size_copy = -1):\n self.input_folder = os.path.normpath(input_folder)\n self.output_folder = os.path.normpath(output_folder)\n self.pattern = pattern\n self.size_group = size_group\n self.size_group.sort()\n self.limit_size_copy = limit_size_copy\n \n if not os.path.exists(self.input_folder):\n print(self.input_folder)\n print(\"Folder input not exists\")\n exit()\n\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n\n #Check size file and create folder\n def get_file_size(self, file_path):\n return os.path.getsize(file_path)\n\n #Get list file\n def prepare_data_file(self, target_files, nontarget_files):\n print(\"Prepare data file : Waiting\")\n try:\n for path, subdirs, files in os.walk(self.input_folder):\n for name in files:\n file_path = os.path.join(path, name)\n if re.search(self.pattern, name):\n size_byte = self.get_file_size(file_path)\n size_mb = size_byte/1024/1024 #Convert Byte to Megabyte\n target_files[file_path] = size_mb\n print(\"Prepare data file : Indexing {0}\".format(file_path))\n else:\n nontarget_files[file_path] = \"-\"\n print(\"Prepare data file : Complete\")\n return True\n except:\n 
return False\n\n def create_and_copy_file(self, dicFile):\n print(\"Copy file : Waiting\")\n limit_copy = self.limit_size_copy * 1024 * 1024 #Byte\n current_copy = 0 #Byte\n for file, size in dicFile.items():\n size_byte = size * 1024 * 1024 #Byte\n iter_size_group = iter(self.size_group)\n next(iter_size_group)\n for limit in size_group:\n limit_end = next(iter_size_group, \"\")\n #print(\"-- {0} ~ {1}\".format(limit, limit_end))\n if size >= limit and (limit_end == \"\" or size < limit_end):\n\n #Check limited copy setting\n if limit_copy > 0 and (limit_copy - size_byte) > 0:\n limit_copy = limit_copy - size_byte\n current_copy = current_copy + size_byte\n #print(limit_copy)\n elif self.limit_size_copy != -1:\n print(\"Stop copy because the next file is {2}MB, but setting copy limit is {0}/{1}MB\".format(round(current_copy/1024/1024, 0), self.limit_size_copy, round(size, 0)))\n print(file)\n exit()\n\n org_file_name = os.path.basename(file)\n filename, file_extension = os.path.splitext(org_file_name)\n \n #Create folder storage\n folder_name = \"Size {0}MB ~ {1}MB\".format(limit, limit_end)\n if limit_end == \"\":\n folder_name = \"Size {0}MB ~\".format(limit) \n folder_path = os.path.join(self.output_folder, folder_name)\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n #Create folder extension\n if file_extension == \"\":\n file_extension = \"non_extension_files\"\n folder_path = os.path.join(folder_path, file_extension)\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n #Copy file\n random_file_name = \"File_\" + datetime.now().strftime('%Y%m%d%H%M%S%f')+ \"_(Duplicate with {0})\".format(org_file_name) + file_extension\n file_dist_random_path = os.path.join(folder_path, random_file_name)\n file_dist_org_path = os.path.join(folder_path, org_file_name)\n\n folder_extract_to = \"\"\n if os.path.exists(file):\n if not os.path.exists(file_dist_org_path):\n shutil.copyfile(file, file_dist_org_path)\n folder_extract_to = file_dist_org_path\n print(\"Created file {0}\".format(file_dist_org_path))\n else:\n shutil.copyfile(file, file_dist_random_path)\n folder_extract_to = file_dist_random_path\n print(\"Created file {0}\".format(file_dist_random_path))\n\n #if file_extension == \".zip\":\n filename, file_extension = os.path.splitext(folder_extract_to)\n folder_extract_to = filename\n if not os.path.exists(folder_extract_to):\n os.makedirs(folder_extract_to)\n try:\n #with zipfile.ZipFile(file, 'r') as zip_ref:\n # zip_ref.extractall(folder_extract_to)\n pyunpack.Archive(file).extractall(folder_extract_to)\n pyunpack.extract_archive(file, folder_extract_to)\n print(\"Extracted zip : {0}\".format(file))\n except:\n os.rmdir(folder_extract_to)\n print(\"Extract fail : {0}\".format(file))\n \n break\n print(\"Copy file : Complete\")\n\n \n def analyze(self):\n target_files = {}\n nontarget_files = {}\n result_get_list = self.prepare_data_file(target_files, nontarget_files)\n\n if result_get_list == True:\n #print(target_files)\n #print(nontarget_files)\n self.create_and_copy_file(target_files)\n else:\n print(\"Có lỗi xảy ra.\")\n\n#Start application\nfolder_input = \"X:\\\\01-WIN10-TMP\\\\Desktop\\\\testPY\\\\in\"\nfolder_output = \"X:\\\\01-WIN10-TMP\\\\Desktop\\\\testPY\\\\out\"\npattern_file = \"^.*.*$\"\nsize_group = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 100, 200, 400, 500, 1000] #MB\nlimit_copy_size = -1 #MB (-1 is unlimited)\ntest = AnalyzeFolder(folder_input, folder_output, pattern_file, size_group, 
limit_copy_size)\ntest.analyze()\nexit()\n", "sub_path": "File_Filter.py", "file_name": "File_Filter.py", "file_ext": "py", "file_size_in_byte": 6963, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.path.normpath", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 110, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 117, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.makedirs", 
"line_number": 129, "usage_type": "call"}, {"api_name": "pyunpack.Archive", "line_number": 133, "usage_type": "call"}, {"api_name": "pyunpack.extract_archive", "line_number": 134, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "62323617", "text": "# Что такое генератор?\nimport sys\n\n# миллион элементов в списке занимает память\nnums = []\nfor num in range(1, 10 ** 6 + 1, 2):\n nums.append(num ** 2)\nprint(type(nums), sys.getsizeof(nums))\n\n# миллион объектов не выдаются разом, поэтому памяти занимает минимум\nnums_gen = (num ** 2 for num in range(1, 10 ** 6 + 1, 2))\nprint(type(nums_gen), sys.getsizeof(nums_gen))\n\n\n# профилируем чтобы понять не в ущерб ли используем генератор\nfrom time import perf_counter\n\nstart = perf_counter()\nnums_sum = sum(nums)\nprint(nums_sum, perf_counter() - start)\n\nstart = perf_counter()\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum, perf_counter() - start)\n\n\n# второй пример профилирования - в чём разница?\nfrom time import perf_counter\n\nstart = perf_counter()\nnums = []\nfor num in range(1, 10 ** 5 + 1, 2):\n nums.append(num ** 2)\nnums_sum = sum(nums)\nprint(nums_sum, perf_counter() - start)\n\nstart = perf_counter()\nnums_gen = (num ** 2 for num in range(1, 10 ** 5 + 1, 2))\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum, perf_counter() - start)\n\n\n# t = list(1, 2, 3, 4, 5, 6)[left:right:step]\n\n# генераторы не поддерживают слайсы\nnums = []\nfor num in range(1, 10 ** 6 + 1, 2):\n nums.append(num ** 2)\n\nnums_gen = (num ** 2 for num in range(1, 10 ** 6 + 1, 2))\n\nprint(nums[:3])\nprint(next(nums_gen), next(nums_gen), next(nums_gen), sep=', ')\n\n\n# но мы можем получить следующие несколько значений - генератор «помнит своё состояние»\nfrom itertools import islice\n\nprint(*islice(nums_gen, 3))\nprint(*islice(nums_gen, 3))\n\n# перерыв\n\n# генераторы одноразовые\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum)\n\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum)\n\n\ndef letters_generator(start, end):\n for code in range(ord(start), ord(end) + 1):\n yield chr(code)\n print('end func generator')\n\n\neng_uppercase_letters = letters_generator('A', 'Z')\nprint(*eng_uppercase_letters, sep='')\n\n# List Comprehensions - не является генератором\nnums_cube = [num ** 3 for num in range(5 + 1)]\nprint(type(nums_cube), *nums_cube)\n\nweather_data = [\n [-17.5, -18.9, -21.0, -16.1],\n [-9.3, -11.7, -14.3, -15.8],\n]\nflat_weather_data = [el for row in weather_data for el in row if el > -19]\nprint(flat_weather_data)\n\n\n# Dict Comprehensions\neng_ru_nums = {'one': 'один', 'first': 'один', 'two': 'два'}\nru_eng_nums = {val: key for key, val in eng_ru_nums.items()}\nprint(ru_eng_nums)\n\n\n# Множества в Python (Хэш-таблицы)\nbasket = ['apple', 'dell', 'samsung', 'apple', 'huawei', 'asus', 'samsung']\nunique_brands = [el for el in basket if basket.count(el) == 1]\nprint(unique_brands)\n\nunique_brands = set()\ntmp = set()\nfor el in basket:\n if el not in tmp:\n unique_brands.add(el)\n else:\n unique_brands.discard(el)\n tmp.add(el)\nprint(unique_brands)\n\n\n# сохранение последовательности элементов\nunique_brands_ord = [el for el in basket if el in unique_brands]\nprint(unique_brands_ord)\n\n\n# ещё методы множества\nchat_1 = {'user_1', 'user_5', 'user_7', 'user_8', 'user_11'}\nchat_2 = {'user_1', 'user_2', 'user_2', 'user_7', 'user_9', 'user_10'}\n# пересечения по множествам\nchats_common = chat_1.intersection(chat_2)\nprint(chats_common) # {'user_1', 'user_7'}\nprint(chat_1 & 
chat_2)\n\n\n# только пользователи конкретного чата\nchat_1_only = chat_1 - chat_2\nchat_2_only = chat_2 - chat_1\nprint(chat_1_only) # {'user_11', 'user_5', 'user_8'}\nprint(chat_1.difference(chat_2))\nprint(chat_2_only) # {'user_9', 'user_10', 'user_2'}\nprint(chat_2.difference(chat_1))\n# объединение пользователей двух множеств\nboth_chats = chat_1.union(chat_2)\nprint(both_chats)\nprint(chat_1 | chat_2)\n\n# Снова множества — frozenset\nchat_1 = frozenset(('user_1', 'user_5', 'user_7', 'user_8', 'user_11'))\nchat_2 = frozenset(('user_1', 'user_2', 'user_2', 'user_7', 'user_9'))\n\nchats_common = chat_1.intersection(chat_2)\nprint(chats_common)\n\n\n# Set Comprehensions\nimport random\n\nrandom_nums = {random.randint(1, 100) for _ in range(10)}\nprint(len(random_nums), random_nums)\n\n\n\nprint('end')", "sub_path": "Shishkin_Anatoliy_lesson_5/code_5.py", "file_name": "code_5.py", "file_ext": "py", "file_size_in_byte": 4501, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.getsizeof", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 12, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 18, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 20, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 22, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 24, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 30, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 35, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 37, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 40, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 59, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 60, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "262908337", "text": "#!/usr/bin/python3\n\"\"\"\nUnittest for base module\n\"\"\"\nimport io\nimport unittest\nimport unittest.mock\nimport json\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\n\n\nclass Test_Base(unittest.TestCase):\n \"\"\" Tests for Base Class \"\"\"\n\n def setUp(self):\n Base._Base__nb_objects = 0\n\n def test_base_id(self):\n b1 = Base()\n self.assertEqual(b1.id, 1)\n b2 = Base()\n self.assertEqual(b2.id, 2)\n b3 = Base()\n self.assertEqual(b3.id, 3)\n b4 = Base(12)\n self.assertEqual(b4.id, 12)\n b5 = Base()\n self.assertEqual(b5.id, 4)\n\n def test_base_type(self):\n b1 = Base()\n self.assertTrue(type(b1) is Base)\n\n @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)\n def test_base_json_string(self, mock_stdout):\n r1 = Rectangle(10, 7, 2, 8)\n dictionary = r1.to_dictionary()\n self.assertDictEqual(dictionary, {'x': 2, 'y': 8, 'id': 1, 'height': 7,\n 'width': 10})\n json_dictionary = Base.to_json_string([dictionary])\n self.assertEqual(str([dictionary]).replace(\"'\", '\"'), json_dictionary)\n json_empty = Base.to_json_string([])\n self.assertEqual(str([]), json_empty)\n json_none = Base.to_json_string(None)\n self.assertEqual(str([]), json_none)\n print(type(dictionary))\n print(type(json_dictionary))\n self.assertEqual(mock_stdout.getvalue(),\n \"\"\"\n\n\"\"\")\n\n def test_base_save_json(self):\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n Rectangle.save_to_file([r1, r2])\n with 
open(\"Rectangle.json\", \"r\") as f:\n data = json.load(f)\n a = [r1.to_dictionary(), r2.to_dictionary()]\n self.assertEqual(a, data)\n\n def test_base_string_json(self):\n list_input = [\n {'id': 89, 'width': 10, 'height': 4},\n {'id': 7, 'width': 1, 'height': 7}\n ]\n json_list_input = Rectangle.to_json_string(list_input)\n list_output = Rectangle.from_json_string(json_list_input)\n self.assertEqual(list_input, list_output)\n\n @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)\n def test_base_create(self, mock_stdout):\n r1 = Rectangle(3, 5, 1)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle.create(**r1_dictionary)\n print(r1)\n print(r2)\n print(r1 is r2)\n print(r1 == r2)\n s1 = Square(3)\n s1_dictionary = s1.to_dictionary()\n s2 = Square.create(**s1_dictionary)\n print(s1)\n print(s2)\n print(s1 is s2)\n print(s1 == s2)\n self.assertEqual(mock_stdout.getvalue(),\n \"\"\"[Rectangle] (1) 1/0 - 3/5\n[Rectangle] (1) 1/0 - 3/5\nFalse\nFalse\n[Square] (3) 0/0 - 3\n[Square] (3) 0/0 - 3\nFalse\nFalse\n\"\"\")\n\n def test_base_file(self):\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n list_rectangles_input = [r1, r2]\n Rectangle.save_to_file(list_rectangles_input)\n list_rectangles_output = Rectangle.load_from_file()\n self.assertNotEqual(list_rectangles_input, list_rectangles_output)\n d1 = [i.to_dictionary() for i in list_rectangles_input]\n d2 = [i.to_dictionary() for i in list_rectangles_output]\n self.assertEqual(d1, d2)\n\n def tearDown(self):\n pass\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "0x0C-python-almost_a_circle/tests/test_models/test_base.py", "file_name": "test_base.py", "file_ext": "py", "file_size_in_byte": 3510, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.base.Base._Base__nb_objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.base.Base", "line_number": 18, "usage_type": "name"}, {"api_name": "models.base.Base", "line_number": 21, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 23, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 25, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 27, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 29, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 33, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 34, "usage_type": "name"}, {"api_name": "models.rectangle.Rectangle", "line_number": 38, "usage_type": "call"}, {"api_name": "models.base.Base.to_json_string", "line_number": 42, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 42, "usage_type": "name"}, {"api_name": "models.base.Base.to_json_string", "line_number": 44, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 44, "usage_type": "name"}, {"api_name": "models.base.Base.to_json_string", "line_number": 46, "usage_type": "call"}, {"api_name": "models.base.Base", "line_number": 46, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 36, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 36, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.rectangle.Rectangle", "line_number": 56, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 57, 
"usage_type": "call"}, {"api_name": "models.rectangle.Rectangle.save_to_file", "line_number": 58, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 58, "usage_type": "name"}, {"api_name": "json.load", "line_number": 60, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle.to_json_string", "line_number": 69, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 69, "usage_type": "name"}, {"api_name": "models.rectangle.Rectangle.from_json_string", "line_number": 70, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 70, "usage_type": "name"}, {"api_name": "models.rectangle.Rectangle", "line_number": 75, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle.create", "line_number": 77, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 77, "usage_type": "name"}, {"api_name": "models.square.Square", "line_number": 82, "usage_type": "call"}, {"api_name": "models.square.Square.create", "line_number": 84, "usage_type": "call"}, {"api_name": "models.square.Square", "line_number": 84, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 73, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 73, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.rectangle.Rectangle", "line_number": 101, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 102, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle.save_to_file", "line_number": 104, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 104, "usage_type": "name"}, {"api_name": "models.rectangle.Rectangle.load_from_file", "line_number": 105, "usage_type": "call"}, {"api_name": "models.rectangle.Rectangle", "line_number": 105, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "533097867", "text": "\n#!/usr/bin/python\nimport paho.mqtt.client as mqtt\n# get the credentials of the devises\nfrom not_show import intro_json, UPDATE_TOPICS, MQTT_CREDENTIALS\nimport sys\nimport os\nimport json\nimport zlib\nimport hashlib\nimport multiprocessing\nimport time\nimport base64\nimport ssl\n\"\"\"\nFor converting to windows:\n pip install pyinstaller\nGo to your program’s directory and run:\n pyinstaller yourprogram.py\n\"\"\"\n\n# The parameters introduce by the user:\nbusiness = \"\" # The name of the enterprise to make the update\ndevise = \"\" # The model, or ESP32 or ATMEGA\nupdate_file_name = \"\" # The file direction of the update\n\n# Check if the update is possible:\nif (len(sys.argv) != 4):\n print(\"Invalid number of parameters !!\")\n sys.exit()\nelse:\n business = str(sys.argv[1]).lower()\n devise = str(sys.argv[2]).lower()\n update_file_name = str(sys.argv[3])\n # print(update_file_name)\n # Check if it makes sense the words introduce:\n if (business == 'infrico' or business == 'solidy'):\n if (devise == 'esp32' or devise == 'atmega'):\n print(\"Comienza el proceso de update...\")\n else: \n print(\"Invalid parameters!!\")\n else:\n print(\"Invalid parameters!!\")\n sys.exit()\n\n# First I open the file:\nfile = open(update_file_name, \"rb\")\nupdate_bin = file.read()\n# Obtein the md5 checksum:\nmd5_value = hashlib.md5(update_bin).hexdigest()\n# I create the string file and compress to zlib:\nupdate_zlib = zlib.compress(update_bin, level=9)\nupdate_zlib_encoded = 
base64.b64encode(update_zlib)\n# and close the file\nfile.close()\n\n\n# and close the file\n\ndef received_message(mqttc, obj, msg):\n # I get the version of the firmware in order to increase when I finish the update\n intro_info = str(msg.payload)\n offset = intro_info.find('firmware') + len('firmware\": \"')\n version = ''\n for i in range(0, 8):\n version += intro_info[offset + i]\n version = int(version[5:])\n # Copy the current version and increase 1 time\n intro_json['firmware'] =intro_json['firmware'][:5] + str(version + 1)\n # Fill the json correctly:\n intro_json['md5'] = md5_value\n intro_json['model'] = devise\n topic_to_update = UPDATE_TOPICS['esp32_intro'].replace('x', business)\n json_msg = json.dumps(intro_json)\n mqttc.unsubscribe(topic_to_update, 0)\n # I publish the new version json and then the version \n print('Increase the version')\n mqttc.publish(UPDATE_TOPICS['esp32_bin'].replace('x', business), update_zlib_encoded, 0, True)\n print('Publish the firmware in /bin ')\n mqttc.publish(topic_to_update, json_msg, 0, True)\n\n# Configure the mqtt client:\nmqttc = mqtt.Client()\nmqttc.on_message = received_message\n# I connect to broker and set all the permisions:\nmqttc.tls_set('C:/Users/Asus/Desktop/ESP_32_noob/MQTT_auto_replay/ca.crt', tls_version=ssl.PROTOCOL_TLSv1_2)\nmqttc.tls_insecure_set(True)\nmqttc.username_pw_set(MQTT_CREDENTIALS['USER'],MQTT_CREDENTIALS['PASS'])\nmqttc.connect(MQTT_CREDENTIALS['HOST'], 8883, 60)\nmqttc.publish('Start', '{\"Start_ALL\":1}', 0, False)\n\n# I generate the first update json:\nmqttc.subscribe(UPDATE_TOPICS['esp32_intro'].replace('x', business), 0)\nmqttc.loop_forever()\n", "sub_path": "MQTT_auto_replay/mqtt.py", "file_name": "mqtt.py", "file_ext": "py", "file_size_in_byte": 3211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 50, "usage_type": "call"}, {"api_name": "zlib.compress", "line_number": 52, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 53, "usage_type": "call"}, {"api_name": "not_show.intro_json", "line_number": 69, "usage_type": "name"}, {"api_name": "not_show.intro_json", "line_number": 71, "usage_type": "name"}, {"api_name": "not_show.intro_json", "line_number": 72, "usage_type": "name"}, {"api_name": "not_show.UPDATE_TOPICS", "line_number": 73, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "not_show.intro_json", "line_number": 74, "usage_type": "argument"}, {"api_name": "not_show.UPDATE_TOPICS", "line_number": 78, "usage_type": "name"}, {"api_name": "paho.mqtt.client.Client", "line_number": 83, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 83, "usage_type": "name"}, {"api_name": "ssl.PROTOCOL_TLSv1_2", "line_number": 86, "usage_type": "attribute"}, {"api_name": "not_show.MQTT_CREDENTIALS", "line_number": 88, "usage_type": "name"}, {"api_name": "not_show.MQTT_CREDENTIALS", "line_number": 89, "usage_type": "name"}, {"api_name": "not_show.UPDATE_TOPICS", "line_number": 93, "usage_type": "name"}]} 
+{"seq_id": "228637968", "text": "\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport sys\nimport msgpack\nimport json\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom autolab_core import RigidTransform\nimport os\nimport cv2\nimport imageio\n\n\n\n\n\ndef main(bin_fn, video_path, dest_fn, plot):\n\n # Read file as binary and unpack data using MessagePack library\n with open(bin_fn, \"rb\") as f:\n data = msgpack.unpackb(f.read(), use_list=False, raw=False)\n\n # The point data is tagged \"landmarks\"\n key_frames = data[\"keyframes\"]\n\n print(\"Point cloud has {} points.\".format(len(key_frames)))\n\n key_frame = {int(k): v for k, v in key_frames.items()}\n\n if plot:\n x = []\n y = []\n z = []\n t = []\n for key in sorted(key_frame.keys()):\n point = key_frame[key]\n trans_cw = np.asarray(point[\"trans_cw\"])\n rot_cw = np.asarray(point[\"rot_cw\"])\n\n rigid_cw = RigidTransform(rot_cw, trans_cw)\n\n pos = np.matmul(rigid_cw.rotation, trans_cw)\n\n x.append(pos[0])\n y.append(pos[1])\n z.append(pos[2])\n t.append(float(point[\"ts\"]))\n\n\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.scatter(x, z)\n plt.show()\n\n plt.ylabel('Height')\n plt.xlabel('Time')\n plt.scatter(x=t, y=y)\n print(t)\n\n\n # # new a figure and set it into 3d\n fig = plt.figure()\n\n plt.show()\n\n \n else:\n # Write point coordinates into file, one point for one line\n with open(dest_fn, \"w\") as f:\n video_name = video_path.split(\"/\")[-1][:-4]\n if not os.path.exists(video_name):\n os.mkdir(video_name)\n\n vidcap = cv2.VideoCapture(video_path)\n fps = int(vidcap.get(cv2.CAP_PROP_FPS)) + 1\n print(fps)\n count = 0\n\n for key in sorted(key_frame.keys()):\n point = key_frame[key]\n\n # position capture\n trans_cw = np.asarray(point[\"trans_cw\"])\n rot_cw = np.asarray(point[\"rot_cw\"])\n\n rigid_cw = RigidTransform(rot_cw, trans_cw)\n\n pos = np.matmul(rigid_cw.rotation, trans_cw)\n\n f.write(\"{}, {}, {}\\n\".format(pos[0], pos[1], pos[2]))\n\n vidcap.set(cv2.CAP_PROP_POS_FRAMES, fps * float(point[\"ts\"]))\n\n\n # image capture\n success, image = vidcap.read()\n\n if not success:\n print(\"capture failed\")\n else:\n cv2.imwrite(os.path.join(video_name, str(count) +\".jpg\"), image)\n\n count+=1\n\n\n\n\n print(\"Finished\")\n\n\nif __name__ == \"__main__\":\n\n bin_fn = '/home/paulo/catkin_ws/openvslam/build/mapa_direita.msg'\n video_path = '/home/paulo/catkin_ws/openvslam/build/testes/direita.MP4'\n dest_fn = '/home/paulo/catkin_ws/openvslam/build/try'\n main(bin_fn, video_path,dest_fn, plot=False)\n", "sub_path": "util/map_location_extractor.py", "file_name": "map_location_extractor.py", "file_ext": "py", "file_size_in_byte": 3042, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "msgpack.unpackb", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 40, "usage_type": "call"}, {"api_name": "autolab_core.RigidTransform", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.scatter", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 86, "usage_type": "call"}, {"api_name": "autolab_core.RigidTransform", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 94, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}]} +{"seq_id": "8063044", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport select\nimport h5py\nfrom scipy import sparse\nimport time\n\nclass Util:\n \n \n def softmax(self, X):\n '''numerically stable softmax function\n '''\n max_row_values = np.matrix(np.max(X,axis=1)).T\n result = np.exp(X - max_row_values)\n sums = np.matrix(np.sum(result,axis=1)) \n return result/sums\n \n \n def evolutionary_optimization(self, weight, X,y, func, percentile=3, population= 500, noise_variance=0.5, direction='max', epochs = 12): \n best_error = 0 if direction == 'max' else 1\n rdm = np.random.RandomState(1234)\n best_noise = 0\n t0 = time.time()\n best_weight = weight\n for epoch in range(epochs):\n best_weights = []\n best_errors = []\n best_noises = []\n for i in range(population):\n noise = rdm.normal(0,noise_variance,(weight.shape))\n #noise = gpu.randn(m,1,1)/5.0\n current = func(X,y, noise, weight)\n #print 'Cross validation error: {0}'.format(current)\n best_weights.append(weight)\n best_errors.append(current)\n best_noises.append(noise)\n if direction == 'max' and current > best_error or (direction == 'min' and current < best_error): \n best_error = current \n best_noise = noise \n best_weight = weight \n \n print ('EPOCH: {0}, best_error: {1}'.format(epoch,best_error)) \n if direction == 'max':\n idx = np.where(np.array(best_errors) >= np.percentile(best_errors, q=percentile))[0]\n else:\n idx = 
np.where(np.array(best_errors) <= np.percentile(best_errors, q=percentile))[0]\n \n weight = np.mean((np.array(best_weights)[idx] + np.array(best_noises)[idx]),axis=0) \n \n \n print (best_error)\n print (best_noise.T) \n print (best_weight.T)\n print (time.time() - t0)\n \n def strings_to_classes(self, strings):\n ret_classes = []\n dict_classes = {}\n i = 0\n for val in strings:\n if val not in dict_classes.keys():\n dict_classes[val] = i \n ret_classes.append(i)\n i+=1\n else:\n ret_classes.append(dict_classes[val]) \n \n return np.array(ret_classes)\n \n def hyperparameter_fitting(self, fun, data, means, lower_vals, upper_vals, positive=True, iter=20): \n def get_new_params(data, means, lower_vals, upper_vals, positive=True):\n data = np.array(data) \n ret_params = np.zeros_like(np.array(means))\n if data.shape[0] > 5:\n best_result_idx = np.argmax(data[:,-1])\n means = data[best_result_idx,:-1] \n \n for i, mean in enumerate(means):\n lower = lower_vals[i]\n upper = upper_vals[i] \n if data.shape[0] > 10: \n for j in range(len(means)):\n upper = np.percentile(data[:,-1], 75) \n variance = np.var(data[data[:,-1] > upper,j],axis=0) \n #mean = np.mean(data[data[:,-1] > upper,j],axis=0)\n else: \n variance = ((upper - lower)/ (2* 1.96))**2\n \n rdm_value = np.random.normal(mean,variance)\n if positive: \n while rdm_value <= 0:\n rdm_value = np.random.normal(mean,variance) \n \n ret_params[i] = rdm_value \n \n return ret_params\n \n params = get_new_params(data, means, lower_vals, upper_vals)\n param_data = []\n for epoch in range(iter):\n cv_score = fun(params)\n print ('CV score: {0}'.format(cv_score))\n param_data.append(params.tolist() + [cv_score])\n params = get_new_params(param_data,means, lower_vals, upper_vals)\n \n print ('Best parameter: {0}'.format(get_new_params(param_data,means, lower_vals, upper_vals)))\n \n def create_t_matrix(self, y):\n classes = np.max(y)\n t = np.zeros((y.shape[0], classes+1))\n for i in range(y.shape[0]):\n t[i, y[i]] = 1\n \n return t\n \n def create_balanced_set_index(self, y, X): \n labels_and_cases = []\n labels = np.max(y)\n a = np.zeros((labels+1,))\n for i in range(a.shape[0]):\n a[i] = np.sum(y==i) \n labels_and_cases.append(np.where(y==i)[0].tolist())\n \n a_original = a.copy() \n X_new = np.zeros((X.shape)) \n y_new = np.zeros((X.shape[0]))\n for row in range(X.shape[0]):\n next_label = np.argmax(a)\n if len(labels_and_cases[next_label]) > 0: \n y_new[row] = next_label\n X_new[row] = X[labels_and_cases[next_label].pop()]\n a += a_original*(np.arange(0,labels+1)!=next_label)\n \n return y_new, X_new\n \n def create_balanced_index_vector(self, y): \n labels_and_cases = []\n labels = np.max(y)\n a = np.zeros((labels+1,))\n for i in range(a.shape[0]):\n a[i] = np.sum(y==i) \n labels_and_cases.append(np.where(y==i)[0].tolist())\n \n a_original = a.copy() \n y_idx = []\n for row in range(y.shape[0]):\n next_label = np.argmax(a)\n if len(labels_and_cases[next_label]) > 0: \n y_idx.append(labels_and_cases[next_label].pop()) \n a += a_original*(np.arange(0,labels+1)!=next_label)\n \n return np.array(y_idx)\n \n def save_sparse_matrix(self, filename,x): \n x = sparse.csr_matrix(x)\n data=x.data\n indices=x.indices\n indptr=x.indptr\n shape=x.shape\n file = h5py.File(filename,'w')\n file.create_dataset(\"indices\", data=indices)\n file.create_dataset(\"indptr\", data=indptr)\n file.create_dataset(\"data\", data=data)\n file.create_dataset(\"shape\", data=shape)\n file.close()\n\n def load_sparse_matrix(self, filename):\n f = h5py.File(filename,'r')\n z 
= sparse.csr_matrix( (f['data'],f['indices'],f['indptr']), shape=f['shape'])\n return z\n \n def create_batches(self, X, size):\n count = np.round(X.shape[0]/(1.0*size),0)\n return np.array(np.split(X,count))\n \n def create_sparse_weight(self, input_size, output_size, sparsity = 15): \n rdm = np.random.RandomState(1234) \n weight = np.zeros((input_size, output_size))\n for axon in range(output_size): \n idxes = rdm.randint(0,input_size, (sparsity,))\n rdm_weights = rdm.randn(sparsity)\n for idx, rdm_weights in zip(idxes, rdm_weights):\n weight[idx,axon] = rdm_weights \n return weight\n \n def create_uniform_rdm_weight(self,input_size,output_size):\n rdm = np.random.RandomState(1234) \n return rdm.uniform(low=-4*np.sqrt(6./(input_size+output_size)),\n high=4*np.sqrt(6./(input_size+output_size)),\n size=(input_size,output_size))\n \n \n def create_t_dataset(self, y): \n if y != None:\n Y = np.matrix(y)\n Y = Y.T if Y.shape[0] == 1 else Y\n \n no_labels = np.max(y)\n t = np.zeros((Y.shape[0],no_labels+1))\n for i in range(Y.shape[0]):\n t[i,Y[i,0]] = 1\n \n return t\n else:\n return None \n \n def shuffle_set(self, data_set_X, data_set_y, data_set_t):\n n = data_set_X.shape[0]\n rdm_idx = np.arange(0,n)\n np.random.shuffle(rdm_idx)\n new_X = np.zeros((data_set_X.shape))\n new_y = np.zeros((data_set_y.shape))\n new_t = np.zeros((data_set_t.shape))\n for i in range(n):\n new_X[i,:] = data_set_X[rdm_idx[i],:]\n new_y[i] = data_set_y[rdm_idx[i]]\n new_t[i,:] = data_set_t[rdm_idx[i],:]\n \n \n def plot_results(self, valid, train, epochs, filename):\n plt.hold(True) \n print ('Printing result...')\n plt.axis([0,epochs,0,0.05])\n plt.title('Epochs: ' + str(epochs) + ', ' +'Hidden layer units: ')\n plt.plot(range(epochs),valid,color='blue')\n plt.plot(range(epochs),train,color='red')\n plt.tight_layout()\n plt.savefig(filename +'.png')\n plt.hold(False)\n \n def plot_weights(self, weight, filename):\n print ('Printing weights...')\n hist, bins = np.histogram(weight,bins = 50)\n width = 0.7*(bins[1]-bins[0])\n center = (bins[:-1]+bins[1:])/2\n plt.bar(center, hist, align = 'center', width = width)\n plt.savefig(filename + '.png')\n \n def heardEnter(self):\n i,o,e = select.select([sys.stdin],[],[],0.0001)\n for s in i:\n if s == sys.stdin:\n input = sys.stdin.readline()\n return True\n return False ", "sub_path": "util_tweet.py", "file_name": "util_tweet.py", "file_ext": "py", "file_size_in_byte": 9535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.matrix", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 48, "usage_type": "call"}, 
{"api_name": "numpy.mean", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 157, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 157, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 162, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 170, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 171, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 171, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 200, "usage_type": "call"}, 
{"api_name": "numpy.zeros", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 212, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "numpy.histogram", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "select.select", "line_number": 242, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 242, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 244, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 245, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 245, "usage_type": "attribute"}]} +{"seq_id": "224101786", "text": "import os\nimport json\n\nimport pygments\nimport pygments.lexers\nimport pygments.formatters\n\nfrom azcat.guess_ext import guess_ext_by_contents, guess_ext_by_filename\n\ndef load_module (type_, name):\n try:\n m = getattr(getattr(__import__(\"azcat.{0}.{1}\".format(type_, name)), type_), name)\n except ImportError:\n return None\n return m\n\ndef _load_formatter (name):\n return load_module(\"formatters\", name)\n\ndef _load_highlighter (name):\n return load_module(\"highlighters\", name)\n\n\ndef pretty_print (src, s, out, with_formatter, ext=None):\n \"\"\" `src' is a filepath to be formatted. 
`out' is a file object\n to be written.\"\"\"\n\n if ext == \"h\":\n ext = \"c\" # XXX: Pygments does not highlight .h files\n elif ext is None:\n ext = guess_ext_by_filename(src)\n if ext == \"\":\n ext = guess_ext_by_contents(s)\n\n # format\n if with_formatter:\n f = _load_formatter(ext)\n if f is not None:\n ext,s = f.format(s)\n\n # highlight\n h = _load_highlighter(ext)\n if h is None:\n try:\n lexer = pygments.lexers.get_lexer_by_name(ext)\n except pygments.util.ClassNotFound:\n lexer = pygments.lexers.get_lexer_for_mimetype(\"text/plain\")\n fmt = pygments.formatters.Terminal256Formatter(encoding=\"utf-8\")\n pygments.highlight(s, lexer, fmt, out)\n else:\n h.highlight(out, s)\n out.close()\n", "sub_path": "azcat/pretty_print.py", "file_name": "pretty_print.py", "file_ext": "py", "file_size_in_byte": 1425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "azcat.guess_ext.guess_ext_by_filename", "line_number": 31, "usage_type": "call"}, {"api_name": "azcat.guess_ext.guess_ext_by_contents", "line_number": 33, "usage_type": "call"}, {"api_name": "pygments.lexers.get_lexer_by_name", "line_number": 45, "usage_type": "call"}, {"api_name": "pygments.lexers", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygments.util", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygments.lexers.get_lexer_for_mimetype", "line_number": 47, "usage_type": "call"}, {"api_name": "pygments.lexers", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygments.formatters.Terminal256Formatter", "line_number": 48, "usage_type": "call"}, {"api_name": "pygments.formatters", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygments.highlight", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "285161168", "text": "from django import forms\nfrom django.forms import ValidationError\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate\nfrom django.contrib import messages\nfrom .models import camera\nimport re\nregex1=\"[\\w\\-]+\"\n\n\nclass CameraForm(forms.Form):\n camName = forms.CharField(max_length=64,label=\"Kamera Adı\",required=True,\n help_text='Kamera Adınızı Sadece \"A-Z\" \"a-z\" \"0-9\" \"-\" ve \"_\" Kullanarak Yazınız!',\n widget=forms.TextInput(attrs={'class': 'form-control rounded-0'}))\n camUrl = forms.CharField(max_length=256,label=\"Kamera URL' si\",required=True,\n help_text='Kamera Adresinizi \"http://mycam12345.com\" Şeklinde Yazınız',\n widget=forms.TextInput(attrs={'class': 'form-control rounded-0'}))\n\n\n def clean(self):\n cam_name=self.cleaned_data.get(\"camName\")\n cam_url=self.cleaned_data.get(\"camUrl\")\n cameracount=camera.objects.filter(cam_name=cam_name).count()\n print (cameracount)\n Errors=\"\"\n g=0\n if cameracount==0:\n if cam_name==None or cam_name==\"\" or cam_url==None or cam_url==\"\" :\n Errors+=\"Kamera Adı Alanı Boş Geçilemez?\\n\"\n g+=1\n else:\n if not re.search(regex1,cam_name) :\n Errors+=\"Kamera Adı İçin Geçersiz Karakter!\\n\"\n g+=1\n \n if g>0:\n values={\n \"Errors\":Errors,\n \"Durum\":\"0\"\n }\n return values\n\n\n\n values = {\n \"camName\": cam_name,\n \"camUrl\": cam_url,\n \"Durum\":\"1\"\n }\n\n return values\n else:\n Errors+=\"Bu Ada Sahip Bir Kamera Daha Önce Eklenmiş!\"\n values={\n \"Errors\":Errors,\n \"Durum\":\"0\"\n }\n return values\n\n\n\nclass AlertForm(forms.Form):\n a_start = forms.TimeField(label=\"Başlangıç Saati\",required=True,\n help_text='Saat:Dakika Şeklinde Giriş 
Yapınız',\n widget=forms.TimeInput(attrs={'class': 'form-control rounded-0','placeholder':'Saat Seçin'}))\n a_end = forms.TimeField(label=\"Bitiş Saati\",required=True,\n help_text='Saat:Dakika Şeklinde Giriş Yapınız',\n widget=forms.TimeInput(attrs={'class': 'form-control rounded-0','placeholder':'Saat Seçin'}))\n\n def clean(self):\n starttime=self.cleaned_data.get(\"a_start\")\n endtime=self.cleaned_data.get(\"a_end\")\n Errors=\"\"\n g=0\n if starttime==None or starttime==\"\" or endtime==None or endtime==\"\" :\n Errors+=\"Saat:Dakika Formatını Doğru Girdiğinizden Emin olun\\n\"\n g+=1\n \n if g>0:\n values={\n \"Errors\":Errors,\n \"Durum\":\"0\"\n \n }\n return values\n\n\n\n values = {\n \"Durum\":\"1\",\n \"starttime\":starttime,\n \"endtime\":endtime\n }\n\n return values\n ", "sub_path": "camera553/Kamera553/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.forms.Form", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 15, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "models.camera.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "models.camera.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.camera", "line_number": 23, "usage_type": "name"}, {"api_name": "re.search", "line_number": 32, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 62, "usage_type": "name"}, {"api_name": "django.forms.TimeField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 63, "usage_type": "name"}, {"api_name": "django.forms.TimeInput", "line_number": 65, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 65, "usage_type": "name"}, {"api_name": "django.forms.TimeField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 66, "usage_type": "name"}, {"api_name": "django.forms.TimeInput", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "515029659", "text": "from bandits.bandit import Bandit\nfrom distributions.betadistribution import BetaDistribution\nimport numpy as np\nimport copy\nfrom scipy.stats import beta\nimport matplotlib.pyplot as plt\n\nclass InfluenceLimiter_study():\n def __init__(self, bandit, agency, reward_reports, initial_reputation, track_reputation= True):\n self.bandit = bandit\n self.agency = agency\n self.posterior_history = {}\n self.prediction_history = {}\n self.reward_reports = reward_reports\n self.initial_reputation = initial_reputation\n self.track_reputation = track_reputation\n super().__init__()\n \n def reset(self):\n self.bandit.reset()\n self.posterior_history = {}\n 
self.prediction_history = {}\n self.__initialize_reputations()\n\n def __initialize_reputations(self):\n self.agent_reputations = [self.initial_reputation for agent in self.agency.agents]\n # self.agent_reputations = [int(agent.trustworthy == True) for agent in self.agency.agents]\n if self.track_reputation:\n self.agent_reputations_track = [[self.initial_reputation] for agent in self.agency.agents]\n\n def plot_posterior_history(self, arm):\n x = np.linspace(0, 1.0, 100)\n for (index, dist) in enumerate(self.prediction_history[arm]):\n a, b = dist.get_params()\n y = beta.pdf(x, a, b)\n plt.plot(x, y, label=index)\n plt.legend()\n plt.show()\n \n def _compute_IL_posterior(self, t):\n # print(\"reputations:\", self.agent_reputations)\n for (arm_index, arm) in enumerate(self.bandit.arms):\n # self.posterior_history[arm_index] = [BetaDistribution(1, 1)]\n self.prediction_history[arm_index]=[]\n\n pre_alpha, pre_beta = copy.deepcopy(arm.reward_dist.get_params())\n new_mean = copy.deepcopy(arm.reward_dist.mean())\n weight = 1\n running_weighted_sum = weight * new_mean\n q_tilde = running_weighted_sum/weight\n\n self.posterior_history[arm_index] = [BetaDistribution(q_tilde, 1-q_tilde)]\n k = 2/(len(self.agency.agents) + 1)\n prev_ema = self._compute_SMA(arm_index)\n \n #iterate through each agent and process their report\n for agent_index, agent in enumerate(self.agency.agents):\n gamma = min(1, self.agent_reputations[agent_index])\n current_ema = (self.agency.agent_reports[agent_index][arm_index] - prev_ema) * k + prev_ema\n alpha_j = current_ema * (agent.num_reports) \n beta_j = (1-current_ema) * (agent.num_reports)\n\n self.prediction_history[arm_index].append(BetaDistribution(alpha_j, beta_j))\n\n q_j = copy.deepcopy(current_ema)\n\n running_weighted_sum += gamma * q_j\n weight += gamma\n\n q_tilde = running_weighted_sum/weight\n\n alpha_tilde = q_tilde * (agent.num_reports) \n beta_tilde = (1-q_tilde) * (agent.num_reports)\n self.posterior_history[arm_index].append(BetaDistribution(alpha_tilde, beta_tilde))\n \n # print(\"final:\", alpha_tilde + pre_alpha, beta_tilde + pre_beta)\n arm.influence_reward_dist.set_params(alpha_tilde + pre_alpha, beta_tilde + pre_beta)\n\n def select_arm(self, t, influence_limit = True):\n self._compute_IL_posterior(t)\n return self.bandit.select_arm(t, influence_limit = influence_limit)\n\n def _update_reputations(self, arm, reward):\n for index, agent in enumerate(self.agency.agents):\n gamma = min(1, self.agent_reputations[index])\n q_tile_j_1 = self.posterior_history[arm][index].mean()\n q_j = self.prediction_history[arm][index].mean()\n \n self.agent_reputations[index] += gamma * (self.scoring_rule(reward, q_tile_j_1) - self.scoring_rule(reward, q_j))\n if self.track_reputation == True:\n self.agent_reputations_track[index].append(self.agent_reputations[index])\n \n def _compute_T_posterior(self, selected_arm, reward):\n self.bandit.arms[selected_arm].reward_dist.update(reward)\n\n def update(self, arm, reward):\n # print(\"pre_rep update:\", self.agent_reputations)\n self._update_reputations(arm, reward)\n # print(\"post_rep update:\", self.agent_reputations)\n self._compute_T_posterior(arm, reward)\n \n def plot_reputations(self):\n for (index, reputations) in enumerate(self.agent_reputations_track):\n plt.plot(reputations, label=index)\n plt.legend()\n plt.xlabel(\"Round (t)\")\n plt.ylabel(\"Reputation\")\n plt.show()\n\n def scoring_rule(self, r, q, rule = \"quadratic\"):\n if r == 1:\n return (1-q)**2\n else:\n return (q)**2\n\n", "sub_path": 
"influencelimiters/influencelimiter_study_4.py", "file_name": "influencelimiter_study_4.py", "file_ext": "py", "file_size_in_byte": 4861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.linspace", "line_number": 32, "usage_type": "call"}, {"api_name": "scipy.stats.beta.pdf", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.stats.beta", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 46, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 47, "usage_type": "call"}, {"api_name": "distributions.betadistribution.BetaDistribution", "line_number": 52, "usage_type": "call"}, {"api_name": "distributions.betadistribution.BetaDistribution", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 65, "usage_type": "call"}, {"api_name": "distributions.betadistribution.BetaDistribution", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "481856599", "text": "import serial\nfrom serial.tools import list_ports\n\n# Get list of available ports\navailablePorts = list_ports.comports()\n\n# Print list by name\nportNumber = 0\nfor port in availablePorts:\n print(portNumber, port.device)\n portNumber += 1\n\n# Connect to the port we want\nser = serial.Serial(availablePorts[2].device, 9600)\nwhile True:\n print(str(ser.read(5)))\n ser.flush()\n\n", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 384, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "serial.tools.list_ports.comports", "line_number": 5, "usage_type": "call"}, {"api_name": "serial.tools.list_ports", "line_number": 5, "usage_type": "name"}, {"api_name": "serial.Serial", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "159677055", "text": "# pylint: disable=no-member\nfrom typing import Dict, List, Tuple, Any, Optional\nfrom copy import deepcopy\nimport math\n\nimport numpy\nfrom overrides import overrides\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.modules.linear import Linear\n\nfrom allennlp.common.checks import 
ConfigurationError\nfrom allennlp.common.util import START_SYMBOL, END_SYMBOL\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder\nfrom allennlp.modules.seq2seq_decoders import DecoderNet\nfrom allennlp.models.model import Model\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.nn.initializers import InitializerApplicator\nfrom allennlp.nn import util\nfrom allennlp.nn.beam_search import BeamSearch\nfrom allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer import (\n subsequent_mask,\n PositionwiseFeedForward,\n SublayerConnection,\n PositionalEncoding,\n MultiHeadedAttention,\n)\n\nfrom xlwomt.metrics import TokenSequenceAccuracy\nfrom xlwomt.models.combiner import TransformerCombiner, AttentionCombiner\n\n\n@Model.register(\"ensemble_transformer_single_encoder\")\nclass EnsembleSequenceSingleEncoderTransformer(Model):\n def __init__(self,\n vocab: Vocabulary,\n source_embedder: TextFieldEmbedder,\n target_embedder: Embedding,\n encoder: Seq2SeqEncoder,\n max_decoding_steps: int,\n decoding_dim: int,\n feedforward_hidden_dim: int,\n num_layers: int,\n num_attention_heads: int,\n combiner_module: TransformerCombiner,\n use_positional_encoding: bool = True,\n positional_encoding_max_steps: int = 5000,\n dropout_prob: float = 0.1,\n residual_dropout_prob: float = 0.2,\n attention_dropout_prob: float = 0.2,\n beam_size: int = 1,\n target_namespace: str = \"tokens\",\n label_smoothing_ratio: Optional[float] = None,\n initializer: Optional[InitializerApplicator] = None) -> None:\n super(EnsembleSequenceSingleEncoderTransformer, self).__init__(vocab)\n\n self._target_namespace = target_namespace\n self._label_smoothing_ratio = label_smoothing_ratio\n self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)\n self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)\n self._token_based_metric = TokenSequenceAccuracy()\n\n # Beam Search\n self._max_decoding_steps = max_decoding_steps\n self._beam_search = BeamSearch(self._end_index, max_steps=max_decoding_steps, beam_size=beam_size)\n\n # Encoder\n self._encoder = encoder\n\n # Vocabulary and embedder\n self._source_embedder = source_embedder\n self._target_embedder = target_embedder\n\n target_vocab_size = self.vocab.get_vocab_size(self._target_namespace)\n assert target_vocab_size == self._target_embedder.num_embeddings\n\n target_embedding_dim = self._target_embedder.get_output_dim()\n\n self._decoding_dim = decoding_dim\n # Sequence Decoder Features\n self._output_projection_layer = Linear(\n self._decoding_dim, target_vocab_size\n )\n\n self._decoder = Decoder(\n num_layers=num_layers,\n decoding_dim=decoding_dim,\n target_embedding_dim=target_embedding_dim,\n feedforward_hidden_dim=feedforward_hidden_dim,\n num_attention_heads=num_attention_heads,\n use_positional_encoding=use_positional_encoding,\n positional_encoding_max_steps=positional_encoding_max_steps,\n dropout_prob=dropout_prob,\n residual_dropout_prob=residual_dropout_prob,\n attention_dropout_prob=attention_dropout_prob,\n combiner=combiner_module,\n num_sources=3\n )\n\n # Parameter checks and cleanup\n if self._target_embedder.get_output_dim() != self._decoder.target_embedding_dim:\n raise ConfigurationError(\n \"Target Embedder output_dim doesn't match decoder module's input.\"\n )\n #\n if self._encoder.get_output_dim() != self._decoder.get_output_dim():\n raise ConfigurationError(\n f\"Encoder output dimension 
{self._encoder.get_output_dim()} should be"\n                f" equal to decoder dimension {self._decoder.get_output_dim()}."\n            )\n\n        if initializer:\n            initializer(self)\n\n        # Print the model\n        print(self)\n\n    def take_step(self,\n                  last_predictions: torch.Tensor,\n                  state: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n        \"\"\"\n        Take a decoding step. This is called by the beam search class.\n        # Parameters\n        last_predictions : `torch.Tensor`\n            A tensor of shape `(group_size,)`, which gives the indices of the predictions\n            during the last time step.\n        state : `Dict[str, torch.Tensor]`\n            A dictionary of tensors that contain the current state information\n            needed to predict the next step, which includes the encoder outputs,\n            the source mask, and the decoder hidden state and context. Each of these\n            tensors has shape `(group_size, *)`, where `*` can be any other number\n            of dimensions.\n        # Returns\n        Tuple[torch.Tensor, Dict[str, torch.Tensor]]\n            A tuple of `(log_probabilities, updated_state)`, where `log_probabilities`\n            is a tensor of shape `(group_size, num_classes)` containing the predicted\n            log probability of each class for the next step, for each item in the group,\n            while `updated_state` is a dictionary of tensors containing the encoder outputs,\n            source mask, and updated decoder hidden state and context.\n        Notes\n        -----\n        We treat the inputs as a batch, even though `group_size` is not necessarily\n        equal to `batch_size`, since the group may contain multiple states\n        for each source sentence in the batch.\n        \"\"\"\n        # shape: (group_size, num_classes)\n        output_projections, state = self._decoder_step(last_predictions, state)\n\n        # shape: (group_size, num_classes)\n        class_log_probabilities = F.log_softmax(output_projections, dim=-1)\n\n        return class_log_probabilities, state\n\n    @overrides\n    def forward(self,  # type: ignore\n                source_tokens_0: Dict[str, torch.LongTensor],\n                source_tokens_1: Dict[str, torch.LongTensor],\n                source_tokens_2: Dict[str, torch.LongTensor],\n                metadata: List[Dict[str, Any]],\n                target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:\n        \"\"\"\n        Make forward pass with decoder logic for producing the entire target sequence.\n        Parameters\n        ----------\n        source_tokens_0 : ``Dict[str, torch.LongTensor]``\n            The output of `TextField.as_array()` applied on the source `TextField`. This will be\n            passed through a `TextFieldEmbedder` and then through an encoder.\n        source_tokens_1 : ``Dict[str, torch.LongTensor]``\n            The output of `TextField.as_array()` applied on the source `TextField`. This will be\n            passed through a `TextFieldEmbedder` and then through an encoder.\n        source_tokens_2 : ``Dict[str, torch.LongTensor]``\n            The output of `TextField.as_array()` applied on the source `TextField`. This will be\n            passed through a `TextFieldEmbedder` and then through an encoder.\n        metadata: List[Dict[str, Any]]\n            Additional information for prediction\n        target_tokens : ``Dict[str, torch.LongTensor]``, optional (default = None)\n            Output of `TextField.as_array()` applied on target `TextField`. 
We assume that the\n target tokens are also represented as a `TextField`.\n Returns\n -------\n Dict[str, torch.Tensor]\n \"\"\"\n state = self._encode(source_tokens=[source_tokens_0,\n source_tokens_1,\n source_tokens_2])\n\n if target_tokens:\n # state = self._decoder.init_decoder_state(state)\n # The `_forward_loop` decodes the input sequence and computes the loss during training\n # and validation.\n output_dict = self._forward_loop(state, target_tokens)\n else:\n output_dict = {}\n\n if not self.training:\n # state = self._init_decoder_state(state)\n predictions = self._forward_beam_search(state)\n output_dict.update(predictions)\n if target_tokens:\n # shape: (batch_size, max_predicted_sequence_length)\n predicted_tokens = self.decode(output_dict)[\"predicted_tokens\"]\n\n self._token_based_metric(predicted_tokens, [x[\"target_tokens\"] for x in metadata])\n\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Finalize predictions.\n This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test\n time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives\n within the ``forward`` method.\n This method trims the output predictions to the first end symbol, replaces indices with\n corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.\n \"\"\"\n predicted_indices = output_dict[\"predictions\"]\n if not isinstance(predicted_indices, numpy.ndarray):\n predicted_indices = predicted_indices.detach().cpu().numpy()\n all_predicted_tokens = []\n for indices in predicted_indices:\n # Beam search gives us the top k results for each source sentence in the batch\n # but we just want the single best.\n if len(indices.shape) > 1:\n indices = indices[0]\n indices = list(indices)\n # Collect indices till the first end_symbol\n if self._end_index in indices:\n indices = indices[:indices.index(self._end_index)]\n predicted_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace)\n for x in indices]\n all_predicted_tokens.append(predicted_tokens)\n output_dict[\"predicted_tokens\"] = all_predicted_tokens # type: ignore\n return output_dict\n\n def _encode(self, source_tokens: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Make forward pass on the encoder.\n # Parameters\n source_tokens : `List[Dict[str, torch.Tensor]]`\n List of the output of `TextField.as_array()` applied on the source `TextField`. 
This will be\n passed through a `TextFieldEmbedder` and then through an encoder.\n # Returns\n Dict[str, torch.Tensor]\n Map consisting of the key `source_mask` with the mask over the\n `source_tokens` text field,\n and the key `encoder_outputs` with the output tensor from\n forward pass on the encoder.\n \"\"\"\n # shape: n_srcs list of (batch_size, max_input_sequence_length, encoder_input_dim)\n embedded_inputs = [self._source_embedder(src_toks) for src_toks in source_tokens]\n # print(embedded_inputs[0].shape)\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_input_dim)\n embedded_inputs = torch.nn.utils.rnn.pad_sequence([e.permute(1, 0, 2) for e in embedded_inputs]).permute(2, 1, 0, 3)\n # print(embedded_inputs.shape)\n # shape: n_src size list of (batch_size, max_input_sequence_length)\n source_masks = [util.get_text_field_mask(src_toks) for src_toks in source_tokens]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length)\n source_masks = torch.nn.utils.rnn.pad_sequence([s.permute(1, 0) for s in source_masks]).permute(2, 1, 0)\n # print(source_masks.shape)\n # import pdb; pdb.set_trace()\n # shape: List(batch_size, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = [self._encoder(embedded_inputs[:, src_idx, :, :], source_masks[:, src_idx, :])\n for src_idx in range(3)]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = torch.stack(encoder_outputs, dim=1)\n\n return {\"source_mask\": source_masks, \"encoder_outputs\": encoder_outputs}\n\n def _forward_loop(self,\n state: Dict[str, torch.Tensor],\n target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:\n\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = state[\"encoder_outputs\"]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length)\n source_mask = state[\"source_mask\"]\n\n # shape: (batch_size, max_target_sequence_length)\n targets = target_tokens[\"tokens\"]\n\n _, target_sequence_length = targets.size()\n\n # Prepare embeddings for targets. 
They will be used as gold embeddings during decoder training.\n # shape: (batch_size, max_target_sequence_length, embedding_dim)\n target_embedding = self._target_embedder(targets)\n\n # shape: (batch_size, max_target_sequence_length)\n target_mask = util.get_text_field_mask(target_tokens)\n\n _, decoder_output = self._decoder(\n previous_state=state,\n previous_steps_predictions=target_embedding[:, :-1, :],\n encoder_outputs=encoder_outputs,\n source_mask=source_mask,\n previous_steps_mask=target_mask[:, :-1]\n )\n\n # shape: (group_size, max_target_sequence_length, num_classes)\n logits = self._output_projection_layer(decoder_output).type(torch.FloatTensor)\n\n # Compute loss.\n loss = self._get_loss(logits, targets, target_mask)\n output_dict = {\"loss\": loss}\n\n return output_dict\n\n def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Prepares inputs for beam search, runs the search, and returns the results.\n \"\"\"\n batch_size = state[\"source_mask\"].size(dim=0)\n start_predictions = state[\"source_mask\"][:, 0, :].new_full((batch_size,), fill_value=self._start_index)\n\n # shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)\n # shape (log_probabilities): (batch_size, beam_size)\n all_top_k_predictions, log_probabilities = self._beam_search.search(\n start_predictions, state, self.take_step\n )\n\n output_dict = {\n \"class_log_probabilities\": log_probabilities,\n \"predictions\": all_top_k_predictions,\n }\n return output_dict\n\n def _decoder_step(\n self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n \"\"\"\n Decode current state and last prediction to produce projections\n into the target space, which can then be used to get probabilities of\n each target token for the next step.\n Inputs are the same as for `take_step()`.\n \"\"\"\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = state[\"encoder_outputs\"]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length)\n source_mask = state[\"source_mask\"]\n\n # shape: (group_size, steps_count, decoder_output_dim)\n previous_steps_predictions = state.get(\"previous_steps_predictions\")\n\n # shape: (batch_size, 1, target_embedding_dim)\n last_predictions_embeddings = self._target_embedder(last_predictions).unsqueeze(1)\n\n if previous_steps_predictions is None or previous_steps_predictions.shape[-1] == 0:\n # There are no previous steps, except for start vectors in `last_predictions`\n # shape: (group_size, 1, target_embedding_dim)\n previous_steps_predictions = last_predictions_embeddings\n else:\n # shape: (group_size, steps_count, target_embedding_dim)\n previous_steps_predictions = torch.cat(\n [previous_steps_predictions, last_predictions_embeddings], 1\n )\n\n decoder_state, decoder_output = self._decoder(\n previous_state=state,\n encoder_outputs=encoder_outputs,\n source_mask=source_mask,\n previous_steps_predictions=previous_steps_predictions,\n )\n state[\"previous_steps_predictions\"] = previous_steps_predictions\n\n # Update state with the new decoder state, overriding the previous state\n state.update(decoder_state)\n\n if self._decoder.decodes_parallel:\n decoder_output = decoder_output[:, -1, :]\n\n # shape: (group_size, num_classes)\n output_projections = self._output_projection_layer(decoder_output)\n\n return output_projections, state\n\n def _get_loss(self,\n logits: torch.FloatTensor,\n targets: 
torch.LongTensor,\n target_mask: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n Compute loss.\n Takes logits (unnormalized outputs from the decoder) of size (batch_size,\n num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)\n and corresponding masks of size (batch_size, num_decoding_steps+1) and computes cross\n entropy loss while taking the mask into account.\n The length of ``targets`` is expected to be greater than that of ``logits`` because the\n decoder does not need to compute the output corresponding to the last timestep of\n ``targets``. This method aligns the inputs appropriately to compute the loss.\n During training, we want the logit corresponding to timestep i to be similar to the target\n token from timestep i + 1. That is, the targets should be shifted by one timestep for\n appropriate comparison. Consider a single example where the target has 3 words, and\n padding is to 7 tokens.\n The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>\n and the mask would be 1 1 1 1 1 0 0\n and let the logits be l1 l2 l3 l4 l5 l6\n We actually need to compare:\n the sequence w1 w2 w3 <E> <P> <P>\n with masks 1 1 1 1 0 0\n against l1 l2 l3 l4 l5 l6\n (where the input was) <S> w1 w2 w3 <E> <P>
\n \"\"\"\n # shape: (batch_size, num_decoding_steps)\n relevant_targets = targets[:, 1:].contiguous().to(logits.device)\n\n # shape: (batch_size, num_decoding_steps)\n relevant_mask = target_mask[:, 1:].contiguous().to(logits.device)\n\n return util.sequence_cross_entropy_with_logits(logits,\n relevant_targets,\n relevant_mask)\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n all_metrics: Dict[str, float] = {}\n if not self.training:\n all_metrics.update(self._token_based_metric.get_metric(reset=reset))\n return all_metrics\n\n\ndef _clones(module: nn.Module, num_layers: int):\n \"\"\"Produce N identical layers.\"\"\"\n return nn.ModuleList([deepcopy(module) for _ in range(num_layers)])\n\n\nclass Decoder(DecoderNet):\n \"\"\"\n Transformer N layer decoder with masking.\n Code taken from http://nlp.seas.harvard.edu/2018/04/03/attention.html\n \"\"\"\n\n def __init__(self,\n num_layers: int,\n decoding_dim: int,\n target_embedding_dim: int,\n feedforward_hidden_dim: int,\n num_attention_heads: int,\n combiner: TransformerCombiner,\n num_sources: int,\n use_positional_encoding: bool = True,\n positional_encoding_max_steps: int = 5000,\n dropout_prob: float = 0.1,\n residual_dropout_prob: float = 0.2,\n attention_dropout_prob: float = 0.2,\n ) -> None:\n super().__init__(decoding_dim, target_embedding_dim, decodes_parallel=True)\n\n self._decoding_dim = decoding_dim\n self._embed_scale = math.sqrt(decoding_dim)\n\n self._positional_embedder = (\n PositionalEncoding(input_dim=decoding_dim, max_len=positional_encoding_max_steps)\n if use_positional_encoding\n else None\n )\n self._dropout = nn.Dropout(dropout_prob)\n\n generic_attn = MultiHeadedAttention(num_attention_heads, decoding_dim, attention_dropout_prob)\n combined_attn = AttentionCombiner(num_sources, generic_attn, combiner)\n feed_forward = PositionwiseFeedForward(decoding_dim, feedforward_hidden_dim, dropout_prob)\n\n layer = DecoderLayer(\n size=decoding_dim,\n self_attn=deepcopy(generic_attn),\n src_attn=deepcopy(combined_attn),\n feed_forward=feed_forward,\n dropout=residual_dropout_prob\n )\n\n self._self_attention_layers = _clones(layer, num_layers)\n self.norm = nn.LayerNorm(layer.size)\n\n def init_decoder_state(\n self, encoder_out: Dict[str, torch.LongTensor]\n ) -> Dict[str, torch.Tensor]:\n return {}\n\n @overrides\n def forward(\n self,\n previous_state: Dict[str, torch.Tensor],\n encoder_outputs: torch.Tensor,\n source_mask: torch.Tensor,\n previous_steps_predictions: torch.Tensor,\n previous_steps_mask: Optional[torch.Tensor] = None,\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n\n # shape: (batch_size, n_srcs, max_input_sequence_length) ->\n # (batch_size, n_srcs, 1, max_input_sequence_length)\n source_mask = source_mask.unsqueeze(-2)\n future_mask = Variable(subsequent_mask(previous_steps_predictions.size(-2),\n device=source_mask.device)\n .type_as(source_mask.data))\n\n if previous_steps_mask is None:\n previous_steps_mask = future_mask\n else:\n previous_steps_mask = previous_steps_mask.unsqueeze(-2) & future_mask\n\n previous_steps_predictions = previous_steps_predictions * self._embed_scale\n if self._positional_embedder:\n previous_steps_predictions = self._positional_embedder(previous_steps_predictions)\n previous_steps_predictions = self._dropout(previous_steps_predictions)\n\n for layer in self._self_attention_layers:\n previous_steps_predictions = layer(previous_steps_predictions,\n encoder_outputs,\n source_mask,\n previous_steps_mask)\n\n decoded = 
self.norm(previous_steps_predictions)\n return {}, decoded\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"\n A single layer of transformer decoder.\n Code taken from http://nlp.seas.harvard.edu/2018/04/03/attention.html\n \"\"\"\n def __init__(\n self,\n size: int,\n self_attn: MultiHeadedAttention,\n src_attn: AttentionCombiner,\n feed_forward: F,\n dropout: float,\n ) -> None:\n super().__init__()\n self.size = size\n self.self_attn = self_attn\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.sublayer = _clones(SublayerConnection(size, dropout), 3)\n\n def forward(\n self, x: torch.Tensor, memory: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor\n ) -> torch.Tensor:\n\n \"\"\"Follow Figure 1 (right) for connections.\"\"\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n x = self.sublayer[1](x, lambda x: self.src_attn(x, memory, memory, src_mask))\n return self.sublayer[2](x, self.feed_forward)\n", "sub_path": "code/models/transformer_ensemble_single_enc.py", "file_name": "transformer_ensemble_single_enc.py", "file_ext": "py", "file_size_in_byte": 24642, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "allennlp.models.model.Model", "line_number": 37, "usage_type": "name"}, {"api_name": "allennlp.data.vocabulary.Vocabulary", "line_number": 39, "usage_type": "name"}, {"api_name": "allennlp.modules.TextFieldEmbedder", "line_number": 40, "usage_type": "name"}, {"api_name": "allennlp.modules.token_embedders.Embedding", "line_number": 41, "usage_type": "name"}, {"api_name": "allennlp.modules.Seq2SeqEncoder", "line_number": 42, "usage_type": "name"}, {"api_name": "xlwomt.models.combiner.TransformerCombiner", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 57, "usage_type": "name"}, {"api_name": "allennlp.nn.initializers.InitializerApplicator", "line_number": 57, "usage_type": "name"}, {"api_name": "allennlp.common.util.START_SYMBOL", "line_number": 62, "usage_type": "argument"}, {"api_name": "allennlp.common.util.END_SYMBOL", "line_number": 63, "usage_type": "argument"}, {"api_name": "xlwomt.metrics.TokenSequenceAccuracy", "line_number": 64, "usage_type": "call"}, {"api_name": "allennlp.nn.beam_search.BeamSearch", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.modules.linear.Linear", "line_number": 84, "usage_type": "call"}, {"api_name": "allennlp.common.checks.ConfigurationError", "line_number": 105, "usage_type": "call"}, {"api_name": "allennlp.common.checks.ConfigurationError", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 122, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 159, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 160, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 161, "usage_type": 
"name"}, {"api_name": "torch.LongTensor", "line_number": 161, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 163, "usage_type": "attribute"}, {"api_name": "overrides.overrides", "line_number": 157, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 163, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 211, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 221, "usage_type": "attribute"}, {"api_name": "overrides.overrides", "line_number": 210, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 239, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 239, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 239, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 257, "usage_type": "attribute"}, {"api_name": "allennlp.nn.util.get_text_field_mask", "line_number": 260, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 260, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 263, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 263, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 271, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 276, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 276, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 277, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 277, "usage_type": "attribute"}, {"api_name": "allennlp.nn.util.get_text_field_mask", "line_number": 295, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 295, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 306, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 277, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 314, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 314, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 334, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 334, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 360, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 335, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 335, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 335, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 384, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 385, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 386, "usage_type": "attribute"}, {"api_name": "allennlp.nn.util.sequence_cross_entropy_with_logits", "line_number": 415, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 415, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 386, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 421, "usage_type": "name"}, {"api_name": "overrides.overrides", "line_number": 
419, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 420, "usage_type": "name"}, {"api_name": "allennlp.models.model.Model.register", "line_number": 36, "usage_type": "call"}, {"api_name": "allennlp.models.model.Model", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 427, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 427, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 429, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 429, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 429, "usage_type": "call"}, {"api_name": "allennlp.modules.seq2seq_decoders.DecoderNet", "line_number": 432, "usage_type": "name"}, {"api_name": "xlwomt.models.combiner.TransformerCombiner", "line_number": 444, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 455, "usage_type": "call"}, {"api_name": "allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer.PositionalEncoding", "line_number": 458, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 462, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 462, "usage_type": "name"}, {"api_name": "allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer.MultiHeadedAttention", "line_number": 464, "usage_type": "call"}, {"api_name": "xlwomt.models.combiner.AttentionCombiner", "line_number": 465, "usage_type": "call"}, {"api_name": "allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer.PositionwiseFeedForward", "line_number": 466, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 470, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 471, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 477, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 477, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 480, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 480, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 481, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 481, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 487, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 487, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 488, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 489, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 490, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 491, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 491, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 497, "usage_type": "call"}, {"api_name": "allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer.subsequent_mask", "line_number": 497, "usage_type": "call"}, {"api_name": "overrides.overrides", "line_number": 484, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 492, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 492, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 492, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 521, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 521, "usage_type": "name"}, {"api_name": "allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer.MultiHeadedAttention", 
"line_number": 529, "usage_type": "name"}, {"api_name": "xlwomt.models.combiner.AttentionCombiner", "line_number": 530, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 531, "usage_type": "name"}, {"api_name": "allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer.SublayerConnection", "line_number": 539, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 542, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 543, "usage_type": "attribute"}]} +{"seq_id": "641411374", "text": "from django import forms\nfrom apps.tools.models import Calendar\n\n\nclass CalendarForm(forms.ModelForm):\n class Meta:\n model = Calendar\n\n fields = [\n 'id',\n 'title',\n 'color',\n 'allDay',\n 'start',\n 'startTimer',\n 'end',\n 'endTimer',\n ]\n labels = {\n 'title': 'Title to Events:',\n 'color': 'Category:',\n 'allDay': 'is All day:',\n 'start': 'Start Date:',\n 'startTimer': 'Start Timer:',\n 'end': 'End Date:',\n 'endTimer': 'End Timer',\n }\n widgets = {\n 'title': forms.TextInput(attrs={'placeholder': 'Title', 'class': 'form-control input-md'}),\n 'color': forms.Select(attrs={'class': 'form-control input-md'}, choices=(('#E74C3C','Very Importan'), ('#DC7633', 'Importan'), ('#27AE60', 'Event'))),\n 'allDay': forms.CheckboxInput(attrs={'data-off-color':\"danger\", 'class':\"switch\", 'data-size':\"mini\", 'data-on-text':\"YES\", 'data-off-text': \"NO\"}),\n 'start': forms.DateInput(attrs={'class': 'form-control input-md'}),\n 'startTimer': forms.TimeInput(attrs={'class': 'form-control'}),\n 'end': forms.DateInput(attrs={'class': 'form-control input-md'}),\n 'endTimer': forms.TimeInput(attrs={'class': 'form-control'}),\n }", "sub_path": "apps/tools/components/CalendarForm.py", "file_name": "CalendarForm.py", "file_ext": "py", "file_size_in_byte": 1399, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "apps.tools.models.Calendar", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 30, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 30, "usage_type": "name"}, {"api_name": "django.forms.CheckboxInput", "line_number": 31, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 31, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 32, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "django.forms.TimeInput", "line_number": 33, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 33, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 34, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 34, "usage_type": "name"}, {"api_name": "django.forms.TimeInput", "line_number": 35, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "275895543", "text": "from common.gAPI import GoogleAPI\nimport pandas as pd\nfrom Edelweiss.scrapEdDB import ScrapData\nfrom common.common import CommonFunctions\nfrom common.DBOperations import DatabaseOp\nfrom Edelweiss.helpEdDB import HelpEdDB\nimport time\nimport os\nfrom 
pytz import timezone\nimport datetime\nimport threading\nimport Edelweiss.edleConfig as edleConfig\nimport config\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nclass ProcessEd(threading.Thread):\n\n def concate(self, df_now, previous_df):\n objCommon = CommonFunctions()\n fixed_columns = ['ID', 'ScrapedDate', 'ScripName', 'IndexORStocks', 'StrikePrice', 'OptionType', 'StrTradeDateTime', 'TradeDateTime', 'ExpiryDate', 'OI',\n 'COI', 'IV', 'VOL', 'MinuteOI', 'Flag']\n try:\n previous_df = objCommon.drop_extra_columns(previous_df, fixed_columns)\n df_now = objCommon.drop_extra_columns(df_now, fixed_columns)\n final = df_now.append(previous_df)\n #final.reset_index(inplace=True)\n except Exception as e:\n final = previous_df\n print('concat exception: ', e)\n\n return final\n\n def save_to_drive(self, folder_id, name_of_file, destination):\n objGAPI = GoogleAPI()\n try:\n service = objGAPI.intiate_gdAPI()\n # Search file id to check it is exists or not\n # def search_file(service, file_name, mime_type, folder_id, search_in_folder=False):\n file_id = objGAPI.search_file(service, str(name_of_file), 'text/csv', folder_id, True)\n if type(file_id) is int:\n objGAPI.upload_file(service, str(name_of_file), destination, folder_id, 'text/csv')\n if type(file_id) is str:\n objGAPI.delete_file(service, file_id)\n # time.sleep(1)\n objGAPI.upload_file(service, str(name_of_file), destination, folder_id, 'text/csv')\n return True\n except Exception as e:\n print('Exception while saving files on drive', e)\n return False\n\n def endupload(self, symbol, expiry_date, table_name, folder_id):\n try:\n exd = expiry_date.replace(' ', '_')\n file_name = symbol + '_' + exd + '.csv'\n objHDB = HelpEdDB()\n objGAPI = GoogleAPI()\n objCommon = CommonFunctions()\n result_df, st = objHDB.DB2CSV(symbol, table_name)\n destination = os.getcwd() + '/Edelweiss/sample_data/' + file_name\n\n result_df.to_csv(os.getcwd() + '/Edelweiss/sample_data/' + file_name, index=False)\n service = objGAPI.intiate_gdAPI()\n isDataAvailable, file_id = objCommon.check_pdata_exist(file_name, folder_id)\n if isDataAvailable == True:\n objGAPI.delete_file(service, file_id)\n objGAPI.upload_file(service, str(file_name), destination, folder_id, 'text/csv')\n except Exception as e:\n print('Exception while saving files on drive at the end of day', e)\n return False\n\n def process(self, symbol, table_name, expiry_date, iterations, threshold, pVtime):\n #objGAPI = GoogleAPI()\n objScrap = ScrapData()\n objCommon = CommonFunctions()\n try:\n exd = expiry_date.replace(' ', '_')\n file_name = symbol + '_' + exd + '.csv'\n status, pVtime = objScrap.start_scraping(str(symbol), expiry_date, threshold, pVtime)\n if iterations == 30:\n objHDB = HelpEdDB()\n if status == True:\n result_df, st = objHDB.DB2CSV(symbol, table_name)\n # if os.path.exists(os.getcwd() + '/Edelweiss/d_csv/' + file_name):\n # previous_df = pd.read_csv(os.getcwd() + '/Edelweiss/d_csv/' + file_name, index_col=0)\n # result_df = self.concate(current_df, previous_df)\n # else:\n # result_df = current_df\n\n destination = os.getcwd() + '/Edelweiss/sample_data/' + file_name\n\n result_df.to_csv(os.getcwd() + '/Edelweiss/sample_data/' + file_name, index=False)\n #service = objGAPI.intiate_gdAPI()\n #isDataAvailable, file_id = objCommon.check_pdata_exist(file_name, folder_id)\n # if isDataAvailable == True:\n # objGAPI.delete_file(service, file_id)\n # objGAPI.upload_file(service, str(file_name), destination, folder_id, 'text/csv')\n else:\n print(f\"Scrapping df empty for : 
{symbol}\")\n return False, pVtime\n return True, pVtime\n except Exception as e:\n print('Exception in Edle Scrapping Process:', e)\n return False, pVtime\n\n def start(self, q, result, isMarketON, diction): #FolderIDs\n while not q.empty():\n work = q.get()\n if work[1] == 'UPLOAD_THREAD':\n pass\n # print('In upload thread')\n # while True:\n # strcurrentTime = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n # strcurrentTime = strcurrentTime.replace(':', '.')\n # print(\"strcurrentTime===\",strcurrentTime)\n # #if float(strcurrentTime) > float(15.30):\n # if float(strcurrentTime) > float(15.30):\n # print('Market is not ON. Try tomorrow or change isMarketON flag')\n # break\n # for a in range(1800):\n # sT = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n # sT = sT.replace(':', '.')\n # #if float(sT) > float(15.30):\n # print(\"st===\", sT)\n # if float(sT) > float(15.30):\n # print('Market is not ON. Try tomorrow or change isMarketON flag')\n # break\n # time.sleep(1)\n # objGAPI = GoogleAPI()\n # service = objGAPI.intiate_gdAPI()\n # file_id = objGAPI.search_file(service, config.DB_Name, 'mime_type', '1llZZacQjhf2iNPjjpCBSSD4AdKFc5Con', True)\n # if file_id != 0:\n # objGAPI.delete_file(service, file_id)\n # objGAPI.upload_file(service, config.DB_Name, os.getcwd() + '/DB/' + config.DB_Name,\n # '1llZZacQjhf2iNPjjpCBSSD4AdKFc5Con', 'application/vnd.sqlite3')\n\n else:\n try:\n if isMarketON == 'TRUE':\n threshold = 1.0\n ns = threading.local()\n ns.iterations = 0\n\n #Get threshold\n ScrapedFor = work[1]\n if diction[ScrapedFor] == 'FALSE':\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[1]\n symbol = ScrapedFor[2]\n else:\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[0]\n symbol = ScrapedFor[1]\n # objDB = DatabaseOp()\n # conn = objDB.create_connection()\n # print(\"query====issue\")\n # ed = expDate.replace(' ', '-')\n # ed = ed.replace('20', '')\n # que = \"SELECT Threshold FROM Threshold WHERE ScripName='\"+symbol+\"' AND ExpiryDate='\"+str(ed)+\"'\"\n # #que = 'SELECT Threshold FROM Threshold WHERE ScripName=? AND ExpiryDate=?'\n # cur = conn.cursor()\n # #cur.execute(que, [symbol, str(ed)])\n # cur.execute(que)\n # rr = cur.fetchone()\n # if len(rr) != 0:\n # threshold = rr[0]\n # else:\n # threshold = 1.0\n # #print('No threshold existed for given expiry date')\n # conn.close()\n pVtime = ''\n while True:\n print('******************* Iterations : ', ns.iterations)\n strcurrentTime = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n strcurrentTime = strcurrentTime.replace(':', '.')\n #print(\"else====vala ==strcurrentTime\",strcurrentTime)\n #if float(strcurrentTime) > float(15.30):\n if float(strcurrentTime) > float(15.30):\n print('Market is not ON. 
Try tomorrow or change isMarketON flag')\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n #folder_ID = FolderIDs[expDate]\n #self.endupload(symbol, expDate, table_name, folder_ID)\n break\n ScrapedFor = work[1]\n if diction[ScrapedFor] == 'FALSE':\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[1]\n symbol = ScrapedFor[2]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, ns.iterations, threshold, pVtime)\n else:\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[0]\n symbol = ScrapedFor[1]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, ns.iterations, threshold, pVtime)\n if status == True:\n ns.iterations += 1\n if ns.iterations == 31:\n ns.iterations = 0\n #Sleep for a minute before next scrapping\n time.sleep(59)\n\n else:\n it = 0\n pVtime = ''\n strcurrentTime = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n strcurrentTime = strcurrentTime.replace(':', '.')\n #print(\"third====vala ==strcurrentTime\", strcurrentTime)\n #if float(strcurrentTime) > float(15.30):\n if float(strcurrentTime) > float(15.30):\n print('Market is not ON. Try tomorrow or change isMarketON flag')\n break\n ScrapedFor = work[1]\n if diction[ScrapedFor] == 'FALSE':\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[1]\n symbol = ScrapedFor[2]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, it, threshold, pVtime)\n else:\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[0]\n symbol = ScrapedFor[1]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, it, threshold, pVtime)\n\n except Exception as e:\n print(e)\n result[work[0]] = {}\n # signal to the queue that task has been processed\n q.task_done()\n return True\n\n\n#\n# obj = ProcessEd()\n# name_of_file = 'NIFTY_29_Apr_2021.csv'\n# previous_df = pd.read_csv(os.getcwd() + '/d_csv/' + name_of_file, index_col=0)\n# print(previous_df.head(1))\n# df_now = pd.read_csv(os.getcwd() + '/csv/' + name_of_file, index_col=0)\n# print(df_now.head(1))\n# d = obj.concate(previous_df, df_now)\n# print(d.head())\n# print(d.tail())", "sub_path": "Edelweiss_MYSQL_DB/Edelweiss/pEDDB.py", "file_name": "pEDDB.py", "file_ext": "py", "file_size_in_byte": 13017, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "warnings.filterwarnings", "line_number": 15, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 18, "usage_type": "attribute"}, {"api_name": "common.common.CommonFunctions", "line_number": 21, "usage_type": "call"}, {"api_name": "common.gAPI.GoogleAPI", "line_number": 36, "usage_type": "call"}, {"api_name": "Edelweiss.helpEdDB.HelpEdDB", "line_number": 57, "usage_type": "call"}, {"api_name": "common.gAPI.GoogleAPI", "line_number": 58, "usage_type": "call"}, {"api_name": "common.common.CommonFunctions", "line_number": 59, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 61, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 63, "usage_type": "call"}, {"api_name": "Edelweiss.scrapEdDB.ScrapData", "line_number": 75, 
"usage_type": "call"}, {"api_name": "common.common.CommonFunctions", "line_number": 76, "usage_type": "call"}, {"api_name": "Edelweiss.helpEdDB.HelpEdDB", "line_number": 82, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 91, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 93, "usage_type": "call"}, {"api_name": "threading.local", "line_number": 142, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 175, "usage_type": "call"}, {"api_name": "config.TableName", "line_number": 182, "usage_type": "attribute"}, {"api_name": "config.TableName", "line_number": 193, "usage_type": "attribute"}, {"api_name": "config.TableName", "line_number": 201, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 213, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 213, "usage_type": "call"}, {"api_name": "config.TableName", "line_number": 227, "usage_type": "attribute"}, {"api_name": "config.TableName", "line_number": 235, "usage_type": "attribute"}]} +{"seq_id": "490181817", "text": "# -*- coding: utf-8 -*-\r\nimport cv2\r\n#Import OpenCV\r\n#import cv2.cv as cv\r\n#Import Numpy\r\nimport numpy as np\r\n\r\ncamera_feed = cv2.VideoCapture(0)\r\n\r\ndef nothing(x):\r\n pass\r\n\r\n\r\ncv2.namedWindow('image')\r\ncv2.createTrackbar('h_max','image',179,179,nothing)\r\ncv2.createTrackbar('h_min','image',0,179,nothing)\r\ncv2.createTrackbar('s_max','image',255,255,nothing)\r\ncv2.createTrackbar('s_min','image',0,255,nothing)\r\ncv2.createTrackbar('v_max','image',255,255,nothing)\r\ncv2.createTrackbar('v_min','image',0,255,nothing)\r\n\r\n\r\nwhile(1):\r\n\r\n _,frame = camera_feed.read()\r\n #Convert the current frame to HSV\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n # 设定蓝色的阈值\r\n h_max=cv2.getTrackbarPos('h_max','image')\r\n h_min=cv2.getTrackbarPos('h_min','image')\r\n s_max=cv2.getTrackbarPos('s_max','image')\r\n s_min=cv2.getTrackbarPos('s_min','image')\r\n v_max=cv2.getTrackbarPos('v_max','image')\r\n v_min=cv2.getTrackbarPos('v_min','image')\r\n \r\n # -----------------pink------------------\r\n #lower_blue=np.array([110,50,50])\r\n lower_blue=np.array([h_min,s_min,v_min])\r\n #upper_blue=np.array([130,255,255])\r\n upper_blue=np.array([h_max,s_max,v_max])\r\n \r\n \r\n '''\r\n #Define the threshold for finding a blue object with hsv\r\n lower_blue = np.array([120,69,0])\r\n upper_blue = np.array([179,224,255])\r\n '''\r\n #Create a binary image, where anything blue appears white and everything else is black\r\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\r\n res=cv2.bitwise_and(frame,frame,mask=mask)\r\n #Get rid of background noise using erosion and fill in the holes using dilation and erode the final image on last time\r\n element = cv2.getStructuringElement(cv2.MORPH_RECT,(9,9))\r\n mask = cv2.erode(mask,element, iterations=2)\r\n mask = cv2.dilate(mask,element,iterations=2)\r\n\r\n mask = cv2.erode(mask,element)\r\n \r\n #Create Contours for all blue objects\r\n image1,contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n #image1,contours, hierarchy = cv2.findContours(mask, 1, 2)\r\n maximumArea = 0\r\n bestContour = None\r\n\r\n # for all 
object\r\n for contour in contours:\r\n '''\r\n #Straight Bounding Rectangle\r\n x,y,w,h = cv2.boundingRect(contour)\r\n cv2.rectangle(frame, (x,y),(x+w,y+h), (0,0,255), 3)\r\n '''\r\n\r\n #Rotated Rectangle\r\n rect = cv2.minAreaRect(contour)\r\n box = cv2.boxPoints(rect)\r\n box = np.int0(box)\r\n cv2.drawContours(frame,[box],0,(0,0,255),2)\r\n\r\n\r\n # #only for the biggest object\r\n # for contour in contours:\r\n # currentArea = cv2.contourArea(contour)\r\n # if currentArea > maximumArea:\r\n # bestContour = contour\r\n # maximumArea = currentArea\r\n # #Create a bounding box around the biggest blue object\r\n # if bestContour is not None:\r\n # x,y,w,h = cv2.boundingRect(bestContour)\r\n # cv2.rectangle(frame, (x,y),(x+w,y+h), (0,0,255), 3)\r\n \r\n\r\n #Show the original camera feed with a bounding box overlayed \r\n cv2.imshow('frame',frame)\r\n\r\n #cv2.imshow('hsv', hsv)\r\n\r\n #Show the contours in a seperate window\r\n #cv2.imshow('mask',mask)\r\n\r\n cv2.imshow('res',res)\r\n #Use this command to prevent freezes in the feed\r\n k = cv2.waitKey(5) & 0xFF\r\n #If escape is pressed close all windows\r\n if k == 27:\r\n break\r\n\r\n\r\ncv2.destroyAllWindows() ", "sub_path": "코드/PythonCode/opencv_basic.py", "file_name": "opencv_basic.py", "file_ext": "py", "file_size_in_byte": 3442, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.VideoCapture", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.getTrackbarPos", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.getStructuringElement", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.MORPH_RECT", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.erode", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 60, "usage_type": "attribute"}, 
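The commented-out "only for the biggest object" branch in the `opencv_basic.py` record above can be exercised without the camera loop or trackbars. A sketch on a single image; the file name and HSV bounds are placeholder assumptions, and the two-value `findContours` return assumes OpenCV 4 (the record's three-value form is OpenCV 3):

import cv2
import numpy as np

frame = cv2.imread('sample.jpg')  # placeholder input image
assert frame is not None, 'image failed to load'
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([110, 50, 50]), np.array([130, 255, 255]))

# OpenCV 4 returns (contours, hierarchy); OpenCV 3 returns three values.
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
    best = max(contours, key=cv2.contourArea)  # keep only the largest blue region
    x, y, w, h = cv2.boundingRect(best)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)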
{"api_name": "cv2.minAreaRect", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "526722333", "text": "import numpy as np\nimport pytest\n\nimport pylas\n\n\n@pytest.fixture(params=['simple.las', 'simple.laz'])\ndef las(request):\n return pylas.open(request.param)\n\n\ndef test_classification_overflows(las):\n if not pylas.lib.USE_UNPACKED:\n c = las.classification\n c[0] = 54\n with pytest.raises(OverflowError):\n las.classification = c\n else:\n las.classification[0] = 54\n with pytest.raises(OverflowError):\n las.points_data.repack_sub_fields()\n\n\ndef test_classification_change(tmpdir, las):\n c = las.classification\n c[:] = 10\n\n las.classification = c\n assert np.allclose(c, las.classification)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(c, las.classification)\n\n\ndef test_synthetic_change(tmpdir, las):\n s = las.synthetic\n s[:] = False\n s[17] = True\n\n las.synthetic = s\n assert np.allclose(s, las.synthetic)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(s, las.synthetic)\n\n\ndef test_key_point_change(tmpdir, las):\n kp = las.key_point\n kp[:] = False\n kp[25] = True\n\n las.key_point = kp\n assert np.allclose(kp, las.key_point)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(kp, las.key_point)\n\n\ndef test_withheld_changes(tmpdir, las):\n withheld = las.withheld\n withheld[:] = False\n withheld[180] = True\n\n las.withheld = withheld\n assert np.allclose(withheld, las.withheld)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(withheld, las.withheld)\n\n\ndef dim_does_not_exists(las, dim_name):\n try:\n _ = getattr(las, dim_name)\n except ValueError:\n return True\n return False\n\n\ndef test_change_format(las):\n assert las.points_data.point_format_id == 3\n assert las.header.point_data_format_id == 3\n\n las.to_point_format(2)\n assert las.points_data.point_format_id == 2\n assert las.header.point_data_format_id == 2\n assert dim_does_not_exists(las, 'gps_time')\n\n las.to_point_format(1)\n assert las.points_data.point_format_id == 1\n assert las.header.point_data_format_id == 1\n assert dim_does_not_exists(las, 'red')\n assert dim_does_not_exists(las, 'green')\n assert dim_does_not_exists(las, 'blue')\n\n las.to_point_format(0)\n assert las.points_data.point_format_id == 0\n assert las.header.point_data_format_id == 0\n assert dim_does_not_exists(las, 'red')\n assert dim_does_not_exists(las, 'green')\n assert dim_does_not_exists(las, 'blue')\n assert dim_does_not_exists(las, 'gps_time')\n", "sub_path": "tests/test_modif_1_2.py", "file_name": "test_modif_1_2.py", "file_ext": "py", 
"file_size_in_byte": 2984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pylas.open", "line_number": 9, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 7, "usage_type": "call"}, {"api_name": "pylas.lib", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 29, "usage_type": "call"}, {"api_name": "pylas.open", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 47, "usage_type": "call"}, {"api_name": "pylas.open", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 65, "usage_type": "call"}, {"api_name": "pylas.open", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 83, "usage_type": "call"}, {"api_name": "pylas.open", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "208533132", "text": "import json\r\nfrom sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\r\nimport pandas, numpy, textblob, string\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nfrom sklearn import metrics, cross_validation\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nfrom sklearn.externals import joblib\r\n\r\nclass Model:\r\n\tdef __init__(self, xtrain_count, train_y, xvalid_count, valid_y):\r\n\t\tself.xtrain_count = xtrain_count\r\n\t\tself.train_y = train_y\r\n\t\tself.xvalid_count = xvalid_count\r\n\t\tself.valid_y = valid_y\r\n \r\n\tdef train_model(self, classifier):\r\n\t\tclassifier.fit(self.xtrain_count, self.train_y)\r\n\t\tpredictions = classifier.predict(self.xvalid_count)\r\n\t\treturn metrics.accuracy_score(predictions, self.valid_y)\r\n \r\n\tdef cross_validation(self, classifier):\r\n\t\tscores_accuracy = cross_val_score(classifier, self.xvalid_count, self.valid_y, cv=10, scoring='accuracy')\r\n\t\tscores_log_loss = cross_val_score(classifier, self.xvalid_count, self.valid_y, cv=10, scoring='neg_log_loss')\r\n\t\t\r\n\t\tprint('K-fold cross-validation results: ' + classifier.__class__.__name__)\r\n\t\tprint(classifier.__class__.__name__+\" average accuracy is %2.3f\" % scores_accuracy.mean())\r\n\t\tprint(classifier.__class__.__name__+\" average log_loss is %2.3f\" % -scores_log_loss.mean())\r\n\t\t\r\n \r\n\tdef confusion_matrix(self, classifier):\r\n\t\tclassifier.fit(self.xtrain_count, self.train_y)\r\n\t\tpredictions = classifier.predict(self.xvalid_count)\r\n\t\tconf_matrix = confusion_matrix(predictions, self.valid_y)\r\n\t\tprint('confusion matrix:' + classifier.__class__.__name__ )\r\n\t\tprint(conf_matrix)\r\n\r\n\tdef save_model(self, classifier, model_file):\r\n\t\tclassifier.fit(self.xtrain_count, self.train_y)\r\n\t\tjoblib.dump(classifier, model_file)\r\n \r\n\t\t\r\n\t\t", "sub_path": "core/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": 
"sklearn.metrics.accuracy_score", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 22, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "142260402", "text": "# Standard Library Imports\nfrom __future__ import division, print_function\nfrom time import time\n\n# External Imports\nimport numpy as np\nfrom sklearn.decomposition import TruncatedSVD\nimport matplotlib.pyplot as plt\n# %matplotlib inline # use when in JUPYTER NOTEBOOK (or risk hang)\n# plt.ion() # allow ipython %run to terminate without closing figure\n\n# Internal Imports\nfrom utils import load_data\n\n# User Parameters\nDATASET = '/Users/Andy/Google Drive/Development/ML/kaggle/leaf-classification/KAGGLE_LEAF.mat'\n# DATASET = 'usps'\nVALIDATION_PERCENTAGE = .2\nTESTING_PERCENTAGE = .2\n# RANKS2TRY = range(10)\nRANKS2TRY = 'all' # must be 'all' or list of integers\nTIMER_ON = False\nassert 0 < VALIDATION_PERCENTAGE + TESTING_PERCENTAGE < 1\n\n# Load dataset\nX_train, Y_train, X_valid, Y_valid, X_test, Y_test = \\\n load_data(DATASET, VALIDATION_PERCENTAGE, TESTING_PERCENTAGE)\n\n# only used for score, has no affect on approximation\nbatch_size = len(Y_train) // 10\n\n# Classification by distance from best k-dimensional subspace \n# approximation from SVD of each label's example set\nif RANKS2TRY == 'all':\n ranks2try = list(range(1, X_train.shape[1])) # all must be less than full rank\nelse:\n ranks2try = RANKS2TRY\nvalid_accuracy = []\nfor rnk in ranks2try:\n start_time = time()\n print(\"\")\n print(\"rank = {} -- \".format(rnk), end='')\n distinct_labels = list(set(Y_train))\n svd = {}\n for l in distinct_labels:\n examples_labeled_l = \\\n np.array([x for x, y in zip(X_train, Y_train) if y == l])\n svd[l] = TruncatedSVD(n_components=rnk)\n svd[l].fit(examples_labeled_l)\n\n # Training Set Accuracy\n def svd_predict(X_, Y_, svd_dict_):\n X_, Y_ = np.array(X_), np.array(Y_)\n distinct_labels = svd_dict_.keys()\n distances = {}\n for l in distinct_labels:\n X_appr = svd_dict_[l].inverse_transform(\n svd_dict_[l].transform(X_))\n distances[l] = np.linalg.norm(X_ - X_appr, axis=1)\n distances = np.array(distances.values()).transpose()\n distance_minimizers = np.argmin(distances, axis=1)\n Y_predictions = [distinct_labels[idx] for idx in distance_minimizers]\n number_correct_ = np.sum(Y_predictions == Y_)\n return number_correct_\n\n batches = [(k*batch_size, (k+1)*batch_size) for k in\n range(len(Y_train) // batch_size)]\n ct = 0\n number_correct = 0\n for i0, i1 in batches:\n ct += 1\n number_correct_batch = \\\n svd_predict(X_train[i0: i1], Y_train[i0: i1], svd)\n # print(\"Training Batch {}/{} Accuracy: {}\"\n # \"\".format(ct, len(batches), number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n if len(Y_train) % batch_size:\n i0, i1 = i1, len(Y_train)\n number_correct_batch = \\\n svd_predict(X_train[i0: i1], Y_train[i0: i1], svd)\n # print(\"Training Remainder Batch Accuracy: {}\"\n # \"\".format(ct, len(batches), number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n print(\"Training / Validation 
Accuracy: {:.2f}% / \"\n \"\".format(100 * number_correct / len(Y_train)), end='')\n\n # Validation Set Accuracy\n if batch_size < len(Y_valid):\n batches = [(k*batch_size, (k+1)*batch_size) for k in\n range(len(Y_valid) // batch_size)]\n ct = 0\n number_correct = 0\n for i0, i1 in batches:\n ct += 1\n number_correct_batch = \\\n svd_predict(X_valid[i0: i1], Y_valid[i0: i1], svd)\n # print(\"valid Batch {}/{} Accuracy: {}\"\n # \"\".format(ct, len(batches), \n # number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n if len(Y_valid) % batch_size:\n i0, i1 = i1, len(Y_valid)\n number_correct_batch = \\\n svd_predict(X_valid[i0: i1], Y_valid[i0: i1], svd)\n # print(\"valid Remainder Batch Accuracy: {}\"\n # \"\".format(ct, len(batches), \n # number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n else:\n number_correct = svd_predict(X_valid, Y_valid, svd)\n if not number_correct/len(Y_valid):\n raise Exception()\n valid_accuracy.append(number_correct / len(Y_valid))\n print(\"{:.2f}%\".format(100*valid_accuracy[-1]))\n if TIMER_ON:\n print(\"Time to Train and Validate with this rank: {:.2f} seconds\"\n \"\".format(time() - start_time))\nprint(\"\\nWinner winner chicken dinner goes to rank =\",\n ranks2try[np.argmax(valid_accuracy)])\n\nplt.grid(True)\nplt.plot(ranks2try, valid_accuracy)\n\n# Now that we've found the best rank to use.\nrnk = ranks2try[np.argmax(valid_accuracy)]\ndistinct_labels = list(set(Y_train))\nsvd = {}\nfor l in distinct_labels:\n examples_labeled_l = np.array([x for x, y in\n zip(X_train, Y_train) if y == l])\n svd[l] = TruncatedSVD(n_components=rnk)\n svd[l].fit(examples_labeled_l)\n\n# Test Set Accuracy\nif batch_size < len(Y_test):\n batches = [(k*batch_size, (k+1)*batch_size) for k in\n range(len(Y_test) // batch_size)]\n ct = 0\n number_correct = 0\n for i0, i1 in batches:\n ct += 1\n number_correct_batch = \\\n svd_predict(X_test[i0: i1], Y_test[i0: i1], svd)\n # print(\"Test Batch {}/{} Accuracy: {}\"\n # \"\".format(ct, len(batches), number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n if len(Y_test) % batch_size:\n i0, i1 = i1, len(Y_test)\n number_correct_batch = \\\n svd_predict(X_test[i0: i1], Y_test[i0: i1], svd)\n # print(\"Test Remainder Batch Accuracy: {}\"\n # \"\".format(ct, len(batches), number_correct_batch/(i1 - i0))) \n number_correct += number_correct_batch\nelse:\n number_correct = svd_predict(X_test, Y_test, svd)\nprint(\"Test Accuracy with winner: {:.2f}%\"\n \"\".format(100 * number_correct / len(Y_test)))\n\nplt.show() # prevent python from terminating and closing figure", "sub_path": "experiments/svd_sklearn.py", "file_name": "svd_sklearn.py", "file_ext": "py", "file_size_in_byte": 6163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "utils.load_data", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.sum", 
"line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}]} +{"seq_id": "423305411", "text": "# -*- coding: utf-8 -*-\nfrom scrapy.selector import Selector\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom items import TopicItem, AuthorInfo, ReplyItem\nclass KiwiSpider(CrawlSpider):\n name = \"kiwi\"\n allowed_domains = [\"douban.com\"]\n\n anchorTitleXPath = 'a/text()'\n anchorHrefXPath = 'a/@href'\n\n start_urls = [\n \"https://www.douban.com/group/topic/90895393/?start=0\",\n ]\n rules = (\n Rule(\n LinkExtractor(allow=(r'/group/[^/]+/discussion\\?start=\\d+',)),\n callback='parse_topic_list',\n follow=True\n ),\n Rule(\n LinkExtractor(allow=(r'/group/topic/\\d+/$',)), # 帖子内容页面\n callback='parse_topic_content',\n follow=True\n ),\n Rule(\n LinkExtractor(allow=(r'/group/topic/\\d+/\\?start=\\d+',)), # 帖子内容页面\n callback='parse_topic_content',\n follow=True\n ),\n )\n\n # 帖子详情页面\n def parse_topic_content(self, response):\n # 标题XPath\n titleXPath = '//html/head/title/text()'\n # 帖子内容XPath\n contentXPath = '//div[@class=\"topic-content\"]/p/text()'\n # 发帖时间XPath\n timeXPath = '//div[@class=\"topic-doc\"]/h3/span[@class=\"color-green\"]/text()'\n # 发帖人XPath\n authorXPath = '//div[@class=\"topic-doc\"]/h3/span[@class=\"from\"]'\n\n item = TopicItem()\n # 当前页面Url\n item['url'] = response.url\n # 标题\n titleFragment = Selector(response).xpath(titleXPath)\n item['title'] = str(titleFragment.extract()[0]).strip()\n\n # 帖子内容\n contentFragment = Selector(response).xpath(contentXPath)\n strs = [line.extract().strip() for line in contentFragment]\n item['content'] = '\\n'.join(strs)\n # 发帖时间\n timeFragment = Selector(response).xpath(timeXPath)\n if timeFragment:\n item['time'] = timeFragment[0].extract()\n\n # 发帖人信息\n authorInfo = AuthorInfo()\n authorFragment = Selector(response).xpath(authorXPath)\n if authorFragment:\n authorInfo['authorName'] = authorFragment[0].xpath(self.anchorTitleXPath).extract()[0]\n authorInfo['authorUrl'] = authorFragment[0].xpath(self.anchorHrefXPath).extract()[0]\n\n item['author'] = dict(authorInfo)\n\n # 回复列表XPath\n replyRootXPath = r'//div[@class=\"reply-doc content\"]'\n # 回复时间XPath\n replyTimeXPath = r'div[@class=\"bg-img-green\"]/h4/span[@class=\"pubtime\"]/text()'\n # 回复人XPath\n replyAuthorXPath = r'div[@class=\"bg-img-green\"]/h4'\n\n replies = []\n itemsFragment = Selector(response).xpath(replyRootXPath)\n for replyItemXPath in itemsFragment:\n replyItem = ReplyItem()\n # 回复内容\n contents = replyItemXPath.xpath('p/text()')\n strs = [line.extract().strip() for line in contents]\n replyItem['content'] = '\\n'.join(strs)\n # 回复时间\n timeFragment = replyItemXPath.xpath(replyTimeXPath)\n if 
timeFragment:\n replyItem['time'] = timeFragment[0].extract()\n # 回复人\n replyAuthorInfo = AuthorInfo()\n authorFragment = replyItemXPath.xpath(replyAuthorXPath)\n if authorFragment:\n replyAuthorInfo['authorName'] = authorFragment[0].xpath(self.anchorTitleXPath).extract()[0]\n replyAuthorInfo['authorUrl'] = authorFragment[0].xpath(self.anchorHrefXPath).extract()[0]\n\n replyItem['author'] = dict(replyAuthorInfo)\n # 添加进回复列表\n replies.append(dict(replyItem))\n\n item['reply'] = replies\n yield item\n\n # 帖子列表页面\n def parse_topic_list(self, response):\n # 帖子列表XPath(跳过表头行)\n topicRootXPath = r'//table[@class=\"olt\"]/tr[position()>1]'\n # 单条帖子条目XPath\n titleXPath = r'td[@class=\"title\"]'\n # 发帖人XPath\n authorXPath = r'td[2]'\n # 回复条数XPath\n replyCountXPath = r'td[3]/text()'\n # 发帖时间XPath\n timeXPath = r'td[@class=\"time\"]/text()'\n\n topicsPath = Selector(response).xpath(topicRootXPath)\n for topicItemPath in topicsPath:\n item = TopicItem()\n titlePath = topicItemPath.xpath(titleXPath)\n item['title'] = titlePath.xpath(self.anchorTitleXPath).extract()[0]\n item['url'] = titlePath.xpath(self.anchorHrefXPath).extract()[0]\n # 发帖时间\n timePath = topicItemPath.xpath(timeXPath)\n if timePath:\n item['time'] = timePath[0].extract()\n # 发帖人\n authorPath = topicItemPath.xpath(authorXPath)\n authInfo = AuthorInfo()\n authInfo['authorName'] = authorPath[0].xpath(self.anchorTitleXPath).extract()[0]\n authInfo['authorUrl'] = authorPath[0].xpath(self.anchorHrefXPath).extract()[0]\n item['author'] = dict(authInfo)\n # 回复条数\n replyCountPath = topicItemPath.xpath(replyCountXPath)\n item['replyCount'] = replyCountPath[0].extract()\n\n item['content'] = ''\n yield item\n\n parse_start_url = parse_topic_content", "sub_path": "python-demo/data-scrapy/spider.py", "file_name": "spider.py", "file_ext": "py", "file_size_in_byte": 5450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "scrapy.spiders.CrawlSpider", "line_number": 7, "usage_type": "name"}, {"api_name": "scrapy.spiders.Rule", "line_number": 18, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 19, "usage_type": "call"}, {"api_name": "scrapy.spiders.Rule", "line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 24, "usage_type": "call"}, {"api_name": "scrapy.spiders.Rule", "line_number": 28, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 29, "usage_type": "call"}, {"api_name": "items.TopicItem", "line_number": 46, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 50, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 54, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 58, "usage_type": "call"}, {"api_name": "items.AuthorInfo", "line_number": 63, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 64, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 79, "usage_type": "call"}, {"api_name": "items.ReplyItem", "line_number": 81, "usage_type": "call"}, {"api_name": "items.AuthorInfo", "line_number": 91, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 117, "usage_type": "call"}, {"api_name": "items.TopicItem", "line_number": 119, "usage_type": "call"}, {"api_name": "items.AuthorInfo", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "99630676", "text": 
"#!/usr/bin/python\n\nimport os\nimport sys\nimport quippy as qp\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-input', type=str, default=None,\n help='File to extract atoms from')\nparser.add_argument('-output', type=str, default='atoms.dat',\n help='Output file')\nparser.add_argument('-Z', type=int, default=[], nargs='+',\n help='Atomic numbers of atoms to list')\nparser.add_argument('-sp', type=str, default=[], nargs='+',\n help='Structure properties to extract')\nparser.add_argument('-ap', type=str, default=[], nargs='+',\n help='Atom properties to extract')\n\nargs = parser.parse_args()\n\n# Load atoms list\nal = qp.AtomsReader(args.input)\n\n# Open output file for writing\nf = open(args.output, 'w')\n\n# Central atoms\nZ = set(args.Z)\n\n# Initialize atom number and stucture number\nna = 0\nns = 0\n\n# Loop over atoms list\nfor i, at in enumerate(al):\n\n # Parse the desired structure properties\n structureProperties = []\n for sp in args.sp:\n structureProperties.append(at.params[sp])\n v = np.linalg.det(at.cell)\n\n # Parse the desired atom properties (Fortran indexed)\n atomProperties = []\n for ap in args.ap:\n atomProperties.append(at.properties[ap])\n\n for j, aa in enumerate(at):\n line = []\n\n # Write out the atom numbers\n if aa.number in Z:\n f.write('%6d %6d %6d %2s %12.8f %12.8f %12.8f ' % (na, aa.index,\n aa.number, aa.symbol, \n aa.position[0], aa.position[1], aa.position[2]))\n\n # Write out the atom properties\n for ap in atomProperties:\n aap = ap[j+1]\n f.write('%10s ' % str(aap))\n\n # Write out the structure properties\n f.write('%6d %12.8f ' % (ns, v))\n for sp in structureProperties:\n sp = str(sp)\n f.write('%10s ' % sp)\n f.write('\\n')\n na += 1\n ns += 1\n\nf.close()\n", "sub_path": "Scripts/atomLabels.py", "file_name": "atomLabels.py", "file_ext": "py", "file_size_in_byte": 1982, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "quippy.AtomsReader", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "376038598", "text": "__author__ = 'Lynn'\r\n__email__ = 'lynnn.hong@gmail.com'\r\n__date__ = '4/6/2016'\r\n\r\nimport math\r\nimport re\r\nimport json\r\nimport time\r\nfrom datetime import datetime, timedelta\r\nfrom newsCrawler3.newsCrawler import NewsCrawler\r\nfrom newsCrawler3.requestsHandler import Req\r\n\r\n\r\nclass NaverNews(NewsCrawler):\r\n # represent Naver news crawler\r\n\r\n def __init__(self, cnfDict, dbConnection, re_dict):\r\n NewsCrawler.__init__(self)\r\n self.cnfDict = cnfDict\r\n self.mysql = dbConnection\r\n self.re_dict = re_dict\r\n self.r = Req()\r\n self.url_format = \"http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=%i#&date=%s 00:00:00&page=%i\"\r\n self.post_url_format = \"http://news.naver.com/main/mainNews.nhn?componentId=%i&date=%s 00:00:00&page=%i\"\r\n self.news_url_format = \"http://news.naver.com/main/read.nhn?mode=LSD&mid=shm&sid1=%i&oid=%s&aid=%s\"\r\n\r\n def search_news(self, query):\r\n print(\"The entire date range in %s to %s\\r\\n\" % (self.cnfDict['start_date'], self.cnfDict['end_date']))\r\n url_format = 
\"http://news.naver.com/main/search/search.nhn?refresh=&so=%s&stPhoto=&stPaper=&stRelease=&detail=0&rcsection=&query=%s&x=24&y=9&sm=all.basic&pd=4&startDate=%s&endDate=%s&page=%i\"\r\n restStart = start_date = self.cnfDict['start_date']\r\n restEnd = end_date = self.cnfDict['end_date']\r\n while True:\r\n while True:\r\n page = 1\r\n url = url_format % (self.cnfDict['order'], str(query.replace(\" \", \"+\").encode(\"cp949\")).strip(\"b\").replace(\"\\\\x\", \"%\").strip(\"'\"), start_date, end_date, page)\r\n err_code, soup = self.r.access_page(url, self.cnfDict['retry'])\r\n if err_code != 1:\r\n print(\"Can't load the page %i\" % page)\r\n continue\r\n try:\r\n resultCheck = soup.find('div', {'class': 'result_header'}).find('span', {'class': 'result_num'}).text.strip()\r\n except Exception as e:\r\n print(e)\r\n print(\"There's a problem with page %i\" % page)\r\n continue\r\n entireCnt = int(re.search(r\"\\(.+ / (\\d+)건\\)\", resultCheck.replace(\",\", \"\")).group(1))\r\n if entireCnt <= 4000 or start_date == end_date:\r\n if start_date == end_date:\r\n print(\"The start date and the end date is now same. entire count: %i\\r\\n\" % entireCnt)\r\n print(\"Range in %s to %s\\r\\n\" % (start_date, end_date))\r\n last_page = self.check_page(url)\r\n print(\"There're %i pagelists...\" % last_page)\r\n print(str(datetime.now()) + \"\\r\\n\")\r\n for page in list(range(1, last_page + 1)):\r\n if page == 1:\r\n print(\"Start page: %i\" % page, end=\"\")\r\n else:\r\n print(\", %i\" % page, end=\"\")\r\n url = url_format % (self.cnfDict['order'], str(query.replace(\" \", \"+\").encode(\"cp949\")).strip(\"b\").replace(\"\\\\x\", \"%\").strip(\"'\"), start_date, end_date, page)\r\n err_code, soup = self.r.access_page(url, self.cnfDict['retry'])\r\n if err_code != 1:\r\n print(\"Can't load the page %i\" % page)\r\n continue\r\n for item in soup.findAll(\"ul\", {\"class\": \"srch_lst\"}):\r\n a_id = self.get_article(query, item, 0, 0, \"\")\r\n restStart = str((datetime.strptime(end_date, \"%Y-%m-%d\") + timedelta(days=1)).date())\r\n break\r\n else:\r\n print(\"Reduce date interval...\")\r\n dateInterval = datetime.strptime(end_date, \"%Y-%m-%d\") - datetime.strptime(start_date, \"%Y-%m-%d\")\r\n dateInterval = math.floor(int(dateInterval.days)/2)\r\n end_date = str((datetime.strptime(start_date, \"%Y-%m-%d\") + timedelta(days=dateInterval)).date())\r\n start_date = restStart\r\n end_date = restEnd\r\n\r\n if start_date > end_date:\r\n break\r\n\r\n def get_article(self, query, item, sub=0, mother_id=0, mother_url=\"\"):\r\n content, title, press, a_date, url1, url2, related_group = self.get_basic_info(item, sub)\r\n # if naver news, url1 is go_naver link. if external news, url1 is external news link.\r\n rep_url = url1\r\n if sub == 0 or (sub in (1, 2) and rep_url != mother_url):\r\n # not sub news OR (sub news AND not same with mother url)\r\n check = self.mysql.check_inserted_or_not(url1)\r\n if url2 is not None: # if there is naver news url\r\n a_id = self.getNewsAndComment(\"\", url1, check)\r\n #print(\"no article... 
skip....\") #STOP here\r\n #a_id = 0 # for related news\r\n else: # only has outer url on the press\r\n if check[0] == 1:\r\n # already retrieved url\r\n a_id = check[1][0]\r\n self.mysql.insert_srch_query(query, a_id)\r\n if sub in (1, 2):\r\n self.mysql.update_rel_article(mother_id, a_id) # STOP here\r\n else:\r\n # external news AND not yet retrieved\r\n err_code, soup = self.r.access_page(url1, self.cnfDict['retry'])\r\n if err_code != 1:\r\n a_id = self.mysql.insert_url_n_srch_query(query, url1, err_code) # STOP\r\n else: # there's no error\r\n a_cat = \"\"\r\n a_id = self.mysql.insert_url_n_srch_query(query, url1, err_code)\r\n isNaver = 0\r\n dsc = self.get_dsc(sub, content)\r\n from newsCrawler3.externalNews import ExternalNews\r\n ext = ExternalNews()\r\n err_code, a_body = ext.get_external_news(soup, dsc, url1, self.re_dict)\r\n if err_code != 1:\r\n self.mysql.update_error_code(a_id, err_code) # STOP\r\n else:\r\n r_datetime = datetime.now()\r\n var_tuple = (a_id, \"naver\", press, title.strip('\"'), a_body.replace(\"\\n\", \" \").strip(), a_date, a_cat, isNaver, r_datetime)\r\n self.mysql.insert_news(var_tuple)\r\n if sub in (1, 2):\r\n return a_id\r\n\r\n # if there're related news list, go ahead\r\n if sub == 0 and related_group is not None and related_group.find(\"span\", {\"class\": \"ico_bu\"}):\r\n self.get_related_news(query, related_group, a_id, rep_url)\r\n return 0\r\n\r\n\r\n def get_related_news(self, query, related_group, mother_id, mother_url):\r\n btn_more = related_group.find(\"a\", {\"class\": \"btn_more\"})\r\n\r\n # if there're more related news than 4\r\n if btn_more is not None:\r\n more_link = \"http://news.naver.com/main/search/search.nhn%s\" % btn_more.get('href')\r\n err_code, soup = self.r.access_page(more_link, self.cnfDict['retry'])\r\n if err_code != 1:\r\n pass\r\n else:\r\n for item in soup.findAll(\"ul\", {\"class\": \"srch_lst\"}):\r\n a_id= self.get_article(query, item, 2, mother_id, mother_url)\r\n # go to redirect again(this is new list page)\r\n if a_id is None:\r\n continue\r\n self.mysql.update_rel_article(mother_id, a_id)\r\n # if there're more related news equal or less than 4\r\n else:\r\n for item in related_group.findAll(\"li\"): # each related news items\r\n a_id = self.get_article(query, item, 1, mother_id, mother_url)\r\n self.mysql.update_rel_article(mother_id, a_id)\r\n\r\n\r\n def getNewsAndComment(self, section, newsLink, check):\r\n if check[0] == 1:\r\n # already retrieved url\r\n a_id = check[1][0]\r\n else:\r\n a_id = self.mysql.insert_url(newsLink)\r\n var_tuple = self.getNewsBody(a_id, section, newsLink)\r\n if var_tuple[0] in (404, 90, 91, 95): #404 error, sports, entertain, else\r\n # error\r\n self.mysql.update_error_code(a_id, var_tuple[0])\r\n else:\r\n self.mysql.insert_news(var_tuple)\r\n if self.cnfDict['comment'] is True:\r\n oid = re.findall(r\"oid=(\\d+)\", newsLink)[0]\r\n aid = re.findall(r\"aid=(\\d+)\", newsLink)[0]\r\n gno = \"news\" + oid + \",\" + aid\r\n while True:\r\n headers = {\r\n 'accept-encoding': 'gzip, deflate, sdch',\r\n 'accept-language': 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',\r\n 'accept': '*/*',\r\n 'referer': 'http://news.naver.com/main/ranking/read.nhn?mid=etc&sid1=111&rankingType=memo_week&oid=421&aid=0001843772&date=20160120&type=1&rankingSectionId=100&rankingSeq=1',\r\n 'cookie': 'NNB=QECI6G5YG4HFM; 
npic=WXjITWIJ4tuGHKilzfw6Nljt9irLqDTyXcPPQPdLWDv9+/zMx8hTr42F7iTT73h0CA==; BMR=s=1449599642101&r=http%3A%2F%2Fnstore.naver.com%2Fappstore%2Fweb%2Fdetail.nhn%3FproductNo%3D1483986&r2=https%3A%2F%2Fwww.google.co.kr%2F; nid_iplevel=1; nid_inf=-2131057769; page_uid=SqW7SdpyLflsscEivZossssssud-397948; _naver_usersession_=DSLHgTQaLsQL5PF8I4FVLQ==',\r\n }\r\n timeout = 10.0\r\n var_sql, var_tuple = self.getComment(a_id, gno, headers, timeout)\r\n break\r\n if var_tuple == ():\r\n #print(\"There's no comment. skip...\")\r\n pass\r\n else:\r\n #print(\"Insert to DB\")\r\n self.mysql.insert_comment(var_sql, var_tuple)\r\n time.sleep(2)\r\n return a_id #temp\r\n\r\n\r\n def getNewsBody(self, a_id, section, newsLink):\r\n err_code, soup = self.r.access_page(newsLink, self.cnfDict['retry'])\r\n if err_code == 1:\r\n try:\r\n header = soup.find('div', {'class': 'article_header'}).find('div', {'class': 'article_info'})\r\n except AttributeError: # naver entertainment news page\r\n if soup.find(\"div\", {\"class\": \"error_msg 404\"}) is not None:\r\n return(404,)\r\n elif newsLink.startswith(\"http://sports.news.naver.com\"):\r\n return(90,)\r\n else:\r\n try:\r\n redirect_url = soup.find('meta', {'property': 'og:url'})['content']\r\n if redirect_url.startswith(\"http://entertain.naver.com\"):\r\n return(91,) # just for now...\r\n else:\r\n print(\"Another naver child news site\")\r\n print(newsLink)\r\n return(95,)\r\n except Exception as e:\r\n print(\"Another exception page...\")\r\n print(e)\r\n print(newsLink)\r\n return(95,)\r\n press = soup.find('meta', {'property': 'me2:category1'})['content']\r\n title = header.find('h3', {'id': 'articleTitle'}).text\r\n a_datetime = header.find('span', {'class': 't11'}).text\r\n a_datetime = datetime.strptime(a_datetime, '%Y-%m-%d %H:%M')\r\n a_body = soup.find('div', {'id': 'articleBodyContents'}).text.strip() # should remove link\r\n isNaver = 1\r\n r_datetime = datetime.now()\r\n return a_id, \"naver\", press, title.replace(\"'\", \"\\'\"), a_body.replace(\"'\", \"\\'\").replace(\"\\n\", \" \"), \\\r\n a_datetime, section, isNaver, r_datetime\r\n else:\r\n return(404,)\r\n\r\n\r\n def getComment(self, a_id, gno, headers, timeout):\r\n var_sql_list = list()\r\n var_tuple = tuple()\r\n page = 1\r\n rdic = dict()\r\n rdic['count'] = dict()\r\n rdic['count']['comment'] = 0\r\n url = \"https://apis.naver.com/commentBox/cbox5/web_naver_list_jsonp.json?ticket=news&templateId=default_politics&_callback=window.__cbox_jindo_callback._8858&lang=ko&country=KR&objectId=\" + gno + \"&categoryId=&pageSize=20&indexSize=10&groupId=&page=%i&initialize=true&useAltSort=true&replyPageSize=100&moveTo=&sort=favorite&userType=\"\r\n while True:\r\n if page % 10 == 1:\r\n if page == 1:\r\n pass\r\n else:\r\n print(\", %i\" % page, end=\"\")\r\n e_num, res = self.r.access_page(url % page, self.cnfDict['retry'], headers=headers)\r\n if e_num == 1:\r\n try:\r\n t = re.search(r\"window\\.__cbox_jindo_callback\\._8858\\((.+)\\)\", res).group(1)\r\n rdic = json.loads(t)['result']\r\n anch = True\r\n except Exception as e:\r\n print(e)\r\n print(\"comment parsing error with gno %s...\" % gno)\r\n anch = False\r\n pass\r\n if anch is True:\r\n if 'commentList' not in rdic.keys():\r\n pass\r\n else:\r\n for reply in rdic['commentList']:\r\n maskUserId = reply['maskedUserId']\r\n encodedUserId = reply['userIdNo']\r\n if encodedUserId is None:\r\n encodedUserId = \"\"\r\n commentReplyNo = reply['commentNo']\r\n sRegDate = reply['regTime']\r\n if \"오전\" in sRegDate:\r\n sRegDate = 
sRegDate.replace(\"오전\", \"AM\")\r\n elif \"오후\" in sRegDate:\r\n sRegDate = sRegDate.replace(\"오후\", \"PM\")\r\n sRegDate = datetime.strptime(sRegDate, \"%Y-%m-%dT%H:%M:%S+0900\")\r\n snsType = \"\"\r\n incomingType = \"\"\r\n badCnt = reply['antipathyCount']\r\n goodCnt = reply['sympathyCount']\r\n likeCnt = goodCnt-badCnt\r\n replyCnt = reply['replyCount']\r\n content = reply['contents'].replace(\"\\n\\r\", \" \").replace(\"\\n\", \" \")\r\n if reply['best'] is False:\r\n isBest = 0\r\n elif reply['best'] is True:\r\n isBest = 1\r\n c_grade = \"\"\r\n c_pnt = 0\r\n c_nextGradePnt = 0\r\n var_sql_list.append(\"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\r\n var_tuple += (a_id, commentReplyNo, maskUserId, encodedUserId, sRegDate, content, badCnt, goodCnt, likeCnt, replyCnt, incomingType, snsType, isBest, c_grade, c_pnt, c_nextGradePnt)\r\n else:\r\n pass\r\n else:\r\n pass\r\n try:\r\n if page < math.ceil(rdic['count']['comment']/20):\r\n page += 1\r\n time.sleep(0.1)\r\n else:\r\n break\r\n except KeyError:\r\n print(\"KeyError...\")\r\n break\r\n return(\",\".join(var_sql_list).strip(\",\"), var_tuple)\r\n\r\n\r\n def get_dsc(self, sub, content):\r\n if sub == 0:\r\n # get news description part: to find text body easily\r\n dsc_list = content.find(\"p\", {\"class\": \"dsc\"}).get_text(strip=False).strip().split(\"...\")\r\n dsc_list = [x for x in dsc_list if x != \"\" and x != \" \" and x != \".\"]\r\n # if there's description, split it using '다.'\r\n if len(dsc_list) != 0:\r\n try:\r\n dsc = dsc_list[-1].split(\"다.\")[-2].strip() + \"다.\"\r\n except:\r\n dsc = dsc_list[-1].split(\"다.\")[0].strip() + \"다.\"\r\n # if there's no descripton, then use '다.' instead\r\n else:\r\n dsc = \"다.\"\r\n else: # sub in (1, 2)\r\n dsc = \"다.\"\r\n return dsc\r\n\r\n\r\n def get_basic_info(self, item, sub):\r\n if sub in (0, 2):\r\n content = item.find(\"div\", {\"class\": \"ct\"})\r\n info = content.find(\"div\", {\"class\": \"info\"})\r\n title = content.find(\"a\", {\"class\": \"tit\"}).get_text(strip=True)\r\n press = info.find(\"span\", {\"class\": \"press\"}).get_text(strip=True).replace(\"\\'\", \"\\\\'\")\r\n date_text = info.find(\"span\", {\"class\": \"time\"}).get_text(strip=True).strip(\"전\")\r\n date = self.get_news_date(date_text)\r\n url1 = content.find(\"a\", {\"class\": \"tit\"}).get('href')\r\n go_naver = info.find(\"a\", {\"class\": \"go_naver\"})\r\n if sub == 0:\r\n related_group = content.find(\"div\", {\"class\": \"related_group\"})\r\n else:# sub == 2:\r\n related_group = \"\"\r\n else: #sub == 1\r\n title = item.find(\"a\").get_text(strip=True).replace(\"\\'\", \"\\\\'\")\r\n press = item.find(\"span\", {\"class\": \"press\"}).get_text(strip=True).replace(\"\\'\", \"\\\\'\")\r\n date_text = item.find(\"span\", {\"class\": \"time\"}).get_text(strip=True).strip(\"전\")\r\n date = self.get_news_date(date_text)\r\n url1 = item.find(\"a\").get('href')\r\n go_naver = item.find(\"a\", {\"class\": \"go_naver\"})\r\n content = \"\"\r\n related_group = \"\"\r\n if go_naver is not None:\r\n url2 = url1\r\n url1 = go_naver.get('href')\r\n else:\r\n url2 = None\r\n return (content, title, press, date, url1, url2, related_group)\r\n\r\n\r\n def get_news_date(self, date_text):\r\n if date_text.endswith(\"분\"):\r\n date = datetime.now() - timedelta(minutes=int(date_text.split(\"분\")[0]))\r\n elif date_text.endswith(\"시간\"):\r\n date = datetime.now() - timedelta(hours=int(date_text.split(\"시간\")[0]))\r\n elif date_text.endswith(\"일\"):\r\n date = datetime.today() - 
timedelta(days=int(date_text.split(\"일\")[0]))\r\n else:\r\n date = date_text.replace(\".\", \"-\") + \" 00:00:00\"\r\n return date\r\n\r\n\r\n def check_page(self, nextUrl):\r\n print(\"start counting the entire pages...\")\r\n print(str(datetime.now()) + \"\\n\")\r\n current_no = 1\r\n next_no = 1\r\n while True:\r\n if current_no > next_no:\r\n break\r\n err_code, soup = self.r.access_page(nextUrl, self.cnfDict['retry'])\r\n current_no = next_no\r\n\r\n try:\r\n nextUrl = soup.find(\"div\", {\"class\": \"paging\"}).findAll(\"a\")[-1].get('href')\r\n nextUrl = \"http://news.naver.com\" + nextUrl\r\n next_no = int(nextUrl.split(\"&page=\")[1])\r\n except:\r\n current_no = 1\r\n break\r\n return current_no # int\r\n\r\n def search_all_section(self, section, sectionDict, sectionDictKo):\r\n start_date = datetime.strptime(self.cnfDict['start_date'], \"%Y-%m-%d\").date()\r\n end_date = datetime.strptime(self.cnfDict['end_date'], \"%Y-%m-%d\").date()\r\n while start_date <= end_date:\r\n print(\"start date %s\" % str(start_date))\r\n print(datetime.now())\r\n page = 1\r\n url = self.url_format % (sectionDict[section], str(start_date), page)\r\n err_code, soup = self.r.access_page(url, self.cnfDict['retry'])\r\n if err_code != 1:\r\n print(\"Error occurred on date: %s\" % str(start_date))\r\n else:\r\n comp_id = int(soup.find(\"a\", {\"id\": \"mainNewsComponentId\"})['name'])\r\n url = self.post_url_format % (comp_id, str(start_date), page)\r\n err_code, html = self.r.access_page(url, self.cnfDict['retry'], headers=None, isSoup=False)\r\n if err_code != 1:\r\n print(\"Error occurred on date: %s\" % str(start_date))\r\n else:\r\n lastPage = json.loads(html)['pagerInfo']['lastPage']\r\n while page <= lastPage:\r\n url = self.post_url_format % (comp_id, str(start_date), page)\r\n err_code, html = self.r.access_page(url, self.cnfDict['retry'], headers=None, isSoup=False)\r\n if err_code != 1:\r\n print(\"Error occurred on date %s and page %i\" % (str(start_date), page))\r\n else:\r\n for item in json.loads(html)['itemList']:\r\n aid = item['articleId']\r\n oid = item['officeId']\r\n news_url = self.news_url_format % (sectionDict[section], oid, aid)\r\n self.getNews(sectionDictKo[section], news_url)\r\n page += 1\r\n start_date = start_date + timedelta(days=1)\r\n\r\n def getNews(self, section, newsLink):\r\n check = self.mysql.check_inserted_or_not(newsLink)\r\n a_id = self.getNewsAndComment(section, newsLink, check)\r\n '''if check[0] == 1:\r\n # already retrieved url\r\n a_id = check[1][0]\r\n else:\r\n a_id = self.mysql.insert_url(newsLink)\r\n var_tuple = self.getNewsBody(a_id, section, newsLink)\r\n if var_tuple[0] in (404, 90, 91, 95): #404 error, sports, entertain, else\r\n # error\r\n self.mysql.update_error_code(a_id, var_tuple[0])\r\n else:\r\n self.mysql.insert_news(var_tuple)'''\r\n", "sub_path": "crawler/newsCrawler3/naverNews.py", "file_name": "naverNews.py", "file_ext": "py", "file_size_in_byte": 22423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "newsCrawler3.newsCrawler.NewsCrawler", "line_number": 14, "usage_type": "name"}, {"api_name": "newsCrawler3.newsCrawler.NewsCrawler.__init__", "line_number": 18, "usage_type": "call"}, {"api_name": "newsCrawler3.newsCrawler.NewsCrawler", "line_number": 18, "usage_type": "name"}, {"api_name": "newsCrawler3.requestsHandler.Req", "line_number": 22, "usage_type": "call"}, {"api_name": "re.search", "line_number": 46, "usage_type": "call"}, {"api_name": 
"datetime.datetime.now", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 72, "usage_type": "call"}, {"api_name": "newsCrawler3.externalNews.ExternalNews", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 113, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 161, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 162, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 182, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 213, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 216, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 216, "usage_type": "name"}, {"api_name": "re.search", "line_number": 240, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 241, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 263, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 263, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 285, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 348, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 348, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 348, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 350, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 350, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 350, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 352, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 352, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 352, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 360, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 360, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 379, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 379, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 380, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 380, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 383, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 383, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 396, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 403, 
"usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 409, "usage_type": "call"}]} +{"seq_id": "346287640", "text": "import pickle\nfrom pathlib import Path\nimport os\n\nimport numpy as np\n\nPROJECT_DIR = Path(__file__).resolve().parents[2]\n\nDATA_DIR = os.path.join(PROJECT_DIR, \"data\")\nDATA_RAW_DIR = os.path.join(DATA_DIR, \"raw\")\nDATA_INTERIM_DIR = os.path.join(DATA_DIR, \"interim\")\nDATA_FEATURES_DIR = os.path.join(DATA_DIR, \"features\")\nDATA_DIMRED_DIR = os.path.join(DATA_DIR, \"dimred\")\n\nDATASET_CONFIGS_DIR = os.path.join(PROJECT_DIR, \"dataset_configs\")\n\n\ndef write_ndarray(path: str, array: np.ndarray, overwrite=False) -> bool:\n \"\"\"\n Wrapper for writing writing numpy array to file\n \"\"\"\n if os.path.isfile(path):\n if overwrite or str.lower(\n input(f\"There is already a file at {path}. Overwrite? [y/n]\")\n ) in (\"yes\", \"y\", \"t\"):\n np.save(path, array)\n return True\n return False\n\n\ndef stringify_funcall(func, *args, **kwargs):\n # Use protocol = 0 for ascii encoded bytes object:\n # https://stackoverflow.com/questions/30469575/how-to-pickle-and-unpickle-to-portable-string-in-python-3\n return pickle.dumps((func, args, kwargs), protocol=0).decode(\"ASCII\")\n\n\ndef unpickle_funcall(string: str):\n return pickle.loads(bytes(string, \"ASCII\"))\n", "sub_path": "src/common/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1197, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pathlib.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 26, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 34, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "255698673", "text": "import os\nimport inspect\nimport multiprocessing\nimport Spartacus.Database\n\nfrom .import custom_exceptions\nfrom .import utils\n\n\ndef inserted_callback(p_queue=None, p_columns=None, p_row=None, p_key=None):\n \"\"\"Callback executed when a table fk was created in second database. Sends a row by queue to master process.\n\n Args:\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. 
Defaults to None.\n p_columns (list): list of columns that are present in p_row parameter.\n p_row (list): the row that was inserted in the database 2.\n p_key (list): the key used for comparison.\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n \"\"\"\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_columns, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_columns\" parameter must be a \"list\" instance.', p_columns)\n\n if not isinstance(p_row, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row\" parameter must be a \"list\" instance.', p_row)\n\n if not isinstance(p_key, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_key\" parameter must be a \"list\" instance.', p_key)\n\n p_queue.put({\n 'type': 'tables_fks',\n 'row': {\n 'schema_name': p_row['namespace'],\n 'table_name': p_row['class_name'],\n 'constraint_name': p_row['constraint_name'],\n 'status': 'INSERTED',\n 'sql': inspect.cleandoc(doc=p_row['add_fk_ddl'])\n }\n })\n\n\ndef updated_callback(p_queue=None, p_columns=None, p_row_1=None, p_row_2=None, p_key=None, p_all_diffs=None):\n \"\"\"Callback executed when a table fk was updated in second database. Sends a row by queue to master process.\n\n Args:\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. Defaults to None.\n p_columns (list): list of columns that are present in p_row_1 and p_row_2 parameters.\n p_row_1 (list): the row as it is in database 1.\n p_row_2 (list): the row as it is in database 2.\n p_key (list): the key used for comparison.\n p_all_diffs (list): list of diffs. 
Each item has the following structure:\n {\n 'column' (str): the column that differs,\n 'old_value' (object): value in database 1,\n 'new_value' (object): value in database 2.\n }\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n \"\"\"\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_columns, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_columns\" parameter must be a \"list\" instance.', p_columns)\n\n if not isinstance(p_row_1, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row_1\" parameter must be a \"list\" instance.', p_row_1)\n\n if not isinstance(p_row_2, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row_2\" parameter must be a \"list\" instance.', p_row_2)\n\n if not isinstance(p_key, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_key\" parameter must be a \"list\" instance.', p_key)\n\n if not isinstance(p_all_diffs, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_all_diffs\" parameter must be a \"list\" instance.', p_all_diffs)\n\n for v_diff in p_all_diffs:\n p_queue.put({\n 'type': 'tables_fks',\n 'row': {\n 'schema_name': p_row_2['namespace'],\n 'table_name': p_row_2['class_name'],\n 'constraint_name': p_row_2['constraint_name'],\n 'status': 'UPDATED',\n 'sql': inspect.cleandoc(\n doc='''\\\n {p_drop}\n {p_add}\n '''.format(\n p_drop=p_row_2['drop_fk_ddl'],\n p_add=p_row_2['add_fk_ddl']\n )\n )\n }\n })\n\n\ndef deleted_callback(p_queue=None, p_columns=None, p_row=None, p_key=None):\n \"\"\"Callback executed when a table fk was dropped from second database. Sends a row by queue to master process.\n\n Args:\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. Defaults to None.\n p_columns (list): list of columns that are present in p_row parameter.\n p_row (list): the row that was inserted in the database 2.\n p_key (list): the key used for comparison.\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n \"\"\"\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_columns, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_columns\" parameter must be a \"list\" instance.', p_columns)\n\n if not isinstance(p_row, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row\" parameter must be a \"list\" instance.', p_row)\n\n if not isinstance(p_key, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_key\" parameter must be a \"list\" instance.', p_key)\n\n p_queue.put({\n 'type': 'tables_fks',\n 'row': {\n 'schema_name': p_row['namespace'],\n 'table_name': p_row['class_name'],\n 'constraint_name': p_row['constraint_name'],\n 'status': 'DELETED',\n 'sql': inspect.cleandoc(doc=p_row['drop_fk_ddl'])\n }\n })\n\n\ndef compare_tables_fks(p_database_1=None, p_database_2=None, p_block_size=None, p_queue=None, p_is_sending_data_array=None, p_worker_index=None):\n \"\"\"Used to compare tables fks between databases.\n\n Args:\n p_database_1 (Spartacus.Database.PostgreSQL): the first database. 
Defaults to None.\n p_database_2 (Spartacus.Database.PostgreSQL): the second database. Defaults to None.\n p_block_size (int): Number of data records that the comparer will deal with at the same time. Defaults to None.\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. Defaults to None.\n p_is_sending_data_array (multiprocessing.managers.ArrayProxy): array used to control process that are still sending data. Defaults to None.\n p_worker_index (int): the worker sub process index. Defaults to None.\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n custom_exceptions.InvalidParameterValueException\n \"\"\"\n\n try:\n if not isinstance(p_database_1, Spartacus.Database.PostgreSQL):\n raise custom_exceptions.InvalidParameterTypeException('\"p_database_1\" parameter must be a \"Spartacus.Database.PostgreSQL\" instance.', p_database_1)\n\n if not isinstance(p_database_2, Spartacus.Database.PostgreSQL):\n raise custom_exceptions.InvalidParameterTypeException('\"p_database_2\" parameter must be a \"Spartacus.Database.PostgreSQL\" instance.', p_database_2)\n\n if not isinstance(p_block_size, int):\n raise custom_exceptions.InvalidParameterTypeException('\"p_block_size\" parameter must be an \"int\" instance.', p_block_size)\n\n if p_block_size < 1:\n raise custom_exceptions.InvalidParameterValueException('\"p_block_size\" parameter must be a positive \"int\" instance.', p_block_size)\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_is_sending_data_array, multiprocessing.managers.ArrayProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_is_sending_data_array\" parameter must be an \"multiprocessing.managers.ArrayProxy\" instance.', p_is_sending_data_array)\n\n if not isinstance(p_worker_index, int):\n raise custom_exceptions.InvalidParameterTypeException('\"p_worker_index\" parameter must be an \"int\" instance.', p_worker_index)\n\n if p_worker_index < 0:\n raise custom_exceptions.InvalidParameterTypeException('\"p_worker_index\" parameter must be an \"int\" instance greater than or equal to 0.', p_worker_index)\n\n #Prepare table query\n v_sql = '''\\\n WITH constraints AS (\n SELECT nc.nspname AS namespace,\n r.relname AS class_name,\n c.conname AS constraint_name,\n PG_GET_CONSTRAINTDEF(c.oid, true) AS constraint_definition,\n c.condeferrable AS is_deferrable,\n c.condeferred AS initially_deferred,\n r.oid AS regclass,\n c.oid AS sysid\n FROM pg_namespace nc,\n pg_namespace nr,\n pg_constraint c,\n pg_class r\n WHERE nc.oid = c.connamespace AND nr.oid = r.relnamespace AND c.conrelid = r.oid\n AND c.contype = 'f'\n AND nc.nspname NOT IN (\n 'information_schema',\n 'pg_catalog',\n 'pg_toast'\n )\n AND nc.nspname NOT LIKE 'pg%%temp%%'\n ),\n cs AS (\n SELECT namespace,\n class_name,\n QUOTE_IDENT(constraint_name) AS constraint_name,\n 'ALTER TABLE ' || TEXT(REGCLASS(regclass)) ||\n ' ADD CONSTRAINT ' || QUOTE_IDENT(constraint_name) ||\n E'\\n ' || constraint_definition || ';' AS sql\n FROM constraints\n ORDER BY sysid\n )\n SELECT namespace,\n class_name,\n constraint_name,\n sql AS add_fk_ddl,\n FORMAT(\n 'ALTER TABLE %s.%s DROP CONSTRAINT %s;',\n QUOTE_IDENT(namespace),\n QUOTE_IDENT(class_name),\n QUOTE_IDENT(constraint_name)\n ) AS drop_fk_ddl\n FROM cs\n ORDER BY 1,\n 2,\n 3\n '''\n\n 
utils.compare_datatables(\n            p_database_1=p_database_1,\n            p_database_2=p_database_2,\n            p_block_size=p_block_size,\n            p_key=['namespace', 'class_name', 'constraint_name'],\n            p_sql=v_sql,\n            p_inserted_callback=lambda p_columns, p_row, p_key: inserted_callback(p_queue=p_queue, p_columns=p_columns, p_row=p_row, p_key=p_key),\n            p_updated_callback=lambda p_columns, p_row_1, p_row_2, p_key, p_all_diffs: updated_callback(p_queue=p_queue, p_columns=p_columns, p_row_1=p_row_1, p_row_2=p_row_2, p_key=p_key, p_all_diffs=p_all_diffs),\n            p_deleted_callback=lambda p_columns, p_row, p_key: deleted_callback(p_queue=p_queue, p_columns=p_columns, p_row=p_row, p_key=p_key)\n        )\n    finally:\n        p_queue.put(None)\n        p_is_sending_data_array[p_worker_index] = False\n\n\ndef get_compare_tables_fks_tasks():\n    \"\"\"Get list of tasks that will compare tables fks between databases.\n\n    Args:\n\n    Returns:\n        list: list of tasks to be executed in a process pool. Each item is a dict instance with the following structure:\n            {\n                'function' (function): the function to be executed.\n                'kwds': keyword args to be passed to the function.\n            }\n    \"\"\"\n\n    return [{\n        'function': compare_tables_fks,\n        'kwds': {}\n    }]\n", "sub_path": "workers/compare_tables_fks.py", "file_name": "compare_tables_fks.py", "file_ext": "py", "file_size_in_byte": 12484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "multiprocessing.managers", "line_number": 23, "usage_type": "attribute"}, {"api_name": "inspect.cleandoc", "line_number": 42, "usage_type": "call"}, {"api_name": "multiprocessing.managers", "line_number": 67, "usage_type": "attribute"}, {"api_name": "inspect.cleandoc", "line_number": 93, "usage_type": "call"}, {"api_name": "multiprocessing.managers", "line_number": 119, "usage_type": "attribute"}, {"api_name": "inspect.cleandoc", "line_number": 138, "usage_type": "call"}, {"api_name": "Spartacus.Database.Database", "line_number": 160, "usage_type": "attribute"}, {"api_name": "Spartacus.Database", "line_number": 160, "usage_type": "name"}, {"api_name": "Spartacus.Database.Database", "line_number": 163, "usage_type": "attribute"}, {"api_name": "Spartacus.Database", "line_number": 163, "usage_type": "name"}, {"api_name": "multiprocessing.managers", "line_number": 172, "usage_type": "attribute"}, {"api_name": "multiprocessing.managers", "line_number": 175, "usage_type": "attribute"}]} +{"seq_id": "434322411", "text": "# Copyright 2018 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Hook for pbr to build javascript as part of tarball.\"\"\"\n\nimport os\nimport subprocess\n\nimport pbr.packaging\n\n_old_from_git = pbr.packaging._from_git\n\n\ndef _build_javascript():\n    if subprocess.call(['which', 'yarn']) != 0:\n        return\n    if not os.path.exists('web/node_modules/.bin/webpack'):\n        r = subprocess.Popen(['yarn', 'install', '-d'], cwd=\"web/\").wait()\n        if r:\n            raise RuntimeError(\"Yarn install failed\")\n    if not os.path.exists('zuul/web/static/index.html'):\n        r = subprocess.Popen(['yarn', 'build'], cwd=\"web/\").wait()\n        if r:\n            raise RuntimeError(\"Yarn build failed\")\n\n\ndef _from_git(distribution):\n    _build_javascript()\n    return _old_from_git(distribution)\n\n\ndef setup_hook(config):\n    pbr.packaging._from_git = _from_git\n", "sub_path": "zuul/_setup_hook.py", "file_name": "_setup_hook.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pbr.packaging.packaging", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pbr.packaging", "line_number": 21, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 32, "usage_type": "call"}, {"api_name": "pbr.packaging.packaging", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pbr.packaging", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "550388916", "text": "import numpy as np\nimport paddle.fluid as fluid\nimport paddle.fluid.dygraph as dygraph # dynamic graph (dygraph) mode\nfrom paddle.fluid.dygraph import Linear\nimport sklearn.datasets as sd\nclass Regressor(fluid.dygraph.Layer):\n    def __init__(self):\n        super(Regressor, self).__init__()\n        self.fc = Linear(input_dim=13,output_dim=1,act=None)\n    # forward computation of the network\n    def forward(self, inputs):\n        x = self.fc(inputs)\n        return x\ndef load_data():\n    data = sd.load_boston()\n    size = int(len(data.data)*0.8)\n    x = data.data[:size,:]\n    y = data.target[:size]\n    data = np.column_stack((x, y))\n    maximum = data.max(axis=0)\n    minimum = data.min(axis=0)\n    avgs = data.sum(axis=0)/data.shape[0]\n    global max_values\n    global min_values\n    global avg_values\n    max_values = maximum\n    min_values = minimum\n    avg_values = avgs\n    for i in range(data.shape[1]):\n        data[:,i] = (data[:,i] - avg_values[i]) / (max_values[i] - min_values[i])\n    print(data.shape)\n    return data\ndef process():\n    data = sd.load_boston()\n    size = int(len(data.data) * 0.8)\n    x = data.data[size:, :]\n    y = data.target[size:]\n    data = np.column_stack((x, y))\n    for i in range(data.shape[1]):\n        data[:, i] = (data[:, i] - avg_values[i]) / (max_values[i] - min_values[i])\n    return data\nwith dygraph.guard():\n    one = load_data()\n    data = process()\n    data = np.array(data).astype('float32')\n    test_x = data[:,:-1]\n    test_y = data[:,-1]\n    x = fluid.dygraph.to_variable(test_x)\n    model = Regressor()\n    # the argument is the path of the file that stores the saved model parameters\n    model_dict,_ = fluid.load_dygraph('LR_model')\n    model.load_dict(model_dict)\n    model.eval()\n    # the argument is the path of the dataset file\n    # test_data,label = load_one_example()\n    # convert the data to the dygraph variable format\n    results = model(x)\n    # de-normalize the results\n    y = test_y * (max_values[-1] - min_values[-1]) + avg_values[-1]\n    results = results * (max_values[-1] - min_values[-1]) + avg_values[-1]\n    for result,real in zip(results,y):\n        print(\"Inference result is {}, the corresponding label is {}\".format(result.numpy(), real))\n", "sub_path": "Paddle/day01/demo02.py", "file_name": "demo02.py", "file_ext": "py", "file_size_in_byte": 2158, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "paddle.fluid.dygraph", "line_number": 6, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 6, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_boston", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.column_stack", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_boston", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.column_stack", "line_number": 38, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.guard", "line_number": 42, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.to_variable", "line_number": 48, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 48, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 48, "usage_type": "name"}, {"api_name": "paddle.fluid.load_dygraph", "line_number": 51, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "290400032", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn import svm, metrics\nimport sklearn.model_selection\n\ndata = pd.read_csv(\"/Users/alexchandy13/Documents/Programming/gitstuff/pitch-ML/baseballdf.csv\", sep=\",\")\ntarget = 'description'\n\nx = np.array(data.drop(data.columns[[0,13]], 1))\ny = np.array(data[target])\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)\n\nclf = svm.SVC(kernel=\"linear\",C=2)\n\nclf.fit(x_train,y_train)\ny_pred = clf.predict(x_test)\nacc = metrics.accuracy_score(y_test,y_pred)\nprint(acc)\nprint(clf.predict([[87,-2,6,3,2,0.63,-0.34,1.17,1,3100,5.8,7]]))\n\n", "sub_path": "pitchModel-python.py", "file_name": "pitchModel-python.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sklearn.svm.SVC", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 14, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "575296403", "text": "from 
websocket_server import WebsocketServer\nimport ipget # pip3 install ipget\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\n\n# Define this device's ID\nDEVICE_ID = \"D0001\"\n\n# Firebase settings\ncred = credentials.Certificate(\"key/halms-49316-firebase-adminsdk-y7wsu-6a5942aa12.json\")\n\n# Realtime DB setup\nfirebase_admin.initialize_app(cred, {\n    'databaseURL': 'https://halms-49316-default-rtdb.firebaseio.com/'\n})\n\n# Handler for new client connections\ndef new_client(client, server):\n    print(\"New client has joined\")\n\n# Handler for newly received messages\ndef send_msg_allclient(client, server, message):\n    print(message)\n    server.send_message_to_all(message)\n\n# Get this device's IP address\nhost = ipget.ipget().ipaddr(\"wlan0\")\n# host = ipget.ipget().ipaddr(\"eth0\")\nhost_address = host[:host.find('/')]\n\n# Save the IP address to the DB\nref = db.reference('/devices')\nusers_ref = ref.child(DEVICE_ID)\nusers_ref.set({\n    'server': host_address\n})\n\nprint(host_address)\n\n# Create the socket server\nserver = WebsocketServer(50000, host=host_address)\n\n# Handler invoked when a new client connects\nserver.set_fn_new_client(new_client)\n\n# Handler invoked when a client sends a message\nserver.set_fn_message_received(send_msg_allclient)\nserver.run_forever()\n", "sub_path": "raspi_serve/socket_serve/serve.py", "file_name": "serve.py", "file_ext": "py", "file_size_in_byte": 1344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "firebase_admin.credentials.Certificate", "line_number": 11, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 11, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 14, "usage_type": "call"}, {"api_name": "ipget.ipget", "line_number": 28, "usage_type": "call"}, {"api_name": "firebase_admin.db.reference", "line_number": 33, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 33, "usage_type": "name"}, {"api_name": "websocket_server.WebsocketServer", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "198837226", "text": "from rest_framework import serializers\n\nfrom .models import Indicator\n\nclass IndicatorListSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Indicator\n        fields = [\n            # Don't want to show some of these details.\n            'indicator_id',\n            'indicator_name',\n            'indicator_url',\n            'indicator_data_url',\n            'direct_indicator_source',\n            'original_indicator_source',\n            'update_cycle',\n            'scope',\n            'units',\n            'last_source_update_ts',\n            'when_to_update_ts',\n            'indicator_definition',\n            # 'createdat',\n            # 'updatedat',\n            'avg_equal',\n            'avg_population',\n            'avg_gdp'\n        ]", "sub_path": "service/indicators/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "models.Indicator", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "67228768", "text": "from wordcloud import WordCloud\r\nimport numpy as np\r\nimport jieba\r\nfrom PIL import Image\r\ndef trans(data):\r\n    alice_coloring = np.array(Image.open(\"A.jpg\"))\r\n    wordcloud = WordCloud(background_color=\"white\",mask=alice_coloring,font_path = 'WE.TTF',width=1000, height=860, margin=2).generate(data)\r\n    wordcloud.to_file(\"qw.jpg\")\r\ndef main():\r\n    a=[]\r\n    f = open('comment.txt', 'r', 
encoding=\"utf-8\").read()\r\n words=list(jieba.cut(f))\r\n for word in words:\r\n if len(word)>1:\r\n a.append(word)\r\n txt=r' '.join(a)\r\n trans(txt)\r\nif __name__ == '__main__':\r\n # main()\r\n url = \"https://static.zhihu.com/heifetz/main.app.bcbe6146eb81b5efaede.js\"\r\n import requests\r\n r = requests.get(url)\r\n print(r.text)\r\n f = open(\"js.txt\",\"w\",encoding=\"utf-8\")\r\n f.write(r.text)\r\n\r\n\r\n\r\n\r\n\r\n# width,height,margin可以设置图片属性\r\n\r\n# generate 可以对全部文本进行自动分词,但是他对中文支持不好,对中文的分词处理请看我的下一篇文章\r\n#wordcloud = WordCloud(font_path = r'D:\\Fonts\\simkai.ttf').generate(f)\r\n# 你可以通过font_path参数来设置字体集\r\n\r\n#background_color参数为设置背景颜色,默认颜色为黑色\r\n#\r\n# import matplotlib.pyplot as plt\r\n# plt.imshow(wordcloud)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n#\r\n# wordcloud.to_file('test.png')\r\n", "sub_path": "知乎/zhi_Spider/ICE DATA/ICE_ARTICLE/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1336, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 7, "usage_type": "call"}, {"api_name": "wordcloud.to_file", "line_number": 8, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "269204593", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport multiprocessing\n\n\ndef run(pname):\n print(pname)\n\n\ndef main():\n for i in range(10):\n p = multiprocessing.Process(target=run, args=('Process-' + str(i),))\n p.start()\n p.join()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "book_02_Python性能分析与优化/chapter_05_多线程与多进程/code_3_多进程.py", "file_name": "code_3_多进程.py", "file_ext": "py", "file_size_in_byte": 295, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "multiprocessing.Process", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "616065058", "text": "\"\"\" This module defines the url patters for profiles \"\"\"\n\nfrom django.conf.urls import url\n\nfrom .views import (\n ProfileListApi, UpdateUserAPIView, UserProfileView\n)\n\napp_name = 'profiles'\n\nurlpatterns = [\n url(r'^profiles/(?P[\\w\\-]+)/?$',\n UserProfileView.as_view(), name='profile'),\n url(r'^profiles/(?P[\\w\\-]+)/edit/?$',\n UpdateUserAPIView.as_view(), name='profile_update'),\n url(r'^profiles/?$',\n ProfileListApi.as_view(), name='list_profiles'),\n]\n", "sub_path": "authors/apps/profiles/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "views.UserProfileView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "views.UserProfileView", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "views.UpdateUserAPIView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "views.UpdateUserAPIView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": 
"views.ProfileListApi.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.ProfileListApi", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "124325876", "text": "import json\nimport socket\nfrom threading import Thread\nfrom rdt import *\n\nclass Client:\n\tdef __init__(self, msgSize=1024):\n\t\tself.msgSize = msgSize\n\t\tself.ip = \"\"\n\n\tdef __call__(self):\n\t\tif(self.retrieveIP()):\n\t\t\twhile True:\n\t\t\t\tcommand = self.menu()\n\t\t\t\tif command == \"1\":\n\t\t\t\t\tself.requestFile()\n\t\t\t\telif command == \"2\":\n\t\t\t\t\tself.requestList()\n\t\t\t\telif command == \"3\":\n\t\t\t\t\tself.closeConnection()\n\t\t\t\t\tbreak\n\n\tdef requestFile(self):\n\t\tpair = json.dumps({\"METHOD\": \"GET\", \"FILENAME\": input(\"Please insert the file name: \")})\n\t\tsender.send(pair, client_rx_address)\n\n\t\twhile True:\n\t\t\tglobal rcv\n\t\t\tif rcv:\n\t\t\t\tmsg = json.loads(rcv)\n\t\t\t\tif msg[\"FILENAME\"] != \"\":\n\t\t\t\t\tprint(\"Retrieved file content: \" + msg[\"BODY\"])\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error retrieving file\")\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\t\t# s.close()\n\t\trcv = None\n\n\tdef requestList(self):\n\t\tpair = json.dumps({\"METHOD\": \"LIST\"})\n\t\tsender.send(pair, client_rx_address)\t\t\n\t\twhile True:\n\t\t\tglobal rcv\n\t\t\tif rcv:\n\t\t\t\tmsg = json.loads(rcv)\n\n\t\t\t\tif len(msg) > 0:\n\t\t\t\t\tprint(\"Files at server:\")\n\t\t\t\t\tfor file in msg:\n\t\t\t\t\t\tprint(\"- \" + file[\"FILENAME\"])\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error retrieving list\")\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\t\t# s.close()\n\t\trcv = None\n\t\n\tdef closeConnection(self):\n\t\tpair = json.dumps({\"METHOD\": \"EXIT\"})\n\t\tsender.send(pair, client_rx_address)\n\t\tprint(\"Closing connection with server\")\n\t\tquit(0)\n\t\t\n\tdef getDomain(self):\n\t\tdomain = input(\"To which domain would you like to connect? 
\")\n\t\treturn domain\n\n\tdef retrieveIP(self):\n\t\tpair = json.dumps({\"METHOD\": \"RETRIEVE\", \"DOMAIN\": self.getDomain()})\n\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\ts.connect((\"localhost\", 4000))\n\t\tprint(\"Retrieving IP from domain\")\n\t\ts.sendall(pair.encode())\n\n\t\tsuccess = False\n\t\twhile True:\n\t\t\trcv = s.recv(self.msgSize).decode(\"utf-8\")\n\n\t\t\tif rcv:\n\t\t\t\tmsg = json.loads(rcv)\n\n\t\t\t\tif msg[\"STATUS\"] == \"OK\":\n\t\t\t\t\tself.ip = msg[\"IP\"]\n\t\t\t\t\tprint(\"IP \" + self.ip + \" was successfully retrieved \")\n\t\t\t\t\tsuccess = True\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error retrieving IP\")\n\t\t\t\t\tsuccess = False\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\ts.close()\n\t\treturn success\n\t\t\n\tdef menu(self):\n\t\treturn input(\"\"\"Enter a number and press enter\n1 - Request file\n2 - List files \n3 - Close connection\n\"\"\" )\n\ndef callback(body):\n global rcv\n rcv = body\n\nrcv = None\nrx_port = 5005\ntx_port = 5007\nclient_rx_address = ('localhost', 8080)\n\nreceiver = Receiver(('localhost', rx_port), callback)# address and callback\nsender = Sender(('localhost', tx_port))# address\n\ndef main():\n\tCLIENT = Client()\n\treceiver.listen()\n\tCLIENT()\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "Client/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 64, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 76, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 76, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 76, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "572776070", "text": "import logging\nimport re\nimport MySQLdb\nimport sys\nimport os\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '../..'))\n#from config.config import hpo_conn as db\nfrom text.entity import Entity, Entities\nfrom config import config\nfrom text.token2 import Token2\n#from text.offset import Offset, Offsets, perfect_overlap, contained_by\n\nhpo_words = set()\nhpo_stopwords = set() \n\n\nclass HPOEntity(Entity):\n\t\"\"\"HPO entities\"\"\"\n\tdef __init__(self, tokens, sid, *args, **kwargs):\n\t\tsuper(HPOEntity, self).__init__(tokens, *args, **kwargs)\n\t\tself.type = \"hpo\"\n\t\tself.subtype = kwargs.get(\"subtype\")\n\t\tself.nextword = kwargs.get(\"nextword\")\n\t\tself.sid = sid\n\t\tself.hpo_id = None\n\t\tself.hpo_score = 0\n\t\tself.hpo_name = 0\n \n\t#tf_regex = re.compile(r\"\\A[A-Z]+\\d*\\w*\\d*\\Z\")\n\n\tdef get_dic(self):\n\t\tdic = super(HPOEntity, self).get_dic()\n\t\t#dic[\"subtype\"] = self.subtype\n\t\tdic[\"hpo_id\"] = self.hpo_id\n\t\tdic[\"hpo_name\"] = self.hpo_name\n\t\tdic[\"ssm_score\"] = self.ssm_score\n\t\tdic[\"ssm_entity\"] = self.ssm_best_ID\n\t\treturn dic\n\n\n\tdef validate(self, ths, rules):\n\t\t\"\"\"\n\t\tUse rules to validate if the entity was correctly identified\n\t\t:param rules:\n\t\t:return: True 
if entity does not fall into any of the rules, False if it does\n\t\t\"\"\"\n\t\tif \"stopwords\" in rules:\n\t\t\twords = self.text.split(\" \")\n\t\t\t#words += self.text.split(\"-\")\n\t\t\tstop = False\n\t\t\tfor s in hpo_stopwords:\n\t\t\t\tif any([s == w.lower() for w in words]):\n\t\t\t\t\tlogging.debug(\"ignored stopword %s\" % self.text)\n\t\t\t\t\tstop = True\n\t\t\tif stop:\n\t\t\t\treturn False\n\n\t\tif \"paren\" in rules:\n\t\t\tif (self.text[-1] == \")\" and \"(\" not in self.text) or (self.text[-1] == \"]\" and \"[\" not in self.text) or \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t (self.text[-1] == \"}\" and \"{\" not in self.text):\n\t\t\t\tlogging.debug(\"parenthesis %s\" % self.text)\n\t\t\t\tself.dend -= 1\n\t\t\t\tself.end -= 1\n\t\t\t\tself.text = self.text[:-1]\n\t\treturn True", "sub_path": "src/text/text/hpo_entity.py", "file_name": "hpo_entity.py", "file_ext": "py", "file_size_in_byte": 1846, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "text.entity.Entity", "line_number": 18, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 54, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "156466961", "text": "import psycopg2, psycopg2.extras\nimport os\nimport glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import patches\nfrom matplotlib.pyplot import figure\nfrom datetime import timedelta, date\n\n\n\n\n\ntask = pd.read_csv(\"task.csv\") \n\n\ndef draw():\n\n\tfor index, row in task.iterrows():\n\n\t\ttask_id = int(row['task_id'])\n\n\t\ttask_dir = os.path.join(os.getcwd(), 'result/'+str(task_id))\n\t\tif not os.path.isdir(task_dir):\n\t\t\tcontinue\n\n\t\timage_dir = os.path.join(task_dir, 'image')\n\t\tif not os.path.isdir(image_dir):\n\t\t\tos.makedirs(image_dir)\n\n\t\tload_dir = os.path.join(task_dir, 'output')\n\t\tif not os.path.isdir(load_dir):\n\t\t\tcontinue\n\n\t\ttrain_loss = np.load(os.path.join(load_dir, 'train_loss.npy'))\n\t\ttrain_acc = np.load(os.path.join(load_dir, 'train_acc.npy'))\n\t\ttrain_f1= np.load(os.path.join(load_dir, 'train_f1.npy'))\n\t\ttrain_precision = np.load(os.path.join(load_dir, 'train_precision.npy'))\n\t\ttrain_recall = np.load(os.path.join(load_dir, 'train_recall.npy'))\n\n\t\tvalid_loss = np.load(os.path.join(load_dir, 'valid_loss.npy'))\n\t\tvalid_acc = np.load(os.path.join(load_dir, 'valid_acc.npy'))\n\t\tvalid_f1= np.load(os.path.join(load_dir, 'valid_f1.npy'))\n\t\tvalid_precision = np.load(os.path.join(load_dir, 'valid_precision.npy'))\n\t\tvalid_recall = np.load(os.path.join(load_dir, 'valid_recall.npy'))\n\n\n\t\t#plot train and validation loss\n\t\tplt.plot(train_loss)\n\t\tplt.plot(valid_loss)\n\t\tplt.title('Loss')\n\t\tplt.ylabel('Loss')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'loss.png'))\n\t\tplt.clf()\n\n\n\t\t#plot train and validation accuracy\n\t\tplt.plot(train_acc)\n\t\tplt.plot(valid_acc)\n\t\tplt.title('Accuracy')\n\t\tplt.ylabel('Accuracy')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 
'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'accuracy.png'))\n\t\tplt.clf()\n\n\t\t#plot train and validation f1\n\t\tplt.plot(train_f1)\n\t\tplt.plot(valid_f1)\n\t\tplt.title('F1')\n\t\tplt.ylabel('F1')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'f1.png'))\n\t\tplt.clf()\n\n\t\t#plot train and validation precision\n\t\tplt.plot(train_precision)\n\t\tplt.plot(valid_precision)\n\t\tplt.title('Precision')\n\t\tplt.ylabel('Precision')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'precision.png'))\n\t\tplt.clf()\n\n\t\t#plot train and validation recall\n\t\tplt.plot(train_recall)\n\t\tplt.plot(valid_recall)\n\t\tplt.title('Recall')\n\t\tplt.ylabel('Recall')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'recall.png'))\n\t\tplt.clf()\n\n\t\tplt.plot(train_acc)\n\t\tplt.plot(train_recall)\n\t\tplt.plot(train_precision)\n\t\tplt.plot(train_f1)\n\t\tplt.title('Train evaluation')\n\t\tplt.ylabel('Score')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Accuracy', 'Recall', 'Precision', 'F1'])\n\t\tplt.savefig(os.path.join(image_dir, 'train_evaluation.png'))\n\t\tplt.clf()\n\n\t\tplt.plot(valid_acc)\n\t\tplt.plot(valid_recall)\n\t\tplt.plot(valid_precision)\n\t\tplt.plot(valid_f1)\n\t\tplt.title('Valid evaluation')\n\t\tplt.ylabel('Score')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Accuracy', 'Recall', 'Precision', 'F1'])\n\t\tplt.savefig(os.path.join(image_dir, 'valid_evaluation.png'))\n\t\tplt.clf()\n\ndef get_loss():\n\n\tfor index, row in task.iterrows():\n\n\t\ttask_id = int(row['task_id'])\n\n\t\ttask_dir = os.path.join(os.getcwd(), 'result/'+str(task_id))\n\t\tif not os.path.isdir(task_dir):\n\t\t\tcontinue\n\n\t\tload_dir = os.path.join(task_dir, 'output')\n\t\tif not os.path.isdir(load_dir):\n\t\t\tcontinue\n\n\t\ttrain_f1= np.load(os.path.join(load_dir, 'train_f1.npy'))\n\t\tvalid_f1= np.load(os.path.join(load_dir, 'valid_f1.npy'))\n\n\t\ttrain_loss = np.load(os.path.join(load_dir, 'train_loss.npy'))\n\t\tvalid_loss = np.load(os.path.join(load_dir, 'valid_loss.npy'))\n\n\t\ttrain_loss_min = np.min(train_loss)\n\t\tvalid_loss_min = np.min(valid_loss)\n\n\t\ttrain_f1_max = np.max(train_f1)\n\t\tvalid_f1_max = np.max(valid_f1)\n\n\t\t# 8047 valid loss min = 0.599\n\t\t#print('task id: '+str(task_id)+' train loss min: '+str(train_loss_min)+' valid loss min: '+str(valid_loss_min))\n\n\t\t# 8056 valid f1 max = 0.594\n\t\tprint('task id: '+str(task_id)+' train f1 max: '+str(train_f1_max)+' valid f1 max: '+str(valid_f1_max))\n\n\nif __name__ == '__main__':\n get_loss()", "sub_path": "experiment/experiment1/draw.py", "file_name": "draw.py", "file_ext": "py", "file_size_in_byte": 4211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 30, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 57, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 107, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 140, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "165657796", "text": "import cv2\r\nimport time\r\nimport numpy as np\r\n\r\n#To save the output\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\noutputfile = cv2.VideoWriter('output.avi', fourcc, 30.0, (640, 480))\r\n\r\n#To start the webcam\r\ncam = cv2.VideoCapture(0)\r\n\r\n#make the code sleep\r\ntime.sleep(5)\r\n\r\n#making it loop 60 seconds\r\nfor i in range(60):\r\n ret, bg = cam.read()\r\n\r\nbg = np.flip(bg, axis = 1)\r\n\r\n#capturing then flipping\r\nwhile(cam.isOpened()):\r\n ret, ing = cam.read()\r\n if not ret:\r\n break", "sub_path": "cam.py", "file_name": "cam.py", "file_ext": "py", "file_size_in_byte": 486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.VideoWriter_fourcc", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 10, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "427283077", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom ..items import AmazonScraperItem\nfrom ..items import BookOption\n\n\nclass AmazonSpider(scrapy.Spider):\n name = \"amazon\"\n\n # \n with open('results.json') as blackboard_results:\n classes = json.load(blackboard_results)\n list_blackboard_ISBNs = []\n for course in classes:\n if \"bookList\" in course:\n for book in course[\"bookList\"]:\n if book:\n list_blackboard_ISBNs.append(book[\"ISBN\"])\n\n start_urls = [\"https://www.campusbooks.com/search/{0}?condition%5Bnew%5D=new&condition%5Bused%5D=used&rental_period=0&postal_code=08540&buysellrent=buy&op=Apply+Filters&form_build_id=form-JPpsrJqHKfZ7FLJEFh_LL_ibJWGBITegB9RgsE9vejM&form_id=cb_search_filters_form\".format(isbn) for isbn in list_blackboard_ISBNs]\n\n def parse(self, response):\n\n allOffers = response.xpath('//div[@class = \"standard-offers\"]')\n options = allOffers.xpath('.//table[@class = \"table table-condensed\"]')\n\n buying_options = []\n\n for option in options:\n bookOption = BookOption()\n\n bookOption['condition'] = option.xpath('.//td[@class = \"condition\"]/text()').extract_first().strip()\n bookOption['price'] = option.xpath('.//td[@class = \"price hidden-xs\"]/text()').extract_first().strip()[1:]\n bookOption['seller'] = option.xpath('.//span[@class = \"sprite-logo\"]/@title').extract_first()\n bookOption['link'] = option.xpath('.//button[@class = \"btn orange-btn btn-fit\"]/../@href').extract_first()\n buying_options.append(dict(bookOption))\n\n scraperItem = AmazonScraperItem()\n scraperItem[\"isbn\"] = response.request.url[35:48]\n scraperItem[\"options\"] = buying_options\n\n yield scraperItem\n", "sub_path": 
"server/public/scripts/amazon_scraper/amazon_scraper/spiders/amazon.py", "file_name": "amazon.py", "file_ext": "py", "file_size_in_byte": 1795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "items.BookOption", "line_number": 31, "usage_type": "call"}, {"api_name": "items.AmazonScraperItem", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "355767834", "text": "from flask import (Blueprint, render_template, redirect, url_for, \n jsonify, abort, request)\nfrom flask_security import current_user\nfrom flask_security.decorators import login_required, roles_required\nfrom .forms import DepotForm, ArretForm, RemisageForm\nfrom ..config import BaseConfig\n\ndepots = Blueprint('depots', __name__, url_prefix='/depots')\n\n\n# --------------------------------------------------\n# -------------------- DEPOT ---------------------\n# --------------------------------------------------\n@depots.route('/')\n@login_required\ndef indexDepots():\n from ..models import Depot, Arret\n if current_user.has_role('admin'):\n depots = Depot.getAll()\n else:\n idGroupeDeLigne = request.cookies['idGroupeDeLigne']\n depots = Depot.getAll(idGroupeDeLigne)\n arrets = Arret.getAll()\n \n return render_template('depots/index-depots.html', \n depots=depots,\n arrets=arrets)\n\n@depots.route('/ajouter', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef ajouterDepot():\n f = DepotForm()\n\n from ..models import Vehicule\n f.famille.choices = [(x[0], x[0]) for x in Vehicule.getDiscriminants()]\n\n from ..models import Depot\n from ..extensions import db\n if f.validate_on_submit():\n depot = Depot(codeDepot=f.code.data,\n nom=f.nom.data,\n familleVehicule=f.famille.data)\n db.session.add(depot)\n db.session.commit()\n if f.gererRemisage.data:\n return redirect(url_for('depots.editerRemisageDepot', id=depot.idDepot))\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/ajouter.html', form=f)\n\n@depots.route('/editer/', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef editerDepot(id):\n from ..models import Depot\n depot = Depot.getById(id)\n aRemisage = True if depot.voie_collection else False\n\n f = DepotForm(code=depot.codeDepot, nom=depot.nom,\n famille=depot.familleVehicule, gererRemisage=aRemisage)\n from ..models import Vehicule\n f.famille.choices = [(x[0], x[0]) for x in Vehicule.getDiscriminants()]\n\n\n from ..extensions import db\n if f.validate_on_submit():\n depot.codeDepot = f.code.data\n depot.nom = f.nom.data\n depot.familleVehicule = f.famille.data\n db.session.commit()\n\n if aRemisage and not f.gererRemisage.data:\n for v in depot.voie_collection:\n db.session.delete(v)\n for pa in depot.pointacces_collection:\n db.session.delete(pa)\n db.session.commit()\n\n if f.gererRemisage.data:\n return redirect(url_for('depots.editerRemisageDepot', id=depot.idDepot))\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/editer.html', form=f)\n\n@depots.route('/supprimer/', methods=['DELETE'])\ndef supprimerDepot(id):\n from ..models import Depot\n depot = Depot.getById(id)\n if not depot:\n abort(404)\n Depot.remove(depot)\n return jsonify({'result': True}), 200\n\n@depots.route('/depot/')\n@login_required\ndef afficherDepot(id):\n from ..models import Depot\n depot = 
Depot.getById(id)\n\n return render_template('depots/depot.html',\n depot=depot)\n\n@depots.route('/depot/<int:id>/remisage/editer', methods=['GET', 'POST'])\n@roles_required('admin')\ndef editerRemisageDepot(id):\n f = RemisageForm()\n\n from ..models import Depot, Voie, Place, Arret, PointAcces\n depot = Depot.getById(id)\n if not depot:\n abort(404)\n aRemisage = True if depot.voie_collection else False\n\n from ..extensions import db\n if f.validate_on_submit():\n # If a remisage already exists, delete it\n if aRemisage:\n for v in depot.voie_collection:\n db.session.delete(v)\n for pa in depot.pointacces_collection:\n db.session.delete(pa)\n\n # Add the lanes and spots\n for voie in f.voies.data:\n v = Voie(idDepot=id, libelleVoie=voie['libelle'])\n db.session.add(v)\n db.session.flush()\n for i in range(0, voie['nbPlaces']):\n db.session.add(Place(idVoie=v.idVoie, \n position=(i+1),\n type=voie['places'][i]['typePlace']))\n\n # Add the access points\n for pointAcces in f.pointsAcces.data:\n idArret = None\n if pointAcces['idArret'] != -1:\n idArret = pointAcces['idArret']\n db.session.add(PointAcces(idDepot=id,\n estEntree=pointAcces['estEntree'],\n estSortie=pointAcces['estSortie'],\n idArret=idArret,\n cote=pointAcces['cote']))\n db.session.commit()\n return redirect(url_for('depots.indexDepots'))\n\n # If the depot has a remisage\n if aRemisage:\n f.voies.pop_entry()\n for v in depot.voie_collection:\n f.voies.append_entry()\n f.voies.entries[-1].libelle.data = v.libelleVoie\n f.voies.entries[-1].nbPlaces.data = len(v.place_collection)\n for p in v.place_collection:\n f.voies.entries[-1].places.append_entry()\n f.voies.entries[-1].places.entries[-1].typePlace.data = int(p.type) if p.type else 0\n f.pointsAcces.pop_entry()\n for pa in depot.pointacces_collection:\n f.pointsAcces.append_entry()\n f.pointsAcces.entries[-1].estEntree.data = pa.estEntree\n f.pointsAcces.entries[-1].estSortie.data = pa.estSortie\n f.pointsAcces.entries[-1].cote.data = int(pa.cote)\n f.pointsAcces.entries[-1].idArret.data = pa.idArret if pa.idArret else -1\n\n return render_template('depots/editer-remisage.html', \n form=f,\n maxPlaces=BaseConfig.MAXIMUM_PLACES_PAR_VOIE)\n\n\n# --------------------------------------------------\n# -------------------- ARRET ---------------------\n# --------------------------------------------------\n@depots.route('/ajouter/arret', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef ajouterArret():\n f = ArretForm()\n\n from ..models import Arret\n from ..extensions import db\n if f.validate_on_submit():\n arret = Arret(codeArret=f.code.data,\n libelleArret=f.libelle.data)\n db.session.add(arret)\n db.session.commit()\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/ajouter-arret.html', form=f)\n\n@depots.route('/editer/arret/<int:id>', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef editerArret(id):\n from ..models import Arret\n arret = Arret.getById(id)\n\n f = ArretForm(code=arret.codeArret,\n libelle=arret.libelleArret)\n\n from ..extensions import db\n if f.validate_on_submit():\n arret.codeArret = f.code.data\n arret.libelleArret = f.libelle.data\n db.session.commit()\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/editer-arret.html', form=f)\n\n@depots.route('/supprimer/arret/<int:id>', methods=['DELETE'])\ndef supprimerArret(id):\n from ..models import Arret\n arret = Arret.getById(id)\n if not arret:\n abort(404)\n Arret.remove(arret)\n return jsonify({'result': 
True}), 200\n", "sub_path": "main/depots/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7092, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_security.current_user.has_role", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_security.current_user", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Depot.getAll", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Depot", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.cookies", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "models.Depot.getAll", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Depot", "line_number": 22, "usage_type": "name"}, {"api_name": "models.Arret.getAll", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Arret", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask_security.decorators.login_required", "line_number": 15, "usage_type": "name"}, {"api_name": "forms.DepotForm", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Vehicule.getDiscriminants", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Vehicule", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Depot", "line_number": 41, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 44, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 44, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 44, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 45, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 45, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "flask_security.decorators.login_required", "line_number": 30, "usage_type": "name"}, {"api_name": "flask_security.decorators.roles_required", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Depot.getById", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Depot", "line_number": 57, "usage_type": "name"}, {"api_name": "forms.DepotForm", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Vehicule.getDiscriminants", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Vehicule", "line_number": 63, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 71, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 71, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 71, "usage_type": "name"}, {"api_name": "extensions.db.session.delete", "line_number": 75, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 75, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 75, "usage_type": "name"}, {"api_name": "extensions.db.session.delete", 
"line_number": 77, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 77, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 77, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 78, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 78, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 84, "usage_type": "call"}, {"api_name": "flask_security.decorators.login_required", "line_number": 53, "usage_type": "name"}, {"api_name": "flask_security.decorators.roles_required", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Depot.getById", "line_number": 89, "usage_type": "call"}, {"api_name": "models.Depot", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 91, "usage_type": "call"}, {"api_name": "models.Depot.remove", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Depot", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Depot.getById", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Depot", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 101, "usage_type": "call"}, {"api_name": "flask_security.decorators.login_required", "line_number": 96, "usage_type": "name"}, {"api_name": "forms.RemisageForm", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Depot.getById", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Depot", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 112, "usage_type": "call"}, {"api_name": "extensions.db.session.delete", "line_number": 120, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 120, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 120, "usage_type": "name"}, {"api_name": "extensions.db.session.delete", "line_number": 122, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 122, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 122, "usage_type": "name"}, {"api_name": "models.Voie", "line_number": 126, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 127, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 127, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 127, "usage_type": "name"}, {"api_name": "extensions.db.session.flush", "line_number": 128, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 128, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 128, "usage_type": "name"}, {"api_name": "extensions.db.session.add", "line_number": 130, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 130, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 130, "usage_type": "name"}, {"api_name": "models.Place", "line_number": 130, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 139, "usage_type": "call"}, {"api_name": 
"extensions.db.session", "line_number": 139, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 139, "usage_type": "name"}, {"api_name": "models.PointAcces", "line_number": 139, "usage_type": "call"}, {"api_name": "extensions.db.session.commit", "line_number": 144, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 144, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 165, "usage_type": "call"}, {"api_name": "config.BaseConfig.MAXIMUM_PLACES_PAR_VOIE", "line_number": 167, "usage_type": "attribute"}, {"api_name": "config.BaseConfig", "line_number": 167, "usage_type": "name"}, {"api_name": "flask_security.decorators.roles_required", "line_number": 105, "usage_type": "call"}, {"api_name": "forms.ArretForm", "line_number": 177, "usage_type": "call"}, {"api_name": "models.Arret", "line_number": 182, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 184, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 184, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 184, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 185, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 185, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 188, "usage_type": "call"}, {"api_name": "flask_security.decorators.login_required", "line_number": 174, "usage_type": "name"}, {"api_name": "flask_security.decorators.roles_required", "line_number": 175, "usage_type": "call"}, {"api_name": "models.Arret.getById", "line_number": 195, "usage_type": "call"}, {"api_name": "models.Arret", "line_number": 195, "usage_type": "name"}, {"api_name": "forms.ArretForm", "line_number": 197, "usage_type": "call"}, {"api_name": "extensions.db.session.commit", "line_number": 204, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 204, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 204, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 205, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 205, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 207, "usage_type": "call"}, {"api_name": "flask_security.decorators.login_required", "line_number": 191, "usage_type": "name"}, {"api_name": "flask_security.decorators.roles_required", "line_number": 192, "usage_type": "call"}, {"api_name": "models.Arret.getById", "line_number": 212, "usage_type": "call"}, {"api_name": "models.Arret", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 214, "usage_type": "call"}, {"api_name": "models.Arret.remove", "line_number": 215, "usage_type": "call"}, {"api_name": "models.Arret", "line_number": 215, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "307244126", "text": "import os\nimport numpy as np\nimport cmaps\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport 
matplotlib.pyplot as plt\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom copy import copy\nfrom cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER\nimport cartopy.io.shapereader as shpreader\nimport shapely.geometry as sgeom\nimport datetime\n\n# Constants\nBIGFONT=18\nMIDFONT=14\nSMFONT=10\n\n\n#--------------Function Definition----------------\n\n\n\ndef find_side(ls, side):\n \"\"\"\n Given a shapely LineString which is assumed to be rectangular, return the\n line corresponding to a given side of the rectangle.\n \"\"\"\n minx, miny, maxx, maxy = ls.bounds\n points = {'left': [(minx, miny), (minx, maxy)],\n 'right': [(maxx, miny), (maxx, maxy)],\n 'bottom': [(minx, miny), (maxx, miny)],\n 'top': [(minx, maxy), (maxx, maxy)],}\n return sgeom.LineString(points[side])\ndef lambert_xticks(ax, ticks):\n \"\"\"Draw ticks on the bottom x-axis of a Lambert Conformal projection.\"\"\"\n te = lambda xy: xy[0]\n lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T\n xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', lc, te)\n ax.xaxis.tick_bottom()\n ax.set_xticks(xticks)\n ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick) for xtick in xticklabels], fontsize=MIDFONT)\ndef lambert_yticks(ax, ticks):\n \"\"\"Draw ticks on the left y-axis of a Lambert Conformal projection.\"\"\"\n te = lambda xy: xy[1]\n lc = lambda t, n, b: np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T\n yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', lc, te)\n ax.yaxis.tick_left()\n ax.set_yticks(yticks)\n ax.set_yticklabels([ax.yaxis.get_major_formatter()(ytick) for ytick in yticklabels], fontsize=MIDFONT)\ndef _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):\n \"\"\"Get the tick locations and labels for an axis of a Lambert Conformal projection.\"\"\"\n outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())\n axis = find_side(outline_patch, tick_location)\n n_steps = 30\n extent = ax.get_extent(ccrs.PlateCarree())\n _ticks = []\n for t in ticks:\n xy = line_constructor(t, n_steps, extent)\n proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])\n xyt = proj_xyz[..., :2]\n ls = sgeom.LineString(xyt.tolist())\n locs = axis.intersection(ls)\n if not locs:\n tick = [None]\n else:\n tick = tick_extractor(locs.xy)\n _ticks.append(tick[0])\n # Remove ticks that aren't visible: \n ticklabels = copy(ticks)\n while True:\n try:\n index = _ticks.index(None)\n except ValueError:\n break\n _ticks.pop(index)\n ticklabels.pop(index)\n return _ticks, ticklabels\n\ndef get_station_df(sta_path):\n '''get station info'''\n df = pd.read_excel(sta_path)\n df=df.dropna()\n return(df)\n\ndef conv_deg(deg_str):\n '''convert to degree info'''\n value=int(deg_str)//100\n value=value+(int(deg_str)-value*100)/60\n return(value)\n\n#--------------Function Definition----------------\n\ndef main():\n # Input File\n raw_file='/home/metctm1/array/data/2011-UST-RAP/a_precip_20201113141016.csv'\n\n # Province shp file\n province_shp_file=os.getenv('SHP_LIB')+'/cnmap/cnhimap.dbf'\n county_shp_file=os.getenv('SHP_LIB')+'/cnmap/county_2004.dbf'\n\n south_china_province=['广东', '广西', '海南']\n \n \n \n # deal with raw input\n df = pd.read_csv(raw_file,parse_dates=True) \n df['id']=df['lon']*df['lat']\n df_process=df.groupby('id').sum() # Aggregate all records for each station\n df_process['lon'] =df_process['lon']/df_process['val2']\n df_process['lat'] =df_process['lat']/df_process['val2']\n \n\n # read shp 
files\n province_shp=shpreader.Reader(province_shp_file).geometries()\n county_shp = shpreader.Reader(county_shp_file).geometries()\n \n \n \n # Set figure size\n proj = ccrs.Mercator(central_longitude=115., min_latitude=-80.0, max_latitude=84.0, globe=None, \n latitude_true_scale=22.0, false_easting=0.0, false_northing=0.0, scale_factor=None)\n fig = plt.figure(figsize=[10, 8],frameon=True)\n # Set projection and plot the main figure\n ax = fig.add_axes([0.08, 0.01, 0.8, 0.94], projection=proj)\n # Set figure extent\n ax.set_extent([109, 118, 20, 26],crs=ccrs.PlateCarree())\n \n\n # plot shp boundaries\n ax.add_geometries(county_shp, ccrs.PlateCarree(),facecolor='none', edgecolor='gray',linewidth=0.5, zorder = 0)\n ax.add_geometries(province_shp, ccrs.PlateCarree(),facecolor='none', edgecolor='black',linewidth=1., zorder = 1)\n\n # Add ocean, land, rivers and lakes\n #ax.add_feature(cfeature.OCEAN.with_scale('50m'))\n #ax.add_feature(cfeature.LAND.with_scale('50m'))\n # *must* call draw in order to get the axis boundary used to add ticks:\n fig.canvas.draw()\n # Define gridline locations and draw the lines using cartopy's built-in gridliner:\n # xticks = np.arange(80,130,10)\n # yticks = np.arange(15,55,5)\n xticks = range(109, 118, 2)\n yticks = range(20, 26, 2) \n #ax.gridlines(xlocs=xticks, ylocs=yticks,zorder=1,linestyle='--',lw=0.5,color='gray')\n\n # Label the end-points of the gridlines using the custom tick markers:\n ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER) \n ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)\n lambert_xticks(ax, xticks)\n lambert_yticks(ax, yticks)\n\n # Marker size in units of points^2\n cmap=cmaps.precip2_17lev\n sc=ax.scatter( df_process['lon'], df_process['lat'], marker='.', c=df_process['val1'], \n cmap=cmap, norm=matplotlib.colors.BoundaryNorm([0, 1, 2, 5, 10, 20, 30, 40, 50, 70, 100, 150, 200, 250, 300, 400, 500, 600], cmap.N),\n s=15,zorder=1, transform=ccrs.Geodetic(), label='pr')\n\n df_sig=df_process.where(df_process['val1']>250.)\n ax.scatter( df_sig['lon'], df_sig['lat'], marker='.', c=df_sig['val1'], \n cmap=cmap, norm=matplotlib.colors.BoundaryNorm([0, 1, 2, 5, 10, 20, 30, 40, 50, 70, 100, 150, 200, 250, 300, 400, 500, 600], cmap.N),\n s=50,zorder=9, transform=ccrs.Geodetic())\n \n plt.title('Observed Accumulated Rainfall during Mangkhut (1822)')\n cax=fig.add_axes([0.15, 0.02, 0.7, 0.03])# position [left, bottom, right, top]\n cbar = fig.colorbar(sc,ticks=[0, 1, 5, 20, 40, 70, 150, 250, 400], cax=cax, orientation='horizontal')\n# cbar = fig.colorbar(sc)\n\n# Show figure\n plt.savefig('../fig/mangkhut_pr.png', dpi=120, bbox_inches='tight')\n# plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n", "sub_path": "2011-UST-RAP/script/201119-draw-station-map-prec-mangkhut.py", "file_name": "201119-draw-station-map-prec-mangkhut.py", "file_ext": "py", "file_size_in_byte": 6685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "matplotlib.use", "line_number": 6, "usage_type": "call"}, {"api_name": "shapely.geometry.LineString", "line_number": 37, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 49, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "shapely.geometry.LineString", "line_number": 56, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 56, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 59, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 59, "usage_type": "name"}, {"api_name": "cartopy.crs.Geodetic", "line_number": 63, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 63, "usage_type": "name"}, {"api_name": "shapely.geometry.LineString", "line_number": 65, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 65, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 85, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 102, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 110, "usage_type": "call"}, {"api_name": "cartopy.io.shapereader.Reader", "line_number": 118, "usage_type": "call"}, {"api_name": "cartopy.io.shapereader", "line_number": 118, "usage_type": "name"}, {"api_name": "cartopy.io.shapereader.Reader", "line_number": 119, "usage_type": "call"}, {"api_name": "cartopy.io.shapereader", "line_number": 119, "usage_type": "name"}, {"api_name": "cartopy.crs.Mercator", "line_number": 124, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 130, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 130, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 134, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 134, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 135, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 135, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LONGITUDE_FORMATTER", "line_number": 150, "usage_type": "argument"}, {"api_name": "cartopy.mpl.gridliner.LATITUDE_FORMATTER", "line_number": 151, "usage_type": "argument"}, {"api_name": "cmaps.precip2_17lev", "line_number": 156, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.BoundaryNorm", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 158, "usage_type": "attribute"}, {"api_name": "cartopy.crs.Geodetic", "line_number": 159, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.colors.BoundaryNorm", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 163, "usage_type": "attribute"}, {"api_name": "cartopy.crs.Geodetic", "line_number": 164, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}]} +{"seq_id": "141692291", "text": "import torch\nfrom torch.utils.data import DataLoader\nfrom dataset.dataset import 
Landsat8Dataset, Landsat8DatasetHDF5\nfrom dataset.dataset import LocalRandomSampler\nfrom dataset.customTransform import DenormalizeS2\nfrom torchvision import transforms\n\nimport argparse\nfrom IPython import embed\n\nimport gc\nimport gdal\nfrom gdalconst import GA_ReadOnly\nfrom osgeo import osr\n\nimport numpy as np\nfrom PIL import Image\n\nparser = argparse.ArgumentParser(description='PyTorch Super Res Example')\n# hyper-parameters\nparser.add_argument('--batchSize', type=int, default=1, help='training batch size')\nparser.add_argument('--testBatchSize', type=int, default=1, help='testing batch size')\nparser.add_argument('--nEpochs', type=int, default=20, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.001, help='Learning Rate. Default=0.001')\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')\n\n# model configuration\nparser.add_argument('--upscale_factor', '-uf', type=int, default=4, help=\"super resolution upscale factor\")\nparser.add_argument('--model', '-m', type=str, default='sub', help='choose which model is going to use')\n\nargs = parser.parse_args()\n\ndef main():\n train_csv = \"../dataset/l8s2-train.csv\"\n val_csv = \"../dataset/l8s2-val.csv\"\n test_csv = \"../dataset/l8s2-test.csv\"\n single_csv = \"../dataset/l8s2-predict-single.csv\"\n\n #====================================================================================================\n # Dataloader with HDF5\n #====================================================================================================\n input_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n target_transform = transforms.Compose([\n transforms.Lambda(lambda x: [x[i].astype('float32') for i in range(13)]),\n transforms.Lambda(lambda x: [transforms.ToTensor()(x[i]) for i in range(13)])\n ])\n\n # train_set = Landsat8DatasetHDF5(train_csv,\n # input_transform = input_transform,\n # target_transform=target_transform)\n # train_data_loader = DataLoader(dataset=train_set, batch_size=args.batchSize, sampler = LocalRandomSampler(train_set))\n # train_data_loader = DataLoader(dataset=train_set, batch_size=args.batchSize, shuffle=False)\n\n # val_set = Landsat8DatasetHDF5(val_csv,\n # input_transform = input_transform,\n # target_transform=target_transform)\n # val_data_loader = DataLoader(dataset=val_set, batch_size=args.testBatchSize, shuffle=False)\n\n # test_set = Landsat8DatasetHDF5(test_csv,\n # input_transform = input_transform,\n # target_transform=target_transform)\n # test_data_loader = DataLoader(dataset=test_set, batch_size=args.testBatchSize, shuffle=False)\n\n single_set = Landsat8DatasetHDF5(single_csv,\n input_transform = input_transform,\n target_transform=target_transform)\n single_data_loader = DataLoader(dataset=single_set, batch_size=args.testBatchSize, shuffle=False)\n #====================================================================================================\n\n # L8\n # means = [489.7118, 591.63416, 826.2221, 948.7332, 1858.4872, 1864.6527, 1355.4669]\n # sds = [338.75378, 403.48727, 572.8161, 784.2508, 1208.3722, 1436.1204, 1138.7588]\n\n # S2\n means = [1440.2627, 1258.3445, 1214.9252, 1325.0135, 1486.8649, 1866.3961, 2085.1528, 2070.0884, 2272.1758, 931.276, 21.306807, 2370.4104, 1701.286]\n sds = [366.68463, 378.73654, 512.0519, 771.2212, 791.2124, 874.36127, 989.072, 1001.9915, 1093.7765, 552.87885, 28.292986, 1379.6288, 1097.3044]\n\n modelname = 'SubPixelCNN'\n modelname = 'SubPixelMaxPoolCNN'\n modelname 
= 'TransConvCNN'\n modelname = 'TransConvMaxPoolCNN'\n model = torch.load('save/'+modelname+'/model_path.pth')\n\n # model = torch.load('save/SubPixelCNN/model_path.pth')\n # model = torch.load('save/SubPixelMaxPoolCNN/model_path.pth')\n # model = torch.load('save/TransConvCNN/model_path.pth')\n # model = torch.load('save/TransConvMaxPoolCNN/model_path.pth')\n\n s2_path = 'S2A_MSIL1C_20171230T183751_N0206_R027_T11SLU_20171230T202151/T11SLU_20171230T183751'\n\n model.eval()\n\n iter_loader = iter(single_data_loader)\n for i in range(1):\n input, target = next(iter_loader)\n out = model(input.cuda())\n denorm = DenormalizeS2(means, sds)\n out_denorm = denorm(out)\n patch01 = out_denorm[0].reshape(500,500) \n patch09 = out_denorm[9].reshape(500,500)\n patch10 = out_denorm[10].reshape(500,500)\n\n patch05 = out_denorm[4].reshape(1500,1500) \n patch06 = out_denorm[5].reshape(1500,1500) \n patch07 = out_denorm[6].reshape(1500,1500) \n patch8A = out_denorm[8].reshape(1500,1500)\n patch11 = out_denorm[11].reshape(1500,1500)\n patch12 = out_denorm[12].reshape(1500,1500)\n\n patch02 = out_denorm[1].reshape(3000,3000) \n patch03 = out_denorm[2].reshape(3000,3000) \n patch04 = out_denorm[3].reshape(3000,3000) \n patch08 = out_denorm[7].reshape(3000,3000) \n\n #====================================================================================================\n # 01, 09, 10\n #====================================================================================================\n xstart = 475\n ystart = 475\n geotransform = (300000.0+(60.0*xstart), 60.0, 0.0, 3900000.0-(60.0*ystart), 0.0, -60.0)\n\n # 01\n print(\"Predicting B01\")\n nx = patch01.shape[0]\n ny = patch01.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B01.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B01.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch01) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch01.min()\n maxval = patch01.max()\n patch01 = ((patch01.astype(np.float32)-minval)/(maxval-minval))*256\n patch01 = patch01.astype(np.uint8)\n pil_img = Image.fromarray(patch01)\n pil_img.save('../save/jpg/'+modelname+'/pred_B01.jpg')\n\n\n # 09\n print(\"Predicting B09\")\n nx = patch09.shape[0]\n ny = patch09.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B09.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B09.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch09) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch09.min()\n maxval = patch09.max()\n patch09 = ((patch09.astype(np.float32)-minval)/(maxval-minval))*256\n patch09 = patch09.astype(np.uint8)\n pil_img = Image.fromarray(patch09)\n 
pil_img.save('../save/jpg/'+modelname+'/pred_B09.jpg')\n\n\n\n # 10\n print(\"Predicting B10\")\n nx = patch10.shape[0]\n ny = patch10.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B10.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B10.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch10) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch10.min()\n maxval = patch10.max()\n patch10 = ((patch10.astype(np.float32)-minval)/(maxval-minval))*256\n patch10 = patch10.astype(np.uint8)\n pil_img = Image.fromarray(patch10)\n pil_img.save('../save/jpg/'+modelname+'/pred_B10.jpg')\n\n\n\n #====================================================================================================\n # 02, 03, 04, 08\n #====================================================================================================\n xstart = 2850\n ystart = 2850\n geotransform = (300000.0+(10.0*xstart), 10.0, 0.0, 3900000.0-(10.0*ystart), 0.0, -10.0)\n\n # 02\n print(\"Predicting B02\")\n nx = patch02.shape[0]\n ny = patch02.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B02.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B02.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch02) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch02.min()\n maxval = patch02.max()\n patch02 = ((patch02.astype(np.float32)-minval)/(maxval-minval))*256\n patch02 = patch02.astype(np.uint8)\n pil_img = Image.fromarray(patch02)\n pil_img.save('../save/jpg/'+modelname+'/pred_B02.jpg')\n\n # 03\n print(\"Predicting B03\")\n nx = patch03.shape[0]\n ny = patch03.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B03.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B03.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch03) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch03.min()\n maxval = patch03.max()\n patch03 = ((patch03.astype(np.float32)-minval)/(maxval-minval))*256\n patch03 = patch03.astype(np.uint8)\n pil_img = Image.fromarray(patch03)\n pil_img.save('../save/jpg/'+modelname+'/pred_B03.jpg')\n\n\n # 04\n print(\"Predicting B04\")\n nx = patch04.shape[0]\n ny = patch04.shape[1]\n\n ds = 
gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B04.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B04.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch04) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch04.min()\n maxval = patch04.max()\n patch04 = ((patch04.astype(np.float32)-minval)/(maxval-minval))*256\n patch04 = patch04.astype(np.uint8)\n pil_img = Image.fromarray(patch04)\n pil_img.save('../save/jpg/'+modelname+'/pred_B04.jpg')\n\n\n # 08\n print(\"Predicting B08\")\n nx = patch08.shape[0]\n ny = patch08.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B08.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B08.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch08) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch08.min()\n maxval = patch08.max()\n patch08 = ((patch08.astype(np.float32)-minval)/(maxval-minval))*256\n patch08 = patch08.astype(np.uint8)\n pil_img = Image.fromarray(patch08)\n pil_img.save('../save/jpg/'+modelname+'/pred_B08.jpg')\n\n\n #====================================================================================================\n # 05, 06, 07, 8A, 11, 12\n #====================================================================================================\n xstart = 1425\n ystart = 1425\n geotransform = (300000.0+(20.0*xstart), 20.0, 0.0, 3900000.0-(20.0*ystart), 0.0, -20.0)\n\n\n # 05\n print(\"Predicting B05\")\n nx = patch05.shape[0]\n ny = patch05.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B05.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B05.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch05) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch05.min()\n maxval = patch05.max()\n patch05 = ((patch05.astype(np.float32)-minval)/(maxval-minval))*256\n patch05 = patch05.astype(np.uint8)\n pil_img = Image.fromarray(patch05)\n pil_img.save('../save/jpg/'+modelname+'/pred_B05.jpg')\n\n\n # 06\n print(\"Predicting B06\")\n nx = patch06.shape[0]\n ny = patch06.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B06.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = 
gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B06.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch06) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch06.min()\n maxval = patch06.max()\n patch06 = ((patch06.astype(np.float32)-minval)/(maxval-minval))*256\n patch06 = patch06.astype(np.uint8)\n pil_img = Image.fromarray(patch06)\n pil_img.save('../save/jpg/'+modelname+'/pred_B06.jpg')\n\n\n\n # 07\n print(\"Predicting B07\")\n nx = patch07.shape[0]\n ny = patch07.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B07.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B07.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch07) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch07.min()\n maxval = patch07.max()\n patch07 = ((patch07.astype(np.float32)-minval)/(maxval-minval))*256\n patch07 = patch07.astype(np.uint8)\n pil_img = Image.fromarray(patch07)\n pil_img.save('../save/jpg/'+modelname+'/pred_B07.jpg')\n\n\n\n # 8A\n print(\"Predicting B8A\")\n nx = patch8A.shape[0]\n ny = patch8A.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B8A.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B8A.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch8A) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch8A.min()\n maxval = patch8A.max()\n patch8A = ((patch8A.astype(np.float32)-minval)/(maxval-minval))*256\n patch8A = patch8A.astype(np.uint8)\n pil_img = Image.fromarray(patch8A)\n pil_img.save('../save/jpg/'+modelname+'/pred_B8A.jpg')\n\n\n\n # 11\n print(\"Predicting B11\")\n nx = patch11.shape[0]\n ny = patch11.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B11.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B11.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch11) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch11.min()\n maxval = patch11.max()\n patch11 = 
((patch11.astype(np.float32)-minval)/(maxval-minval))*256\n patch11 = patch11.astype(np.uint8)\n pil_img = Image.fromarray(patch11)\n pil_img.save('../save/jpg/'+modelname+'/pred_B11.jpg')\n\n\n\n # 12\n print(\"Predicting B12\")\n nx = patch12.shape[0]\n ny = patch12.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B12.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B12.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch12) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch12.min()\n maxval = patch12.max()\n patch12 = ((patch12.astype(np.float32)-minval)/(maxval-minval))*256\n patch12 = patch12.astype(np.uint8)\n pil_img = Image.fromarray(patch12)\n pil_img.save('../save/jpg/'+modelname+'/pred_B12.jpg')\n\n\nif __name__ == '__main__':\n main()", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 20574, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 46, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 47, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 48, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 48, "usage_type": "call"}, {"api_name": "dataset.dataset.Landsat8DatasetHDF5", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 85, "usage_type": "call"}, {"api_name": "dataset.customTransform.DenormalizeS2", "line_number": 100, "usage_type": "call"}, {"api_name": "gdal.Open", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 134, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 134, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 136, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 136, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 145, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 146, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 146, "usage_type": 
"name"}, {"api_name": "gdal.Open", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 159, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 159, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 161, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 170, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 171, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 171, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 182, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 185, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 185, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 187, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 187, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 196, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 197, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 197, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 215, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 218, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 218, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 220, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 220, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 229, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 230, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 230, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 239, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 242, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 242, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 244, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 244, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 253, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 254, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 254, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 264, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 267, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 267, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 269, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 269, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 277, "usage_type": 
"attribute"}, {"api_name": "numpy.uint8", "line_number": 278, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 279, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 279, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 289, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 292, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 292, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 294, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 294, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 302, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 303, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 304, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 304, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 322, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 325, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 325, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 327, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 327, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 335, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 336, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 337, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 337, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 347, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 350, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 350, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 352, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 352, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 360, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 361, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 362, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 362, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 373, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 376, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 376, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 378, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 378, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 386, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 387, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 388, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 388, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 399, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 402, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 402, "usage_type": 
"attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 404, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 404, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 412, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 413, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 414, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 414, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 425, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 428, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 428, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 430, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 430, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 438, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 439, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 440, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 440, "usage_type": "name"}, {"api_name": "gdal.Open", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 451, "usage_type": "call"}, {"api_name": "gdal.GetDriverByName", "line_number": 454, "usage_type": "call"}, {"api_name": "gdal.GDT_Int16", "line_number": 454, "usage_type": "attribute"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 456, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 456, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 464, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 465, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 466, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 466, "usage_type": "name"}]} +{"seq_id": "108819504", "text": "from torch.nn.utils import clip_grad_norm_\nimport pickle as pkl\nimport torch\nfrom torch.nn import functional as F\nimport os\nfrom os.path import join, exists\nimport json\nimport math\n\ndef meta_save(path, word2id, net_args, train_params):\n \n if not exists(path):\n os.makedirs(path)\n\n with open(join(path, 'vocab.pkl'), 'wb') as f:\n pkl.dump(word2id, f, pkl.HIGHEST_PROTOCOL)\n\n meta = {}\n meta['net_args'] = net_args\n meta['traing_params'] = train_params\n\n with open(join(path, 'meta.json'), 'w') as f:\n json.dump(meta, f, indent=4)\n \n return meta\n\ndef get_basic_grad_fn(net, clip_grad, max_grad=1e2):\n def f():\n grad_norm = clip_grad_norm_(\n [p for p in net.parameters() if p.requires_grad], clip_grad) \n\n if max_grad is not None and grad_norm >= max_grad:\n\t\t\t# print('WARNING: Exploding Gradients {:.2f}'.format(grad_norm))\n grad_norm = max_grad\n grad_log = {}\n grad_log['grad_norm'] = grad_norm\n return grad_log\n return f\n\n\n#################### LSTM helper #########################\n\ndef reorder_sequence(sequence_emb, order, batch_first=False):\n \"\"\"\n sequence_emb: [T, B, D] if not batch_first\n order: list of sequence length\n \"\"\"\n batch_dim = 0 if batch_first else 1\n assert len(order) == sequence_emb.size()[batch_dim]\n\n order = torch.LongTensor(order).to(sequence_emb.device)\n sorted_ = sequence_emb.index_select(index=order, dim=batch_dim)\n\n return sorted_\n\ndef reorder_lstm_states(lstm_states, order):\n \"\"\"\n lstm_states: (H, C) of tensor [layer, batch, hidden]\n 
order: list of sequence length\n \"\"\"\n assert isinstance(lstm_states, tuple)\n assert len(lstm_states) == 2\n assert lstm_states[0].size() == lstm_states[1].size()\n assert len(order) == lstm_states[0].size()[1]\n\n order = torch.LongTensor(order).to(lstm_states[0].device)\n sorted_states = (lstm_states[0].index_select(index=order, dim=1),\n lstm_states[1].index_select(index=order, dim=1))\n\n return sorted_states\n\n#################### general sequence helper #########################\ndef len_mask(lens, device):\n \"\"\" users are responsible for shaping\n Return: tensor_type [B, T]\n \"\"\"\n # build a padding mask marking, per position, whether a real token is present\n max_len = max(lens)\n batch_size = len(lens)\n mask = torch.ByteTensor(batch_size, max_len).to(device)\n mask.fill_(0)\n for i, l in enumerate(lens):\n mask[i, :l].fill_(1) # e.g. shape (34, 81): token positions are 1, padded positions are 0\n return mask\n\ndef sequence_mean(sequence, seq_lens, dim=1):\n if seq_lens:\n assert sequence.size(0) == len(seq_lens) # batch_size\n sum_ = torch.sum(sequence, dim=dim, keepdim=False)\n mean = torch.stack([s/l for s, l in zip(sum_, seq_lens)], dim=0)\n else:\n mean = torch.mean(sequence, dim=dim, keepdim=False)\n return mean\n\ndef sequence_loss(logits, targets, xent_fn=None, pad_idx=0):\n \"\"\" functional interface of SequenceLoss\"\"\"\n assert logits.size()[:-1] == targets.size()\n\n mask = targets != pad_idx\n target = targets.masked_select(mask)\n logit = logits.masked_select(\n mask.unsqueeze(2).expand_as(logits)\n ).contiguous().view(-1, logits.size(-1))\n if xent_fn:\n loss = xent_fn(logit, target)\n else:\n loss = F.cross_entropy(logit, target)\n assert (not math.isnan(loss.mean().item())\n and not math.isinf(loss.mean().item()))\n return loss\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3478, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 16, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.ByteTensor", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 107, "usage_type": "name"}, {"api_name": "math.isnan", "line_number": 108, "usage_type": "call"}, {"api_name": "math.isinf", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "67984257", "text": "import sys\nimport re\nimport time\nimport copy\nimport numpy as np\nfrom models import utils\nfrom models.cross_validation import CrossValidation\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import 
SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nimport xgboost as xgb\nfrom xgboost import XGBClassifier\nimport lightgbm as lgb\nfrom lightgbm import LGBMClassifier\nfrom catboost import CatBoostClassifier\nfrom config import cfg\n\n\nclass ModelBase(object):\n \"\"\"\n Base Model Class of Models in scikit-learn Module\n \"\"\"\n def __init__(self, x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va=None, y_va=None, w_va=None, e_va=None, use_multi_group=False):\n\n self.x_train = x_tr\n self.y_train = y_tr\n self.w_train = w_tr\n self.e_train = e_tr\n self.x_test = x_te\n self.id_test = id_te\n\n self.x_global_valid = x_va\n self.y_global_valid = y_va\n self.w_global_valid = w_va\n self.e_global_valid = e_va\n\n self.importance = np.array([])\n self.indices = np.array([])\n self.std = np.array([])\n self.model_name = ''\n self.num_boost_round = 0\n self.use_multi_group = use_multi_group\n self.use_global_valid = False\n self.use_custom_obj = False\n self.postscale = False\n self.postscale_rate = None\n\n if cfg.group_list is None:\n if use_multi_group:\n raise ValueError(\"Groups not found! 'use_multi_group' should be False!\")\n\n @staticmethod\n def get_reg(parameters):\n\n print('This Is Base Model!')\n reg = DecisionTreeClassifier()\n\n return reg\n\n def print_start_info(self):\n\n print('------------------------------------------------------')\n print('This Is Base Model!')\n\n self.model_name = 'base'\n\n @staticmethod\n def select_category_variable(x_train, x_g_train, x_valid, x_g_valid, x_test, x_g_test):\n\n return x_train, x_valid, x_test\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Training Model\n reg.fit(x_train, y_train, sample_weight=w_train)\n\n return reg\n\n def get_pattern(self):\n return None\n\n def fit_with_round_log(self, boost_round_log_path, cv_count, x_train, y_train,\n w_train, x_valid, y_valid, w_valid, parameters,\n param_name_list, param_value_list, append_info=''):\n\n boost_round_log_path, _ = utils.get_boost_round_log_path(boost_round_log_path, self.model_name,\n param_name_list, param_value_list, append_info)\n boost_round_log_path += 'cv_cache/'\n utils.check_dir([boost_round_log_path])\n boost_round_log_path += self.model_name + '_cv_{}_log.txt'.format(cv_count)\n\n print('Saving Outputs to:', boost_round_log_path)\n print('------------------------------------------------------')\n\n open(boost_round_log_path, 'w+').close()\n\n with open(boost_round_log_path, 'a') as f:\n __console__ = sys.stdout\n sys.stdout = f\n reg = self.fit(x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters)\n sys.stdout = __console__\n\n with open(boost_round_log_path) as f:\n lines = f.readlines()\n idx_round_cv = []\n train_loss_round_cv = []\n valid_loss_round_cv = []\n global_valid_loss_round_cv = []\n pattern = self.get_pattern()\n for line in lines:\n if pattern.match(line) is not None:\n idx_round_cv.append(int(pattern.match(line).group(1)))\n train_loss_round_cv.append(float(pattern.match(line).group(2)))\n valid_loss_round_cv.append(float(pattern.match(line).group(3)))\n if self.use_global_valid:\n global_valid_loss_round_cv.append(float(pattern.match(line).group(4)))\n\n if self.use_global_valid:\n return 
reg, idx_round_cv, train_loss_round_cv, valid_loss_round_cv, global_valid_loss_round_cv\n else:\n return reg, idx_round_cv, train_loss_round_cv, valid_loss_round_cv\n\n def save_boost_round_log(self, boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, train_seed, cv_seed, csv_idx, parameters,\n param_name_list, param_value_list, append_info='',\n global_valid_loss_round_mean=None, profit=None):\n\n boost_round_log_upper_path = \\\n utils.get_boost_round_log_upper_path(\n boost_round_log_path, self.model_name, param_name_list, append_info)\n boost_round_log_path, param_name = \\\n utils.get_boost_round_log_path(\n boost_round_log_path, self.model_name,\n param_name_list, param_value_list, append_info)\n utils.save_boost_round_log_to_csv(\n self.model_name, boost_round_log_path, boost_round_log_upper_path, csv_idx,\n idx_round, valid_loss_round_mean, train_loss_round_mean, train_seed, cv_seed,\n parameters, param_name_list, param_value_list, param_name, profit=profit)\n if self.use_global_valid:\n utils.save_boost_round_log_gl_to_csv(\n self.model_name, boost_round_log_path, boost_round_log_upper_path,\n csv_idx, idx_round, valid_loss_round_mean, train_loss_round_mean,\n global_valid_loss_round_mean, train_seed, cv_seed, parameters,\n param_name_list, param_value_list, param_name, profit=profit)\n\n boost_round_log_path += 'final_logs/'\n utils.check_dir([boost_round_log_path])\n boost_round_log_path += self.model_name + '_' + str(csv_idx) + '_t-' \\\n + str(train_seed) + '_c-' + str(cv_seed) + '_log.csv'\n\n if self.use_global_valid:\n utils.save_final_boost_round_gl_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, global_valid_loss_round_mean, profit=profit)\n else:\n utils.save_final_boost_round_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, profit=profit)\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = reg.feature_importances_\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %d\" % (\n f + 1, self.indices[f], self.importance[self.indices[f]]))\n\n def predict(self, reg, x_test, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Test Result...')\n\n pred_test = np.array(reg.predict(x_test))\n\n if pred_path is not None:\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test)\n\n return pred_test\n\n def get_pred_train(self, reg, x_train, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Train Probability...')\n\n pred_train = np.array(reg.predict(x_train))[:, 1]\n\n if pred_path is not None:\n utils.save_pred_train_to_csv(pred_path, pred_train, self.y_train)\n\n return pred_train\n\n def save_csv_log(self, mode, csv_log_path, param_name_list, param_value_list, csv_idx,\n loss_train_w_mean, loss_valid_w_mean, acc_train, train_seed, cv_seed,\n n_valid, n_cv, parameters, boost_round_log_path=None,\n file_name_params=None, append_info='', loss_global_valid=None,\n acc_global_valid=None, profit=None):\n\n if mode == 'auto_grid_search':\n\n csv_log_path, param_name, param_info = \\\n utils.get_grid_search_log_path(csv_log_path, self.model_name,\n param_name_list, param_value_list, append_info)\n if self.use_global_valid:\n utils.save_grid_search_log_with_glv_to_csv(\n 
csv_idx, csv_log_path + param_name + '_',\n loss_train_w_mean, loss_valid_w_mean, acc_train, train_seed,\n loss_global_valid, acc_global_valid, cv_seed, n_valid, n_cv,\n parameters, param_name_list, param_value_list, profit=profit)\n csv_log_path += str(param_info) + '_'\n utils.save_grid_search_log_with_glv_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean, loss_valid_w_mean,\n acc_train, train_seed, loss_global_valid, acc_global_valid,\n cv_seed, n_valid, n_cv, parameters, param_name_list,\n param_value_list, profit=profit)\n else:\n utils.save_grid_search_log_to_csv(\n csv_idx, csv_log_path + param_name + '_', loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed, n_valid,\n n_cv, parameters, param_name_list, param_value_list, profit=profit)\n csv_log_path += str(param_info) + '_'\n utils.save_grid_search_log_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean, loss_valid_w_mean,\n acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n param_name_list, param_value_list, profit=profit)\n\n elif mode == 'auto_train_boost_round':\n\n boost_round_log_path, _ = \\\n utils.get_boost_round_log_path(\n boost_round_log_path, self.model_name, param_name_list, param_value_list, append_info)\n boost_round_log_path += self.model_name + '_' + append_info + '_'\n if self.use_global_valid:\n utils.save_grid_search_log_to_csv(\n csv_idx, boost_round_log_path, loss_train_w_mean, loss_valid_w_mean,\n acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n param_name_list, param_value_list, profit=profit)\n else:\n utils.save_final_loss_log_to_csv(\n csv_idx, boost_round_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed,\n n_valid, n_cv, parameters, profit=profit)\n\n elif mode == 'auto_train':\n\n csv_log_path += self.model_name + '/'\n utils.check_dir([csv_log_path])\n csv_log_path += self.model_name + '_' + append_info + '/'\n utils.check_dir([csv_log_path])\n csv_log_path += self.model_name + '_'\n if file_name_params is not None:\n for p_name in file_name_params:\n csv_log_path += str(parameters[p_name]) + '_'\n else:\n for p_name, p_value in parameters.items():\n csv_log_path += str(p_value) + '_'\n\n if self.use_global_valid:\n utils.save_log_with_glv_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n loss_global_valid, acc_global_valid, cv_seed,\n n_valid, n_cv, parameters, profit=profit)\n else:\n utils.save_final_loss_log_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n cv_seed, n_valid, n_cv, parameters, profit=profit)\n\n else:\n\n csv_log_path += self.model_name + '_' + append_info + '_'\n if self.use_global_valid:\n utils.save_log_with_glv_to_csv(csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n loss_global_valid, acc_global_valid, cv_seed,\n n_valid, n_cv, parameters, profit=profit)\n else:\n utils.save_final_loss_log_to_csv(csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n cv_seed, n_valid, n_cv, parameters, profit=profit)\n\n def save_final_pred(self, mode, pred_test_mean, pred_path, parameters, csv_idx,\n train_seed, cv_seed, boost_round_log_path=None, param_name_list=None,\n param_value_list=None, file_name_params=None, append_info=''):\n\n params = '_'\n if file_name_params is not None:\n for p_name in file_name_params:\n params += utils.get_simple_param_name(p_name) + \\\n '-' + str(parameters[p_name]) + '_'\n else:\n for p_name, p_value in 
parameters.items():\n params += utils.get_simple_param_name(p_name) + '-' + str(p_value) + '_'\n\n if mode == 'auto_train':\n\n pred_path += self.model_name + '/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + append_info + '/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + params + 'results/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + str(csv_idx) + \\\n '_t-' + str(train_seed) + '_c-' + str(cv_seed) + '_'\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test_mean)\n\n elif mode == 'auto_train_boost_round':\n\n boost_round_log_path, _ = \\\n utils.get_boost_round_log_path(\n boost_round_log_path, self.model_name, param_name_list, param_value_list, append_info)\n pred_path = boost_round_log_path + 'final_results/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + str(csv_idx) + \\\n '_t-' + str(train_seed) + '_c-' + str(cv_seed) + '_'\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test_mean)\n\n else:\n pred_path += 'final_results/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + append_info + '/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_t-' + str(train_seed) + '_c-' + str(cv_seed) + params\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test_mean)\n\n @staticmethod\n def get_postscale_rate(y):\n\n positive = 0\n for y_ in y:\n if y_ == 1:\n positive += 1\n\n positive_rate = positive / len(y)\n postscale_rate = len(y) / (2*positive)\n\n return positive_rate, postscale_rate\n\n @staticmethod\n def prescale(x_train, y_train, w_train, e_train):\n\n print('[W] PreScaling Train Set...')\n\n positive_idx = []\n negative_idx = []\n for i, y in enumerate(y_train):\n if y == 1:\n positive_idx.append(i)\n else:\n negative_idx.append(i)\n n_positive = len(positive_idx)\n n_negative = len(negative_idx)\n print('Number of Positive Labels: {}'.format(n_positive))\n print('Number of Negative Labels: {}'.format(n_negative))\n\n if n_positive > n_negative:\n positive_idx = list(np.random.choice(positive_idx, len(negative_idx), replace=False))\n elif n_negative > n_positive:\n negative_idx = list(np.random.choice(negative_idx, n_positive, replace=False))\n\n # Checking\n if len(positive_idx) != len(negative_idx):\n raise ValueError('PreScaling Failed! 
len(positive_idx) != len(negative_idx)!')\n else:\n print('Number of PreScaled Labels: {}'.format(len(positive_idx)))\n\n prescale_idx = list(np.sort(positive_idx + negative_idx))\n x_train = x_train[prescale_idx]\n y_train = y_train[prescale_idx]\n w_train = w_train[prescale_idx]\n e_train = e_train[prescale_idx]\n print('------------------------------------------------------')\n\n return x_train, y_train, w_train, e_train\n\n def lgb_postscale_feval(self, preds, train_data):\n\n pred = copy.deepcopy(preds)\n labels = train_data.get_label()\n weights = train_data.get_weight()\n pred *= self.postscale_rate\n loss = utils.log_loss_with_weight(pred, labels, weights)\n\n return 'binary_logloss', loss, False\n\n def xgb_postscale_feval(self, preds, train_data):\n\n pred = copy.deepcopy(preds)\n labels = train_data.get_label()\n weights = train_data.get_weight()\n pred *= self.postscale_rate\n loss = utils.log_loss_with_weight(pred, labels, weights)\n\n return 'logloss', loss\n\n def train(self, pred_path=None, loss_log_path=None, csv_log_path=None, boost_round_log_path=None,\n train_seed=None, cv_args=None, parameters=None, show_importance=False, show_accuracy=False,\n save_cv_pred=True, save_cv_pred_train=False, save_final_pred=True, save_final_pred_train=False,\n save_csv_log=True, csv_idx=None, prescale=False, postscale=False, use_global_valid=False,\n return_pred_test=False, mode=None, param_name_list=None, param_value_list=None,\n use_custom_obj=False, use_scale_pos_weight=False, file_name_params=None, append_info=None):\n\n # Check if directories exit or not\n utils.check_dir_model(pred_path, loss_log_path)\n utils.check_dir([pred_path, loss_log_path, csv_log_path, boost_round_log_path])\n\n # Global Validation\n self.use_global_valid = use_global_valid\n\n # Use Custom Objective Function\n self.use_custom_obj = use_custom_obj\n\n # Cross Validation Arguments\n cv_args_copy, n_valid, n_cv, n_era, cv_seed = utils.get_cv_args(cv_args, append_info)\n\n if csv_idx is None:\n csv_idx = self.model_name\n\n # Print Start Information and Get Model Name\n self.print_start_info()\n\n if use_global_valid:\n print('------------------------------------------------------')\n print('[W] Using Global Validation...')\n\n cv_count = 0\n pred_test_total = []\n pred_train_total = []\n loss_train_total = []\n loss_valid_total = []\n loss_train_w_total = []\n loss_valid_w_total = []\n idx_round = []\n train_loss_round_total = []\n valid_loss_round_total = []\n global_valid_loss_round_total = []\n pred_global_valid_total = []\n loss_global_valid_total = []\n loss_global_valid_w_total = []\n\n # Get Cross Validation Generator\n if 'cv_generator' in cv_args_copy:\n cv_generator = cv_args_copy['cv_generator']\n if cv_generator is None:\n cv_generator = CrossValidation.era_k_fold\n cv_args_copy.pop('cv_generator')\n else:\n cv_generator = CrossValidation.era_k_fold\n print('------------------------------------------------------')\n print('[W] Using CV Generator: {}'.format(getattr(cv_generator, '__name__')))\n\n if 'era_list' in cv_args_copy:\n print('Era List: ', cv_args_copy['era_list'])\n if 'window_size' in cv_args_copy:\n print('Window Size: ', cv_args_copy['window_size'])\n if 'cv_weights' in cv_args_copy:\n cv_weights = cv_args_copy['cv_weights']\n cv_args_copy.pop('cv_weights')\n if cv_weights is not None:\n if len(cv_weights) != n_cv:\n raise ValueError(\"The length of 'cv_weights'({}) should be equal to 'n_cv'({})!\"\n .format(len(cv_weights), n_cv))\n else:\n cv_weights = None\n\n # Training on Cross 
Validation Sets\n for x_train, y_train, w_train, e_train, x_valid, y_valid, w_valid, e_valid, valid_era \\\n in cv_generator(x=self.x_train, y=self.y_train,\n w=self.w_train, e=self.e_train, **cv_args_copy):\n\n # CV Start Time\n cv_start_time = time.time()\n\n cv_count += 1\n\n # Get Positive Rate of Train Set and postscale Rate\n positive_rate_train, postscale_rate = self.get_postscale_rate(y_train)\n positive_rate_valid, _ = self.get_postscale_rate(y_valid)\n\n # Remove Metric of Post Scale\n if postscale:\n self.postscale = True\n self.postscale_rate = postscale_rate\n if 'metric' in parameters.keys():\n parameters.pop('metric')\n if 'eval_metric' in parameters.keys():\n parameters.pop('eval_metric')\n\n if use_scale_pos_weight:\n if self.model_name == 'xgb':\n parameters['scale_pos_weight'] = postscale_rate\n\n print('------------------------------------------------------')\n print('Validation Set Era: ', valid_era)\n print('Number of Features: ', x_train.shape[1])\n print('------------------------------------------------------')\n print('Positive Rate of Train Set: {:.6f}'.format(positive_rate_train))\n print('Positive Rate of Valid Set: {:.6f}'.format(positive_rate_valid))\n print('------------------------------------------------------')\n\n # prescale\n if prescale:\n x_train, y_train, w_train, e_train = self.prescale(x_train, y_train, w_train, e_train)\n\n # Fitting and Training Model\n if mode == 'auto_train_boost_round':\n if use_global_valid:\n reg, idx_round_cv, train_loss_round_cv, \\\n valid_loss_round_cv, global_valid_loss_round_cv = \\\n self.fit_with_round_log(\n boost_round_log_path, cv_count, x_train, y_train, w_train, x_valid, y_valid,\n w_valid, parameters, param_name_list, param_value_list, append_info=append_info)\n global_valid_loss_round_total.append(global_valid_loss_round_cv)\n else:\n reg, idx_round_cv, train_loss_round_cv, valid_loss_round_cv = \\\n self.fit_with_round_log(\n boost_round_log_path, cv_count, x_train, y_train, w_train, x_valid, y_valid,\n w_valid, parameters, param_name_list, param_value_list, append_info=append_info)\n\n idx_round = idx_round_cv\n train_loss_round_total.append(train_loss_round_cv)\n valid_loss_round_total.append(valid_loss_round_cv)\n else:\n reg = self.fit(x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters)\n\n # Feature Importance\n if show_importance:\n self.get_importance(reg)\n\n # Prediction\n if save_cv_pred:\n cv_pred_path = \\\n pred_path + 'cv_results/' + self.model_name + '_cv_{}_'.format(cv_count)\n else:\n cv_pred_path = None\n pred_test = self.predict(reg, self.x_test, pred_path=cv_pred_path)\n\n # Save Train Probabilities to CSV File\n if save_cv_pred_train:\n cv_pred_train_path = \\\n pred_path + 'cv_pred_train/' + self.model_name + '_cv_{}_'.format(cv_count)\n else:\n cv_pred_train_path = None\n pred_train = self.get_pred_train(reg, x_train, pred_path=cv_pred_train_path)\n pred_train_all = self.get_pred_train(reg, self.x_train, pred_path=cv_pred_train_path)\n\n # Predict Global Validation Set\n if use_global_valid:\n pred_global_valid = self.predict(reg, self.x_global_valid)\n else:\n pred_global_valid = np.array([])\n\n # Get Probabilities of Validation Set\n pred_valid = self.predict(reg, x_valid)\n\n # postscale\n if postscale:\n print('------------------------------------------------------')\n print('[W] PostScaling Results...')\n print('PostScale Rate: {:.6f}'.format(postscale_rate))\n pred_test *= postscale_rate\n pred_train *= postscale_rate\n pred_valid *= postscale_rate\n if 
use_global_valid:\n pred_global_valid *= postscale_rate\n\n # Print LogLoss\n print('------------------------------------------------------')\n print('Validation Set Era: ', valid_era)\n loss_train, loss_valid, loss_train_w, loss_valid_w = \\\n utils.print_loss(pred_train, y_train, w_train, pred_valid, y_valid, w_valid)\n\n # Print and Get Accuracies of CV\n acc_train_cv, acc_valid_cv, acc_train_cv_era, acc_valid_cv_era = \\\n utils.print_and_get_accuracy(pred_train, y_train, e_train,\n pred_valid, y_valid, e_valid, show_accuracy)\n\n # Print Loss and Accuracy of Global Validation Set\n if use_global_valid:\n loss_global_valid, loss_global_valid_w, acc_global_valid = \\\n utils.print_global_valid_loss_and_acc(\n pred_global_valid, self.y_global_valid, self.w_global_valid)\n pred_global_valid_total.append(pred_global_valid)\n loss_global_valid_total.append(loss_global_valid)\n loss_global_valid_w_total.append(loss_global_valid_w)\n\n # Save Losses to File\n utils.save_loss_log(\n loss_log_path + self.model_name + '_', cv_count, parameters, n_valid, n_cv,\n valid_era, loss_train, loss_valid, loss_train_w, loss_valid_w, train_seed,\n cv_seed, acc_train_cv, acc_valid_cv, acc_train_cv_era, acc_valid_cv_era)\n\n pred_test_total.append(pred_test)\n pred_train_total.append(pred_train_all)\n loss_train_total.append(loss_train)\n loss_valid_total.append(loss_valid)\n loss_train_w_total.append(loss_train_w)\n loss_valid_w_total.append(loss_valid_w)\n\n # CV End Time\n print('------------------------------------------------------')\n print('CV Done! Using Time: {}s'.format(time.time() - cv_start_time))\n\n print('======================================================')\n print('Calculating Final Result...')\n\n # Calculate Means of pred and losses\n pred_test_mean, pred_train_mean, loss_train_mean, \\\n loss_valid_mean, loss_train_w_mean, loss_valid_w_mean = \\\n utils.calculate_means(pred_test_total, pred_train_total, loss_train_total, loss_valid_total,\n loss_train_w_total, loss_valid_w_total, weights=cv_weights)\n\n # Save 'num_boost_round'\n if self.model_name in ['xgb', 'lgb']:\n parameters['num_boost_round'] = self.num_boost_round\n\n # Calculate Profit\n profit = 0\n\n # Save Logs of num_boost_round\n if mode == 'auto_train_boost_round':\n if use_global_valid:\n train_loss_round_mean, valid_loss_round_mean, global_valid_loss_round_mean = \\\n utils.calculate_boost_round_means(\n train_loss_round_total, valid_loss_round_total, weights=cv_weights,\n global_valid_loss_round_total=global_valid_loss_round_total)\n self.save_boost_round_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, train_seed, cv_seed, csv_idx,\n parameters, param_name_list, param_value_list, append_info=append_info,\n global_valid_loss_round_mean=global_valid_loss_round_mean, profit=profit)\n else:\n train_loss_round_mean, valid_loss_round_mean = \\\n utils.calculate_boost_round_means(\n train_loss_round_total, valid_loss_round_total, weights=cv_weights)\n self.save_boost_round_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, train_seed, cv_seed, csv_idx, parameters,\n param_name_list, param_value_list, append_info=append_info, profit=profit)\n\n # Save Final Result\n if save_final_pred:\n self.save_final_pred(\n mode, pred_test_mean, pred_path, parameters, csv_idx, train_seed,\n cv_seed, boost_round_log_path, param_name_list, param_value_list,\n file_name_params=file_name_params, append_info=append_info)\n\n # Save Final pred_train\n if 
save_final_pred_train:\n utils.save_pred_train_to_csv(pred_path + 'final_pred_train/' + self.model_name + '_',\n pred_train_mean, self.y_train)\n\n # Print Total Losses\n utils.print_total_loss(loss_train_mean, loss_valid_mean, loss_train_w_mean,\n loss_valid_w_mean, profit=profit)\n\n # Print and Get Accuracies of CV of All Train Set\n acc_train, acc_train_era = \\\n utils.print_and_get_train_accuracy(pred_train_mean, self.y_train, self.e_train, show_accuracy)\n\n # Save Final Losses to File\n utils.save_final_loss_log(\n loss_log_path + self.model_name + '_', parameters, n_valid, n_cv,\n loss_train_mean, loss_valid_mean, loss_train_w_mean, loss_valid_w_mean,\n train_seed, cv_seed, acc_train, acc_train_era)\n\n # Print Global Validation Information and Save\n if use_global_valid:\n # Calculate Means of Probabilities and Losses\n pred_global_valid_mean, loss_global_valid_mean, loss_global_valid_w_mean = \\\n utils.calculate_global_valid_means(pred_global_valid_total, loss_global_valid_total,\n loss_global_valid_w_total, weights=cv_weights)\n # Print Loss and Accuracy\n acc_total_global_valid = \\\n utils.print_total_global_valid_loss_and_acc(\n pred_global_valid_mean, self.y_global_valid,\n loss_global_valid_mean, loss_global_valid_w_mean)\n # Save csv log\n if save_csv_log:\n self.save_csv_log(\n mode, csv_log_path, param_name_list, param_value_list, csv_idx, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n boost_round_log_path=boost_round_log_path, file_name_params=file_name_params,\n append_info=append_info, loss_global_valid=loss_global_valid_w_mean,\n acc_global_valid=acc_total_global_valid, profit=profit)\n\n # Save Loss Log to csv File\n if save_csv_log:\n if not use_global_valid:\n self.save_csv_log(\n mode, csv_log_path, param_name_list, param_value_list, csv_idx, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n boost_round_log_path=boost_round_log_path, file_name_params=file_name_params,\n append_info=append_info, profit=profit)\n\n # Remove 'num_boost_round' of parameters\n if 'num_boost_round' in parameters:\n parameters.pop('num_boost_round')\n\n # Return Final Result\n if return_pred_test:\n return pred_test_mean\n\n def stack_train(self, x_train, y_train, w_train, x_g_train, x_valid, y_valid,\n w_valid, x_g_valid, x_test, x_g_test, parameters, show_importance=False):\n\n # Select Group Variable\n x_train, x_valid, x_test = self.select_category_variable(x_train, x_g_train, x_valid,\n x_g_valid, x_test, x_g_test)\n\n # Print Start Information and Get Model Name\n self.print_start_info()\n print('Number of Features: ', x_train.shape[1])\n print('------------------------------------------------------')\n\n # Fitting and Training Model\n reg = self.fit(x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters)\n\n # Feature Importance\n if show_importance:\n self.get_importance(reg)\n\n # Prediction\n pred_train = self.predict(reg, x_train)\n pred_valid = self.predict(reg, x_valid)\n pred_test = self.predict(reg, x_test)\n\n # Print LogLoss\n loss_train, loss_valid, loss_train_w, loss_valid_w = \\\n utils.print_loss(pred_train, y_train, w_train, pred_valid, y_valid, w_valid)\n\n losses = [loss_train, loss_valid, loss_train_w, loss_valid_w]\n\n return pred_valid, pred_test, losses\n\n\nclass LRegression(ModelBase):\n \"\"\"\n Logistic Regression\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = 
LogisticRegression(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Logistic Regression...')\n\n self.model_name = 'lr'\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n self.importance = np.abs(reg.coef_)[0]\n indices = np.argsort(self.importance)[::-1]\n\n feature_num = self.x_train.shape[1]\n\n for f in range(feature_num):\n print(\"%d | feature %d | %f\" % (f + 1, indices[f], self.importance[indices[f]]))\n\n\nclass KNearestNeighbor(ModelBase):\n \"\"\"\n k-Nearest Neighbor Classifier\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = KNeighborsClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training k-Nearest Neighbor Classifier...')\n\n self.model_name = 'knn'\n\n\nclass SupportVectorClustering(ModelBase):\n \"\"\"\n SVM - Support Vector Classification (sklearn SVC)\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = SVC(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Support Vector Classification...')\n\n self.model_name = 'svc'\n\n\nclass Gaussian(ModelBase):\n \"\"\"\n Gaussian NB\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = GaussianNB(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Gaussian...')\n\n self.model_name = 'gs'\n\n\nclass DecisionTree(ModelBase):\n \"\"\"\n Decision Tree\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = DecisionTreeClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Decision Tree...')\n\n self.model_name = 'dt'\n\n\nclass RandomForest(ModelBase):\n \"\"\"\n Random Forest\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n # A regressor (not a classifier), so predictions come out as continuous scores\n reg = RandomForestRegressor(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Random Forest...')\n\n self.model_name = 'rf'\n\n\nclass ExtraTrees(ModelBase):\n \"\"\"\n Extra Trees\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = ExtraTreesClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Extra Trees...')\n\n self.model_name = 'et'\n\n\nclass AdaBoost(ModelBase):\n \"\"\"\n AdaBoost\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = AdaBoostClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training AdaBoost...')\n\n self.model_name = 'ab'\n\n\nclass GradientBoosting(ModelBase):\n \"\"\"\n Gradient Boosting\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = GradientBoostingClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Gradient 
Boosting...')\n\n self.model_name = 'gb'\n\n\nclass XGBoost(ModelBase):\n \"\"\"\n XGBoost\n \"\"\"\n def __init__(self, x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va=None, y_va=None, w_va=None, e_va=None,\n num_boost_round=None, use_multi_group=False):\n\n super(XGBoost, self).__init__(x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va, y_va, w_va, e_va, use_multi_group)\n\n self.num_boost_round = num_boost_round\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training XGBoost...')\n\n self.model_name = 'xgb'\n\n @staticmethod\n def logloss_obj(pred, d_train):\n\n y = d_train.get_label()\n\n grad = (pred - y) / ((1.0 - pred) * pred)\n hess = (pred * pred - 2.0 * pred * y + y) / ((1.0 - pred) * (1.0 - pred) * pred * pred)\n\n return grad, hess\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n d_train = xgb.DMatrix(x_train, label=y_train, weight=w_train)\n d_valid = xgb.DMatrix(x_valid, label=y_valid, weight=w_valid)\n\n # Booster\n if self.use_global_valid:\n d_gl_valid = xgb.DMatrix(self.x_global_valid, label=self.y_global_valid, weight=self.w_global_valid)\n eval_list = [(d_train, 'Train'), (d_valid, 'Valid'), (d_gl_valid, 'Global_Valid')]\n else:\n eval_list = [(d_train, 'Train'), (d_valid, 'Valid')]\n\n if self.postscale:\n if self.use_custom_obj:\n bst = xgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n evals=eval_list, obj=self.logloss_obj, feval=self.xgb_postscale_feval)\n else:\n bst = xgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n evals=eval_list, feval=self.xgb_postscale_feval)\n else:\n if self.use_custom_obj:\n bst = xgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n obj=self.logloss_obj, evals=eval_list)\n else:\n bst = xgb.train(parameters, d_train, num_boost_round=self.num_boost_round, evals=eval_list)\n\n return bst\n\n def get_pattern(self):\n\n if self.use_global_valid:\n if self.postscale:\n return re.compile(r'\\[(\\d*)\\].*\\tTrain-logloss:(.*)\\tValid-logloss:(.*)\\tGlobal_Valid-logloss:(.*)')\n else:\n return re.compile(r'\\[(\\d*)\\]\\tTrain-logloss:(.*)\\tValid-logloss:(.*)\\tGlobal_Valid-logloss:(.*)')\n else:\n if self.postscale:\n return re.compile(r'\\[(\\d*)\\].*\\tTrain-logloss:(.*)\\tValid-logloss:(.*)')\n else:\n return re.compile(r'\\[(\\d*)\\]\\tTrain-logloss:(.*)\\tValid-logloss:(.*)')\n\n def get_importance(self, model):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = model.get_fscore()\n sorted_importance = sorted(self.importance.items(), key=lambda d: d[1], reverse=True)\n\n feature_num = len(self.importance)\n\n for i in range(feature_num):\n print('{} | feature {} | {}'.format(i + 1, sorted_importance[i][0], sorted_importance[i][1]))\n\n def predict(self, model, x_test, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Test Probability...')\n\n pred_test = model.predict(xgb.DMatrix(x_test))\n\n if pred_path is not None:\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test)\n\n return pred_test\n\n def get_pred_train(self, model, x_train, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Train Probability...')\n\n pred_train = model.predict(xgb.DMatrix(x_train))\n\n if pred_path is not None:\n utils.save_pred_train_to_csv(pred_path, pred_train, self.y_train)\n\n return pred_train\n\n\nclass 
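# Hedged sketch of the custom-objective pattern used by the XGBoost class above. Note that\n# xgb.train passes raw margin scores to a custom obj, so the canonical binary-logloss version\n# applies a sigmoid first (the class above instead differentiates with respect to the\n# probability itself); all names below are illustrative:\n# import numpy as np\n# import xgboost as xgb\n# def logloss_obj_sketch(pred, dtrain):\n#     y = dtrain.get_label()\n#     p = 1.0 / (1.0 + np.exp(-pred))  # margin -> probability\n#     return p - y, p * (1.0 - p)  # gradient, hessian of logloss\n# X = np.random.rand(100, 5); y = (np.random.rand(100) > 0.5).astype(float)\n# bst = xgb.train({'objective': 'binary:logistic'}, xgb.DMatrix(X, label=y),\n#                 num_boost_round=10, obj=logloss_obj_sketch)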
SKLearnXGBoost(ModelBase):\n \"\"\"\n XGBoost using sklearn module\n \"\"\"\n @staticmethod\n def get_reg(parameters=None):\n\n print('Initialize Model...')\n reg = XGBClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training XGBoost(sklearn)...')\n\n self.model_name = 'xgb_sk'\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Training Model\n reg.fit(x_train, y_train, sample_weight=w_train,\n eval_set=[(x_train, y_train), (x_valid, y_valid)],\n early_stopping_rounds=100, eval_metric='logloss', verbose=True)\n\n return reg\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = reg.feature_importances_\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %f\" % (f + 1, self.indices[f], self.importance[self.indices[f]]))\n\n\nclass LightGBM(ModelBase):\n \"\"\"\n LightGBM\n \"\"\"\n def __init__(self, x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va=None, y_va=None, w_va=None, e_va=None,\n num_boost_round=None, use_multi_group=False):\n\n super(LightGBM, self).__init__(x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va, y_va, w_va, e_va, use_multi_group)\n\n self.num_boost_round = num_boost_round\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training LightGBM...')\n\n self.model_name = 'lgb'\n\n @staticmethod\n def select_category_variable(x_train, x_g_train, x_valid, x_g_valid, x_test, x_g_test):\n\n return x_g_train, x_g_valid, x_g_test\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Category Feature's Index\n idx_category = utils.get_idx_category(x_train, self.use_multi_group)\n\n d_train = lgb.Dataset(x_train, label=y_train, weight=w_train, categorical_feature=idx_category)\n d_valid = lgb.Dataset(x_valid, label=y_valid, weight=w_valid, categorical_feature=idx_category)\n\n # Booster\n if self.use_global_valid:\n d_gl_valid = lgb.Dataset(self.x_global_valid, label=self.y_global_valid,\n weight=self.w_global_valid, categorical_feature=idx_category)\n if self.postscale:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_gl_valid, d_train],\n valid_names=['Valid', 'Global_Valid', 'Train'], feval=self.lgb_postscale_feval)\n else:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_gl_valid, d_train],\n valid_names=['Valid', 'Global_Valid', 'Train'])\n else:\n if self.postscale:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_train], valid_names=['Valid', 'Train'],\n feval=self.lgb_postscale_feval)\n else:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_train], valid_names=['Valid', 'Train'])\n\n return bst\n\n def get_pattern(self):\n\n if self.use_global_valid:\n return re.compile(r\"\\[(\\d*)\\]\\tTrain\\'s binary_logloss: (.*)\\tValid\\'s binary_logloss:(.*)\\tGlobal_Valid\\'s binary_logloss:(.*)\")\n else:\n return re.compile(r\"\\[(\\d*)\\]\\tTrain\\'s binary_logloss: (.*)\\tValid\\'s binary_logloss:(.*)\")\n\n @staticmethod\n def logloss_obj(y, pred):\n\n grad = (pred - y) / ((1 - pred) * 
pred)\n hess = (pred * pred - 2 * pred * y + y) / ((1 - pred) * (1 - pred) * pred * pred)\n\n return grad, hess\n\n def get_importance(self, bst):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = bst.feature_importance()\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %d\" % (f + 1, self.indices[f], self.importance[self.indices[f]]))\n\n print('\\n')\n\n def predict(self, bst, x_test, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Test Probability...')\n\n pred_test = bst.predict(x_test)\n\n if pred_path is not None:\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test)\n\n return pred_test\n\n def get_pred_train(self, bst, x_train, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Train Probability...')\n\n pred_train = bst.predict(x_train)\n\n if pred_path is not None:\n utils.save_pred_train_to_csv(pred_path, pred_train, self.y_train)\n\n return pred_train\n\n\nclass SKLearnLightGBM(ModelBase):\n \"\"\"\n LightGBM using sklearn module\n \"\"\"\n @staticmethod\n def get_reg(parameters=None):\n\n print('Initialize Model...')\n reg = LGBMClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training LightGBM(sklearn)...')\n\n self.model_name = 'lgb_sk'\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Get Category Feature's Index\n idx_category = utils.get_idx_category(x_train, self.use_multi_group)\n\n # Fitting and Training Model\n reg.fit(x_train, y_train, sample_weight=w_train, categorical_feature=idx_category,\n eval_set=[(x_train, y_train), (x_valid, y_valid)], eval_names=['train', 'eval'],\n early_stopping_rounds=100, eval_sample_weight=[w_train, w_valid],\n eval_metric='logloss', verbose=True)\n\n return reg\n\n\nclass CatBoost(ModelBase):\n \"\"\"\n CatBoost\n \"\"\"\n @staticmethod\n def get_reg(parameters=None):\n\n reg = CatBoostClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training CatBoost...')\n\n self.model_name = 'cb'\n\n @staticmethod\n def select_category_variable(x_train, x_g_train, x_valid, x_g_valid, x_test, x_g_test):\n\n return x_g_train, x_g_valid, x_g_test\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Get Category Feature's Index\n idx_category = utils.get_idx_category(x_train, self.use_multi_group)\n\n # Convert Zeros in Weights to Small Positive Numbers\n w_train = [0.001 if w == 0 else w for w in w_train]\n\n # Fitting and Training Model\n reg.fit(X=x_train, y=y_train, cat_features=idx_category, sample_weight=w_train,\n baseline=None, use_best_model=None, eval_set=(x_valid, y_valid), verbose=True, plot=False)\n\n return reg\n\n def get_pattern(self):\n\n return re.compile(r'(\\d*):\\tlearn (.*)\\ttest (.*)\\tbestTest')\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = reg.feature_importances_\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = 
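# Hedged standalone sketch of the categorical-feature handling used by the LightGBM wrapper\n# above (both LightGBM and CatBoost take raw category indices instead of one-hot columns);\n# synthetic data, illustrative names:\n# import numpy as np\n# import lightgbm as lgb\n# X = np.random.randint(0, 4, size=(200, 3)).astype(float)\n# y = np.random.randint(0, 2, size=200)\n# d_train = lgb.Dataset(X, label=y, categorical_feature=[0, 1])\n# bst = lgb.train({'objective': 'binary', 'verbose': -1}, d_train, num_boost_round=20)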
len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %d\" % (f + 1, self.indices[f], self.importance[self.indices[f]]))\n", "sub_path": "src/models/classifiers.py", "file_name": "classifiers.py", "file_ext": "py", "file_size_in_byte": 50178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "config.cfg.group_list", "line_number": 55, "usage_type": "attribute"}, {"api_name": "config.cfg", "line_number": 55, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 63, "usage_type": "call"}, {"api_name": "models.utils.get_boost_round_log_path", "line_number": 96, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 96, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 99, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 99, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 109, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 111, "usage_type": "attribute"}, {"api_name": "models.utils.get_boost_round_log_upper_path", "line_number": 139, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 139, "usage_type": "name"}, {"api_name": "models.utils.get_boost_round_log_path", "line_number": 142, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 142, "usage_type": "name"}, {"api_name": "models.utils.save_boost_round_log_to_csv", "line_number": 145, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 145, "usage_type": "name"}, {"api_name": "models.utils.save_boost_round_log_gl_to_csv", "line_number": 150, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 150, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 157, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 157, "usage_type": "name"}, {"api_name": "models.utils.save_final_boost_round_gl_log", "line_number": 162, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 162, "usage_type": "name"}, {"api_name": "models.utils.save_final_boost_round_log", "line_number": 166, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.argsort", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "models.utils.save_pred_to_csv", "line_number": 192, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 201, "usage_type": "call"}, {"api_name": "models.utils.save_pred_train_to_csv", "line_number": 204, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 204, "usage_type": "name"}, {"api_name": "models.utils.get_grid_search_log_path", "line_number": 217, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 217, "usage_type": "name"}, {"api_name": "models.utils.save_grid_search_log_with_glv_to_csv", "line_number": 220, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 220, "usage_type": "name"}, {"api_name": "models.utils.save_grid_search_log_with_glv_to_csv", "line_number": 226, 
"usage_type": "call"}, {"api_name": "models.utils", "line_number": 226, "usage_type": "name"}, {"api_name": "models.utils.save_grid_search_log_to_csv", "line_number": 232, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 232, "usage_type": "name"}, {"api_name": "models.utils.save_grid_search_log_to_csv", "line_number": 237, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 237, "usage_type": "name"}, {"api_name": "models.utils.get_boost_round_log_path", "line_number": 245, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 245, "usage_type": "name"}, {"api_name": "models.utils.save_grid_search_log_to_csv", "line_number": 249, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 249, "usage_type": "name"}, {"api_name": "models.utils.save_final_loss_log_to_csv", "line_number": 254, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 254, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 262, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 262, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 264, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 264, "usage_type": "name"}, {"api_name": "models.utils.save_log_with_glv_to_csv", "line_number": 274, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 274, "usage_type": "name"}, {"api_name": "models.utils.save_final_loss_log_to_csv", "line_number": 280, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 280, "usage_type": "name"}, {"api_name": "models.utils.save_log_with_glv_to_csv", "line_number": 289, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 289, "usage_type": "name"}, {"api_name": "models.utils.save_final_loss_log_to_csv", "line_number": 294, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 294, "usage_type": "name"}, {"api_name": "models.utils.get_simple_param_name", "line_number": 305, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 305, "usage_type": "name"}, {"api_name": "models.utils.get_simple_param_name", "line_number": 309, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 309, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 314, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 314, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 316, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 316, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 318, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 318, "usage_type": "name"}, {"api_name": "models.utils.save_pred_to_csv", "line_number": 321, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 321, "usage_type": "name"}, {"api_name": "models.utils.get_boost_round_log_path", "line_number": 326, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 326, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 329, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 329, "usage_type": "name"}, {"api_name": "models.utils.save_pred_to_csv", "line_number": 332, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 332, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 336, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 336, "usage_type": "name"}, {"api_name": 
"models.utils.check_dir", "line_number": 338, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 338, "usage_type": "name"}, {"api_name": "models.utils.save_pred_to_csv", "line_number": 340, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 340, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 373, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 375, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 383, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 394, "usage_type": "call"}, {"api_name": "models.utils.log_loss_with_weight", "line_number": 398, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 398, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 404, "usage_type": "call"}, {"api_name": "models.utils.log_loss_with_weight", "line_number": 408, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 408, "usage_type": "name"}, {"api_name": "models.utils.check_dir_model", "line_number": 420, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 420, "usage_type": "name"}, {"api_name": "models.utils.check_dir", "line_number": 421, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 421, "usage_type": "name"}, {"api_name": "models.utils.get_cv_args", "line_number": 430, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 430, "usage_type": "name"}, {"api_name": "models.cross_validation.CrossValidation.era_k_fold", "line_number": 461, "usage_type": "attribute"}, {"api_name": "models.cross_validation.CrossValidation", "line_number": 461, "usage_type": "name"}, {"api_name": "models.cross_validation.CrossValidation.era_k_fold", "line_number": 464, "usage_type": "attribute"}, {"api_name": "models.cross_validation.CrossValidation", "line_number": 464, "usage_type": "name"}, {"api_name": "time.time", "line_number": 488, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 567, "usage_type": "call"}, {"api_name": "models.utils.print_loss", "line_number": 587, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 587, "usage_type": "name"}, {"api_name": "models.utils.print_and_get_accuracy", "line_number": 591, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 591, "usage_type": "name"}, {"api_name": "models.utils.print_global_valid_loss_and_acc", "line_number": 597, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 597, "usage_type": "name"}, {"api_name": "models.utils.save_loss_log", "line_number": 604, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 604, "usage_type": "name"}, {"api_name": "time.time", "line_number": 618, "usage_type": "call"}, {"api_name": "models.utils.calculate_means", "line_number": 626, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 626, "usage_type": "name"}, {"api_name": "models.utils.calculate_boost_round_means", "line_number": 640, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 640, "usage_type": "name"}, {"api_name": "models.utils.calculate_boost_round_means", "line_number": 650, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 650, "usage_type": "name"}, {"api_name": "models.utils.save_pred_train_to_csv", "line_number": 666, "usage_type": "call"}, {"api_name": "models.utils", 
"line_number": 666, "usage_type": "name"}, {"api_name": "models.utils.print_total_loss", "line_number": 670, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 670, "usage_type": "name"}, {"api_name": "models.utils.print_and_get_train_accuracy", "line_number": 675, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 675, "usage_type": "name"}, {"api_name": "models.utils.save_final_loss_log", "line_number": 678, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 678, "usage_type": "name"}, {"api_name": "models.utils.calculate_global_valid_means", "line_number": 687, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 687, "usage_type": "name"}, {"api_name": "models.utils.print_total_global_valid_loss_and_acc", "line_number": 691, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 691, "usage_type": "name"}, {"api_name": "models.utils.print_loss", "line_number": 746, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 746, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 761, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 776, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 777, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 793, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 813, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 833, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 853, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 873, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 893, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 913, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 933, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 977, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 978, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 982, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 989, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 992, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 996, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 999, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1007, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1009, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1012, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1014, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 1034, "usage_type": "call"}, {"api_name": "models.utils.save_pred_to_csv", "line_number": 1037, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 1037, "usage_type": "name"}, {"api_name": "xgboost.DMatrix", "line_number": 1046, "usage_type": "call"}, {"api_name": "models.utils.save_pred_train_to_csv", "line_number": 1049, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 1049, "usage_type": "name"}, {"api_name": "xgboost.XGBClassifier", "line_number": 1062, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 1091, "usage_type": "call"}, {"api_name": "models.utils.get_idx_category", "line_number": 1127, 
"usage_type": "call"}, {"api_name": "models.utils", "line_number": 1127, "usage_type": "name"}, {"api_name": "lightgbm.Dataset", "line_number": 1129, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 1130, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 1134, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 1137, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 1141, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 1146, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 1150, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1158, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1160, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 1176, "usage_type": "call"}, {"api_name": "models.utils.save_pred_to_csv", "line_number": 1193, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 1193, "usage_type": "name"}, {"api_name": "models.utils.save_pred_train_to_csv", "line_number": 1205, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 1205, "usage_type": "name"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 1218, "usage_type": "call"}, {"api_name": "models.utils.get_idx_category", "line_number": 1235, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 1235, "usage_type": "name"}, {"api_name": "catboost.CatBoostClassifier", "line_number": 1253, "usage_type": "call"}, {"api_name": "models.utils.get_idx_category", "line_number": 1275, "usage_type": "call"}, {"api_name": "models.utils", "line_number": 1275, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 1288, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 1296, "usage_type": "call"}]} +{"seq_id": "498077444", "text": "# -*- coding: utf-8 -*-\n# @Author : tongyuze\n# @Date & Time : 2019/4/1 0:00\n# @FileName : mpl_squares.py\n# @Software : PyCharm\n# --------------\nimport matplotlib.pyplot as plt\n\nvalues = [1, 2, 3, 4, 5, 6]\nsquares = [1, 4, 9, 16, 25, 36]\nplt.plot(values, squares, linewidth=3)\n\nplt.title('Square Numbers', fontsize=16)\nplt.xlabel('Value', fontsize=14)\nplt.ylabel('Square of Value', fontsize=14)\nplt.tick_params(axis='both', labelsize=14)\nplt.show()\n", "sub_path": "chapter15/mpl_squares.py", "file_name": "mpl_squares.py", "file_ext": "py", "file_size_in_byte": 463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "349850520", "text": "from __future__ 
import print_function\nimport numpy as np\nfrom scipy.interpolate import RegularGridInterpolator\nimport parameters as par\n\nclass Sigma:\n\n def __init__(self):\n\n log_mass, sigma, alpha = np.loadtxt(par.sigma_file, unpack=True)\n redshift, delta_crit = \\\n np.loadtxt(par.deltacrit_file, skiprows=1, unpack=True)\n\n self._sigma_interpolator = \\\n RegularGridInterpolator((log_mass,), sigma, bounds_error=False, \n fill_value=None)\n\n self._deltacrit_interpolator = \\\n RegularGridInterpolator((redshift,), delta_crit, bounds_error=False, \n fill_value=None)\n\n\n def sigma(self, log_mass, redshift):\n\n # sigma(M, z=0)\n sigma = self._sigma_interpolator(log_mass)\n\n # sigma(M, z)\n return sigma * \\\n self.deltacrit(redshift) / self.deltacrit(np.array([0,]))[0]\n\n def deltacrit(self, redshift):\n \n return self._deltacrit_interpolator(redshift)\n\nif __name__ == \"__main__\":\n\n s = Sigma()\n print(s.sigma(np.array([14,]), np.array([1,])))\n", "sub_path": "sigma.py", "file_name": "sigma.py", "file_ext": "py", "file_size_in_byte": 1132, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.loadtxt", "line_number": 10, "usage_type": "call"}, {"api_name": "parameters.sigma_file", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 12, "usage_type": "call"}, {"api_name": "parameters.deltacrit_file", "line_number": 12, "usage_type": "attribute"}, {"api_name": "scipy.interpolate.RegularGridInterpolator", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.interpolate.RegularGridInterpolator", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "97989577", "text": "import re, json, requests\nfrom os import path\nfrom mygene import MyGeneInfo\nfrom medoo import Raw, Field\nfrom diot import Diot\nfrom pyppl.utils import always_list\nfrom bioprocs.utils.cache import Cache\nfrom bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord\nfrom tempfile import gettempdir\n\n\"\"\"\n`notfound`: What if a symbol is not found. 
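(Here 'not found' means the query matched nothing in either the local cache or the remote mygene.info lookup used below -- for example, a retired alias. The modes listed next decide whether such rows are dropped, passed through unchanged, or treated as fatal.)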
Default: skip\n\t- skip : skip the record(don't write it to output file)\n\t- ignore: use the original name;\n\t- error : report erro\n\"\"\"\nSPECIES = {\n\t'hg19': 'human',\n\t'hg38': 'human',\n\t'mm9' : 'mouse',\n\t'mm10': 'mouse'\n}\nTAXIDS = {\n\t'hg19': 9606,\n\t'hg38': 9606,\n\t'mm9' : 10090,\n\t'mm10': 10090\n}\n# local to remote\nFIELD_L2M = {\n\t'ensembl_gene' : 'ensembl.gene',\n\t'ensembl_protein' : 'ensembl.protein',\n\t'ensembl_transcript': 'ensembl.transcript',\n\t'refseq_genomic' : 'refseq.genomic',\n\t'refseq_rna' : 'refseq.rna',\n\t'refseq_protein' : 'refseq.protein',\n\t'uniprot_Swiss_Prot': 'uniprot.Swiss-Prot',\n}\n# remote to local\nFIELD_M2L = {\n\t'ensembl.gene' : 'ensembl_gene',\n\t'ensembl.protein' : 'ensembl_protein',\n\t'ensembl.transcript': 'ensembl_transcript',\n\t'refseq.genomic' : 'refseq_genomic',\n\t'refseq.rna' : 'refseq_rna',\n\t'refseq.protein' : 'refseq_protein',\n\t'uniprot.Swiss-Prot': 'uniprot_Swiss_Prot'\n}\nclass RecordNotFound(Exception):\n\tpass\n\ndef replaceList(l, search, replace):\n\tif not isinstance(search, list):\n\t\tsearch = [search]\n\tret = l[:]\n\tfor i, e in enumerate(ret):\n\t\tif e in search:\n\t\t\tret[i] = replace\n\treturn ret\n\ndef querygene(*args, **kwargs):\n\trets = []\n\ttry:\n\t\tmgret = MyGeneInfo().querymany(*args, **kwargs)\n\texcept requests.exceptions.ConnectionError:\n\t\treturn rets\n\tfor ret in mgret:\n\t\tout = {}\n\t\trets.append(out)\n\t\tfor key, val in ret.items():\n\t\t\tif 'ensembl' == key:\n\t\t\t\tensembl = val[0] if isinstance(val, list) else (val or {})\n\t\t\t\tout['ensembl_gene'] = ensembl.get('gene', '')\n\t\t\t\tout['ensembl_protein'] = ensembl.get('protein', [])\n\t\t\t\tout['ensembl_transcript'] = ensembl.get('transcript', [])\n\t\t\telif 'refseq' == key:\n\t\t\t\trefseq = val[0] if isinstance(val, list) else (val or {})\n\t\t\t\tout['refseq_genomic'] = refseq.get('genomic', [])\n\t\t\t\tout['refseq_rna'] = refseq.get('rna', [])\n\t\t\t\tout['refseq_protein'] = refseq.get('protein', [])\n\t\t\telif 'uniprot' == key:\n\t\t\t\tuniprot = val[0] if isinstance(val, list) else (val or {})\n\t\t\t\tout['uniprot_Swiss_Prot'] = uniprot.get('Swiss-Prot', [])\n\t\t\telse:\n\t\t\t\tout[key] = val\n\treturn rets\n\nfields2local = lambda keys: [FIELD_M2L.get(key, key) for key in keys]\nfields2remote = lambda keys: [FIELD_L2M.get(key, key) for key in keys]\n\ndef genenorm(infile, outfile = None, notfound = 'ignore', frm = 'symbol, alias', to = 'symbol', genome = 'hg19', inopts = None, outopts = None, genecol = None, cachedir = gettempdir()):\n\n\t_inopts = Diot(skip = 0, comment = '#', delimit = '\\t')\n\t_inopts.update(inopts or {})\n\tinopts = _inopts\n\n\t_outopts = Diot(delimit = '\\t', append = False, query = False, head = True)\n\t_outopts.update(outopts or {})\n\toutopts = _outopts\n\toutquery = outopts.get('query', False)\n\touthead = outopts.get('head', outopts.get('cnames', True))\n\tif 'query' in outopts:\n\t\toutquery = outopts['query']\n\t\tdel outopts['query']\n\tif 'head' in outopts:\n\t\touthead = outopts['head']\n\t\tdel outopts['head']\n\tif 'cnames' in outopts:\n\t\touthead = outopts['cnames']\n\t\tdel outopts['cnames']\n\n\treader = TsvReader(infile, **inopts)\n\t#if not reader.meta: reader.autoMeta()\n\tgenecol = genecol or 0\n\tgenes = set()\n\tncol = 0\n\tfor r in reader:\n\t\tncol = ncol or len(r)\n\t\tgenes.add(r[genecol].strip())\n\treader.rewind()\n\tif not reader.meta:\n\t\treader.meta.extend(['COL' + str(i + 1) for i in range(ncol)])\n\tgenes = list(genes)\n\n\tdbfile = 
path.join(cachedir, 'geneinfo.db')\n\tcache = Cache(dbfile, 'geneinfo', {\n\t\t'_id' : 'text',\n\t\t'symbol' : 'text',\n\t\t'HGNC' : 'int',\n\t\t'alias' : \"text default ''\",\n\t\t'ensembl_gene' : 'text',\n\t\t'ensembl_protein' : 'text',\n\t\t'ensembl_transcript': 'text',\n\t\t'refseq_genomic' : 'text',\n\t\t'refseq_rna' : 'text',\n\t\t'refseq_protein' : 'text',\n\t\t'entrezgene' : 'int',\n\t\t'genomic_pos' : 'text',\n\t\t'genomic_pos_hg19' : 'text',\n\t\t'genomic_pos_mm9' : 'text',\n\t\t'ipi' : 'text',\n\t\t'pfam' : \"text default ''\",\n\t\t'pdb' : 'text',\n\t\t'type_of_gene' : 'text',\n\t\t'taxid' : 'int',\n\t\t'uniprot_Swiss_Prot': \"text default ''\",\n\t}, '_id')\n\n\tdummies = {\n\t\t'symbol' : 'iplain',\n\t\t'alias' : 'iarray',\n\t\t'pfam' : 'iarray',\n\t\t'uniprot' : 'iarray',\n\t\t'genomic_pos' : 'json',\n\t\t'genomic_pos_hg19' : 'json',\n\t\t'genomic_pos_mm9' : 'json',\n\t\t'ipi' : 'iarray',\n\t\t'pdb' : 'iarray',\n\t\t'refseq_genomic' : 'iarray',\n\t\t'refseq_protein' : 'iarray',\n\t\t'refseq_rna' : 'iarray',\n\t\t'ensembl_protein' : 'iarray',\n\t\t'ensembl_transcript': 'iarray',\n\t\t'uniprot_Swiss_Prot': 'iarray',\n\t}\n\n\t# query from cache\n\ttocols = always_list(to)\n\t# alias\n\ttocols = replaceList(tocols, ['ensg', 'ensemblgene', 'ensembl'], 'ensembl.gene')\n\ttocols = replaceList(tocols, ['uniprot'], 'uniprot.Swiss-Prot')\n\ttocols = replaceList(tocols, ['refseq'], 'refseq.rna')\n\ttocols = fields2local(tocols)\n\n\tfrmcols = always_list(frm)\n\tfrmcols = replaceList(frmcols, ['ensg', 'ensemblgene', 'ensembl'], 'ensembl.gene')\n\tfrmcols = replaceList(frmcols, ['uniprot'], 'uniprot.Swiss-Prot')\n\tfrmcols = replaceList(frmcols, ['refseq'], 'refseq.rna')\n\tfrmcols = fields2local(frmcols)\n\n\tcolumns = list(set(tocols + frmcols + ['taxid']))\n\tfrmkeys = ','.join(frmcols)\n\n\tallfound, allrest = cache.query(columns, {frmkeys: genes, 'taxid': TAXIDS[genome]}, dummies)\n\t# query from api\n\tmgret = querygene(allrest[frmkeys], scopes = fields2remote(frmcols), fields = fields2remote(columns), species = SPECIES[genome])\n\t# get all result for each query\n\tgenetmp = {}\n\tfor gret in mgret:\n\t\tif not gret['query'] in genetmp:\n\t\t\tgenetmp[gret['query']] = []\n\t\tgenetmp[gret['query']].append(gret)\n\n\tgenemap = {}\n\tdata2save = {}\n\tfor query, gret in genetmp.items():\n\t\t# re-score the items if query is entirely matched\n\t\tscore = 0\n\t\tgr = None\n\t\tfor g in gret:\n\t\t\t# not all result returned\n\t\t\tif not all([x in g for x in tocols]): continue\n\n\t\t\tif any([g[x] == query for x in tocols]):\n\t\t\t\tthescore = g['_score'] + 10000\n\t\t\telif any([str(g[x]).upper() == query.upper() for x in tocols]):\n\t\t\t\tthescore = g['_score'] + 5000\n\t\t\telif any([x in g and query.upper() in [str(u).upper() for u in list(g[x])] for x in tocols]):\n\t\t\t\tthescore = g['_score'] + 1000\n\t\t\telse:\n\t\t\t\tthescore = g['_score']\n\t\t\tif thescore > score:\n\t\t\t\tscore = thescore\n\t\t\t\tgr = g\n\n\t\tif not gr: continue\n\t\tdel gr['_score']\n\t\tdel gr['query']\n\t\tgr = Cache._result({x:(gr[x] if x in gr else '') for x in set(columns + list(gr.keys()))}, dummies)\n\t\tfor x, val in gr.items():\n\t\t\tif not x in data2save:\n\t\t\t\tdata2save[x] = []\n\t\t\tdata2save[x].append(val)\n\t\tgenemap[query] = gr\n\n\t# add cached data\n\tfor i, ret in allfound.items():\n\t\tquery = genes[i]\n\t\tgenemap[query] = ret\n\n\t#del genetmp\n\t#print genemap\n\n\t# cache genemap\n\t#cachedata = {}\n\t#querys = genemap.keys()\n\t#for query in querys:\n\t#\tfor k, v 
in genemap[query].items():\n\t#\t\tif not k in cachedata:\n\t#\t\t\tcachedata[k] = []\n\t#\t\tcachedata[k].append(v)\n\n\t#if cachedata:\n\t#\tcache.save(cachedata, cachefactory)\n\t#\tdel cachedata\n\tif data2save:\n\t\t# make it unique\n\t\tds_keys = list(data2save.keys())\n\t\tdata2save_uniq = {k:[] for k in ds_keys}\n\t\ttmp_container = []\n\t\tfor i in range(len(data2save[ds_keys[0]])):\n\t\t\ttmp = {k:data2save[k][i] for k in ds_keys}\n\t\t\tif not tmp in tmp_container:\n\t\t\t\ttmp_container.append(tmp)\n\t\t\t\tfor k in ds_keys:\n\t\t\t\t\tdata2save_uniq[k].append(data2save[k][i])\n\n\t\tcache.save(data2save_uniq, dummies)\n\n\tif outfile:\n\t\twriter = TsvWriter(outfile, **outopts)\n\t\twriter.meta.extend(reader.meta)\n\t\tif outquery:\n\t\t\twriter.meta.append('_QUERY')\n\n\t\tif len(tocols) > 1:\n\t\t\tgcolidx = genecol if isinstance(genecol, int) else writer.meta.index(genecol)\n\t\t\twriter.meta[(gcolidx+1):(gcolidx+1)] = [(tocol, None) for tocol in tocols[1:]]\n\n\t\tif outhead:\n\t\t\twriter.writeHead()\n\t\t#print writer.meta\n\n\t\ti = 0\n\t\tfor row in reader:\n\t\t\tr = TsvRecord(row.values(), reader.meta)\n\n\t\t\t#if (i <= 10): print r\n\t\t\tquery = r[genecol].strip()\n\t\t\tif query not in genemap:\n\t\t\t\tif notfound == 'error':\n\t\t\t\t\traise RecordNotFound('Record not found: %s' % query)\n\t\t\t\telif notfound == 'skip':\n\t\t\t\t\tcontinue\n\t\t\t\tif len(tocols) > 1:\n\t\t\t\t\tfor tocol in tocols[1:]:\n\t\t\t\t\t\tr[tocol] = ''\n\t\t\telse:\n\t\t\t\t#if (i <= 10): print genecol\n\t\t\t\tr[genecol] = genemap[query][tocols[0]]\n\t\t\t\t#if (i <= 10): print genemap[query][tocols[0]], r\n\t\t\t\tif len(tocols) > 1:\n\t\t\t\t\tfor tocol in tocols[1:]:\n\t\t\t\t\t\tr[tocol] = genemap[query][tocol]\n\n\t\t\tif outquery:\n\t\t\t\tr._QUERY = query\n\n\t\t\t#if (i <= 10): print r\n\t\t\ti += 1\n\t\t\twriter.write(r)\n\n\treturn genemap\n", "sub_path": "bioprocs/utils/gene.py", "file_name": "gene.py", "file_ext": "py", "file_size_in_byte": 8851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "mygene.MyGeneInfo", "line_number": 64, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 91, "usage_type": "call"}, {"api_name": "diot.Diot", "line_number": 93, "usage_type": "call"}, {"api_name": "diot.Diot", "line_number": 97, "usage_type": "call"}, {"api_name": "bioprocs.utils.tsvio2.TsvReader", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "name"}, {"api_name": "bioprocs.utils.cache.Cache", "line_number": 126, "usage_type": "call"}, {"api_name": "pyppl.utils.always_list", "line_number": 168, "usage_type": "call"}, {"api_name": "pyppl.utils.always_list", "line_number": 175, "usage_type": "call"}, {"api_name": "bioprocs.utils.cache.Cache._result", "line_number": 219, "usage_type": "call"}, {"api_name": "bioprocs.utils.cache.Cache", "line_number": 219, "usage_type": "name"}, {"api_name": "bioprocs.utils.tsvio2.TsvWriter", "line_number": 261, "usage_type": "call"}, {"api_name": "bioprocs.utils.tsvio2.TsvRecord", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "315618198", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 15:31:09 2017\n\n@author: gibraanrahman\n\nScript to plot tumor 170410 t-SNE data.\n\"\"\"\n\nimport os\nimport 
re\nimport tSNETools\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nfrom bhtsne import run_bh_tsne\nfrom matplotlib.pylab import savefig\n\ndef get_nolgcl_data():\n \"\"\"Import untransformed data csv.\"\"\"\n filename_comp_nolgcl = '170410_Tumor/woPI_comp.csv'\n comp_nolgcl_data = pd.read_csv(filename_comp_nolgcl)\n return comp_nolgcl_data[comp_nolgcl_data['FSC-A'] > 50000]\n\ndef get_data():\n \"\"\"Import transformed data csv and pre-process.\"\"\"\n filename_comp = '170410_Tumor/woPI_comp_lgcl.csv' \n comp_data = pd.read_csv(filename_comp)\n \n comp_data = comp_data[comp_data['FSC-A'] > 50000]\n comp_data = comp_data.drop('Alexa Fluor 700-A_Comp', axis=1)\n comp_data = comp_data.drop('SSC-A', axis=1)\n \n comp_channels = comp_data.drop('FSC-A', axis=1)\n comp_channels_min = comp_channels.values.min()\n comp_channels_max = comp_channels.values.max()\n \n comp_FSC = comp_data['FSC-A'].values.reshape(-1, 1)\n \n min_max_scaler = MinMaxScaler((comp_channels_min, comp_channels_max))\n comp_data['FSC-A'] = min_max_scaler.fit_transform(comp_FSC)\n \n return comp_data\n\ndef run_tSNE(flow_data):\n \"\"\"Perform t-SNE dimensionality reduction.\"\"\"\n tSNE = run_bh_tsne(flow_data, initial_dims=flow_data.shape[1],\n randseed=1, max_iter=1000)\n df = pd.DataFrame(tSNE)\n \n df.to_csv('170410_Tumor/1000it_tseed1_comp.csv', index=False)\n\ndef load_tSNE():\n \"\"\"Load pre-calculated t-SNE data.\"\"\"\n return pd.read_csv('170410_Tumor/1000it_tseed1_comp.csv')\n\ndef plot_tSNE(df, flow_data, nolgcl_data):\n \"\"\"Plot tSNE maps colored by parameters.\"\"\"\n for col in flow_data.columns:\n tSNETools.heatmap(df, flow_data[col], nolgcl_data[col])\n try:\n os.mkdir('170410_Tumor/Plots')\n except FileExistsError:\n pass\n filename = '170410_Tumor/Plots/1000it_tseed1_{}'.format(col)\n chan = re.sub('_Comp', '', col)\n plt.xlabel('t-SNE X1')\n plt.ylabel('t-SNE X2')\n #plt.title('Tumor t-SNE Map Colored By {}'.format(chan))\n savefig(filename)\n plt.close()\n\n \n\n", "sub_path": "170410_heatmap.py", "file_name": "170410_heatmap.py", "file_ext": "py", "file_size_in_byte": 2338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 41, "usage_type": "call"}, {"api_name": "bhtsne.run_bh_tsne", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "tSNETools.heatmap", "line_number": 61, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "36479911", "text": "from rest_framework 
import serializers\nfrom django.core.validators import RegexValidator\nimport re\nfrom django.core.validators import MinLengthValidator,MaxLengthValidator\nfrom helper.validate import validate_activation_pass\nfrom gtb import constant\nfrom .models import Activation\n\n\nclass ComputerInfoializer(serializers.Serializer):\n name = serializers.CharField(required=True, validators=[MaxLengthValidator(150)])\n windows_product_id = serializers.CharField(\n required=True,\n validators=[\n MaxLengthValidator(24),\n RegexValidator(\n regex=re.compile(constant.PATTERN_WINDOWS_PRODUCT_ID),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers,hyphen'\n )\n ])\n mac_address = serializers.CharField(\n required=True,\n validators=[\n MaxLengthValidator(20),\n RegexValidator(\n regex=re.compile(constant.PATTERN_MAC_ADDRESS),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers,hyphen'\n )\n ])\n drive_serial_number = serializers.CharField(\n required=True,\n validators=[\n MaxLengthValidator(20),\n RegexValidator(\n regex=re.compile(constant.PATTERN_DRIVE_SERIAL_NUMBER),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers,hyphen'\n )\n ])\n\n\nclass ActivationSerializer(serializers.Serializer):\n license_id = serializers.CharField(\n required=True,\n validators=[\n MinLengthValidator(6),\n MaxLengthValidator(6),\n RegexValidator(\n regex=re.compile(constant.PATTERN_PATTERN_ALPHANUMERIC),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers'\n )\n ]\n )\n application_name = serializers.CharField(required=True)\n activate_password = serializers.CharField(validators=[MaxLengthValidator(128), validate_activation_pass], required=True)\n computer_info = ComputerInfoializer(required=True)\n locale = serializers.CharField(required=True)\n\n\nclass AcitvationModelSerializer(serializers.ModelSerializer):\n license_key = serializers.StringRelatedField(source='license')\n product_name = serializers.SerializerMethodField()\n\n class Meta:\n model = Activation\n exclude = ('is_deleted',)\n\n def get_product_name(self, obj):\n return obj.license.product.product_name\n", "sub_path": "apps/admin/activation/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 2587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "rest_framework.serializers.Serializer", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 11, "usage_type": "name"}, {"api_name": "django.core.validators.MaxLengthValidator", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 12, "usage_type": "name"}, {"api_name": "django.core.validators.MaxLengthValidator", "line_number": 15, "usage_type": "call"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 16, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "gtb.constant.PATTERN_WINDOWS_PRODUCT_ID", "line_number": 17, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 17, "usage_type": "name"}, {"api_name": "gtb.constant.INVALID_FORMAT_CODE", 
"line_number": 18, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.validators.MaxLengthValidator", "line_number": 25, "usage_type": "call"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 26, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "gtb.constant.PATTERN_MAC_ADDRESS", "line_number": 27, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 27, "usage_type": "name"}, {"api_name": "gtb.constant.INVALID_FORMAT_CODE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 28, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 32, "usage_type": "name"}, {"api_name": "django.core.validators.MaxLengthValidator", "line_number": 35, "usage_type": "call"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 36, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 37, "usage_type": "call"}, {"api_name": "gtb.constant.PATTERN_DRIVE_SERIAL_NUMBER", "line_number": 37, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 37, "usage_type": "name"}, {"api_name": "gtb.constant.INVALID_FORMAT_CODE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.serializers.Serializer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 44, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 45, "usage_type": "name"}, {"api_name": "django.core.validators.MinLengthValidator", "line_number": 48, "usage_type": "call"}, {"api_name": "django.core.validators.MaxLengthValidator", "line_number": 49, "usage_type": "call"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 50, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 51, "usage_type": "call"}, {"api_name": "gtb.constant.PATTERN_PATTERN_ALPHANUMERIC", "line_number": 51, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 51, "usage_type": "name"}, {"api_name": "gtb.constant.INVALID_FORMAT_CODE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gtb.constant", "line_number": 52, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 57, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 57, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 58, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 58, "usage_type": "name"}, {"api_name": "django.core.validators.MaxLengthValidator", "line_number": 58, "usage_type": "call"}, {"api_name": "helper.validate.validate_activation_pass", "line_number": 58, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 60, "usage_type": "name"}, {"api_name": 
"rest_framework.serializers.ModelSerializer", "line_number": 63, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 63, "usage_type": "name"}, {"api_name": "rest_framework.serializers.StringRelatedField", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 64, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 65, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 65, "usage_type": "name"}, {"api_name": "models.Activation", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "184472472", "text": "\nfrom secrets import token_bytes\nfrom typing import Tuple\n\n\ndef random_key(length: int) -> int:\n tb: bytes = token_bytes(length)\n return int.from_bytes(tb, \"big\")\n\n\ndef encrypt(orig: str) -> Tuple[int, int]:\n orig_bytes: bytes = orig.encode()\n dummy: int = random_key(len(orig_bytes))\n orig_key: int = int.from_bytes(orig_bytes, \"big\")\n encrypted: int = orig_key ^ dummy\n return dummy, encrypted\n\n\ndef decrypt(key1: int, key2: int) -> str:\n decrypted: int = key1 ^ key2\n temp: bytes = decrypted.to_bytes((decrypted.bit_length() + 7) // 8, \"big\")\n return temp.decode()\n\n\nif __name__ == \"__main__\":\n key1, key2 = encrypt(\"masooria\")\n result: str = decrypt(key1, key2)\n print(result)\n", "sub_path": "0encryption.py", "file_name": "0encryption.py", "file_ext": "py", "file_size_in_byte": 720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "secrets.token_bytes", "line_number": 7, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "40690719", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n#ファイル読み込み&クリーニング\ndef read_clean(url,raceName,day):\n #alldata = pd.read_csv(\"C:\\\\Users\\\\owner\\\\PycharmProjects\\\\keirin\\\\df_result.csv\")\n alldata = pd.read_csv(url,encoding='s-jis')\n # 不要なカラムを削除\n #デバッグ用にデータ削減\n #msk = np.random.rand(len(alldata)) < 0.00001\n #alldata = alldata[msk]\n\n for col in ['レース日','出身県','名前','競輪場','結果上がりタイム','結果着差(cm)','地名','タイム','出身地方','KEYSTRING','競走時刻','出身県.1','結果SB']:\n del alldata[col]\n\n alldata =alldata[(alldata['レース名'] == raceName)]\n alldata =alldata[(alldata['日数'] == day)]\n\n # カテゴリカル変数にする\n for col in ['レース名','日数','級班','脚質','競輪場コード','バック','競走ランク','レース番号_車番']:\n tmp = pd.get_dummies(alldata[col])\n alldata = pd.concat((alldata,tmp), axis=1)\n\n for col in ['レース名','日数','級班','脚質','競輪場コード','バック','競走ランク','レース番号_車番']:\n del alldata[col]\n\n alldata['res1'] = alldata.結果順位.apply(lambda x: (1 if x == 1 else 0))#目的変数(一位フラグ)を作成\n alldata['res3'] = alldata.結果順位.apply(lambda x: (1 if x <= 3 else 0))#目的変数(三位フラグ)を作成\n del alldata['結果順位']\n\n #alldata.to_csv(\"test.csv\")\n\n #不正文字列(Err,inf,\" \")を置換\n #alldata=alldata.replace({'Err': 0}, regex=True)\n #alldata=alldata.replace({'inf': 0}, regex=True)\n #alldata=alldata.replace({'': 0}, regex=True)\n #alldata=alldata.replace({' ': 0}, regex=True)\n #alldata=alldata.replace({' ': 0}, regex=True)\n alldata = alldata.fillna(0)\n alldata = alldata.replace('inf','0')\n alldata.replace([np.inf, -np.inf], 0)\n # np.any(np.isnan(alldata))\n # np.all(np.isfinite(alldata))\n # alldata[(alldata==float(\"inf\")) | (alldata==float(\"-inf\"))] = 0.0\n # alldata[np.isnan(alldata)] = 0\n # print(alldata.isnan().values.any()) # 
これだめ\n\n # 一回保存(デバッグ用)\n # alldata.to_csv(\"testResult_work_nensyuu.csv\")\n return alldata\n\n\ndef test(alldata,leanDateRate, testCount,tergetCol1,raceName1,day1):\n #  学習データを説明変数と目的変数に分割\n # msk = np.random.rand(len(alldata)) < 0.05\n\n # res1,res3は相関それぞれ関連が強いのでどちらかを削除する\n if tergetCol1 == 'res1':\n del alldata['res3']\n if tergetCol1 == 'res3':\n del alldata['res1']\n\n if len(alldata) >= 10000:\n msk = np.random.rand(len(alldata)) < leanDateRate\n else:\n msk = np.random.rand(len(alldata)) < 0.5\n train = alldata[msk]\n test = alldata[~msk]\n\n # データの列数を表示\n print(len(train))\n print(len(test))\n\n trainArr = train.drop(tergetCol1, axis=1).as_matrix() # training array\n trainRes = train.as_matrix([tergetCol1]) # training results\n\n # trainArr.to_csv(\"train_nensyuu.csv\")\n # np.savetxt(\"train_nensyuu.csv\", trainArr)\n # trainRes.to_csv(\"test_nensyuu.csv\")\n # np.savetxt(\"test_nensyuu.csv\", trainRes)\n\n #  学習しモデルを作成\n # rf = RandomForestRegressor(n_estimators=10000) # 100 decision trees is a good enough number\n rf = RandomForestRegressor(n_estimators=testCount)\n\n # print(trainArr)\n # print(trainRes)\n\n # print(np.isfinite(trainArr))\n # print(np.isfinite(trainRes))\n rf.fit(trainArr, trainRes) # finally, we fit the data to the algorithm!!! :)\n\n #  テスト用の説明変数を作成\n testArr = test.drop(tergetCol1, axis=1).as_matrix()\n\n #  テストデータで説明変数から目的変数を作成\n results = rf.predict(testArr)\n\n #  結果出力用に学習結果をDfに追加する\n test['predictions'] = results\n\n #重要度を表示\n print(list(train.columns.values))\n print(rf.feature_importances_)\n\n # with open(str(leanDateRate) + \"_\" + str(testCount)+ \"_\" + tergetCol+ \"_\" + tmp_res+\"_\" + raceName+\"_\" + day+\".importance.txt\", 'wb') as f:\n # f.writelines([\"%s\\n\" % str(item) for item in list(train.columns.values)])\n # f.write(rf.feature_importances_)\n # f.close()\n\n # 学習結果をファイルに出力\n test.to_csv(\"testResult_\" + \"_\" + str(leanDateRate) + \"_\" + str(testCount)+ \"_\" + tergetCol1+ \"_\" + raceName1+\"_\" + day1+ \".csv\")\n # モデルを保存\n joblib.dump(rf, str(leanDateRate) + \"_\" + str(testCount)+ \"_\" + tergetCol1+ \"_\" + raceName1+\"_\" + day1+\".model\")\n\n return test\n\n#############################主処理#################################\n# file='C:\\\\Users\\\\owner\\\\PycharmProjects\\\\keirin\\\\testResult_res1-1_10000ken-10000kai-1.5h.csv'\n# tmpdf = pd.read_csv(file,encoding='s-jis',header=0)\n#\n# main = pd.DataFrame({'A': [np.nan]})\n# main.empty\n#\n# main[\"0.95_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)]['res'].mean()\n# main[\"0.90_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)]['res'].mean()\n# main[\"0.85_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)]['res'].mean()\n# main[\"0.80_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)]['res'].mean()\n# main[\"0.75_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)]['res'].mean()\n# main[\"0.70_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)]['res'].mean()\n#\n# main[\"0.95_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)]['res'].count()\n# main[\"0.90_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)]['res'].count()\n# main[\"0.85_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)]['res'].count()\n# main[\"0.80_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)]['res'].count()\n# main[\"0.75_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)]['res'].count()\n# main[\"0.70_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)]['res'].count()\n# with open('testtesttest.csv', 'a') as f:\n# main.to_csv(f, header=False)\n\n#学習データ用ファイル\nurl = 
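Portability note on the training code above: `DataFrame.as_matrix()` was deprecated in pandas 0.23 and removed in 1.0, and `sklearn.externals.joblib` was removed in scikit-learn 0.23. A sketch of the modern equivalents, reusing the record's names (`train`, `tergetCol1`, `rf`) for illustration:

import joblib  # now a standalone package: pip install joblib

trainArr = train.drop(tergetCol1, axis=1).to_numpy()  # replaces .as_matrix()
trainRes = train[[tergetCol1]].to_numpy()
joblib.dump(rf, 'model.joblib')  # replaces sklearn.externals.joblib.dump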
\"C:\\\\Users\\\\owner\\\\PycharmProjects\\\\keirin\\\\df_result.csv\"\n# 結果書き込み用ファイル\nmain = pd.DataFrame({'A': [np.nan]})\nmain.empty\n\nisFirst = True\n#テスト\n#for (rate, testCnt) in zip( [0.05, 0.05, 0.05], [1000, 2000, 4000]):\nfor tmp_res in ['res1','res3']:\n #for (rate, testCnt) in zip([0.01,0.01,0.01,0.01], [100,1000,2000,5000]):\n for (rate, testCnt) in zip([0.01,0.05,0.1], [2000, 2000, 2000, 2000]):\n raceNames = [\"A級ガ一般\",\"A級ガ予1\",\"A級ガ予2\",\"A級ガ決勝\",\"A級チ一般\",\"A級チ予選\",\"A級チ決勝\",\"A級チ準決\",\"A級チ選抜\",\"A級一予選\",\"A級一般\",\"A級予選\",\"A級二予選\",\"A級優秀\",\"A級初特選\",\"A級決勝\",\"A級準決勝\",\"A級特一般\",\"A級特予選\",\"A級特選\",\"A級選抜\",\"S級シャイ\",\"S級ローズ\",\"S級一予選\",\"S級一般\",\"S級一般一\",\"S級一般二\",\"S級予選\",\"S級二予選\",\"S級二予A\",\"S級二予B\",\"S級優秀\",\"S級初特選\",\"S級日競杯\",\"S級決勝\",\"S級準決勝\",\"S級特一般\",\"S級特秀\",\"S級特選\",\"S級特選一\",\"S級特選予\",\"S級特選二\",\"S級白虎賞\",\"S級選抜\",\"S級選抜一\",\"S級選抜二\",\"S級青龍賞\",\"S級順位決\",\"S級DRM\",\"S級DS\",\"S級GDR\",\"S級ORI\",\"S級SPR\",\"S級STR\",\"S級WS\",\"SA混合YGP\"]\n days = [\"初日\", \"2日目\", \"3日目\", \"4日目\", \"5日目\", \"最終日\"]\n\n for raceName in raceNames:\n for day in days:\n # ファイル読み込み\n alldata = read_clean(url,raceName,day)\n #100行以下なら処理しない\n if len(alldata) <= 100:\n continue\n #テストを実行\n tmpdf = test(alldata,rate,testCnt,tmp_res,raceName,day)\n #テスト結果を分析、評価、結果をファイルに追記\n main[\"receName\"] = raceName\n main[\"day\"] = day\n main[\"colcount\"] = len(tmpdf)\n #予測したレースの的中率を算出\n main[\"0.95_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)][tmp_res].mean()\n main[\"0.90_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)][tmp_res].mean()\n main[\"0.85_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)][tmp_res].mean()\n main[\"0.80_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)][tmp_res].mean()\n main[\"0.75_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)][tmp_res].mean()\n main[\"0.70_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)][tmp_res].mean()\n main[\"0.60_mean\"] = tmpdf[(tmpdf['predictions'] >= 0.6)][tmp_res].mean()\n main[\"0.50_mean\"] = tmpdf[(tmpdf['predictions'] >= 0.5)][tmp_res].mean()\n main[\"0.40_mean\"] = tmpdf[(tmpdf['predictions'] >= 0.4)][tmp_res].mean()\n # 予測したレースのレース数を算出\n main[\"0.95_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)][tmp_res].count()\n main[\"0.90_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)][tmp_res].count()\n main[\"0.85_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)][tmp_res].count()\n main[\"0.80_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)][tmp_res].count()\n main[\"0.75_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)][tmp_res].count()\n main[\"0.70_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)][tmp_res].count()\n main[\"0.60_count\"] = tmpdf[(tmpdf['predictions'] >= 0.6)][tmp_res].count()\n main[\"0.50_count\"] = tmpdf[(tmpdf['predictions'] >= 0.5)][tmp_res].count()\n main[\"0.40_count\"] = tmpdf[(tmpdf['predictions'] >= 0.4)][tmp_res].count()\n\n main[\"rate\"] = rate\n main[\"testCnt\"] = testCnt\n with open('keirinAnalisysResult.csv', 'a') as f:\n if isFirst:\n main.to_csv(f, header=True)\n else:\n main.to_csv(f, header=False)\n isFirst = False\n", "sub_path": "kring_project/code/sandbox/03_randomF.py", "file_name": "03_randomF.py", "file_ext": "py", "file_size_in_byte": 10254, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.inf", 
"line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 69, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 117, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 147, "usage_type": "attribute"}]} +{"seq_id": "393589910", "text": "#-*-coding:utf-8-*-\nimport os\n\nfrom flask import Flask, render_template, request\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\nchannels = list()\n# Channels view\n#{\"name\": \"channel_name\", \"messages\": [], \"id\":0})\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/channels\", methods=[\"POST\", \"GET\"])\ndef channel_list():\n\n # if GET request\n if request.method == \"GET\":\n return render_template(\"channels.html\", channels=channels)\n \n # if submit create channel form\n else:\n name = request.form.get(\"channel\")\n\n # unique name for each channel\n for channel in channels:\n if name == channel[\"name\"]:\n return render_template(\"error.html\", message=\"Channel name already taken\")\n\n id = len(channels)\n channels.append({\"name\": name, \"messages\": [], \"id\":id})\n return render_template(\"channels.html\", channels=channels)\n\n\n@app.route(\"/channels/\")\ndef channel(id):\n return render_template(\"channel.html\", channel=channels[id])\n\n@socketio.on(\"send message\")\ndef message(data):\n\n message = data[\"message\"]\n user = data[\"user\"]\n date = data[\"date\"]\n id = int(data[\"id\"])\n channels[id][\"messages\"].append([message, user, date])\n\n # no messages more than 100\n if len(channels[id][\"messages\"]) > 100:\n channels[id][\"messages\"].pop(0)\n \n emit(\"show message\", {\"message\": message, \"user\": user, \"date\": date}, broadcast=True)\n\nif __name__ == \"__main__\":\n socketio.run(app)", "sub_path": "application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 1619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_socketio.SocketIO", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": 
"call"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}, {"api_name": "flask_socketio.emit", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "457387043", "text": "# -*- coding: utf-8 -*-\nimport settings\nimport os\n\nfrom bottle import static_file, request\nfrom utils import *\nfrom bottle import template\n\nimport bottle\n\napp = application = bottle.Bottle()\n\n\n# Artist routes\n\n@app.route('/getartist/')\ndef get_artist(artist_id):\n data, error = query_get_artist(artist_id)\n return template('editartist.tpl', artist=data[0], error=error)\n\n\n@app.route('/getartists/empty')\ndef get_artists_empty():\n return template(\n 'searchartist.tpl'\n )\n\n\n@app.route('/getartists')\ndef get_artists():\n firstname = request.query.firstname\n surname = request.query.surname\n yearfrom = request.query.yearfrom\n yearto = request.query.yearto\n type = request.query.type\n (data, error) = query_get_artists(\n firstname, surname, yearfrom, yearto, type)\n if error:\n return template('searchartist.tpl', error=error)\n return template(\n 'show_artists.tpl',\n header=data[0],\n data=data[1:],\n error=None\n )\n\n\n@app.route('/insertartist/empty')\ndef insert_artist_empty():\n return template(\n 'insertartist.tpl',\n error=None,\n )\n\n\n@app.route('/insertartist')\ndef insert_artist():\n artist_id = request.query.artist_id\n name = request.query.name\n surname = request.query.surname\n birth_year = request.query.birth_year\n data, error = query_insert_artist(artist_id, name, surname, birth_year)\n return template(\n 'insertartist.tpl',\n error=error,\n )\n\n\n@app.route('/editartist/')\ndef edit_artist(artist_id):\n name = request.query.name\n surname = request.query.surname\n birth_year = request.query.birth_year\n data, error = query_edit_artist(artist_id, name, surname, birth_year)\n return template(\n 'editartist.tpl',\n artist=data[0] if not error else (\n artist_id, name, surname, birth_year),\n error=error\n )\n\n\n@app.route('/getsongs/empty')\ndef get_songs_empty():\n return template('searchsong.tpl')\n\n\n@app.route('/getsongs')\ndef get_songs():\n song_title = request.query.songtitle\n production_year = request.query.productionyear\n company = request.query.company\n table = query_get_songs(song_title, production_year, company)\n return template(\n 'showsongs.tpl',\n header=table[0],\n data=table[1:],\n error=None\n )\n\n\n@app.route('/insertsong/empty')\ndef insert_song_empty():\n cd, singer, composer, songwriter = query_song_parameters()\n return template(\n 'insertsong.tpl',\n cd=cd,\n singer=singer,\n composer=composer,\n songwriter=songwriter,\n error=None,\n )\n\n\n@app.route('/insertsong')\ndef insert_song():\n success = False\n title = request.query.title\n production_year = request.query.productionyear\n cd = request.query.cd\n singer = request.query.singer\n composer = request.query.composer\n song_writer = request.query.songwriter\n error = query_insert_song(\n title, production_year, cd, singer, composer, song_writer\n )\n cd, singer, composer, songwriter = query_song_parameters()\n if not error:\n success = True\n return template(\n 'insertsong.tpl',\n cd=cd,\n singer=singer,\n composer=composer,\n songwriter=songwriter,\n error=error,\n success=success\n )\n\n\n@app.route('/:path')\ndef callback(path):\n return static_file(path, 'views')\n\n\n@app.route('/')\ndef index():\n return template('home.tpl', name='aa')\n\nif __name__ == '__main__':\n bottle.run(\n app=app, host='localhost', port=settings.web_port, reloader=True,\n debug=True\n 
)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "bottle.Bottle", "line_number": 11, "usage_type": "call"}, {"api_name": "bottle.template", "line_number": 19, "usage_type": "call"}, {"api_name": "bottle.template", "line_number": 24, "usage_type": "call"}, {"api_name": "bottle.request.query", "line_number": 31, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 31, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 32, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 33, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 33, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 34, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 34, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 35, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 35, "usage_type": "name"}, {"api_name": "bottle.template", "line_number": 39, "usage_type": "call"}, {"api_name": "bottle.template", "line_number": 40, "usage_type": "call"}, {"api_name": "bottle.template", "line_number": 50, "usage_type": "call"}, {"api_name": "bottle.request.query", "line_number": 58, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 58, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 59, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 59, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 60, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 60, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 61, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 61, "usage_type": "name"}, {"api_name": "bottle.template", "line_number": 63, "usage_type": "call"}, {"api_name": "bottle.request.query", "line_number": 71, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 71, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 72, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 72, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 73, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 73, "usage_type": "name"}, {"api_name": "bottle.template", "line_number": 75, "usage_type": "call"}, {"api_name": "bottle.template", "line_number": 85, "usage_type": "call"}, {"api_name": "bottle.request.query", "line_number": 90, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 90, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 91, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 91, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 92, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 92, "usage_type": "name"}, {"api_name": "bottle.template", "line_number": 94, "usage_type": "call"}, {"api_name": "bottle.template", "line_number": 105, "usage_type": "call"}, {"api_name": "bottle.request.query", "line_number": 118, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 118, "usage_type": "name"}, {"api_name": 
"bottle.request.query", "line_number": 119, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 119, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 120, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 120, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 121, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 121, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 122, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 122, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 123, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 123, "usage_type": "name"}, {"api_name": "bottle.template", "line_number": 130, "usage_type": "call"}, {"api_name": "bottle.static_file", "line_number": 143, "usage_type": "call"}, {"api_name": "bottle.template", "line_number": 148, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 151, "usage_type": "call"}, {"api_name": "settings.web_port", "line_number": 152, "usage_type": "attribute"}]} +{"seq_id": "524383495", "text": "### Returns 1 whole image and annotations instead of indididual objects ###\n\n\nfrom pycocotools import coco\nimport numpy as np\nimport cv2\nimport math\nimport tensorflow as tf\n\nclass dataset:\n\n def __init__(self):\n \n self.dataType = 'val2017'\n \n # initialize COCO api for instance annotations\n annFile='./COCO/annotations/instances_{}.json'.format(self.dataType)\n self.imageDir = './COCO/images/'\n\n self.coco_handle=coco.COCO(annFile)\n\n # human-readable COCO categories\n cats = self.coco_handle.loadCats(self.coco_handle.getCatIds())\n nms=[cat['name'] for cat in cats]\n\n # get all images containing given categories (nms)\n self.catIds = self.coco_handle.getCatIds(catNms=nms)\n self.imgIds = self.coco_handle.getImgIds()\n self.totalImages = len(self.imgIds)\n self.numImages = 0 #number of processed images\n \n print(len(self.imgIds), \"total images in\", self.dataType, \"set.\")\n\n def nextImage(self): #return next image\n \n if self.numImages >= self.totalImages:\n print(\"No more images!\")\n return None, None, None\n \n #Retrieve image location\n img = self.coco_handle.loadImgs(self.imgIds[self.numImages])[0] #image descriptor\n image_location = self.imageDir+self.dataType+'/'+img['file_name']\n #Retrieve annotations\n annIds = self.coco_handle.getAnnIds(imgIds=self.imgIds[self.numImages],\n catIds=self.catIds,\n iscrowd=None)\n anns = self.coco_handle.loadAnns(annIds) #annotation data\n image, labels, boxes = parseAnnotation(image_location, anns)\n \n image = image.astype(np.float32)\n image = np.divide(image, 255.0)\n \n self.numImages = self.numImages + 1\n print(\"{} images left...\".format(self.totalImages - self.numImages))\n \n return image, labels, boxes, img['file_name']\n\ndef parseAnnotation(file, annotations):\n\n image = cv2.imread(file) #actual image\n #cv2.imshow(\"image\", image)\n #cv2.waitKey(1)\n if image is None: exit(\"No image!\")\n \n boxes = list()\n labels = list()\n for ann in annotations: #get bounding boxes\n boxes.append(ann['bbox'])\n labels.append(labeled(ann['category_id']))\n \n return image, labels, boxes\n \n\ndef labeled(id): #normalize labels to fit within 80\n if id == 81: return 12\n elif id == 82: return 26\n elif id == 84: return 30\n elif id == 85: return 45\n elif id == 86: return 66\n elif id == 87: return 68\n elif id == 88: return 69\n elif id == 89: 
return 71\n elif id == 90: return 29\n else: return id\n", "sub_path": "COCO/COCOlite.py", "file_name": "COCOlite.py", "file_ext": "py", "file_size_in_byte": 2753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pycocotools.coco.COCO", "line_number": 20, "usage_type": "call"}, {"api_name": "pycocotools.coco", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.divide", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "509096064", "text": "# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]\n# snippet-sourcedescription:[put_bucket_acl.py demonstrates how to set the access control list for an Amazon S3 bucket.]\n# snippet-service:[s3]\n# snippet-keyword:[Amazon S3]\n# snippet-keyword:[Python]\n# snippet-sourcesyntax:[python]\n# snippet-sourcesyntax:[python]\n# snippet-sourcetype:[full-example]\n# snippet-sourcedate:[2019-03-07]\n# snippet-sourceauthor:[AWS]\n\n# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# This file is licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License. A copy of the\n# License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS\n# OF ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport copy\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\ndef get_bucket_acl(bucket_name):\n \"\"\"Retrieve the access control list of an Amazon S3 bucket\n\n :param bucket_name: string\n :return: Dictionary defining the bucket's access control list consisting\n of owner and grants. 
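The `labeled()` if/elif chain in the COCO record above is a sparse-id remap: COCO category ids run 1-90 with gaps, and ids above 80 are folded into unused slots. A dict makes the intent clearer; the mapping below is copied verbatim from the record:

REMAP = {81: 12, 82: 26, 84: 30, 85: 45, 86: 66, 87: 68, 88: 69, 89: 71, 90: 29}


def labeled(cat_id: int) -> int:
    # fall through to the original id when no remap is defined
    return REMAP.get(cat_id, cat_id)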
If error, return None.\n \"\"\"\n\n # Retrieve the bucket ACL\n s3 = boto3.client('s3')\n try:\n response = s3.get_bucket_acl(Bucket=bucket_name)\n except ClientError as e:\n # AllAccessDisabled error == bucket not found\n logging.error(e)\n return None\n\n # Return both the Owner and Grants keys\n # The Owner and Grants settings together form the Access Control Policy.\n # The Grants alone form the Access Control List.\n return {'Owner': response['Owner'], 'Grants': response['Grants']}\n\n\ndef put_bucket_acl(bucket_name, acl):\n \"\"\"Set the access control list of an Amazon S3 bucket\n\n :param bucket_name: string\n :param acl: Dictionary defining the ACL consisting of grants and permissions\n :return: True if ACL was set, otherwise False\n \"\"\"\n\n # Set the ACL\n s3 = boto3.client('s3')\n try:\n s3.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=acl)\n except ClientError as e:\n # AccessDenied error == bucket prohibits public access\n # AllAccessDisabled error == bucket not found\n # AmbiguousGrantByEmailAddress == email address is associated with\n # multiple AWS accounts\n logging.error(e)\n return False\n return True\n\n\ndef main():\n \"\"\"Exercise put_bucket_acl()\"\"\"\n\n # Assign these values before running the program\n test_bucket_name = 'BUCKET_NAME'\n new_grantee_canonical_user_id = 'AWS_USER_ID'\n # new_grantee_email = 'EMAIL_ADDRESS' # Set AWS User ID or email, but not both\n new_grantee_permission = 'READ' # Or 'FULL_CONTROL', etc.\n\n # Set up logging\n logging.basicConfig(level=logging.DEBUG,\n format='%(levelname)s: %(asctime)s: %(message)s')\n\n # Get the bucket's current ACL\n acl = get_bucket_acl(test_bucket_name)\n if acl is None:\n exit(-1)\n\n # Add a new grant to the current ACL\n new_grant = {\n 'Grantee': {\n 'ID': new_grantee_canonical_user_id,\n 'Type': 'CanonicalUser',\n #'EmailAddress': new_grantee_email, # Set ID or Email\n #'Type': 'AmazonCustomerByEmail',\n },\n 'Permission': new_grantee_permission,\n }\n # If we don't want to modify the original ACL variable, then we\n # must do a deepcopy\n modified_acl = copy.deepcopy(acl)\n modified_acl['Grants'].append(new_grant)\n\n # Put the updated bucket ACL\n if put_bucket_acl(test_bucket_name, modified_acl):\n logging.info(f'The ACL was set for {test_bucket_name}')\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "python/example_code/s3/put_bucket_acl.py", "file_name": "put_bucket_acl.py", "file_ext": "py", "file_size_in_byte": 3857, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "boto3.client", "line_number": 39, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 42, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 44, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 62, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 65, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 85, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "401897099", "text": "'''\nCreated on 2010. 10. 
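Why the ACL record above uses `copy.deepcopy(acl)` rather than `dict(acl)`: the Grants list is nested, so a shallow copy would still alias it and appending a grant would mutate the original. A self-contained demonstration:

import copy

acl = {'Owner': {'ID': 'abc'}, 'Grants': [{'Permission': 'READ'}]}
shallow = dict(acl)
shallow['Grants'].append({'Permission': 'WRITE'})
print(len(acl['Grants']))   # 2 -- original mutated through the alias
deep = copy.deepcopy(acl)
deep['Grants'].append({'Permission': 'FULL_CONTROL'})
print(len(acl['Grants']))   # still 2 -- the deep copy is independent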
9.\n\n@author: hsh\n'''\n\nimport logging\nimport thread\n\nimport Pyro.core \nimport Pyro.naming \nfrom Pyro.errors import NamingError \n\nimport util.const;\n\nimport util.ready_logging;\n\nimport nexus.nexus_service_for_interceptor as nis\nimport nexus.nexus_service_for_carrier as ncs\n\ndef run_carrier_service():\n daemon=Pyro.core.Daemon()\n ns=Pyro.naming.NameServerLocator().getNS()\n daemon.useNameServer(ns)\n \n nexus_name = util.const.NEXUS_SERVICE_NAME_FOR_CARRIER\n\n try:\n ns.unregister(nexus_name)\n except NamingError:\n pass\n \n uri = daemon.connect(ncs.NexusServiceForCarrier(), nexus_name)\n logging.info(\"carrier service connect: %s\" % uri)\n \n while True:\n daemon.handleRequests(10.0)\n ncs.CarrierManager.ins().update_carrier_keys()\n\ndef run_interceptor_service():\n daemon=Pyro.core.Daemon()\n ns=Pyro.naming.NameServerLocator().getNS()\n daemon.useNameServer(ns)\n \n nexus_name = util.const.NEXUS_SERVICE_NAME_FOR_INTERCEPTOR\n\n try:\n ns.unregister(nexus_name)\n except NamingError:\n pass\n \n uri = daemon.connect(nis.NexusServiceForInterceptor(), nexus_name)\n \n logging.info(\"interceptor service connect return: %s\" % uri)\n while True:\n daemon.handleRequests(10.0)\n nis.InterceptorManager.ins().update_interceptor_keys()\n\ndef main():\n thread.start_new(run_carrier_service, ())\n run_interceptor_service()\n \nif __name__ == '__main__':\n util.ready_logging.ready_logging(\"nexus_main_log.txt\")\n \n logging.info(\"start nexus\")\n main()\n logging.info(\"end nexus\")\n", "sub_path": "kb_codes/sandbox/syscarrier/src/nexus/nexus_main.py", "file_name": "nexus_main.py", "file_ext": "py", "file_size_in_byte": 1620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "Pyro.core.core.Daemon", "line_number": 22, "usage_type": "call"}, {"api_name": "Pyro.core.core", "line_number": 22, "usage_type": "attribute"}, {"api_name": "Pyro.core", "line_number": 22, "usage_type": "name"}, {"api_name": "Pyro.core.naming.NameServerLocator", "line_number": 23, "usage_type": "call"}, {"api_name": "Pyro.core.naming", "line_number": 23, "usage_type": "attribute"}, {"api_name": "Pyro.core", "line_number": 23, "usage_type": "name"}, {"api_name": "util.const.const", "line_number": 26, "usage_type": "attribute"}, {"api_name": "util.const", "line_number": 26, "usage_type": "name"}, {"api_name": "Pyro.errors.NamingError", "line_number": 30, "usage_type": "name"}, {"api_name": "nexus.nexus_service_for_carrier.NexusServiceForCarrier", "line_number": 33, "usage_type": "call"}, {"api_name": "nexus.nexus_service_for_carrier", "line_number": 33, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 34, "usage_type": "call"}, {"api_name": "nexus.nexus_service_for_carrier.CarrierManager.ins", "line_number": 38, "usage_type": "call"}, {"api_name": "nexus.nexus_service_for_carrier.CarrierManager", "line_number": 38, "usage_type": "attribute"}, {"api_name": "nexus.nexus_service_for_carrier", "line_number": 38, "usage_type": "name"}, {"api_name": "Pyro.core.core.Daemon", "line_number": 41, "usage_type": "call"}, {"api_name": "Pyro.core.core", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Pyro.core", "line_number": 41, "usage_type": "name"}, {"api_name": "Pyro.core.naming.NameServerLocator", "line_number": 42, "usage_type": "call"}, {"api_name": "Pyro.core.naming", "line_number": 42, "usage_type": "attribute"}, {"api_name": "Pyro.core", "line_number": 42, "usage_type": "name"}, {"api_name": 
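The nexus record above targets Python 2: the low-level `thread` module was renamed `_thread` in Python 3, and the `Pyro.core.Daemon`/`Pyro.naming` API shown is Pyro 3, predating Pyro4/Pyro5. A Python 3 sketch of the same "run one service on a side thread, keep the main thread in the other loop" shape, with stand-in service functions:

import threading
import time


def run_carrier_service():      # stand-in for the record's carrier loop
    while True:
        time.sleep(10.0)


def run_interceptor_service():  # stand-in; the main thread stays here
    while True:
        time.sleep(10.0)


threading.Thread(target=run_carrier_service, daemon=True).start()
run_interceptor_service()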
"util.const.const", "line_number": 45, "usage_type": "attribute"}, {"api_name": "util.const", "line_number": 45, "usage_type": "name"}, {"api_name": "Pyro.errors.NamingError", "line_number": 49, "usage_type": "name"}, {"api_name": "nexus.nexus_service_for_interceptor.NexusServiceForInterceptor", "line_number": 52, "usage_type": "call"}, {"api_name": "nexus.nexus_service_for_interceptor", "line_number": 52, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 54, "usage_type": "call"}, {"api_name": "nexus.nexus_service_for_interceptor.InterceptorManager.ins", "line_number": 57, "usage_type": "call"}, {"api_name": "nexus.nexus_service_for_interceptor.InterceptorManager", "line_number": 57, "usage_type": "attribute"}, {"api_name": "nexus.nexus_service_for_interceptor", "line_number": 57, "usage_type": "name"}, {"api_name": "thread.start_new", "line_number": 60, "usage_type": "call"}, {"api_name": "util.const.ready_logging.ready_logging", "line_number": 64, "usage_type": "call"}, {"api_name": "util.const.ready_logging", "line_number": 64, "usage_type": "attribute"}, {"api_name": "util.const", "line_number": 64, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "358736478", "text": "from __future__ import print_function\n\nimport datetime\nimport json\nimport os.path\nimport pickle\nfrom datetime import *\n\nfrom dateutil.relativedelta import *\nfrom dateutil.rrule import *\nfrom google.auth.transport.requests import Request\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\n\nDATE_FORMAT = '%d/%m/%Y'\nΤΙΜΕΖΟΝΕ = \"Europe/Athens\"\nSCOPES = ['https://www.googleapis.com/auth/calendar']\n\nservice = None\nlab_calendar = None\ntheory_calendar = None\n\n\ndef api_service():\n global service\n\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n get_calendars()\n\n\ndef get_calendars():\n global theory_calendar\n global lab_calendar\n\n calendar_list = service.calendarList().list(pageToken=None).execute()\n for count, calendar_list_entry in enumerate(calendar_list['items']):\n print(str(count + 1) + \")\", calendar_list_entry['summary'])\n\n theory_choice = int(input(\"Choose calendar for theory: \")) - 1\n theory_calendar = calendar_list['items'][theory_choice]['id']\n\n lab_choice = int(input(\"Choose calendar for lab: \")) - 1\n lab_calendar = calendar_list['items'][lab_choice]['id']\n\n\ndef get_data_from_json(filename):\n with open(filename, \"r\", encoding='utf8') as f:\n data = json.load(f)\n\n return data\n\n\ndef get_dates():\n holidays_file = input(\"Enter the holidays json: \")\n holidays = get_data_from_json(holidays_file)\n\n courses_file = input(\"Enter the courses json: \")\n courses = get_data_from_json(courses_file)\n\n return (holidays, courses)\n\n\ndef get_duration():\n start_date = input(\"When should the first event take place: \")\n 
end_date = input(\"When should the last event take place: \")\n start_date = datetime.strptime(start_date, DATE_FORMAT)\n end_date = datetime.strptime(end_date, DATE_FORMAT)\n return (start_date, end_date)\n\n\ndef process_holidays(holidays):\n days = set()\n for entry in holidays:\n if entry['end_date'] == '':\n days.add(datetime.strptime(entry['start_date'], DATE_FORMAT))\n else:\n start_date = datetime.strptime(entry['start_date'], DATE_FORMAT)\n end_date = datetime.strptime(entry['end_date'], DATE_FORMAT)\n for dt in rrule(DAILY, dtstart=start_date, until=end_date):\n days.add(dt)\n return days\n\n\ndef process_courses(courses):\n timetable = []\n for weekday in [\"monday\", \"tuesday\", \"wednessday\", \"thursday\", \"friday\"]:\n timetable.append(\n [entry for entry in courses if entry[\"weekday\"] == weekday])\n return timetable\n\n\ndef add_course(day, course):\n start_date = day.replace(hour=int(course[\"start_time\"][0:2]),\n minute=int(course[\"start_time\"][3:]))\n end_date = day.replace(hour=int(course[\"end_time\"][0:2]),\n minute=int(course[\"end_time\"][3:]))\n event = {\n 'summary': course['name'],\n 'location': course[\"location\"],\n 'description': course['description'],\n 'start': {\n 'dateTime': start_date.isoformat(),\n 'timeZone': ΤΙΜΕΖΟΝΕ,\n },\n 'end': {\n 'dateTime': end_date.isoformat(),\n 'timeZone': ΤΙΜΕΖΟΝΕ,\n }\n }\n event = service.events().insert(\n calendarId=lab_calendar if\n (course['type'] == \"lab\") else theory_calendar,\n body=event).execute()\n\n\ndef add_courses(timetable, holidays, start_date, end_date):\n for day in rrule(DAILY, dtstart=start_date, until=end_date):\n if day in holidays or day.weekday() >= 5:\n continue\n else:\n for course in timetable[day.weekday()]:\n add_course(day, course)\n\n\ndef main():\n api_service()\n (start_date, end_date) = get_duration()\n (holidays, courses) = get_dates()\n holidays = process_holidays(holidays)\n timetable = process_courses(courses)\n add_courses(timetable, holidays, start_date, end_date)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "timetable_to_calendar.py", "file_name": "timetable_to_calendar.py", "file_ext": "py", "file_size_in_byte": 4667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.path.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 30, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.Request", "line_number": 34, "usage_type": "call"}, {"api_name": "google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file", "line_number": 36, "usage_type": "call"}, {"api_name": "google_auth_oauthlib.flow.InstalledAppFlow", "line_number": 36, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 41, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.build", "line_number": 43, "usage_type": "call"}, {"api_name": "json.load", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "279908344", "text": 
"#There are 2 ways for scraping the website\n# 1. USE API\n# 2. HTML SCRAPING USING SOME TOOL LIKE BS4\n\n#Step: Install all the required packages\n#pip install requests\n#pip install bs4\n#pip install html5lib\n\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup\nurl = \"https://shyamal2411.github.io/TypeTest/\"\n\n#step 1: Get the html\nr = requests.get(url)\nhtmlContent = r.content\n# print(htmlContent) \n# PRINTS THE WHOLE HTML CONTENT TO TERMINAL\n\n#step 2: Parse the html\nsoup = BeautifulSoup(htmlContent, 'html.parser')\n# print(soup.prettify)\n# prettifies the content\n\n#step 3: HTML tree traversal\ntitle = soup.title\n# print(title.string)\n\nsoup.find_all(\"a\")\n# print(soup.find_all)\n\nmeta = soup.find_all(\"meta\")\n# print(meta[0])\n# print(meta[1])\n# print(meta[2])\n# print(meta[3])\n\nfile = open(\"Try.csv\",\"w\")\ncsv_writer = csv.writer(file)\nprint(meta[0])\n\n", "sub_path": "Python/webScrap.py", "file_name": "webScrap.py", "file_ext": "py", "file_size_in_byte": 852, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 22, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "167687326", "text": "#!/usr/bin/env python3\n\n# Copyright (C) 2017-2020 The btclib developers\n#\n# This file is part of btclib. It is subject to the license terms in the\n# LICENSE file found in the top-level directory of this distribution.\n#\n# No part of btclib including this file, may be copied, modified, propagated,\n# or distributed except according to the terms contained in the LICENSE file.\n\n\"\"\"Varint encoding and decoding functions.\n\nA varint (variable integer) is variable-length quantity that uses an\narbitrary number of binary octets (eight-bit bytes) to represent an\narbitrarily large integer.\nIt is usually a base-128 (7 bits) representation of an unsigned integer\nwith the addition of the eighth bit to mark continuation of bytes;\nit is used to save additional space for a resource constrained system.\n\nThis is the slightly different Bitcoin implementation, used in transaction\ndata to indicate the number of upcoming fields or the length of the\nupcoming field.\n\nUp to 0xfc, a varint is just 1 byte; however, if the integer is greater than\n0xfc, then it is expanded as [1 byte prefix][number]:\n\n* prefix 0xfd markes the next two bytes as the number;\n* prefix 0xfe markes the next four bytes as the number;\n* prefix 0xff markes the next eight bytes as the number.\n\"\"\"\n\nfrom io import BytesIO\nfrom typing import BinaryIO, Union\n\nfrom .utils import Octets, bytes_from_hexstring\n\n\ndef decode(stream: Union[BinaryIO, Octets]) -> int:\n '''Return the variable-length integer read from a stream.'''\n\n stream = bytes_from_hexstring(stream)\n\n if isinstance(stream, bytes):\n stream = BytesIO(stream)\n\n i = stream.read(1)[0]\n if i == 0xfd:\n # 0xfd marks the next two bytes as the number\n return int.from_bytes(stream.read(2), byteorder='little')\n elif i == 0xfe:\n # 0xfe marks the next four bytes as the number\n return int.from_bytes(stream.read(4), byteorder='little')\n elif i == 0xff:\n # 0xff marks the next eight bytes as the number\n return int.from_bytes(stream.read(8), byteorder='little')\n else:\n # anything else is just the integer\n return i\n\n\ndef encode(i: int) -> bytes:\n '''Return the varint bytes encoding of an 
integer.'''\n\n if i <= 0xfc: # 1 byte\n return bytes([i])\n elif i <= 0xffff: # 2 bytes\n return b'\\xfd' + i.to_bytes(2, byteorder='little')\n elif i <= 0xffffffff: # 4 bytes\n return b'\\xfe' + i.to_bytes(4, byteorder='little')\n elif i <= 0xffffffffffffffff: # 8 bytes\n return b'\\xff' + i.to_bytes(8, byteorder='little')\n else:\n raise ValueError(f'integer too large ({hex(i)}) for varint encoding')\n", "sub_path": "btclib/varint.py", "file_name": "varint.py", "file_ext": "py", "file_size_in_byte": 2676, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "typing.Union", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.BinaryIO", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.Octets", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.bytes_from_hexstring", "line_number": 41, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "634349565", "text": "from app.db.db_ticker import *\nimport threading\nimport time\nimport json\n\n\nclass store_ticker(threading.Thread):\n def __init__(self, ticker_buff, bat_num = 5):\n super(store_ticker, self).__init__()\n self.ticker_buff = ticker_buff\n self.bat_num = bat_num\n\n def fetch_data(self):\n self.ticker_buff.open_mmap()\n ret, s = self.ticker_buff.buffer_read()\n self.val = []\n if s != \"\":\n sm_data = json.loads(s)\n list = sm_data[\"data\"]\n if len(list) != 0:\n for i in range(0, self.bat_num):\n self.val.append(list.pop(0))\n if len(list) == 0:\n break\n\n if len(list) == 0:\n json_str = \"\"\n else:\n data = {}\n data[\"data\"] = list\n json_str = json.dumps(data)\n\n self.ticker_buff.buffer_reset()\n self.ticker_buff.buffer_write(json_str)\n self.ticker_buff.close_mmap()\n\n def process_val(self):\n while self.val != []:\n row = self.val.pop(0)\n self.op.dbop_insert_ticker_sequence(self.db, row[0], row[1], row[2], row[3], row[4])\n\n def run(self):\n\n self.db = MySQLCommand(\"localhost\", 3306, \"root\", \"123456\", \"ticker\")\n self.db.connectMysql()\n self.op = db_ticker()\n\n while True:\n if self.ticker_buff.mutex_lock.acquire(1):\n self.fetch_data()\n self.ticker_buff.mutex_lock.release()\n self.process_val()\n time.sleep(0.5)\n\n", "sub_path": "app/db/store_ticker.py", "file_name": "store_ticker.py", "file_ext": "py", "file_size_in_byte": 1602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "threading.Thread", "line_number": 7, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "493398913", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 13 19:07:15 2020\n\n@author: qtckp\n\"\"\"\n\nimport Stemmer\nstemmer = Stemmer.Stemmer('russian')\n\ndef stem(text):\n return stemmer.stemWord(text.lower())\n\n\n\nwith open('Stemms1000.txt','r', encoding = 'utf8') as f:\n stops = [word.rstrip() for word in f.readlines()]\n stops = set(stops) # in по множеству где-то в 200+ раз быстрее\n\n\n\ncounter = {k: 0 for k in stops}\nkk = 0\nwith open('word_lines.txt','r', encoding = 'utf8') as f:\n \n for line in f:\n \n arr = line.rstrip().lower().split()\n \n kk += 1\n \n new = {}\n \n for r in arr:\n \n rr = stem(r)\n \n if rr in stops:\n \n counter[rr] += 1\n \n if rr in new:\n new[rr] +=1\n else:\n 
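Worked examples of the byte layout produced by the btclib varint record above, assuming its `encode`/`decode` are in scope:

assert encode(0x00) == b'\x00'                     # single byte up to 0xfc
assert encode(0xfc) == b'\xfc'
assert encode(0xfd) == b'\xfd\xfd\x00'             # 0xfd prefix + 2 LE bytes
assert encode(0xffff) == b'\xfd\xff\xff'
assert encode(0x10000) == b'\xfe\x00\x00\x01\x00'  # 0xfe prefix + 4 LE bytes
assert decode(encode(123456789)) == 123456789      # round-trip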
new[rr] = 1\n \n for k, v in new.items():\n if v>5:\n del counter[k]\n stops.discard(k)\n \n \n if kk % 10000 == 0:\n print(kk)\n\n\n\n\ncounter2 = {k: v for k, v in sorted(counter.items(), key=lambda item: item[1])}\n\nprint(f'lenght = {len(stops)}')\n\nwith open('Counts2.txt','w', encoding = 'utf8') as f:\n for k, v in counter2.items():\n f.write(f'{k} = {v} \\n')\n\n\n\n\n", "sub_path": "Counter2.py", "file_name": "Counter2.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "Stemmer.Stemmer", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "145885128", "text": "import datetime\nimport json\nimport os\nimport os.path\nimport shutil\nimport tempfile\nimport unittest\nimport unittest.mock\nimport urllib.request\n\nfrom cate.core.ds import DATA_STORE_REGISTRY, DataAccessError, format_variables_info_string\nfrom cate.core.types import PolygonLike, TimeRangeLike, VarNamesLike\nfrom cate.ds.esa_cci_odp import EsaCciOdpDataStore, find_datetime_format\nfrom cate.ds.local import LocalDataStore\n\n\n@unittest.skip(reason='Because it writes a lot of files')\n# @unittest.skipUnless(condition=os.environ.get('CATE_ODP_TEST', None), reason=\"skipped unless CATE_ODP_TEST=1\")\nclass EsaCciOdpDataStoreIndexCacheTest(unittest.TestCase):\n def test_index_cache(self):\n self.data_store = EsaCciOdpDataStore(index_cache_used=True, index_cache_expiration_days=1.0e-6)\n data_sources = self.data_store.query()\n self.assertIsNotNone(data_sources)\n for data_source in data_sources:\n data_source.update_file_list()\n # data_source.sync()\n\n\ndef _create_test_data_store():\n with open(os.path.join(os.path.dirname(__file__), 'esgf-index-cache.json')) as fp:\n json_text = fp.read()\n json_dict = json.loads(json_text)\n # The EsaCciOdpDataStore created with an initial json_dict avoids fetching it from remote\n data_store = EsaCciOdpDataStore('test-odp', index_cache_json_dict=json_dict)\n DATA_STORE_REGISTRY.add_data_store(data_store)\n return data_store\n\n\nclass EsaCciOdpDataStoreTest(unittest.TestCase):\n def setUp(self):\n self.data_store = _create_test_data_store()\n\n def test_id_title_and_is_local(self):\n self.assertEqual(self.data_store.id, 'test-odp')\n self.assertEqual(self.data_store.title, 'ESA CCI Open Data Portal')\n self.assertEqual(self.data_store.is_local, False)\n\n def test_query(self):\n data_sources = self.data_store.query()\n self.assertIsNotNone(data_sources)\n self.assertEqual(len(data_sources), 61)\n\n def test_query_with_string(self):\n data_sources = self.data_store.query(query_expr='OC')\n self.assertIsNotNone(data_sources)\n self.assertEqual(len(data_sources), 20)\n\n\nclass EsaCciOdpDataSourceTest(unittest.TestCase):\n def setUp(self):\n self.data_store = _create_test_data_store()\n oc_data_sources = self.data_store.query(query_expr='OC')\n self.assertIsNotNone(oc_data_sources)\n self.assertIsNotNone(oc_data_sources[0])\n self.first_oc_data_source = oc_data_sources[0]\n self.tmp_dir = tempfile.mkdtemp()\n\n self._existing_local_data_store = DATA_STORE_REGISTRY.get_data_store('local')\n DATA_STORE_REGISTRY.add_data_store(LocalDataStore('local', self.tmp_dir))\n\n def tearDown(self):\n if self._existing_local_data_store:\n DATA_STORE_REGISTRY.add_data_store(self._existing_local_data_store)\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\n\n def test_make_local_and_update(self):\n\n soilmoisture_data_sources = self.data_store.query(\n 
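The per-line tallies in the stemming record above can lean on collections.Counter. A sketch of the same logic for one line, ignoring the Stemmer step and using a toy stop set:

from collections import Counter

stops = {'и', 'в', 'не'}
line = 'и в и не и'
new = Counter(w for w in line.split() if w in stops)
print(new)  # Counter({'и': 3, 'в': 1, 'не': 1})
too_common = [k for k, v in new.items() if v > 5]  # candidates to drop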
query_expr='esacci.SOILMOISTURE.day.L3S.SSMV.multi-sensor.multi-platform.COMBINED.02-1.r1')\n soilmoisture_data_source = soilmoisture_data_sources[0]\n\n reference_path = os.path.join(os.path.dirname(__file__),\n os.path.normpath('resources/datasources/local/files/'))\n\n def find_files_mock(_, time_range):\n\n def build_file_item(item_name: str, date_from: datetime, date_to: datetime, size: int):\n\n return [item_name, date_from, date_to, size,\n {'OPENDAP': os.path.join(reference_path, item_name),\n 'HTTPServer': 'file:' + urllib.request.pathname2url(os.path.join(reference_path, item_name))}]\n\n reference_files = {\n 'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781114000000-fv02.2.nc': {\n 'date_from': datetime.datetime(1978, 11, 14, 0, 0),\n 'date_to': datetime.datetime(1978, 11, 14, 23, 59),\n 'size': 21511378\n },\n 'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781115000000-fv02.2.nc': {\n 'date_from': datetime.datetime(1978, 11, 15, 0, 0),\n 'date_to': datetime.datetime(1978, 11, 15, 23, 59),\n 'size': 21511378\n },\n 'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781116000000-fv02.2.nc': {\n 'date_from': datetime.datetime(1978, 11, 16, 0, 0),\n 'date_to': datetime.datetime(1978, 11, 16, 23, 59),\n 'size': 21511378\n }\n }\n\n reference_files_list = []\n\n for reference_file in reference_files.items():\n file_name = reference_file[0]\n file_date_from = reference_file[1].get('date_from')\n file_date_to = reference_file[1].get('date_to')\n file_size = reference_file[1].get('size')\n if time_range:\n if file_date_from >= time_range[0] and file_date_to <= time_range[1]:\n reference_files_list.append(build_file_item(file_name,\n file_date_from,\n file_date_to,\n file_size))\n else:\n reference_files_list.append(build_file_item(file_name,\n file_date_from,\n file_date_to,\n file_size))\n return reference_files_list\n\n with unittest.mock.patch('cate.ds.esa_cci_odp.EsaCciOdpDataSource._find_files', find_files_mock):\n with unittest.mock.patch.object(EsaCciOdpDataStore, 'query', return_value=[]):\n\n new_ds_title = 'local_ds_test'\n new_ds_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n try:\n new_ds = soilmoisture_data_source.make_local(new_ds_title, time_range=new_ds_time_range)\n except Exception:\n raise ValueError(reference_path, os.listdir(reference_path))\n self.assertIsNotNone(new_ds)\n\n self.assertEqual(new_ds.id, \"local.%s\" % new_ds_title)\n self.assertEqual(new_ds.temporal_coverage(), new_ds_time_range)\n\n new_ds_w_one_variable_title = 'local_ds_test_var'\n new_ds_w_one_variable_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_one_variable_var_names = VarNamesLike.convert(['sm'])\n\n new_ds_w_one_variable = soilmoisture_data_source.make_local(\n new_ds_w_one_variable_title,\n time_range=new_ds_w_one_variable_time_range,\n var_names=new_ds_w_one_variable_var_names\n )\n self.assertIsNotNone(new_ds_w_one_variable)\n\n self.assertEqual(new_ds_w_one_variable.id, \"local.%s\" % new_ds_w_one_variable_title)\n ds = new_ds_w_one_variable.open_dataset()\n\n new_ds_w_one_variable_var_names.extend(['lat', 'lon', 'time'])\n\n self.assertSetEqual(set(ds.variables),\n set(new_ds_w_one_variable_var_names))\n\n new_ds_w_region_title = 'from_local_to_local_region'\n new_ds_w_region_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_region_spatial_coverage = 
PolygonLike.convert(\"10,20,30,40\")\n\n new_ds_w_region = soilmoisture_data_source.make_local(\n new_ds_w_region_title,\n time_range=new_ds_w_region_time_range,\n region=new_ds_w_region_spatial_coverage) # type: LocalDataSource\n\n self.assertIsNotNone(new_ds_w_region)\n\n self.assertEqual(new_ds_w_region.id, \"local.%s\" % new_ds_w_region_title)\n\n self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)\n\n new_ds_w_region_title = 'from_local_to_local_region_one_var'\n new_ds_w_region_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_region_var_names = VarNamesLike.convert(['sm'])\n new_ds_w_region_spatial_coverage = PolygonLike.convert(\"10,20,30,40\")\n\n new_ds_w_region = soilmoisture_data_source.make_local(\n new_ds_w_region_title,\n time_range=new_ds_w_region_time_range,\n var_names=new_ds_w_region_var_names,\n region=new_ds_w_region_spatial_coverage) # type: LocalDataSource\n\n self.assertIsNotNone(new_ds_w_region)\n\n self.assertEqual(new_ds_w_region.id, \"local.%s\" % new_ds_w_region_title)\n\n self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)\n data_set = new_ds_w_region.open_dataset()\n new_ds_w_region_var_names.extend(['lat', 'lon', 'time'])\n\n self.assertSetEqual(set(data_set.variables), set(new_ds_w_region_var_names))\n\n new_ds_w_region_title = 'from_local_to_local_region_two_var_sm_uncertainty'\n new_ds_w_region_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_region_var_names = VarNamesLike.convert(['sm', 'sm_uncertainty'])\n new_ds_w_region_spatial_coverage = PolygonLike.convert(\"10,20,30,40\")\n\n new_ds_w_region = soilmoisture_data_source.make_local(\n new_ds_w_region_title,\n time_range=new_ds_w_region_time_range,\n var_names=new_ds_w_region_var_names,\n region=new_ds_w_region_spatial_coverage) # type: LocalDataSource\n\n self.assertIsNotNone(new_ds_w_region)\n\n self.assertEqual(new_ds_w_region.id, \"local.%s\" % new_ds_w_region_title)\n\n self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)\n data_set = new_ds_w_region.open_dataset()\n new_ds_w_region_var_names.extend(['lat', 'lon', 'time'])\n\n self.assertSetEqual(set(data_set.variables), set(new_ds_w_region_var_names))\n\n empty_ds_timerange = (datetime.datetime(2017, 12, 1, 0, 0), datetime.datetime(2017, 12, 31, 23, 59))\n with self.assertRaises(DataAccessError) as cm:\n soilmoisture_data_source.make_local('empty_ds', time_range=empty_ds_timerange)\n self.assertEqual('CCI Open Data Portal data source \"{}\"\\ndoes not seem to have any datasets in given '\n 'time range {}'.format(soilmoisture_data_source.id,\n TimeRangeLike.format(empty_ds_timerange)),\n str(cm.exception))\n\n new_ds_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 14, 23, 59)))\n\n new_ds = soilmoisture_data_source.make_local(\"title_test_copy\", time_range=new_ds_time_range)\n self.assertIsNotNone(new_ds)\n self.assertEqual(new_ds.meta_info['title'], soilmoisture_data_source.meta_info['title'])\n\n title = \"Title Test!\"\n new_ds = soilmoisture_data_source.make_local(\"title_test_set\", title, time_range=new_ds_time_range)\n self.assertIsNotNone(new_ds)\n self.assertEqual(new_ds.meta_info['title'], title)\n\n def test_data_store(self):\n self.assertIs(self.first_oc_data_source.data_store,\n self.data_store)\n\n def 
test_id(self):\n self.assertEqual(self.first_oc_data_source.id,\n 'esacci.OC.day.L3S.K_490.multi-sensor.multi-platform.MERGED.1-0.r2')\n\n def test_schema(self):\n self.assertEqual(self.first_oc_data_source.schema,\n None)\n\n @unittest.skip(reason='outdated info string')\n def test_info_string(self):\n self.assertIn('product_string: MERGED\\n',\n self.first_oc_data_source.info_string)\n\n def test_variables_info_string(self):\n self.assertIn('kd_490 (m-1):\\n',\n format_variables_info_string(self.first_oc_data_source.variables_info),\n self.first_oc_data_source.variables_info)\n self.assertIn('Long name: Downwelling attenuation coefficient at 490nm',\n format_variables_info_string(self.first_oc_data_source.variables_info))\n\n @unittest.skip(reason='ssl error on windows')\n def test_temporal_coverage(self):\n self.assertEqual(self.first_oc_data_source.temporal_coverage(),\n (datetime.datetime(1997, 9, 4, 0, 0), datetime.datetime(2000, 6, 24, 0, 0)))\n\n def assert_tf(self, filename: str, expected_time_format: str):\n time_format, p1, p2 = find_datetime_format(filename)\n self.assertEqual(time_format, expected_time_format)\n\n def test_time_filename_patterns(self):\n self.assert_tf('20020730174408-ESACCI-L3U_GHRSST-SSTskin-AATSR-LT-v02.0-fv01.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('19911107054700-ESACCI-L2P_GHRSST-SSTskin-AVHRR12_G-LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-SSMI-NH25kmEASE2-19920610-fv01.11.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-SSMI-SH25kmEASE2-20000101-20001231-fv01.11.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-AMSR-NH25kmEASE2-20070204-fv01.11.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-AMSR-SH25kmEASE2-20040427-fv01.11.nc', '%Y%m%d')\n self.assert_tf('19921018120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')\n self.assert_tf('19940104120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_LT-v02.0-fv01.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-OZONE-L3S-TC-MERGED-DLR_1M-20090301-fv0100.nc', '%Y%m%d')\n self.assert_tf('20070328-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-15-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20091002-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-16-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20090729-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-18-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20070410-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-17-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-K_490-MERGED-1D_DAILY_4km_SIN_PML_KD490_Lee-20000129-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-K_490-MERGED-1D_DAILY_4km_GEO_PML_KD490_Lee-19980721-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OZONE-L3-NP-MERGED-KNMI-200812-fv0002.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-CHLOR_A-MERGED-1D_DAILY_4km_GEO_PML_OC4v6-19971130-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-CHLOR_A-MERGED-1D_DAILY_4km_SIN_PML_OC4v6-19980625-fv1.0.nc', '%Y%m%d')\n self.assert_tf('200903-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-15-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRPR-20100501-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRPR-20091201-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CO2-GOSAT-SRFP-20101220-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRFP-20100109-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CO2-GOSAT-SRFP-20090527-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRFP-20100714-fv1.nc', '%Y%m%d')\n self.assert_tf('20090616-ESACCI-L3U_CLOUD-CLD_PRODUCTS-MODIS_TERRA-fv1.0.nc', 
'%Y%m%d')\n self.assert_tf('20070717-ESACCI-L3U_CLOUD-CLD_PRODUCTS-MODIS_AQUA-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_GEO_PML_OC4v6_QAA-19971211-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_SIN_PML_OC4v6_QAA-20080921-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1M_MONTHLY_4km_SIN_PML_OC4v6_QAA-200906-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1M_MONTHLY_4km_GEO_PML_OC4v6_QAA-200707-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1Y_YEARLY_4km_GEO_PML_OC4v6_QAA-2005-fv1.0.nc', '%Y')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1Y_YEARLY_4km_GEO_PML_OC4v6_QAA-2003-fv1.0.nc', '%Y')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_GEO_PML_OC4v6_QAA-19970914-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-IOP-MERGED-1D_DAILY_4km_GEO_PML_QAA-19970915-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-IOP-MERGED-1D_DAILY_4km_GEO_PML_QAA-19980724-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20020822103843-ESACCI-L3U_GHRSST-SSTskin-AATSR-LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-OZONE-L3S-TC-MERGED-DLR_1M-19980301-fv0100.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781120000000-fv02.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMV-PASSIVE-19791011000000-fv02.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMV-PASSIVE-19790519000000-fv02.2.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMS-ACTIVE-19911026000000-fv02.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMS-ACTIVE-19911010000000-fv02.2.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEALEVEL-IND-MSL-MERGED-20151104000000-fv01.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEALEVEL-IND-MSLAMPH-MERGED-20151104000000-fv01.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEALEVEL-IND-MSLTR-MERGED-20151104000000-fv01.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-OC-L3S-RRS-MERGED-1D_DAILY_4km_GEO_PML_RRS-19980418-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-RRS-MERGED-1D_DAILY_4km_SIN_PML_RRS-19980925-fv1.0.nc', '%Y%m%d')\n self.assert_tf('200811-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-18-fv1.0.nc', '%Y%m')\n self.assert_tf('200704-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-16-fv1.0.nc', '%Y%m')\n self.assert_tf('200811-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-17-fv1.0.nc', '%Y%m')\n self.assert_tf('200712-ESACCI-L3C_CLOUD-CLD_PRODUCTS-MODIS_TERRA-fv1.0.nc', '%Y%m')\n self.assert_tf('200902-ESACCI-L3C_CLOUD-CLD_PRODUCTS-MODIS_AQUA-fv1.0.nc', '%Y%m')\n self.assert_tf('200706-ESACCI-L3S_CLOUD-CLD_PRODUCTS-MODIS_MERGED-fv1.0.nc', '%Y%m')\n self.assert_tf('200901-ESACCI-L3S_CLOUD-CLD_PRODUCTS-AVHRR_MERGED-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1M_MONTHLY_4km_GEO_PML_OC4v6_QAA-200505-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1D_DAILY_4km_SIN_PML_OC4v6_QAA-19980720-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1D_DAILY_4km_GEO_PML_OC4v6_QAA-19990225-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_GEO_PML_OC4v6_QAA-19990407-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1D_DAILY_4km_GEO_PML_OC4v6_QAA-19970915-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20060107-ESACCI-L4_FIRE-BA-MERIS-fv4.1.nc', '%Y%m%d')\n", "sub_path": "test/ds/test_esa_cci_odp.py", "file_name": "test_esa_cci_odp.py", "file_ext": "py", 
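The `assert_tf` cases above pin down the behaviour of `find_datetime_format` from `cate.ds.esa_cci_odp`: given an ESA CCI filename, it returns the `strftime` pattern of the embedded timestamp together with the span where it occurs. A minimal sketch of that idea, assuming the real cate implementation may differ, is to probe digit runs from longest to shortest so that a 14-digit timestamp is never misread as an 8-digit date:

import re

# Candidate strftime patterns, longest digit run first; an illustrative
# re-implementation only, not the cate source.
_TIME_PATTERNS = [
    ('%Y%m%d%H%M%S', re.compile(r'\d{14}')),
    ('%Y%m%d', re.compile(r'\d{8}')),
    ('%Y%m', re.compile(r'\d{6}')),
    ('%Y', re.compile(r'\d{4}')),
]

def find_datetime_format_sketch(filename):
    """Return (time_format, start, end) of the first timestamp found."""
    for time_format, pattern in _TIME_PATTERNS:
        match = pattern.search(filename)
        if match:
            return time_format, match.start(), match.end()
    return None, -1, -1

# Example: find_datetime_format_sketch('ESACCI-OZONE-L3-NP-MERGED-KNMI-200812-fv0002.nc')
# returns ('%Y%m', 31, 37), consistent with the expectations tested above.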
"file_size_in_byte": 20235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "unittest.TestCase", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cate.ds.esa_cci_odp.EsaCciOdpDataStore", "line_number": 21, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "cate.ds.esa_cci_odp.EsaCciOdpDataStore", "line_number": 34, "usage_type": "call"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY.add_data_store", "line_number": 35, "usage_type": "call"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY", "line_number": 35, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 39, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 66, "usage_type": "call"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY.get_data_store", "line_number": 68, "usage_type": "call"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY", "line_number": 68, "usage_type": "name"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY.add_data_store", "line_number": 69, "usage_type": "call"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY", "line_number": 69, "usage_type": "name"}, {"api_name": "cate.ds.local.LocalDataStore", "line_number": 69, "usage_type": "call"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY.add_data_store", "line_number": 73, "usage_type": "call"}, {"api_name": "cate.core.ds.DATA_STORE_REGISTRY", "line_number": 73, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "urllib.request.request.pathname2url", "line_number": 91, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 91, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 131, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 131, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch.object", "line_number": 132, "usage_type": 
"call"}, {"api_name": "cate.ds.esa_cci_odp.EsaCciOdpDataStore", "line_number": 132, "usage_type": "argument"}, {"api_name": "unittest.mock", "line_number": 132, "usage_type": "attribute"}, {"api_name": "cate.core.types.TimeRangeLike.convert", "line_number": 135, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike", "line_number": 135, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 140, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike.convert", "line_number": 147, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike", "line_number": 147, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 148, "usage_type": "call"}, {"api_name": "cate.core.types.VarNamesLike.convert", "line_number": 149, "usage_type": "call"}, {"api_name": "cate.core.types.VarNamesLike", "line_number": 149, "usage_type": "name"}, {"api_name": "cate.core.types.TimeRangeLike.convert", "line_number": 167, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike", "line_number": 167, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 167, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 168, "usage_type": "call"}, {"api_name": "cate.core.types.PolygonLike.convert", "line_number": 169, "usage_type": "call"}, {"api_name": "cate.core.types.PolygonLike", "line_number": 169, "usage_type": "name"}, {"api_name": "cate.core.types.TimeRangeLike.convert", "line_number": 183, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike", "line_number": 183, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "call"}, {"api_name": "cate.core.types.VarNamesLike.convert", "line_number": 185, "usage_type": "call"}, {"api_name": "cate.core.types.VarNamesLike", "line_number": 185, "usage_type": "name"}, {"api_name": "cate.core.types.PolygonLike.convert", "line_number": 186, "usage_type": "call"}, {"api_name": "cate.core.types.PolygonLike", "line_number": 186, "usage_type": "name"}, {"api_name": "cate.core.types.TimeRangeLike.convert", "line_number": 205, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike", "line_number": 205, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 205, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 206, "usage_type": "call"}, {"api_name": "cate.core.types.VarNamesLike.convert", "line_number": 207, "usage_type": "call"}, {"api_name": "cate.core.types.VarNamesLike", "line_number": 207, "usage_type": "name"}, {"api_name": "cate.core.types.PolygonLike.convert", "line_number": 208, "usage_type": "call"}, {"api_name": "cate.core.types.PolygonLike", "line_number": 208, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 226, "usage_type": "call"}, {"api_name": "cate.core.ds.DataAccessError", "line_number": 227, "usage_type": "argument"}, {"api_name": "cate.core.types.TimeRangeLike.format", "line_number": 231, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike", "line_number": 231, "usage_type": "name"}, {"api_name": "cate.core.types.TimeRangeLike.convert", "line_number": 234, "usage_type": "call"}, {"api_name": "cate.core.types.TimeRangeLike", 
"line_number": 234, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 234, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 235, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 258, "usage_type": "call"}, {"api_name": "cate.core.ds.format_variables_info_string", "line_number": 265, "usage_type": "call"}, {"api_name": "cate.core.ds.format_variables_info_string", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 273, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 270, "usage_type": "call"}, {"api_name": "cate.ds.esa_cci_odp.find_datetime_format", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "385464723", "text": "#!/usr/bin/env python3\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, User, Electronics, Device\n\nengine = create_engine('sqlite:///electronics.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n# Laptop Devices\nelectronic1 = Electronics(name=\"laptops\", id=1, user_id=12345)\nsession.add(electronic1)\nsession.commit()\n\ndevice1 = Device(user_id=12345, name=\"MacBook Pro\", id=1,\n description=\"MacBook Pro made by Apple\",\n price=\"$1299.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic1)\nsession.add(device1)\nsession.commit()\n\ndevice2 = Device(user_id=12345, name=\"MacBook Air\", id=2,\n description=\"MacBook Air made by Apple\",\n price=\"$1099.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic1)\nsession.add(device2)\nsession.commit()\n\ndevice3 = Device(user_id=12345, name=\"XPS 13\", id=3,\n description=\"XPS 13 made by Dell\",\n price=\"$999.99\", year=\"2020\", brand=\"Dell\",\n electronics=electronic1)\nsession.add(device3)\nsession.commit()\n\ndevice4 = Device(user_id=12345, name=\"XPS 15\", id=4,\n description=\"XPS 15 made by Dell\",\n price=\"$1199.99\", year=\"2020\", brand=\"Dell\",\n electronics=electronic1)\nsession.add(device4)\nsession.commit()\n\ndevice5 = Device(user_id=12345, name=\"ZBook 15\", id=5,\n description=\"ZBook 15 made by HP\",\n price=\"$1299.99\", year=\"2020\", brand=\"HP\",\n electronics=electronic1)\nsession.add(device5)\nsession.commit()\n\ndevice6 = Device(user_id=12345, name=\"Pavilion x360\", id=6,\n description=\"Pavilion x360 made by HP\",\n price=\"$599.99\", year=\"2020\", brand=\"HP\",\n electronics=electronic1)\nsession.add(device6)\nsession.commit()\n\ndevice7 = Device(user_id=12345, name=\"Yoga 730\", id=7,\n description=\"Yoga 730 made by Lenovo\",\n price=\"$829.99\", year=\"2020\", brand=\"Lenovo\",\n electronics=electronic1)\nsession.add(device7)\nsession.commit()\n\ndevice8 = Device(user_id=12345, name=\"Nitro 5\", id=8,\n description=\"Nitro 5 made by Acer\",\n price=\"$749.99\", year=\"2020\", brand=\"Acer\",\n electronics=electronic1)\nsession.add(device8)\nsession.commit()\n\ndevice9 = Device(user_id=12345, name=\"Notebook 7\", id=9,\n description=\"Notebook 7 made by Samsung\",\n price=\"$799.99\", year=\"2020\", brand=\"Samsung\",\n electronics=electronic1)\nsession.add(device9)\nsession.commit()\n\ndevice10 = Device(user_id=12345, name=\"Surface Book 2\", id=10,\n description=\"Surface Book 2 made by Microsoft\",\n price=\"$1999.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic1)\nsession.add(device10)\nsession.commit()\n\n# Tablet Devices\nelectronic2 = Electronics(name=\"tablets\", id=2, 
user_id=12345)\nsession.add(electronic2)\nsession.commit()\n\ndevice1 = Device(user_id=12345, name=\"iPad Pro\", id=11,\n description=\"iPad Pro made by Apple\",\n price=\"$799.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device1)\nsession.commit()\n\ndevice2 = Device(user_id=12345, name=\"iPad Air\", id=12,\n description=\"iPad Air made by Apple\",\n price=\"$499.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device2)\nsession.commit()\n\ndevice3 = Device(user_id=12345, name=\"iPad\", id=13,\n description=\"iPad made by Apple\",\n price=\"$329.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device3)\nsession.commit()\n\ndevice4 = Device(user_id=12345, name=\"iPad mini\", id=14,\n description=\"iPad mini made by Apple\",\n price=\"$399.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device4)\nsession.commit()\n\ndevice5 = Device(user_id=12345, name=\"Surface Pro 7\", id=15,\n description=\"Surface Pro 7 made by Microsoft\",\n price=\"$699.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic2)\nsession.add(device5)\nsession.commit()\n\ndevice6 = Device(user_id=12345, name=\"Surface Go\", id=16,\n description=\"Surface Go made by Microsoft\",\n price=\"$549.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic2)\nsession.add(device6)\nsession.commit()\n\ndevice7 = Device(user_id=12345, name=\"Surface Pro X\", id=17,\n description=\"Surface Pro X made by Microsoft\",\n price=\"$1599.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic2)\nsession.add(device7)\nsession.commit()\n\ndevice8 = Device(user_id=12345, name=\"Galaxy Tab A\", id=18,\n description=\"Galaxy Tab A made by Samsung\",\n price=\"$289.99\", year=\"2020\", brand=\"Samsung\",\n electronics=electronic2)\nsession.add(device8)\nsession.commit()\n\ndevice9 = Device(user_id=12345, name=\"Galaxy Tab S6\", id=19,\n description=\"Galaxy Tab S6 made by Samsung\",\n price=\"$549.99\", year=\"2020\", brand=\"Samsung\",\n electronics=electronic2)\nsession.add(device9)\nsession.commit()\n\ndevice10 = Device(user_id=12345, name=\"Kindle\", id=20,\n description=\"Kindle made by Amazon\",\n price=\"$129.99\", year=\"2020\", brand=\"Amazon\",\n electronics=electronic2)\nsession.add(device10)\nsession.commit()\n\n# Phone Devices\nelectronic3 = Electronics(name=\"phones\", id=3, user_id=12345)\nsession.add(electronic3)\nsession.commit()\n\ndevice1 = Device(user_id=12345, name=\"iPhone 11\", id=21,\n description=\"iPhone 11 made by Apple\",\n price=\"$699.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic3)\nsession.add(device1)\nsession.commit()\n\ndevice2 = Device(user_id=12345, name=\"iPhone 11 Pro\", id=22,\n description=\"iPhone 11 Pro made by Apple\",\n price=\"$999.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic3)\nsession.add(device2)\nsession.commit()\n\ndevice3 = Device(user_id=12345, name=\"iPhone 11 Pro Max\", id=23,\n description=\"iPhone 11 Pro Max made by Apple\",\n price=\"$1099.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic3)\nsession.add(device3)\nsession.commit()\n\ndevice4 = Device(user_id=12345, name=\"Pixel 4\", id=24,\n description=\"Pixel 4 made by Google\",\n price=\"$699.99\", year=\"2020\", brand=\"Google\",\n electronics=electronic3)\nsession.add(device4)\nsession.commit()\n\ndevice5 = Device(user_id=12345, name=\"Pixel 4 XL\", id=25,\n description=\"Pixel 4 XL made by Google\",\n price=\"$749.99\", year=\"2020\", 
brand=\"Google\",\n electronics=electronic3)\nsession.add(device5)\nsession.commit()\n\ndevice6 = Device(user_id=12345, name=\"Galaxy Note10\", id=26,\n description=\"Galaxy Note10 made by Samsung\",\n price=\"$699.99\", year=\"2020\", brand=\"Samsung\",\n electronics=electronic3)\nsession.add(device6)\nsession.commit()\n\ndevice7 = Device(user_id=12345, name=\"Galaxy S10\", id=27,\n description=\"Galaxy S10 made by Samsung\",\n price=\"$599.99\", year=\"2020\", brand=\"Samsung\",\n electronics=electronic3)\nsession.add(device7)\nsession.commit()\n\n# Video Game Console Devices\nelectronic4 = Electronics(name=\"video-game-consoles\", id=4, user_id=12345)\nsession.add(electronic4)\nsession.commit()\n\ndevice1 = Device(user_id=12345, name=\"Xbox One S\", id=28,\n description=\"Xbox One S made by Microsoft\",\n price=\"$199.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic4)\nsession.add(device1)\nsession.commit()\n\ndevice2 = Device(user_id=12345, name=\"Xbox One X\", id=29,\n description=\"Xbox One X made by Microsoft\",\n price=\"$299.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic4)\nsession.add(device2)\nsession.commit()\n\ndevice3 = Device(user_id=12345, name=\"PS4\", id=30,\n description=\"PS4 made by Sony\",\n price=\"$299.99\", year=\"2020\", brand=\"Sony\",\n electronics=electronic4)\nsession.add(device3)\nsession.commit()\n\ndevice4 = Device(user_id=12345, name=\"PS4 Pro\", id=31,\n description=\"PS4 Pro made by Sony\",\n price=\"$399.99\", year=\"2020\", brand=\"Sony\",\n electronics=electronic4)\nsession.add(device4)\nsession.commit()\n\ndevice5 = Device(user_id=12345, name=\"Switch\", id=32,\n description=\"Switch made by Nintendo\",\n price=\"$299.99\", year=\"2020\", brand=\"Nintendo\",\n electronics=electronic4)\nsession.add(device5)\nsession.commit()\n\nprint(\"Added electronic devices!\")\n", "sub_path": "catalog/lotsoftech.py", "file_name": "lotsoftech.py", "file_ext": "py", "file_size_in_byte": 9143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 6, "usage_type": "call"}, {"api_name": "database_setup.Base.metadata", "line_number": 7, "usage_type": "attribute"}, {"api_name": "database_setup.Base", "line_number": 7, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 8, "usage_type": "call"}, {"api_name": "database_setup.Electronics", "line_number": 12, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 16, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 23, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 30, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 37, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 44, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 51, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 58, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 65, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 72, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 79, "usage_type": "call"}, {"api_name": "database_setup.Electronics", "line_number": 87, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 91, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 98, "usage_type": 
"call"}, {"api_name": "database_setup.Device", "line_number": 105, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 112, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 119, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 126, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 133, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 140, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 147, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 154, "usage_type": "call"}, {"api_name": "database_setup.Electronics", "line_number": 162, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 166, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 173, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 180, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 187, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 194, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 201, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 208, "usage_type": "call"}, {"api_name": "database_setup.Electronics", "line_number": 216, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 220, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 227, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 234, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 241, "usage_type": "call"}, {"api_name": "database_setup.Device", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "602678283", "text": "# -*- coding: utf-8 -*-\nimport json\nfrom django.shortcuts import redirect, render, render_to_response, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom Class.models import Class\nfrom Course.control import get_course, create_course\nfrom .models import Course\nfrom MajorProfessional.models import MajorProfessional\nfrom Account.models import Account\nfrom Classroom.models import Classroom\nfrom datetime import datetime\n\n\ndef course_index(request):\n majorprofessional_list = MajorProfessional.objects.filter(parent__isnull=True)\n classroom_list = Classroom.objects.filter(parent__isnull=True)\n this_year = datetime.today().year\n year_list = [str(this_year-1) + '-' + str(this_year), str(this_year-2) + '-' + str(this_year-1)]\n semester_list = Course.SEMESTER.values()\n semester_year_list = [x+y for x in year_list for y in semester_list]\n # 没有请求时默认为空\n if 'year' in request.GET:\n course_list = Course.objects.all()\n if 'teacher' in request.GET and request.GET['teacher']:\n course_list = course_list.filter(teacher=request.GET['teacher'])\n elif 'class_room' in request.GET and request.GET['class_room']:\n course_list = course_list.filter(classroom=request.GET['class_room'])\n elif 'class_name' in request.GET and request.GET['class_name']:\n course_list = course_list.filter(class_name=request.GET['class_name'])\n if 'year' in request.GET and request.GET['year']:\n value = '0' + bin(int(request.GET['year']))[2:]\n year = year_list[int(value[-2])]\n semester = Course.SEMESTER.keys()[int(value[-1])]\n course_list = course_list.filter(year=year, 
semester=semester)\n course = []\n for time in Course.TIME.keys():\n course_info = course_list.filter(time=time)\n # Get the courses of each day for this time slot\n info_list = get_course(course_info)\n # Bind them into course together with the time-slot label\n course.append([Course.TIME.get(time)] + info_list)\n return render(request, 'Course/course_index.html', locals())\n\n\n@login_required\ndef course_edit(request, Course_id=None):\n if request.method == 'POST':\n # Parse the submitted data\n data = json.loads(request.POST['data'])\n num = int(data['num'])\n class_name = data['class']\n year = data['year']\n semester = data['semester']\n # class_room and day_time are used to resolve time/room conflicts\n DAY = Course.DAY.keys()\n TIME = Course.TIME.keys()\n day_time = [(x, y) for x in DAY for y in TIME]\n class_room = Classroom.objects.filter(status=0, parent__isnull=False)\n # Create one course object per requested course\n for x in range(num):\n account = data['course' + str(x)][0]\n number = data['course' + str(x)][1]\n # Build a course object from the class, course and session count\n course = create_course(class_room, class_name, account, number, day_time, year, semester)\n return HttpResponseRedirect(reverse('course_detail', args=(class_name, year, semester)))\n else:\n major_list = MajorProfessional.objects.filter(parent__isnull=False)\n account_list = Account.objects.all()\n semester_list = Course.SEMESTER\n return render(request, 'Course/course_edit.html', locals())\n\n\ndef course_detail(request, class_name=None, year=None, semester=None):\n course_list = Course.objects.filter(class_name=class_name,\n year=year, semester=semester)\n course = []\n for time in Course.TIME.keys():\n course_info = course_list.filter(time=time)\n # Get the courses of each day for this time slot\n info_list = get_course(course_info)\n # Bind them into course together with the time-slot label\n course.append([Course.TIME.get(time)] + info_list)\n return render(request, 'Course/course_detail.html', locals())\n", "sub_path": "Course/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "MajorProfessional.models.MajorProfessional.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "MajorProfessional.models.MajorProfessional.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "MajorProfessional.models.MajorProfessional", "line_number": 17, "usage_type": "name"}, {"api_name": "Classroom.models.Classroom.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "Classroom.models.Classroom.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Classroom.models.Classroom", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Course.SEMESTER.values", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Course.SEMESTER", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 21, "usage_type": "name"}, {"api_name": "models.Course.objects.all", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 25, "usage_type": "name"}, {"api_name": "models.Course.SEMESTER.keys", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Course.SEMESTER", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Course.TIME.keys", "line_number": 38, "usage_type": 
"call"}, {"api_name": "models.Course.TIME", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 38, "usage_type": "name"}, {"api_name": "Course.control.get_course", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Course.TIME.get", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Course.TIME", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 43, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Course.DAY.keys", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Course.DAY", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 57, "usage_type": "name"}, {"api_name": "models.Course.TIME.keys", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Course.TIME", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 58, "usage_type": "name"}, {"api_name": "Classroom.models.Classroom.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "Classroom.models.Classroom.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "Classroom.models.Classroom", "line_number": 60, "usage_type": "name"}, {"api_name": "Course.control.create_course", "line_number": 66, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 67, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 67, "usage_type": "call"}, {"api_name": "MajorProfessional.models.MajorProfessional.objects.filter", "line_number": 69, "usage_type": "call"}, {"api_name": "MajorProfessional.models.MajorProfessional.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "MajorProfessional.models.MajorProfessional", "line_number": 69, "usage_type": "name"}, {"api_name": "Account.models.Account.objects.all", "line_number": 70, "usage_type": "call"}, {"api_name": "Account.models.Account.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "Account.models.Account", "line_number": 70, "usage_type": "name"}, {"api_name": "models.Course.SEMESTER", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 71, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 47, "usage_type": "name"}, {"api_name": "models.Course.objects.filter", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 76, "usage_type": "name"}, {"api_name": "models.Course.TIME.keys", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Course.TIME", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 79, "usage_type": "name"}, {"api_name": "Course.control.get_course", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Course.TIME.get", "line_number": 84, "usage_type": "call"}, {"api_name": "models.Course.TIME", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 84, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "126542352", "text": "\"\"\"\nCopyright 2020 Teng 
Huang\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nfrom janos import *\nimport pandas as pd\nimport numpy as np\nimport sys\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom datetime import datetime\nimport time\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\npd.options.mode.chained_assignment = None\n\n\"\"\"\nload data\n\"\"\"\n# This is the data frame for training the predictive models.\nhistorical_student_data = pd.read_csv(\"./data/college_student_enroll-s1-1.csv\")\n\n# This is the information about applicants whose financial aid is to be determined.\n# We will use these numbers (SAT, GPA) later in the objective function.\napplications = pd.read_csv(\"./data/college_applications6000.csv\")\n\n\"\"\"\nset the constants in the model\n\"\"\"\nscholarships = [0, 2.5] # lower and upper bound of the scholarship\nn_simulations = 5 # to have meaningful mean and standard deviation\nstudent_sizes = [50, 100, 500, 1000] # we measure these predictions' RMSE\n# interview_sizes = [5, 10, 15, 20, 25]\nLAYERS = 3\nnodes_per_layer = 10\n\"\"\"\npretrained model\n\"\"\"\n# Assign X and y\nX = historical_student_data[[\"SAT\", \"GPA\", \"merit\"]]\ny = historical_student_data[[\"enroll\"]]\n\n# Before training the model, standardize SAT and GPA.\n# For convenience, we do not standardize merit.\nscaler_sat = StandardScaler().fit(X[[\"SAT\"]])\nscaler_gpa = StandardScaler().fit(X[[\"GPA\"]])\nX['SAT_scaled'] = scaler_sat.transform(X[['SAT']])\nX['GPA_scaled'] = scaler_gpa.transform(X[['GPA']])\n\n# Also, standardize the SAT and GPA in the application data\napplications[\"SAT_scaled\"] = scaler_sat.transform(applications[[\"SAT\"]])\napplications[\"GPA_scaled\"] = scaler_gpa.transform(applications[[\"GPA\"]])\n\n\"\"\"\nPrepare the output file\n\"\"\"\nnow = datetime.now()\ndate_time = now.strftime(\"%H-%M-%S-%Y%m%d\")\nfilename = \"20200501_neural_network_\" + date_time + \".txt\"\noutput = open(filename, \"w\")\noutput.write(\"PM\\t\\tstudent_size\\t\\tn_layers\\t\\titeration\\t\\tjanos_time\\t\\tgurobi_time\\t\\tobj_val\\n\")\noutput.close()\n\nfor student_size in student_sizes:\n n_applications = student_size\n BUDGET = int(0.2 * n_applications)\n hidden_layer_sizes = []\n for n_layers in range(LAYERS):\n\n hidden_layer_sizes.append(nodes_per_layer)\n\n my_logistic_regression = MLPRegressor(\n hidden_layer_sizes=hidden_layer_sizes, random_state=0) ### 
TODO: how to link training and optimization!\n my_logistic_regression.fit(X[[\"SAT_scaled\", \"GPA_scaled\", \"merit\"]], y)\n\n for iter in range(n_simulations):\n random_sample = applications.sample(student_size, random_state=iter)\n random_sample = random_sample.reset_index()\n\n m = JModel()\n\n # Define regular variables\n assign_scholarship = m.add_regular_variables([n_applications], \"assign_scholarship\")\n for app_index in range(n_applications):\n assign_scholarship[app_index].setContinuousDomain(lower_bound=scholarships[0],\n upper_bound=scholarships[1])\n assign_scholarship[app_index].setObjectiveCoefficient(0)\n\n # Define predicted variables\n # First, we need to create structures of predictive models. In this case, we associate such a structure with an existing / pretrained logistic regression model.\n logistic_regression_model = OptimizationPredictiveModel(m, pretrained_model=my_logistic_regression,\n feature_names=[\"SAT_scaled\", \"GPA_scaled\", \"merit\"])\n\n # Now, we could define the predicted decision variables and associate them with the predicted model structure.\n enroll_probabilities = m.add_predicted_variables([n_applications], \"enroll_probs\")\n for app_index in range(n_applications):\n enroll_probabilities[app_index].setObjectiveCoefficient(1)\n mapping_of_vars = {\"merit\": assign_scholarship[app_index],\n \"SAT_scaled\": random_sample[\"SAT_scaled\"][app_index],\n \"GPA_scaled\": random_sample[\"GPA_scaled\"][app_index]}\n enroll_probabilities[app_index].setPM(logistic_regression_model, mapping_of_vars)\n\n # Construct constraints\n # \\sum_i x_i <= BUDGET\n scholarship_deployed = Expression()\n\n for app_index in range(n_applications):\n scholarship_deployed.add_term(assign_scholarship[app_index], 1)\n\n m.add_constraint(scholarship_deployed, \"less_equal\", BUDGET)\n # m.add_gurobi_param_settings(\"MIPGap\", 0.01)\n\n # solve the model\n m.add_gurobi_param_settings('TimeLimit', 1800)\n m.add_gurobi_param_settings('DUALREDUCTIONS', 0)\n m.add_gurobi_param_settings('MIPGap', 0.001)\n m.add_gurobi_param_settings('Threads', 1)\n m.set_output_flag(0)\n m.solve()\n\n \"\"\"\n write output\n borrowed from https://www.gurobi.com/documentation/8.1/examples/workforce1_py.html\n \"\"\"\n status = m.gurobi_model.status\n\n if status == GRB.Status.UNBOUNDED:\n print('The model cannot be solved because it is unbounded')\n sys.exit(0)\n elif status == GRB.Status.OPTIMAL:\n output = open(filename, \"a\")\n output.write(\"NN\\t\\t\" + str(student_size) + \"\\t\\t\" + str(n_layers) + \"\\t\\t\" + str(iter) +\n \"\\t\\t\" + str(m.get_time()) + \"\\t\\t\" + str(m.gurobi_model.runtime) +\n \"\\t\\t\" + str(m.gurobi_model.objval) + \"\\n\")\n output.close()\n\n elif status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:\n print('Optimization was stopped with status %d' % status)\n else:\n # if none of the above, then do IIS\n print('The model is infeasible; computing IIS')\n m.gurobi_model.computeIIS()\n m.gurobi_model.write(\"ip_model_inf.ilp\")\n if m.gurobi_model.IISMinimal:\n print('IIS is minimal\\n')\n else:\n print('IIS is not minimal\\n')\n print('\\nThe following constraint(s) cannot be satisfied:')\n for c in m.gurobi_model.getConstrs():\n if c.IISConstr:\n print('%s' % c.constrName)\n", "sub_path": "scripts/evaluate_neural_network_20200430.py", "file_name": "evaluate_neural_network_20200430.py", "file_ext": "py", "file_size_in_byte": 7613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": 
[{"api_name": "pandas.options", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "name"}, {"api_name": "sklearn.neural_network.MLPRegressor", "line_number": 93, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "495352119", "text": "from network import Network\nfrom common import mean_square_error, save, load_args\nimport numpy\nfrom sys import argv\nfrom time import time\n\nacceptable_error, learning_rate, momentum, hidden_size, bias_switch, lower_limit, upper_limit, set_size, path = load_args(argv)\nprint('acceptable error:\\t' + str(acceptable_error) + '\\n' +\n 'learning rate: \\t' + str(learning_rate) + '\\n' +\n 'momentum: \\t' + str(momentum) + '\\n' +\n 'hidden size: \\t' + str(hidden_size) + '\\n' +\n 'bias switch: \\t' + str(bias_switch) + '\\n' +\n 'lower limit: \\t' + str(lower_limit) + '\\n' +\n 'upper limit: \\t' + str(upper_limit) + '\\n' +\n 'set size: \\t' + str(set_size) + '\\n')\n\n# start\ninput_list = []\ntarget_list = []\nnetwork = Network(1, hidden_size, 1, learning_rate, bias_switch, momentum)\n\n# generate training sets\nfor epoch in range(set_size):\n r = numpy.random.randint(lower_limit, upper_limit + 1)\n input_list.append(r)\n target_list.append(numpy.sqrt(r))\n\n# train\nepoch = 0\nerror = 10\nerror_ar = []\nstart = time() * 1000\nwhile epoch < 100000 and error > acceptable_error:\n error_ar.clear()\n for j in range(len(input_list)):\n r = numpy.random.randint(0, len(input_list))\n network.train(input_list[r], target_list[r])\n error_ar.append(mean_square_error(network.query, input_list, target_list))\n error = numpy.sum(error_ar) / len(error_ar)\n if epoch % 100 == 0:\n print(str(epoch) + '\\t\\terror = ' + str(error))\n epoch += 1\nstop = time() * 1000\n\nprint('\\n' + str(epoch) + '\\t\\terror = ' + str(error))\n\n# save\nsave(network, path + '.ser')\n# save(network.w_ih.tolist(), 'w_ih.ser')\n# save(network.w_ho.tolist(), 'w_ho.ser')\n# save(network.b_ih.tolist(), 'b_ih.ser')\n# save(network.b_ho.tolist(), 'b_ho.ser')\n\nwith open(path + '.txt', 'w', encoding='utf-8') as f:\n f.write(str(error) + '\\n')\n f.write(str(epoch) + '\\n')\n f.write(str(format(stop - start, '.3f')))\n", "sub_path": "zad2/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1963, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "common.load_args", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "argument"}, {"api_name": "network.Network", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": 
"attribute"}, {"api_name": "network.train", "line_number": 37, "usage_type": "call"}, {"api_name": "common.mean_square_error", "line_number": 38, "usage_type": "call"}, {"api_name": "network.query", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "common.save", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "385250553", "text": "import torch\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndef collate_self_train(batch):\n batch_mod = {'sketch_img': [], 'sketch_boxes': [],\n 'positive_img': [], 'positive_boxes': [],\n 'negative_img': [], 'negative_boxes': [],\n }\n for i_batch in batch:\n batch_mod['sketch_img'].append(i_batch['sketch_img'])\n batch_mod['positive_img'].append(i_batch['positive_img'])\n batch_mod['negative_img'].append(i_batch['negative_img'])\n batch_mod['sketch_boxes'].append(torch.tensor(i_batch['sketch_boxes']).float())\n batch_mod['positive_boxes'].append(torch.tensor(i_batch['positive_boxes']).float())\n batch_mod['negative_boxes'].append(torch.tensor(i_batch['negative_boxes']).float())\n\n batch_mod['sketch_img'] = torch.stack(batch_mod['sketch_img'], dim=0)\n batch_mod['positive_img'] = torch.stack(batch_mod['positive_img'], dim=0)\n batch_mod['negative_img'] = torch.stack(batch_mod['negative_img'], dim=0)\n\n return batch_mod\n\n\ndef collate_self_test(batch):\n batch_mod = {'sketch_img': [], 'sketch_boxes': [], 'sketch_path': [],\n 'positive_img': [], 'positive_boxes': [], 'positive_path': [],\n }\n\n for i_batch in batch:\n batch_mod['sketch_img'].append(i_batch['sketch_img'])\n batch_mod['sketch_path'].append(i_batch['sketch_path'])\n batch_mod['positive_img'].append(i_batch['positive_img'])\n batch_mod['positive_path'].append(i_batch['positive_path'])\n batch_mod['sketch_boxes'].append(torch.tensor(i_batch['sketch_boxes']).float())\n batch_mod['positive_boxes'].append(torch.tensor(i_batch['positive_boxes']).float())\n\n batch_mod['sketch_img'] = torch.stack(batch_mod['sketch_img'], dim=0)\n batch_mod['positive_img'] = torch.stack(batch_mod['positive_img'], dim=0)\n\n return batch_mod", "sub_path": "Code/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.device", "line_number": 2, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 2, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 2, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "192783643", "text": "import MySQLdb\nimport database_creds as dbc\n\n#SQL\nlast_processed_id_zero = 'INSERT INTO last_processed_id (id) VALUES 
(0)'\nlast_processed_id_update = 'UPDATE last_processed_id SET id = %s;'\nall_user_ids = 'SELECT DISTINCT(user_id) FROM raw_data;'\nset_rank = 'SET @rank=0;'\nfind_row_number = \"SELECT rank from (select @rank:=@rank+1 'rank', id from raw_data) AS T WHERE id = %s;\"\ngroup_data = 'SELECT SUM(amount), COUNT(DISTINCT event_id), MAX(amount), MIN(amount), user_id FROM raw_data GROUP BY user_id LIMIT %s , %s;'\nagg_data_update = 'UPDATE agg_data SET balance=balance+%s, event_number = event_number+%s, best_event = %s, worst_event=%s WHERE user_id = %s;'\nfind_id_by_row = 'SELECT id FROM raw_data LIMIT %s, 1;'\nlast_processed_id_select = 'SELECT id FROM last_processed_id LIMIT 1'\nagg_data_insert_ids = 'INSERT INTO agg_data (user_id) VALUES (%s);'\ncheck_agg_data = 'SELECT * FROM agg_data LIMIT 1'\ncount_all_rows = 'SELECT COUNT(*) FROM raw_data;'\n\nROW_NUM = 10000\n\n\ndef check_agg_empty():\n user_ids = []\n cur.execute(all_user_ids)\n for uid in cur:\n user_ids.append(uid)\n cur.executemany(agg_data_insert_ids, user_ids)\n db.commit()\n\n\ndef get_rows(number_of_rows, last_id):\n cur.execute(set_rank)\n cur.execute(find_row_number, last_id)\n last_row_number = cur.fetchone()\n print(last_id, last_row_number)\n cur.execute(group_data, (last_row_number[0]-1, last_row_number[0]+number_of_rows))\n print(cur.fetchall())\n cur_n=db.cursor()\n cur_n.executemany(agg_data_update, cur)\n db.commit()\n return last_row_number\n \n\nif __name__=='__main__':\n\n try:\n db = MySQLdb.connect(dbc.host, dbc.user, dbc.passwd, dbc.db_name)\n cur = db.cursor()\n except:\n exit('Connection failed. Something went wrong') \n\n cur.execute(check_agg_data)\n if not cur.fetchone():\n check_agg_empty() \n\n cur.execute(last_processed_id_select)\n last_id = cur.fetchone()\n if not last_id:\n cur.execute(last_processed_id_zero)\n cur.execute(find_id_by_row, (0,))\n last_id = cur.fetchone()\n cur.execute(last_processed_id_update, last_id)\n db.commit()\n\n while last_id:\n last_row_number = get_rows(ROW_NUM, last_id)\n cur.execute(find_id_by_row, (last_row_number[0]+ROW_NUM-1,))\n last_id = cur.fetchone()\n print(last_id)\n if last_id:\n cur.execute(last_processed_id_update, last_id)\n else:\n cur.execute(count_all_rows)\n rows_num = cur.fetchone()\n cur.execute(find_id_by_row, (rows_num[0]-1,))\n last_id = cur.fetchone()\n cur.execute(last_processed_id_update, last_id)\n last_id = None\n db.commit() \n \n print(\"FINISH\")\n # db.commit()\n \n \n \n \n \n \n\n", "sub_path": "fill_agg_data.py", "file_name": "fill_agg_data.py", "file_ext": "py", "file_size_in_byte": 2893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "MySQLdb.connect", "line_number": 46, "usage_type": "call"}, {"api_name": "database_creds.host", "line_number": 46, "usage_type": "attribute"}, {"api_name": "database_creds.user", "line_number": 46, "usage_type": "attribute"}, {"api_name": "database_creds.passwd", "line_number": 46, "usage_type": "attribute"}, {"api_name": "database_creds.db_name", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "123980066", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cvxpy as cv\r\n\r\n# dataset 4\r\nn = 200\r\nx_d4 = 3 * (np.random.rand(n, 4) - 0.5)\r\ny_d4 = (2 * x_d4[:, 0] - 1 * x_d4[:,1] + 0.5 + 0.5 * np.random.randn(n)) > 0\r\ny_d4 = 2 * y_d4 -1\r\n\r\n#model\r\nw = np.array([1,1,1,1])\r\n\r\n#sigmoid_vector\r\ndef sigmoid_vector(y, w_dot_x):\r\n return 1/(1+np.exp(-y*w_dot_x))\r\n\r\nlmd = 
0.01\r\n\r\n#loss function\r\ndef J(x, y, w):\r\n return np.sum(entropy_vec) + lmd*np.dot(w,w.T)\r\n w_dot_x = np.dot(x,w.T)\r\n sig_vec = sigmoid_vector(y, w_dot_x)\r\n entropy_vec = -np.log(sig_vec)\r\n return np.sum(entropy_vec) + lmd*np.dot(w,w.T)\r\n\r\n#gradient\r\ndef J_grad(x, y, w):\r\n w_dot_x = np.dot(x,w)\r\n sig_vec = sigmoid_vector(y, w_dot_x)\r\n #coefficient vector\r\n co_vec = -y*(-sig_vec+1)\r\n return np.sum(np.dot(np.diag(co_vec), x), axis=0) + 2*lmd*w\r\n\r\ndef vectorize_mats(x1,x2):\r\n shape = x1.shape\r\n n = shape[0]\r\n d = shape[1]\r\n xx1 = np.tile(x1,(1,d)).reshape(1,n*d*d)\r\n xx2 = np.tile(x2.reshape(n*d,1),(1,d)).reshape(1,n*d*d)\r\n return (xx1*xx2).reshape(n,d,d)\r\n\r\n#hessian\r\ndef J_hessian(x, y, w):\r\n shape = x.shape\r\n d = shape[1]\r\n w_dot_x = np.dot(x,w)\r\n sig_vec = sigmoid_vector(y, w_dot_x)\r\n #coefficient vector\r\n co_vec = sig_vec*(-sig_vec+1)\r\n return np.sum(vectorize_mats(np.dot(np.diag(co_vec), x), x), axis=0) + 2*lmd*np.diag(np.ones(d))\r\n\r\n#print(J_hessian(x_d4, y_d4, w))\r\n\r\n#train w below this comment\r\nepoch = 50\r\n#history of loss in each method\r\nbatch_hist = []\r\nnewton_hist = []\r\n\r\n#upper bound of the Lipschitz constant of the gradient\r\nmax_hessian = np.sum(0.25*vectorize_mats(x_d4, x_d4), axis=0) + 2*lmd*np.diag(np.ones((x_d4.shape[1])))\r\nlip = np.linalg.norm(max_hessian, 2)\r\nprint(lip)\r\nrate = 1/(lip)\r\n\r\ndef train_batch(x,y,w,epoch):\r\n global batch_hist\r\n batch_hist = []\r\n for i in np.arange(epoch):\r\n w = w - rate * J_grad(x, y, w)\r\n batch_hist.append(J(x, y, w))\r\n return w\r\n\r\ndef train_newton(x,y,w,epoch):\r\n global newton_hist\r\n newton_hist = []\r\n for i in np.arange(epoch):\r\n w = w - (np.linalg.inv(J_hessian(x, y, w))).dot(J_grad(x, y, w))\r\n newton_hist.append(J(x, y, w))\r\n return w\r\n\r\nw1 = train_batch(x_d4, y_d4, w, epoch)\r\nw2 = train_newton(x_d4, y_d4, w, epoch)\r\n\r\nJ_min = min(np.array(batch_hist).min(), np.array(newton_hist).min())\r\nbatch_hist = batch_hist - J_min \r\nnewton_hist = newton_hist - J_min \r\nprint(w1)\r\nprint(w2)\r\n\r\n#graph\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nax.plot(np.arange(epoch), batch_hist, label='batch')\r\nax.plot(np.arange(epoch), newton_hist, label='newton')\r\nax.set_yscale('log')\r\nax.set_ylim(pow(10,-15),pow(10,2))\r\n\r\nfig.savefig('1-1.png')", "sub_path": "1-1.py", "file_name": "1-1.py", "file_ext": "py", "file_size_in_byte": 2641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.random.rand", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 39, 
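The constant step `rate = 1/lip` used by `train_batch` above comes from a standard smoothness bound; a short sketch using this script's own definitions of `J`, `lmd` and `max_hessian`: because the logistic sigmoid satisfies $\sigma(1-\sigma) \le 1/4$, the Hessian of the regularized loss obeys

$$\nabla^2 J(w) = \sum_i \sigma_i (1-\sigma_i)\, x_i x_i^\top + 2\lambda I \preceq \tfrac{1}{4} \sum_i x_i x_i^\top + 2\lambda I,$$

so the gradient of $J$ is Lipschitz with constant $L = \lVert \tfrac{1}{4} \sum_i x_i x_i^\top + 2\lambda I \rVert_2$, which is exactly the spectral norm `lip = np.linalg.norm(max_hessian, 2)` computed in the code; gradient descent with the fixed step $1/L$ is then guaranteed not to increase $J$ at any iteration.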
"usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "451960021", "text": "import pdb\n\"\"\"\nCreated on Wed Aug 27 22:31:09 2014\n\n@author: rkp\n\nSet of functions for pulling out the nodes and edges according to specific\nranking criteria.\n\"\"\"\n\nimport numpy as np\nimport operator\nimport networkx as nx\n\n\ndef reciprocity(W_net):\n \"\"\"Calculate the percentage of reciprocal connections.\"\"\"\n W_binary = W_net > 0\n np.fill_diagonal(W_binary, False)\n total_cxns = W_binary.sum()\n recip_cxns = (W_binary * W_binary.T).sum()\n arecip_cxns = total_cxns - recip_cxns\n\n# import pdb; pdb.set_trace()\n recip_coeff = recip_cxns / (recip_cxns + 2. 
* arecip_cxns)\n return recip_coeff\n\n\ndef out_in(W_net, labels, binarized=True):\n \"\"\"Calculate the output/input ratio given the weight matrix.\"\"\"\n if binarized:\n W = (W_net > 0).astype(float)\n else:\n W = W_net.copy()\n # Calculate total output & input connections\n out_total = W.sum(axis=1)\n in_total = W.sum(axis=0)\n out_in_vec = out_total.astype(float) / in_total\n # Put into dictionary format\n out_dict = {labels[idx]: out_total[idx] for idx in range(len(labels))}\n in_dict = {labels[idx]: in_total[idx] for idx in range(len(labels))}\n out_in_dict = {labels[idx]: out_in_vec[idx] for idx in range(len(labels))}\n\n return out_dict, in_dict, out_in_dict\n\n\ndef get_ranked(criteria_dict, high_to_low=True):\n \"\"\"Get labels & criteria, sorted (ranked) by criteria.\"\"\"\n\n dict_list_sorted = sorted(criteria_dict.iteritems(),\n key=operator.itemgetter(1), reverse=high_to_low)\n\n labels_sorted = [item[0] for item in dict_list_sorted]\n criteria_sorted = [item[1] for item in dict_list_sorted]\n\n return labels_sorted, criteria_sorted\n\n\ndef node_edge_overlap(node_list, edge_list):\n \"\"\"Calculate the overlap of a set of nodes and edges.\n Returns which edges are touching a node and which connect two nodes.\"\"\"\n\n # Calculate how many edges contain at least one node in node list\n edges_touching = [edge for edge in edge_list if edge[0] in node_list\n or edge[1] in node_list]\n edges_connecting = [edge for edge in edge_list if edge[0] in node_list\n and edge[1] in node_list]\n\n return edges_touching, edges_connecting\n\n\ndef bidirectional_metrics(W_net, coords, labels, binarized=False):\n \"\"\"Calculate bidirectionality metrics for a graph given its weights.\n\n Returns:\n List of labeled nonzero edges, (Ne x 3) array of distance,\n bidirectionality coefficient, and connection strength.\"\"\"\n if binarized:\n W_bi = (W_net > 0).astype(float)\n else:\n W_bi = W_net.copy()\n\n # Get nonzero elements of W_bi\n nz_idxs = np.array(W_bi.nonzero()).T\n nz_idxs = np.array([nz_idx for nz_idx in nz_idxs\n if labels[nz_idx[0]][:-2] != labels[nz_idx[1]][:-2]])\n\n # Generate edge labels\n edges = [(labels[nz_idx[0]], labels[nz_idx[1]]) for nz_idx in nz_idxs]\n\n # Make array for storing bidirectional metrics\n bd_metrics = np.zeros((len(edges), 3), dtype=float)\n\n # Calculate all metrics\n for e_idx, nz in enumerate(nz_idxs):\n # Distance\n d = np.sqrt(np.sum((coords[nz[0], :] - coords[nz[1], :]) ** 2))\n # Strength\n s = W_bi[nz[0], nz[1]] + W_bi[nz[1], nz[0]]\n # Bidirectionality coefficient\n bdc = 1 - np.abs(W_bi[nz[0], nz[1]] - W_bi[nz[1], nz[0]]) / s\n # Store metrics\n bd_metrics[e_idx, :] = [d, bdc, s]\n\n return edges, bd_metrics\n\n\ndef whole_graph_metrics(graph, weighted=False):\n graph_metrics = {}\n\n # Shortest average path length\n graph_metrics['avg_shortest_path'] = \\\n nx.average_shortest_path_length(graph, weight=weighted)\n\n # Average eccentricity\n ecc_dict = nx.eccentricity(graph)\n graph_metrics['avg_eccentricity'] = np.mean(np.array(ecc_dict.values()))\n\n # Average clustering coefficient\n # NOTE: Option to include or exclude zeros\n graph_metrics['avg_ccoeff'] = \\\n nx.average_clustering(graph, weight=weighted, count_zeros=True)\n\n # Average node betweeness\n avg_node_btwn_dict = nx.betweenness_centrality(graph, normalized=True)\n graph_metrics['avg_node_btwn'] = \\\n np.mean(np.array(avg_node_btwn_dict.values()))\n\n # Average edge betweeness\n avg_edge_btwn_dict = nx.edge_betweenness_centrality(graph, normalized=True)\n 
graph_metrics['avg_edge_btwn'] = \\\n np.mean(np.array(avg_edge_btwn_dict.values()))\n\n # Number of isolates\n graph_metrics['isolates'] = len(nx.isolates(graph))\n\n return graph_metrics\n", "sub_path": "network_compute.py", "file_name": "network_compute.py", "file_ext": "py", "file_size_in_byte": 4544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.fill_diagonal", "line_number": 19, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 101, "usage_type": "call"}, {"api_name": "networkx.average_shortest_path_length", "line_number": 113, "usage_type": "call"}, {"api_name": "networkx.eccentricity", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "networkx.average_clustering", "line_number": 122, "usage_type": "call"}, {"api_name": "networkx.betweenness_centrality", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 127, "usage_type": "call"}, {"api_name": "networkx.edge_betweenness_centrality", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "networkx.isolates", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "464322183", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport config\nfrom transformer_zero_velocity import Transformer\nfrom utils import dataloader\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nDATA_DIR = config.DATA_DIR\ntrain_X, train_Y, dev_X, dev_Y = dataloader.load_data(DATA_DIR)\n\nbatch_size = config.BATCH_SIZE\nnum_epochs = config.NUM_EPOCHS\ninitial_lr = config.LR\nhidden_size = config.HIDDEN_SIZE\nnum_layers = config.NUM_LAYERS\n\n# Define model\nprint(\"Build Transformer model ..\")\nmodel = Transformer()\n\nmodel.to(device)\nloss_function = nn.NLLLoss()\nval_acc = 0.0\noptimizer = optim.Adam(model.parameters(), lr=initial_lr)\nscheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 120, 160, 200], gamma=0.8)\n# scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=32)\ntrain_on_gpu = torch.cuda.is_available()\nif train_on_gpu:\n print(\"\\n Training on GPU\")\nelse:\n print(\"\\n No GPU, training on CPU\")\n\nnum_batches = int(train_X.shape[0] / batch_size)\nnum_dev_batches = int(dev_X.shape[0] / batch_size)\n\nval_loss_list, val_accuracy_list, epoch_list = [], [], []\n\nprint(\"Training ...\")\nprint(\"learning rate: \", optimizer.defaults['lr'])\nfor epoch in range(num_epochs):\n\n train_running_loss, train_acc = 0.0, 0.0\n # print(model.hidden)\n # model.hidden = model.init_hidden()\n for i in range(num_batches):\n\n model.zero_grad()\n # TODO:see notes\n X_local_minibatch, y_local_minibatch = (\n train_X[i * batch_size: (i + 1) * 
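network_compute.py above is Python 2 in places: `get_ranked` calls `dict.iteritems()`, and `whole_graph_metrics` passes `dict.values()` views straight into `np.array()`. A hedged Python 3 usage sketch (the toy weight matrix and degree dict are invented) showing the equivalent calls:

```python
import numpy as np

W = np.array([[0., 2., 0.],
              [1., 0., 3.],
              [0., 0., 0.]])

# Edges 0->1 and 1->0 are reciprocal, 1->2 is not, so
# recip_cxns = 2, arecip_cxns = 1 and the coefficient is 2 / (2 + 2*1) = 0.5.
print(reciprocity(W))

# Python 3 equivalent of get_ranked's iteritems()-based sort:
degree = {'A': 3, 'B': 1, 'C': 2}
labels_sorted = [k for k, _ in sorted(degree.items(), key=lambda kv: kv[1],
                                      reverse=True)]
print(labels_sorted)  # ['A', 'C', 'B']
```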
batch_size, ],\n train_Y[i * batch_size: (i + 1) * batch_size, ],\n )\n X_local_minibatch = X_local_minibatch.permute(1, 0, 2)\n y_local_minibatch = torch.max(y_local_minibatch, 1)[1] # 返回每行最大值(gt)的索引\n\n y_pred = model(X_local_minibatch) # fwd the bass (forward pass)\n loss = loss_function(y_pred, y_local_minibatch) # compute loss\n loss.backward() # reeeeewind (backward pass)\n optimizer.step() # parameter update\n train_running_loss += loss.detach().item() # unpacks the tensor into a scalar value\n train_acc += model.get_accuracy(y_pred, y_local_minibatch)\n\n print(\"learning rate: \", optimizer.param_groups[0]['lr'])\n scheduler.step()\n print(\n \"Epoch: %d | NLLoss: %.4f | Train Accuracy: %.2f\"\n % (epoch, train_running_loss / num_batches, train_acc / num_batches)\n )\n\n print(\"Validation ...\") # should this be done every N epochs\n if (epoch + 1) % 5 == 0:\n val_running_loss, val_acc = 0.0, 0.0\n\n # Compute validation loss, accuracy. Use torch.no_grad() & model.eval()\n with torch.no_grad():\n model.eval()\n\n # model.hidden = model.init_hidden()\n for i in range(num_dev_batches):\n X_local_validation_minibatch, y_local_validation_minibatch = (\n dev_X[i * batch_size: (i + 1) * batch_size, ],\n dev_Y[i * batch_size: (i + 1) * batch_size, ],\n )\n X_local_minibatch = X_local_validation_minibatch.permute(1, 0, 2)\n y_local_minibatch = torch.max(y_local_validation_minibatch, 1)[1]\n\n y_pred = model(X_local_minibatch)\n val_loss = loss_function(y_pred, y_local_minibatch)\n\n val_running_loss += (\n val_loss.detach().item()\n ) # unpacks the tensor into a scalar value\n val_acc += model.get_accuracy(y_pred, y_local_minibatch)\n\n model.train() # reset to train mode after iterationg through validation data\n print(\n \"Epoch: %d | NLLoss: %.4f | Train Accuracy: %.2f | Val Loss %.4f | Val Accuracy: %.2f\"\n % (\n epoch,\n train_running_loss / num_batches,\n train_acc / num_batches,\n val_running_loss / num_dev_batches,\n val_acc / num_dev_batches,\n )\n )\n\n epoch_list.append(epoch)\n val_accuracy_list.append(val_acc / num_dev_batches)\n val_loss_list.append(val_running_loss / num_dev_batches)\n\n# torch.save(model, \"./model/tt\")\n'''\nplt.plot(epoch_list, val_loss_list)\nplt.xlabel(\"# of epochs\")\nplt.ylabel(\"Loss\")\nplt.title(\"LSTM: Loss vs # epochs\")\nplt.savefig('graph.png')\nplt.show()\n\nplt.plot(epoch_list, val_accuracy_list, color=\"red\")\nplt.xlabel(\"# of epochs\")\nplt.ylabel(\"Accuracy\")\nplt.title(\"LSTM: Accuracy vs # epochs\")\nplt.savefig('graph_1.png')\nplt.show()\n'''\n# print(\"max val accuracy: \", max(val_acc))\n# torch.save(model, './model/test.pkl')\n", "sub_path": "train_transformer.py", "file_name": "train_transformer.py", "file_ext": "py", "file_size_in_byte": 4752, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torch.device", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.DATA_DIR", "line_number": 11, "usage_type": "attribute"}, {"api_name": "utils.dataloader.load_data", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.dataloader", "line_number": 12, "usage_type": "name"}, {"api_name": "config.BATCH_SIZE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "config.NUM_EPOCHS", "line_number": 15, "usage_type": "attribute"}, {"api_name": "config.LR", "line_number": 16, 
"usage_type": "attribute"}, {"api_name": "config.HIDDEN_SIZE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "config.NUM_LAYERS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "transformer_zero_velocity.Transformer", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.NLLLoss", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.MultiStepLR", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "625464539", "text": "import cv2\nimport numpy as np\nimport dlib\nimport winsound\nimport random\nimport datetime\nimport keyboard\nimport time\nimport csv\nimport pandas as pd\nimport plotly.express as px\nwith open('attention_time_file.cvs', 'w') as fa:\n writer = csv.writer(fa)\n writer.writerow([\"Attention[%]\", \"Time[s]\"])\n\ncap = cv2.VideoCapture(0) ##---change\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\nt = datetime.datetime.now()\n ####################################Timestamp fajl\nentry1 = 1\nentry2 = 1\nstart = 0.0\n\nwhile True:\n _, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = detector(gray)\n isItFace = len(faces)\n voiceNum = random.randint(1, 7) # random broj od 1-7 radi biranja random glasovnog fajla.\n\n #####################################PROVERAVA DA LI JE LICE U VIEWFIELDU, AKO NIJE, BROJI VREME###################\n if isItFace == 0 and entry1 == 1:\n entry1 = 0\n start = time.time()\n print('ne prati')\n ###################################################################################################################\n\n if isItFace==1:\n start=time.time()\n t1 = datetime.datetime.now()\n delta_time = t1 - t\n sec = delta_time.seconds\n with open('attention_time_file.cvs', 'a') as fa:\n writer = csv.writer(fa)\n writer.writerow([100, int(sec)])\n\n for face in faces:\n\n ####################################NALAZI LICE POMOCU DLIB-a###################################################\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n ###############################################################################################################\n landmarks = predictor(gray, face)\n\n ##########Pracenje FaceMap vrednosti, koja je dostupna na gitu u svrhu odrednjivanja polozaja lica#############\n # position-33\n xposCrit33 = landmarks.part(33).x\n # position-1\n xposCrit1 = landmarks.part(1).x\n # position-15\n xposCrit15 = landmarks.part(15).x\n # position-2\n xposCrit2 = landmarks.part(2).x\n # position-14\n xposCrit14 = landmarks.part(14).x\n # postion-3\n xposCrit3 = landmarks.part(3).x\n # position-13\n xposCrit13 = landmarks.part(13).x\n 
##############################################################################################################\n\n #################### Pracenje Facemap Vrednosti za preklapanje tacaka na ocima ################################\n yposCrit37 = landmarks.part(37).y\n yposCrit38 = landmarks.part(38).y\n yposCrit40 = landmarks.part(40).y\n yposCrit41 = landmarks.part(41).y\n yposCrit43 = landmarks.part(43).y\n yposCrit44 = landmarks.part(44).y\n yposCrit46 = landmarks.part(46).y\n yposCrit47 = landmarks.part(47).y\n ###############################################################################################################\n\n ############################PROVERAVA VREDNOSTI I POKLAPANJE KRITICNIH TACAKA LICA############################\n if (xposCrit33 == xposCrit1) or (xposCrit33 == xposCrit3) or (xposCrit33 == xposCrit2) or \\\n (xposCrit33 == xposCrit15) or (xposCrit33 == xposCrit14) or (xposCrit33 == xposCrit13) or \\\n (yposCrit37 == yposCrit41) or (yposCrit38 == yposCrit40) or \\\n (yposCrit43 == yposCrit47) or (yposCrit44 == yposCrit46):\n winsound.PlaySound('glas' + voiceNum.__str__() + '.wav', winsound.SND_ASYNC | winsound.SND_ALIAS)\n cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)\n t1 = datetime.datetime.now()\n delta_time = t1 - t\n sec = delta_time.seconds\n with open('attention_time_file.cvs', 'a') as fa:\n writer = csv.writer(fa)\n writer.writerow([0, int(sec)])\n else:\n t1 = datetime.datetime.now()\n delta_time = t1 - t\n sec = delta_time.seconds\n with open('attention_time_file.cvs', 'a') as fa:\n writer = csv.writer(fa)\n writer.writerow([100, int(sec)])\n\n ###############################################################################################################\n print(face)\n ##############################################68 landmarks na licu, fajl FaceMap.png##########################\n for i in range(0, 68):\n x = landmarks.part(i).x\n y = landmarks.part(i).y\n cv2.circle(frame, (x, y), 3, (0, 255, 0), -1)\n\n cv2.imshow(\"Frame 1\", frame)\n\n #######################################PROVERAVA VREME PROVEDENO VAN VIEWFIELDA####################################\n if time.time() - start > 3: ##CHANGE!\n winsound.PlaySound('glas' + voiceNum.__str__() + '.wav', winsound.SND_ASYNC | winsound.SND_ALIAS)\n t1 = datetime.datetime.now()\n delta_time = t1 - t\n entry1 = 1\n sec = delta_time.seconds\n with open('attention_time_file.cvs', 'a') as fa:\n writer = csv.writer(fa)\n writer.writerow([0, int(sec)])\n ####################################################################################################################\n key = cv2.waitKey(1)\n if key == 27:\n break\n if keyboard.is_pressed('esc'):\n break\n\n\ndf = pd.read_csv('attention_time_file.cvs')\nfig = px.line(df, x='Time[s]', y='Attention[%]', title='Percentage of attention in time')\nfig.show()\n", "sub_path": "main4Endz.py", "file_name": "main4Endz.py", "file_ext": "py", "file_size_in_byte": 5661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "csv.writer", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 16, "usage_type": "call"}, {"api_name": "dlib.get_frontal_face_detector", "line_number": 18, "usage_type": "call"}, {"api_name": "dlib.shape_predictor", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", 
"line_number": 28, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 28, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 47, "usage_type": "call"}, {"api_name": "winsound.PlaySound", "line_number": 93, "usage_type": "call"}, {"api_name": "winsound.SND_ASYNC", "line_number": 93, "usage_type": "attribute"}, {"api_name": "winsound.SND_ALIAS", "line_number": 93, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 102, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 102, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}, {"api_name": "winsound.PlaySound", "line_number": 121, "usage_type": "call"}, {"api_name": "winsound.SND_ASYNC", "line_number": 121, "usage_type": "attribute"}, {"api_name": "winsound.SND_ALIAS", "line_number": 121, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 127, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 130, "usage_type": "call"}, {"api_name": "keyboard.is_pressed", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 137, "usage_type": "call"}, {"api_name": "plotly.express.line", "line_number": 138, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "147708781", "text": "import tensorflow as tf\nimport gradio\nimport matplotlib.pyplot as plt\nimport random\nimport os\nfrom PIL import Image\n\nstyle_predict_path = tf.keras.utils.get_file('style_predict.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite')\nstyle_transform_path = tf.keras.utils.get_file('style_transform.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite')\nmodels = style_predict_path, style_transform_path\n\n\ndef load_img(pil_img):\n filepath = \"tmp/\" + str(random.getrandbits(32)) + '.png'\n pil_img.save(filepath)\n img = tf.io.read_file(filepath)\n img = tf.io.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = img[tf.newaxis, :]\n return img, filepath\n\n\ndef preprocess_image(image, target_dim):\n # Resize the image so that the shorter dimension becomes 256px.\n shape = tf.cast(tf.shape(image)[1:-1], tf.float32)\n short_dim = min(shape)\n scale = target_dim / short_dim\n new_shape = tf.cast(shape * scale, 
tf.int32)\n image = tf.image.resize(image, new_shape)\n\n # Central crop the image.\n image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)\n return image\n\n\n\ndef run_style_predict(preprocessed_style_image, style_predict_path):\n # Load the model.\n interpreter = tf.lite.Interpreter(model_path=style_predict_path)\n\n # Set model input.\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n interpreter.set_tensor(input_details[0][\"index\"], preprocessed_style_image)\n\n # Calculate style bottleneck.\n interpreter.invoke()\n style_bottleneck = interpreter.tensor(\n interpreter.get_output_details()[0][\"index\"]\n )()\n\n return style_bottleneck\n\n\ndef run_style_transform(style_bottleneck, preprocessed_content_image,\n style_transform_path):\n # Load the model.\n interpreter = tf.lite.Interpreter(model_path=style_transform_path)\n\n # Set model input.\n input_details = interpreter.get_input_details()\n interpreter.allocate_tensors()\n\n # Set model inputs.\n interpreter.set_tensor(input_details[0][\"index\"], preprocessed_content_image)\n interpreter.set_tensor(input_details[1][\"index\"], style_bottleneck)\n interpreter.invoke()\n\n # Transform content image.\n stylized_image = interpreter.tensor(\n interpreter.get_output_details()[0][\"index\"]\n )()\n filepath = \"tmp/\" + str(random.getrandbits(32)) + '.png'\n if len(stylized_image.shape) > 3:\n stylized_image = tf.squeeze(stylized_image, axis=0)\n plt.imsave(filepath, stylized_image)\n else:\n plt.imsave(filepath, stylized_image)\n\n stylized_image = Image.open(filepath)\n os.remove(filepath)\n return stylized_image\n\n\ndef predict(content_img, style_img, content_blending_ratio):\n style_predict_path, style_transform_path = models\n content_img, filepath_content = load_img(content_img)\n style_img, filepath_style = load_img(style_img)\n preprocessed_content_image = preprocess_image(content_img, 384)\n preprocessed_style_image = preprocess_image(style_img, 256)\n style_bottleneck = run_style_predict(preprocessed_style_image,\n style_predict_path)\n style_bottleneck_content = run_style_predict(\n preprocess_image(content_img, 256), style_predict_path)\n # content_blending_ratio = 0.25\n style_bottleneck_blended = content_blending_ratio * \\\n style_bottleneck_content + \\\n (1 - content_blending_ratio) * \\\n style_bottleneck\n\n stylized_image_blended = run_style_transform(style_bottleneck_blended,\n preprocessed_content_image,\n style_transform_path)\n\n\n os.remove(filepath_content)\n os.remove(filepath_style)\n\n return stylized_image_blended\n\n\nINPUTS = [gradio.inputs.Image(label=\"Content Image\"),\n gradio.inputs.Image(label=\"Style Image\"), \n gradio.inputs.Slider(0, 1, \"Content\", \"Blending\", \"Ratio\")]\n\nOUTPUTS = gradio.outputs.Image(label=\"Stylized Image\")\nINTERFACE = gradio.Interface(fn=predict, inputs=INPUTS, outputs=OUTPUTS)\nINTERFACE.launch()\n", "sub_path": "gradio-config.py", "file_name": "gradio-config.py", "file_ext": "py", "file_size_in_byte": 4360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "tensorflow.keras.utils.get_file", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.utils.get_file", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 9, "usage_type": "attribute"}, {"api_name": "random.getrandbits", "line_number": 14, "usage_type": 
"call"}, {"api_name": "tensorflow.io.read_file", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.io.decode_image", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.image.convert_image_dtype", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.newaxis", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize_with_crop_or_pad", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.lite.Interpreter", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.lite", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.lite.Interpreter", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.lite", "line_number": 58, "usage_type": "attribute"}, {"api_name": "random.getrandbits", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 80, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 80, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 81, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 106, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 107, "usage_type": "call"}, {"api_name": "gradio.inputs.Image", "line_number": 112, "usage_type": "call"}, {"api_name": "gradio.inputs", "line_number": 112, "usage_type": "attribute"}, {"api_name": "gradio.inputs.Image", "line_number": 113, "usage_type": "call"}, {"api_name": "gradio.inputs", "line_number": 113, "usage_type": "attribute"}, {"api_name": "gradio.inputs.Slider", "line_number": 114, "usage_type": "call"}, {"api_name": "gradio.inputs", "line_number": 114, "usage_type": "attribute"}, {"api_name": "gradio.outputs.Image", "line_number": 116, "usage_type": "call"}, {"api_name": "gradio.outputs", "line_number": 116, "usage_type": "attribute"}, {"api_name": "gradio.Interface", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "169302805", "text": "import os\n\nfrom services.base import BaseService\nfrom utils import construct_auth_bearer\n\n\nclass Reddit(BaseService):\n def exchange_token(self, code, callback_url):\n \"\"\"\n Retrieve the access token given after acquiring authorization code\n\n Detailed 
archived documentation can be found at https://github.com/reddit-archive/reddit/wiki/oauth2\n\n :param code: A one-time use code that may be exchanged for a bearer token\n :type code: str\n :param callback_url: Callback URL from application domain\n :type callback_url: str\n :return: Base service result object containing response data\n :rtype: BaseServiceResult\n \"\"\"\n self.requestor.auth = (REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET)\n return self.post(\n \"access_token\",\n params={\n \"code\": code,\n \"grant_type\": \"authorization_code\",\n \"redirect_uri\": callback_url,\n },\n )\n\n def get_user_post(self, access_token, user_name):\n \"\"\"\n Fetch individual user post by default\n\n :param access_token: Access token acquired to access Reddit API\n :type access_token: str\n :param user_name: Username from Reddit\n :type user_name: str\n :return: Base service result object containing response data\n :rtype: BaseServiceResult\n \"\"\"\n\n def extract_post_data(post_data, alt_text_key=None):\n \"\"\"\n Helper function to extract post data\n\n :param post_data: Received post data information\n :type post_data: dict\n :return: Processed post data information\n :rtype: dict\n \"\"\"\n post_data = post_data[\"data\"]\n return {\n \"message\": post_data[\n \"selftext\" if alt_text_key is None else alt_text_key\n ],\n \"time\": int(post_data[\"created_utc\"]),\n \"id\": post_data[\"id\"],\n }\n\n submitted_url = self.construct_url(\n REDDIT_RESOURCE_API_BASE_URL, \"user\", f\"{user_name}/submitted\"\n )\n submitted_response = self.get(\n submitted_url, headers=construct_auth_bearer(access_token)\n )\n submitted_response.data = list(\n map(\n lambda post: extract_post_data(post),\n submitted_response.data[\"data\"][\"children\"],\n )\n )\n\n comment_url = self.construct_url(\n REDDIT_RESOURCE_API_BASE_URL, \"user\", f\"{user_name}/comments\"\n )\n comment_response = self.get(\n comment_url, headers=construct_auth_bearer(access_token)\n )\n comment_response.data = list(\n map(\n lambda post: extract_post_data(post, \"body\"),\n comment_response.data[\"data\"][\"children\"],\n )\n )\n\n submitted_response.data = submitted_response.data + comment_response.data\n\n return submitted_response\n\n def get_user_profile(self, access_token):\n \"\"\"\n Fetch individual user profile by default\n\n :param access_token: Access token acquired to access Reddit API\n :type access_token: str\n :return: Base service result object containing response data\n :rtype: BaseServiceResult\n \"\"\"\n url = self.construct_url(REDDIT_RESOURCE_API_BASE_URL, \"api/v1\", \"me\")\n return self.get(url, headers=construct_auth_bearer(access_token))\n\n def extract_user_profile(self, data):\n \"\"\"\n Extract user profile data\n\n :param data: User profile data\n :type data: dict\n :return: Processed profile data\n :rtype: dict\n \"\"\"\n return {\"id\": data[\"id\"], \"name\": data[\"name\"]}\n\n\nREDDIT_API_BASE_URL = \"https://www.reddit.com/api/v1\"\nREDDIT_RESOURCE_API_BASE_URL = \"https://oauth.reddit.com/\"\nREDDIT_CLIENT_ID = os.getenv(\"REDDIT_CLIENT_ID\")\nREDDIT_CLIENT_SECRET = os.getenv(\"REDDIT_CLIENT_SECRET\")\n\nRedditService = Reddit(\"reddit\", REDDIT_API_BASE_URL, use_session=True)\n", "sub_path": "server/api/services/reddit.py", "file_name": "reddit.py", "file_ext": "py", "file_size_in_byte": 4076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "services.base.BaseService", "line_number": 7, "usage_type": "name"}, {"api_name": 
"utils.construct_auth_bearer", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.construct_auth_bearer", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.construct_auth_bearer", "line_number": 101, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 117, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "460485194", "text": "import os\nimport time\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport uabRepoPaths\nimport uabUtilreader\nimport util_functions\nimport uabCrossValMaker\nimport bohaoCustom.uabPreprocClasses as bPreproc\nimport uabPreprocClasses\nimport uab_collectionFunctions\nimport uab_DataHandlerFunctions\nfrom bohaoCustom import uabDataReader\nfrom bohaoCustom import uabMakeNetwork\nfrom bohaoCustom import uabMakeNetwork_UNet\n\nRUN_ID = 0\nBATCH_SIZE = 5\nLEARNING_RATE = 1e-5\nINPUT_SIZE = 572\nTILE_SIZE = 5000\nEPOCHS = 40\nNUM_CLASS = 2\nN_TRAIN = 8000\nN_VALID = 1000\nGPU = 1\nDECAY_STEP = 20\nDECAY_RATE = 0.1\nMODEL_NAME = 'inria_loo_mtl_retrain_finetune_{}_{}'\nSFN = 32\nLEAVE_CITY = 0\nPRE_TRAINED_DIR = r'/hdd6/Models/Inria_Domain_LOO/UnetCrop_inria_aug_leave_0_0_PS(572, 572)_BS5_' \\\n r'EP100_LR0.0001_DS60_DR0.1_SFN32'\n\n\nclass UnetPredictRetrain(uabMakeNetwork_UNet.UnetModelPredict):\n def __init__(self, inputs, trainable, input_size, model_name='', dropout_rate=None,\n learn_rate=1e-4, decay_step=60, decay_rate=0.1, epochs=100,\n batch_size=5, start_filter_num=32):\n uabMakeNetwork.Network.__init__(self, inputs, trainable, dropout_rate,\n learn_rate, decay_step, decay_rate, epochs, batch_size)\n self.name = 'UnetPredictRetrain'\n self.model_name = self.get_unique_name(model_name)\n self.sfn = start_filter_num\n self.learning_rate = None\n self.valid_cross_entropy = tf.placeholder(tf.float32, [])\n self.valid_iou = tf.placeholder(tf.float32, [])\n self.valid_images = tf.placeholder(tf.uint8, shape=[None, input_size[0],\n input_size[1] * 3, 3], name='validation_images')\n self.update_ops = None\n self.config = None\n\n def create_graph(self, x_name, class_num, start_filter_num=32):\n self.class_num = class_num\n sfn = self.sfn\n\n # downsample\n conv1, pool1 = self.conv_conv_pool(self.inputs[x_name], [sfn, sfn], self.trainable, name='conv1',\n padding='valid', dropout=self.dropout_rate)\n conv2, pool2 = self.conv_conv_pool(pool1, [sfn*2, sfn*2], self.trainable, name='conv2',\n padding='valid', dropout=self.dropout_rate)\n conv3, pool3 = self.conv_conv_pool(pool2, [sfn*4, sfn*4], self.trainable, name='conv3',\n padding='valid', dropout=self.dropout_rate)\n conv4, pool4 = self.conv_conv_pool(pool3, [sfn*8, sfn*8], self.trainable, name='conv4',\n padding='valid', dropout=self.dropout_rate)\n self.encoding = self.conv_conv_pool(pool4, [sfn*16, sfn*16], self.trainable, name='conv5', pool=False,\n padding='valid', dropout=self.dropout_rate)\n\n # upsample\n up6 = self.crop_upsample_concat(self.encoding, conv4, 8, name='6')\n conv6 = self.conv_conv_pool(up6, [sfn*8, sfn*8], self.trainable, name='up6', pool=False,\n padding='valid', dropout=self.dropout_rate)\n up7 = self.crop_upsample_concat(conv6, conv3, 32, name='7')\n conv7 = self.conv_conv_pool(up7, [sfn*4, sfn*4], self.trainable, name='up7', pool=False,\n padding='valid', dropout=self.dropout_rate)\n up8 = self.crop_upsample_concat(conv7, conv2, 80, name='8')\n conv8 = self.conv_conv_pool(up8, [sfn*2, sfn*2], self.trainable, name='up8', pool=False,\n padding='valid', dropout=self.dropout_rate)\n up9 = 
self.crop_upsample_concat(conv8, conv1, 176, name='9')\n conv9 = self.conv_conv_pool(up9, [sfn, sfn], self.trainable, name='up9', pool=False,\n padding='valid', dropout=self.dropout_rate)\n\n self.pred = tf.layers.conv2d(conv9, class_num, (1, 1), name='final', activation=None, padding='same')\n self.output = tf.nn.softmax(self.pred)\n\n def make_optimizer(self, train_var_filter):\n with tf.control_dependencies(self.update_ops):\n if train_var_filter is None:\n hard_optm = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss,\n global_step=self.global_step)\n soft_optm = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss,\n global_step=None)\n self.optimizer = [hard_optm, soft_optm]\n\n def make_loss(self, y_name, loss_type='xent', **kwargs):\n with tf.variable_scope('loss'):\n pred_flat = tf.reshape(self.pred, [-1, self.class_num])\n _, w, h, _ = self.inputs[y_name].get_shape().as_list()\n y = tf.image.resize_image_with_crop_or_pad(self.inputs[y_name], w-self.get_overlap(), h-self.get_overlap())\n y_flat = tf.reshape(tf.squeeze(y, axis=[3]), [-1, ])\n indices = tf.squeeze(tf.where(tf.less_equal(y_flat, self.class_num - 1)), 1)\n gt = tf.gather(y_flat, indices)\n prediction = tf.gather(pred_flat, indices)\n\n pred = tf.argmax(prediction, axis=-1, output_type=tf.int32)\n intersect = tf.cast(tf.reduce_sum(gt * pred), tf.float32)\n union = tf.cast(tf.reduce_sum(gt), tf.float32) + tf.cast(tf.reduce_sum(pred), tf.float32) \\\n - tf.cast(tf.reduce_sum(gt * pred), tf.float32)\n self.loss_iou = tf.convert_to_tensor([intersect, union])\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt))\n\n def make_update_ops(self, x_name, y_name):\n tf.add_to_collection('inputs', self.inputs[x_name])\n tf.add_to_collection('inputs', self.inputs[y_name])\n tf.add_to_collection('outputs', self.pred)\n self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n def make_summary(self, hist=False):\n if hist:\n tf.summary.histogram('Predicted Prob', tf.argmax(tf.nn.softmax(self.pred), 1))\n tf.summary.scalar('Cross Entropy', self.loss)\n tf.summary.scalar('learning rate', self.learning_rate)\n self.summary = tf.summary.merge_all()\n\n def train_config(self, x_name, y_name, n_train, n_valid, patch_size, ckdir, loss_type='xent',\n train_var_filter=None, hist=False, par_dir=None, **kwargs):\n self.make_loss(y_name, loss_type, **kwargs)\n self.make_learning_rate(n_train)\n self.make_update_ops(x_name, y_name)\n self.make_optimizer(train_var_filter)\n self.make_ckdir(ckdir, patch_size, par_dir)\n self.make_summary(hist)\n self.config = tf.ConfigProto(allow_soft_placement=True)\n self.n_train = n_train\n self.n_valid = n_valid\n\n def train(self, x_name, y_name, y_name2, n_train, sess, summary_writer, n_valid=1000,\n train_reader=None, train_reader_building=None, valid_reader=None,\n image_summary=None, verb_step=100, save_epoch=5,\n img_mean=np.array((0, 0, 0), dtype=np.float32),\n continue_dir=None, valid_iou=False):\n # define summary operations\n valid_cross_entropy_summary_op = tf.summary.scalar('xent_validation', self.valid_cross_entropy)\n valid_iou_summary_op = tf.summary.scalar('iou_validation', self.valid_iou)\n valid_image_summary_op = tf.summary.image('Validation_images_summary', self.valid_images,\n max_outputs=10)\n\n if continue_dir is not None and os.path.exists(continue_dir):\n self.load(continue_dir, sess)\n gs = sess.run(self.global_step)\n start_epoch = int(np.ceil(gs/n_train*self.bs))\n start_step = gs - 
int(start_epoch*n_train/self.bs)\n else:\n start_epoch = 0\n start_step = 0\n\n cross_entropy_valid_min = np.inf\n iou_valid_max = 0\n for epoch in range(start_epoch, self.epochs):\n start_time = time.time()\n for step in range(start_step, n_train, self.bs):\n X_batch, y_batch = train_reader.readerAction(sess)\n _, self.global_step_value = sess.run([self.optimizer[0], self.global_step],\n feed_dict={self.inputs[x_name]:X_batch,\n self.inputs[y_name]:y_batch,\n self.trainable: True})\n X_batch_retrain, y_batch_retrain = train_reader_building.readerAction(sess)\n _, self.global_step_value = sess.run([self.optimizer[1], self.global_step],\n feed_dict={self.inputs[x_name]: X_batch_retrain,\n self.inputs[y_name]: y_batch_retrain,\n self.trainable: True})\n if self.global_step_value % verb_step == 0:\n step_cross_entropy, step_summary = \\\n sess.run([self.loss, self.summary],\n feed_dict={self.inputs[x_name]: X_batch, self.inputs[y_name]: y_batch,\n self.trainable: False})\n summary_writer.add_summary(step_summary, self.global_step_value)\n print('Epoch {:d} step {:d}\\tcross entropy = {:.3f}'.\n format(epoch, self.global_step_value, step_cross_entropy))\n # validation\n cross_entropy_valid_mean = []\n iou_valid_mean = np.zeros(2)\n X_batch_val, y_batch_val, pred_valid = None, None, None\n for step in range(0, n_valid, self.bs):\n X_batch_val, y_batch_val = valid_reader.readerAction(sess)\n pred_valid, cross_entropy_valid, iou_valid = sess.run([self.pred, self.loss, self.loss_iou],\n feed_dict={self.inputs[x_name]: X_batch_val,\n self.inputs[y_name]: y_batch_val,\n self.trainable: False})\n cross_entropy_valid_mean.append(cross_entropy_valid)\n iou_valid_mean += iou_valid\n cross_entropy_valid_mean = np.mean(cross_entropy_valid_mean)\n iou_valid_mean = iou_valid_mean[0] / iou_valid_mean[1]\n duration = time.time() - start_time\n if valid_iou:\n print('Validation IoU: {:.3f}, duration: {:.3f}'.format(iou_valid_mean, duration))\n else:\n print('Validation cross entropy: {:.3f}, duration: {:.3f}'.format(cross_entropy_valid_mean,\n duration))\n valid_cross_entropy_summary = sess.run(valid_cross_entropy_summary_op,\n feed_dict={self.valid_cross_entropy: cross_entropy_valid_mean})\n valid_iou_summary = sess.run(valid_iou_summary_op,\n feed_dict={self.valid_iou: iou_valid_mean})\n summary_writer.add_summary(valid_cross_entropy_summary, self.global_step_value)\n summary_writer.add_summary(valid_iou_summary, self.global_step_value)\n if valid_iou:\n if iou_valid_mean > iou_valid_max:\n iou_valid_max = iou_valid_mean\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)\n saver.save(sess, '{}/best_model.ckpt'.format(self.ckdir))\n\n else:\n if cross_entropy_valid_mean < cross_entropy_valid_min:\n cross_entropy_valid_min = cross_entropy_valid_mean\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)\n saver.save(sess, '{}/best_model.ckpt'.format(self.ckdir))\n\n if image_summary is not None:\n valid_image_summary = sess.run(valid_image_summary_op,\n feed_dict={self.valid_images:\n image_summary(X_batch_val[:,:,:,:3], y_batch_val, pred_valid,\n img_mean)})\n summary_writer.add_summary(valid_image_summary, self.global_step_value)\n\n if epoch % save_epoch == 0:\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)\n saver.save(sess, '{}/model_{}.ckpt'.format(self.ckdir, epoch), global_step=self.global_step)\n\n\ndef read_flag():\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch-size', default=BATCH_SIZE, type=int, help='batch size 
(10)')\n parser.add_argument('--learning-rate', type=float, default=LEARNING_RATE, help='learning rate (1e-3)')\n parser.add_argument('--input-size', default=INPUT_SIZE, type=int, help='input size 224')\n parser.add_argument('--tile-size', default=TILE_SIZE, type=int, help='tile size 5000')\n parser.add_argument('--epochs', default=EPOCHS, type=int, help='# epochs (1)')\n parser.add_argument('--num-classes', type=int, default=NUM_CLASS, help='# classes (including background)')\n parser.add_argument('--n-train', type=int, default=N_TRAIN, help='# samples per epoch')\n parser.add_argument('--n-valid', type=int, default=N_VALID, help='# patches to valid')\n parser.add_argument('--GPU', type=str, default=GPU, help=\"GPU used for computation.\")\n parser.add_argument('--decay-step', type=float, default=DECAY_STEP, help='Learning rate decay step in number of epochs.')\n parser.add_argument('--decay-rate', type=float, default=DECAY_RATE, help='Learning rate decay rate')\n parser.add_argument('--model-name', type=str, default=MODEL_NAME, help='Model name')\n parser.add_argument('--run-id', type=str, default=RUN_ID, help='id of this run')\n parser.add_argument('--sfn', type=int, default=SFN, help='filter number of the first layer')\n parser.add_argument('--leave-city', type=int, default=LEAVE_CITY, help='city id to leave-out in training')\n\n flags = parser.parse_args()\n flags.input_size = (flags.input_size, flags.input_size)\n flags.tile_size = (flags.tile_size, flags.tile_size)\n flags.model_name = flags.model_name.format(flags.leave_city, flags.run_id)\n return flags\n\n\ndef main(flags):\n # make network\n # define place holder\n X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')\n y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')\n y2 = tf.placeholder(tf.float32, shape=[None, 1], name='y2')\n mode = tf.placeholder(tf.bool, name='mode')\n model = UnetPredictRetrain({'X':X, 'Y':y},\n trainable=mode,\n model_name=flags.model_name,\n input_size=flags.input_size,\n batch_size=flags.batch_size,\n learn_rate=flags.learning_rate,\n decay_step=flags.decay_step,\n decay_rate=flags.decay_rate,\n epochs=flags.epochs,\n start_filter_num=flags.sfn)\n model.create_graph('X', class_num=flags.num_classes)\n\n # create collection\n # the original file is in /ei-edl01/data/uab_datasets/inria\n blCol = uab_collectionFunctions.uabCollection('inria')\n opDetObj = bPreproc.uabOperTileDivide(255) # inria GT has value 0 and 255, we map it back to 0 and 1\n # [3] is the channel id of GT\n rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)\n rescObj.run(blCol)\n img_mean = blCol.getChannelMeans([0, 1, 2]) # get mean of rgb info\n\n # extract patches\n extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],\n cSize=flags.input_size,\n numPixOverlap=int(model.get_overlap()),\n extSave=['jpg', 'jpg', 'jpg', 'png'],\n isTrain=True,\n gtInd=3,\n pad=model.get_overlap()/2)\n patchDir = extrObj.run(blCol)\n\n # make data reader\n # use uabCrossValMaker to get fileLists for training and validation\n idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')\n idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')\n idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]\n # use first city for validation\n filter_train = []\n filter_valid = []\n for i in range(5):\n for j in range(1, 37):\n if i != 
flags.leave_city and j > 5:\n filter_train.append(j * 10 + i)\n elif i == flags.leave_city and j <= 5:\n filter_valid.append(j * 10 + i)\n # use first city for validation\n file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train)\n file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)\n\n dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_train, flags.input_size,\n flags.batch_size, dataAug='flip,rotate',\n block_mean=np.append([0], img_mean), batch_code=0)\n dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,\n flags.batch_size, dataAug='flip,rotate',\n block_mean=np.append([0], img_mean), batch_code=0)\n\n blCol = uab_collectionFunctions.uabCollection('inria_unet_retrain')\n img_mean = blCol.getChannelMeans([1, 2, 3]) # get mean of rgb info\n\n # extract patches\n extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],\n cSize=flags.input_size,\n numPixOverlap=int(model.get_overlap()),\n extSave=['png', 'jpg', 'jpg', 'jpg'],\n isTrain=True,\n gtInd=3,\n pad=model.get_overlap() / 2)\n patchDir = extrObj.run(blCol)\n\n # make data reader\n # use uabCrossValMaker to get fileLists for training and validation\n idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')\n idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')\n idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]\n # use first city for validation\n filter_train = []\n filter_valid = []\n for i in range(5):\n for j in range(1, 37):\n if i != flags.leave_city and j > 5:\n filter_train.append(j * 10 + i)\n elif i == flags.leave_city and j > 5:\n filter_valid.append(j * 10 + i)\n # use first city for validation\n file_list_retrain = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)\n # no augmentation needed for validation\n dataReader_retrain = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_retrain, flags.input_size,\n flags.batch_size, dataAug='flip,rotate',\n block_mean=np.append([0], img_mean), batch_code=0)\n\n # train\n start_time = time.time()\n\n model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,\n loss_type='xent', par_dir='Inria_Domain_LOO')\n model.run(train_reader=dataReader_train,\n train_reader_building=dataReader_retrain,\n valid_reader=dataReader_valid,\n pretrained_model_dir=PRE_TRAINED_DIR,\n isTrain=True,\n img_mean=img_mean,\n verb_step=100, # print a message every 100 step(sample)\n save_epoch=5, # save the model every 5 epochs\n gpu=GPU,\n tile_size=flags.tile_size,\n patch_size=flags.input_size)\n\n duration = time.time() - start_time\n print('duration {:.2f} hours'.format(duration/60/60))\n\n\nif __name__ == '__main__':\n flags = read_flag()\n main(flags)\n", "sub_path": "]tasks/2018.06.28.mtl_semi_unsupervised/train_inria_unet_retrain.py", "file_name": "train_inria_unet_retrain.py", "file_ext": "py", "file_size_in_byte": 21224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "bohaoCustom.uabMakeNetwork_UNet.UnetModelPredict", "line_number": 37, "usage_type": "attribute"}, {"api_name": "bohaoCustom.uabMakeNetwork_UNet", "line_number": 37, "usage_type": "name"}, {"api_name": "bohaoCustom.uabMakeNetwork.Network.__init__", "line_number": 41, "usage_type": "call"}, {"api_name": "bohaoCustom.uabMakeNetwork.Network", 
"line_number": 41, "usage_type": "attribute"}, {"api_name": "bohaoCustom.uabMakeNetwork", "line_number": 41, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.uint8", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.control_dependencies", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.image.resize_image_with_crop_or_pad", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 100, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.less_equal", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tensorflow.add_to_collection", 
"line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.add_to_collection", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.add_to_collection", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.get_collection", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.GraphKeys", "line_number": 117, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 124, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 141, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 144, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 145, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 158, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 193, "usage_type": "call"}, {"api_name": "time.time", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 210, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 216, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 216, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables", "line_number": 216, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 227, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 227, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables", "line_number": 227, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 259, "usage_type": "call"}, {"api_name": 
"tensorflow.float32", "line_number": 259, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 260, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 260, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 261, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 261, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 262, "usage_type": "call"}, {"api_name": "tensorflow.bool", "line_number": 262, "usage_type": "attribute"}, {"api_name": "uab_collectionFunctions.uabCollection", "line_number": 277, "usage_type": "call"}, {"api_name": "bohaoCustom.uabPreprocClasses.uabOperTileDivide", "line_number": 278, "usage_type": "call"}, {"api_name": "bohaoCustom.uabPreprocClasses", "line_number": 278, "usage_type": "name"}, {"api_name": "uabPreprocClasses.uabPreprocMultChanOp", "line_number": 280, "usage_type": "call"}, {"api_name": "uab_DataHandlerFunctions.uabPatchExtr", "line_number": 285, "usage_type": "call"}, {"api_name": "uabCrossValMaker.uabUtilGetFolds", "line_number": 296, "usage_type": "call"}, {"api_name": "uabCrossValMaker.uabUtilGetFolds", "line_number": 297, "usage_type": "call"}, {"api_name": "uabCrossValMaker.make_file_list_by_key", "line_number": 309, "usage_type": "call"}, {"api_name": "uabCrossValMaker.make_file_list_by_key", "line_number": 310, "usage_type": "call"}, {"api_name": "bohaoCustom.uabDataReader.ImageLabelReader", "line_number": 312, "usage_type": "call"}, {"api_name": "bohaoCustom.uabDataReader", "line_number": 312, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 314, "usage_type": "call"}, {"api_name": "bohaoCustom.uabDataReader.ImageLabelReader", "line_number": 315, "usage_type": "call"}, {"api_name": "bohaoCustom.uabDataReader", "line_number": 315, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 317, "usage_type": "call"}, {"api_name": "uab_collectionFunctions.uabCollection", "line_number": 319, "usage_type": "call"}, {"api_name": "uab_DataHandlerFunctions.uabPatchExtr", "line_number": 323, "usage_type": "call"}, {"api_name": "uabCrossValMaker.uabUtilGetFolds", "line_number": 334, "usage_type": "call"}, {"api_name": "uabCrossValMaker.uabUtilGetFolds", "line_number": 335, "usage_type": "call"}, {"api_name": "uabCrossValMaker.make_file_list_by_key", "line_number": 347, "usage_type": "call"}, {"api_name": "bohaoCustom.uabDataReader.ImageLabelReader", "line_number": 349, "usage_type": "call"}, {"api_name": "bohaoCustom.uabDataReader", "line_number": 349, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 351, "usage_type": "call"}, {"api_name": "time.time", "line_number": 354, "usage_type": "call"}, {"api_name": "uabRepoPaths.modelPath", "line_number": 356, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 370, "usage_type": "call"}]} +{"seq_id": "625379327", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nimport requests\nimport json\nimport numpy as np\nimport pymongo\nfrom bson.objectid import ObjectId\nfrom django.conf import settings\n\nhostaddress = settings.DB_ADDR\nhostport = settings.DB_PORT\ndbname = settings.DB_NAME\n\ngtwhost = settings.GTW_HOST\ngtwport = settings.GTW_PORT\n\nhrahost = settings.HRA_HOST\nhraport = settings.HRA_PORT\n\n# Versi Development\nclient2 = pymongo.MongoClient('mongodb://%s:%s/%s'\\\n\t%(hostaddress,hostport,dbname))\nnDB = client2[dbname]\n\nclient = 
pymongo.MongoClient('mongodb://%s:%s/%s'\\\n\t%(hostaddress,hostport,dbname))\ndb = client[dbname]\n\n\ndef homepage(response):\n args = get_topics(response)\n return render(response, \"main/index.html\", args)\n\n\ndef arrhytmia(response):\n if response.method == \"POST\":\n args = check_topic_data(response)\n if args[\"error\"] == \"true\":\n return render(response, \"main/index.html\", args)\n else:\n topic = args[\"topic\"]\n args = arrhytmia_getdata(response, args[\"topic\"])\n key = arrhytmia_get_history(response, topic)\n args[\"keys\"] = key\n return render(response, \"main/ecgdashboard.html\", args)\n # arrhytmia_process(response)\n\n\ndef arrhytmia_get_history(response, topic):\n data = response.user\n user = data.username\n conn = db[user]\n history = conn.find({\n \"analytic_type\": \"arrhytmia\",\n \"topic\": topic\n }, {\n \"analytic_type\": 0,\n \"arrhytmia\": 0,\n \"topic\": 0,\n \"created\": 0\n })\n key = {}\n historys = list(history)\n if len(historys) > 0:\n for history in historys:\n key[history[\"key\"]] = history[\"_id\"]\n return key\n\n\ndef arrhytmia_getdata(response, topic):\n data = response.user\n user = data.username\n conn = db[user]\n\n args = {}\n args[\"topic\"] = topic\n args[\"user\"] = user\n filter = {\n \"status\": {\n \"$exists\": False\n },\n \"analytic_type\": {\n \"$exists\": False\n },\n \"payload.ecg\": {\n \"$exists\": True\n },\n \"topic\": topic\n }\n count = conn.find(filter).count()\n if count > 499:\n start = conn.find(filter).sort(\"_id\", 1).limit(1)\n datas = list(start)[0]\n idstart = datas['_id']\n start = datas['time']\n end = conn.find(filter).sort(\"_id\", -1).limit(1)\n datae = list(end)[0]\n idend = datae['_id']\n end = datae['time']\n args[\"start\"] = start\n args[\"end\"] = end\n args[\"idstart\"] = idstart\n args[\"idend\"] = idend\n args[\"error\"] = \"\"\n else:\n args[\"start\"] = \"\"\n args[\"end\"] = \"\"\n args[\"idstart\"] = \"\"\n args[\"idend\"] = \"\"\n args[\"error\"] = \"true\"\n args[\"message\"] = \"There is not enough data to classify!\"\n\n return args\n\n\ndef arrhytmia_history(response):\n if response.method == \"POST\":\n data = response.user\n username = data.username\n age = data.age\n topic = response.POST.get(\"topic\")\n key = response.POST.get(\"selectdata\")\n uid = response.POST.get(key)\n key_split = key.split(\" : \")\n start = key_split[0]\n end = key_split[1]\n args = get_hasil(username, uid, age)\n args[\"topic\"] = topic\n args[\"start\"] = start\n args[\"end\"] = end\n return render(response, \"main/details.html\", args)\n\n\ndef arrhytmia_process(response):\n if response.method == \"POST\":\n data = response.user\n username = data.username\n age = data.age\n topic = response.POST.get(\"topic\")\n start = response.POST.get(\"idstart\")\n end = response.POST.get(\"idend\")\n args = {\n \"user\": username,\n \"start\": start,\n \"end\": end,\n \"topic\": topic\n }\n conn = requests.post(\n \"http://3.1.49.16/analytic/arrhytmia\",\n json=json.dumps(args),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'}\n\n )\n data = json.loads(conn.text)\n if data['status'] == \"OK\":\n args = get_hasil(username, data['id'], age)\n args[\"topic\"] = topic\n args[\"start\"] = start\n args[\"end\"] = end\n return render(response, \"main/details.html\", args)\n else:\n return render(response, \"main/404.html\")\n\n\ndef heart_rate(response):\n if response.method == \"POST\":\n args = check_topic_data(response)\n if args[\"error\"] == \"true\":\n return render(response, \"main/index.html\", args)\n 
else:\n args = heart_rate_process(response, args[\"topic\"])\n return render(response, \"main/heart_rate/home.html\", args)\n # payload = {\n # \"status\" : \"OK\",\n # }\n # payload = json.dumps(payload)\n # return HttpResponse(payload, content_type='application/json')\n\n\ndef heart_rate_process(response, topic):\n data = response.user\n username = data.username\n uid = data.id\n age = data.age\n args = {\n \"user\": username,\n \"uid\": uid,\n \"age\": age,\n \"topic\": topic\n }\n conn = requests.post(\n \"http://{}:{}/analytic/hr/dashboard\".format(hrahost,hraport),\n json=json.dumps(args),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'}\n )\n data = json.loads(conn.text)\n print(data)\n print(type(data))\n args[\"created\"] = data['user']['time']['created']\n args[\"now\"] = data['user']['time']['now']\n args[\"status\"] = data['status']\n args[\"topic\"] = topic\n\n return args\n\n\ndef trend_process(response):\n data = {}\n args = {}\n datauser = response.user\n username = datauser.username\n age = datauser.age\n uid = datauser.id\n data['user'] = username\n data['age'] = age\n data['uid'] = uid\n if response.method == \"POST\":\n data['type'] = response.POST.get(\"type\")\n data['topic'] = response.POST.get(\"topic\")\n if data['type'] == \"track\":\n data['date'] = \"\"\n else:\n data['date'] = response.POST.get(\"date\")\n conn = requests.post(\n \"http://{}:{}/analytic/hr/process\".format(hrahost,hraport), \n json=json.dumps(data),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'}\n )\n data = json.loads(conn.text)\n args[\"user\"] = data['user']\n args[\"status\"] = data['status']\n args[\"trend\"] = data['trend']\n args[\"topic\"] = response.POST.get(\"topic\")\n return render(response, \"main/heart_rate/home.html\", args)\n\n\ndef article_summaries(response, status):\n if status == \"slow\":\n return render(response, \"main/heart_rate/article_summaries/slow.html\")\n else:\n return render(response, \"main/heart_rate/article_summaries/fast.html\")\n\n\ndef get_topics(response):\n userdata = response.user\n uid = userdata.id\n topics_conn = nDB[\"topics\"]\n topics = topics_conn.find({\n \"user_id\": uid\n })\n payload = []\n args = {}\n for topic in topics:\n payload.append(topic['apiendpoint'])\n args['topics'] = payload\n args['error'] = \"\"\n args['error_msg'] = \"\"\n return args\n\n\ndef check_topic_data(response):\n user = str(response.user)\n analytic_type = response.POST.get(\"analytictype\")\n topic = response.POST.get(\"selecttopic\")\n\n if topic == \"0\":\n args = get_topics(response)\n args['error'] = \"true\"\n args['error_msg'] = \"Please select topic to analyze!!!\"\n print(\"topic 0\")\n else:\n args = get_topics(response)\n payload = {\n \"user\" : user,\n \"analytic_type\" : analytic_type,\n \"topic\" : topic\n }\n\n conn = requests.post(\n \"http://{}:{}/api/countdata\".format(gtwhost,gtwport),\n json=json.dumps(payload),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'\n }\n )\n\n if conn.status_code:\n data = json.loads(conn.text)\n data_length = data[\"data_length\"]\n else:\n data_length = 0\n \n if data_length <= 0:\n args['error'] = \"true\"\n args['error_msg'] = \"There is no data to analyze!!!\"\n # if analytic_type == \"arrhytmia\":\n # args['error'] = \"true\"\n # args['error_msg'] = \"There is no data to analyze!!!\"\n # print(\"topic 0 arrhytmia\")\n # else:\n # args['error'] = \"true\"\n # args['error_msg'] = \"There is no data to analyze!!!\"\n # print(\"topic 0 hr\")\n args[\"topic\"] = topic\n 
return args\n\n\ndef get_hasil(nama, id, umur):\n connection = nDB[nama]\n query_result = connection.find_one({\"_id\": ObjectId(id)})\n # query_result = connection.find(\n # {\"analytic_type\": \"arrhytmia\"},\n # {\"analytic_type\": 0, \"_id\": 0, \"processed\": 0}\n # )\n\n # query_result = list(query_result)[0]['arrhytmia']\n data = query_result[\"arrhytmia\"]['data']\n hasil = query_result[\"arrhytmia\"]['hasil']\n\n umur = umur\n created = query_result[\"created\"]\n created = created.strftime(\"%a, %d %b %Y %H:%M:%S\")\n\n ecg = data[\"ecg\"]\n ts = data[\"timeseries\"]\n ecg_ts = []\n # ts + ecg\n for count, i in enumerate(ecg):\n ecg_ts.append([ts[count], i])\n\n filtered = data[\"filtered\"]\n filtered_ts = []\n # ts + filtered\n for count, i in enumerate(filtered):\n filtered_ts.append([ts[count], i])\n\n # hasil\n result = hasil[\"hasil\"]\n PVC = []\n PAB = []\n RBB = []\n LBB = []\n APC = []\n VEB = []\n for key in result.keys():\n if len(result[key]) > 0:\n tmp = result[key]\n for x in tmp:\n if key == \"PVC\":\n PVC.append([ts[x[0]], ts[x[1]]])\n elif key == \"PAB\":\n PAB.append([ts[x[0]], ts[x[1]]])\n elif key == \"RBB\":\n RBB.append([ts[x[0]], ts[x[1]]])\n elif key == \"LBB\":\n LBB.append([ts[x[0]], ts[x[1]]])\n elif key == \"APC\":\n APC.append([ts[x[0]], ts[x[1]]])\n elif key == \"VEB\":\n VEB.append([ts[x[0]], ts[x[1]]])\n result = {\n \"APC\": APC,\n \"LBB\": LBB,\n \"PAB\": PAB,\n \"PVC\": PVC,\n \"RBB\": RBB,\n \"VEB\": VEB\n }\n\n # get value ts by rpeaks index\n rpeaks = hasil[\"rpeaks\"]\n rpeaks = np.array(rpeaks)\n ts_tmp = np.array(ts)\n rpeaks = ts_tmp[rpeaks]\n rpeaks = rpeaks.tolist()\n\n hr = hasil[\"heart_rate\"]\n hr_template = hasil[\"hr_template\"]\n\n tmp = {\n \"status\": \"OK\",\n \"result\": {\n \"nama\": nama,\n \"umur\": umur,\n \"data\": ecg_ts,\n \"hasil\": result,\n \"filtered\": filtered_ts,\n \"hr_template\": hr_template,\n \"rpeaks\": rpeaks,\n \"hr\": hr\n }\n }\n\n args = {}\n\n args['data'] = ecg_ts\n args['filtered'] = filtered_ts\n args['hasil'] = result\n args['hr'] = hr\n args['hr_template'] = hr_template\n args['rpeaks'] = rpeaks\n args['nama'] = nama\n args['umur'] = umur\n args['created'] = created\n\n return args\n\n\ndef get_data_ecg(user):\n data = False\n connection = db[user]\n print(\"Connected to collection : \", user)\n\n id = connection.find({\"payload.ecg\": {\"$exists\": True}}, {\"payload\": 0})\n\n id = list(id)\n if id:\n id = id[0]['_id']\n cursor = connection.aggregate(\n [\n {\"$match\": {\"_id\": ObjectId(id)}},\n {\"$project\": {\n \"raw\": '$payload.ecg',\n \"_id\": 0\n }}\n ]\n )\n data = list(cursor)\n print(\"Getting payload from collection\")\n data = data[0]['raw']\n return data\n\n\n# From JAY, old version 2\n# def homepage(response):\n# #urlAnalytic = \"http://3.1.218.130:5000/\"\n\n# #responAnalytic = requests.request(\"GET\", urlAnalytic)\n# #responAnalytic = responAnalytic.json()\n# #if responAnalytic['status'] == \"OK\":\n# # args = {}\n# # data = []\n# # col = db[\"pasien\"]\n# # pasien = col.find({})\n# # for i in pasien:\n# # tmp = {\n# # \"nama\": i[\"nama\"],\n# # \"umur\": i[\"umur\"]\n# # }\n# # data.append(tmp)\n# # args['data'] = data\n# # return render(response, \"main/index.html\", args)\n# #else:\n# # return render(response, \"main/404.html\")\n# # args = {}\n# # data = []\n# # col = db[\"pasien\"]\n# # pasien = col.find({})\n# # for i in pasien:\n# # tmp = {\n# # \"nama\": i[\"nama\"],\n# # \"umur\": i[\"umur\"]\n# # }\n# # data.append(tmp)\n# # args['data'] = data\n# # return 
render(response, \"main/index.html\", args)\n# payload = response.user\n# payload = json.dumps({\n# \"id\" : payload.id\n# })\n# return HttpResponse(payload, content_type='application/json')\n\n# def analytic(response, username):\n# urlAnalytic = \"http://3.1.218.130:5000/requestAnalysis/{}\".format(username)\n# responAnalytic = requests.request(\"GET\", urlAnalytic)\n# responAnalytic = responAnalytic.json()\n# if responAnalytic['status'] == \"OK\":\n# args = getHasil(username)\n# return render(response, \"main/details.html\", args)\n# else:\n# return render(response, \"main/404.html\")\n\n# def getHasil(name):\n# errorMsg = \"\"\n# count = 0\n\n# col = db[\"data\"]\n# col1 = db[\"pasien\"]\n# col2 = db[\"hasil\"]\n\n# check = []\n\n\n# check.append(col.find({\"nama\":name}).count())\n# check.append(col1.find({\"nama\":name}).count())\n# check.append(col2.find({\"nama\":name}).count())\n# if check.count(1) < 3:\n# for count,i in enumerate(check):\n# if i == 0:\n# if count == 0:\n# errorMsg = errorMsg + \" Data in Data Collection Not Found.\"\n# elif count == 1:\n# errorMsg = errorMsg + \" Data in Pasien Collection Not Found.\"\n# else:\n# errorMsg = errorMsg + \" Data in Hasil Collection Not Found.\"\n# tmp = {\n# \"status\":\"ERROR\",\n# \"message\":errorMsg\n# }\n\n# else:\n# data = col.find_one({\"nama\":name})\n# pasien = col1.find_one({\"nama\":name})\n# hasil = col2.find_one({\"nama\":name})\n\n# nama = name\n# umur = pasien[\"umur\"]\n# ecg = data[\"data\"]\n# ts = data[\"timeseries\"]\n# ecg_ts = []\n# # ts + ecg\n# for count,i in enumerate(ecg):\n# ecg_ts.append([ts[count],i])\n\n# filtered = data[\"filtered\"]\n# filtered_ts = []\n# # ts + filtered\n# for count,i in enumerate(filtered):\n# filtered_ts.append([ts[count],i])\n\n# # hasil\n# result = hasil[\"hasil\"]\n# PVC = []\n# PAB = []\n# RBB = []\n# LBB = []\n# APC = []\n# VEB = []\n# for key in result.keys():\n# if len(result[key]) > 0 :\n# tmp = result[key]\n# for x in tmp:\n# if key == \"PVC\":\n# PVC.append([ts[x[0]],ts[x[1]]])\n# elif key == \"PAB\":\n# PAB.append([ts[x[0]],ts[x[1]]])\n# elif key == \"RBB\":\n# RBB.append([ts[x[0]],ts[x[1]]])\n# elif key == \"LBB\":\n# LBB.append([ts[x[0]],ts[x[1]]])\n# elif key == \"APC\":\n# APC.append([ts[x[0]],ts[x[1]]])\n# elif key == \"VEB\":\n# VEB.append([ts[x[0]],ts[x[1]]])\n# result = {\n# \"APC\": APC,\n# \"LBB\": LBB,\n# \"PAB\": PAB,\n# \"PVC\": PVC,\n# \"RBB\": RBB,\n# \"VEB\": VEB\n# }\n\n# #get value ts by rpeaks index\n# rpeaks = hasil[\"rpeaks\"]\n# rpeaks = np.array(rpeaks)\n# ts_tmp = np.array(ts)\n# rpeaks = ts_tmp[rpeaks]\n# rpeaks = rpeaks.tolist()\n\n# hr = hasil[\"heart_rate\"]\n# hr_template = hasil[\"hr_template\"]\n\n# tmp = {\n# \"status\":\"OK\",\n# \"result\":{\n# \"nama\":nama,\n# \"umur\":umur,\n# \"data\":ecg_ts,\n# \"hasil\":result,\n# \"filtered\":filtered_ts,\n# \"hr_template\":hr_template,\n# \"rpeaks\": rpeaks,\n# \"hr\":hr\n# }\n# }\n# args = {}\n\n# args['data'] = ecg_ts\n# args['filtered'] = filtered_ts\n# args['hasil'] = result\n# args['hr'] = hr\n# args['hr_template'] = hr_template\n# args['rpeaks'] = rpeaks\n# args['nama'] = nama\n# args['umur'] = umur\n\n# return args\n# END dari JAY Versi Lama 2\n\n\n# DARI JAY Versi lama\n# def homepage(response):\n# urlStorage = \"http://127.0.0.1:5001/getAllName\"\n# urlAnalytic = \"http://127.0.0.1:5000/\"\n# responAnalytic = requests.request(\"GET\", urlAnalytic)\n# responAnalytic = responAnalytic.json()\n# if responAnalytic['status'] == \"OK\":\n# args = {}\n# responStorage = requests.request(\"GET\", 
urlStorage)\n# respon = responStorage.json()\n# args['data'] = respon\n# return render(response,\"dataanalytics/index.html\",args)\n# else:\n# return render(response,\"dataanalytics/404.html\")\n\n# def analytic(response, username):\n# urlAnalytic = \"http://127.0.0.1:5000/requestAnalysis/{}\".format(username)\n# responAnalytic = requests.request(\"GET\", urlAnalytic)\n# responAnalytic = responAnalytic.json()\n# if responAnalytic['status'] == \"OK\":\n# args = getHasil(username)\n# return render(response,\"dataanalytics/details.html\",args)\n# else:\n# return render(response,\"dataanalytics/404.html\")\n\n\n# def getHasil(username):\n# args = {}\n# url = \"http://127.0.0.1:5001/getOneData/{}\".format(username)\n# respon = requests.request(\"GET\", url)\n# result = respon.json()\n\n# args['data'] = result['result']['data']\n# args['filtered'] = result['result']['filtered']\n# args['hasil'] = result['result']['hasil']\n# print(args['hasil'])\n# args['hr'] = result['result']['hr']\n# args['hr_template'] = result['result']['hr_template']\n# args['rpeaks'] = result['result']['rpeaks']\n# args['nama'] = result['result']['nama']\n# args['umur'] = result['result']['umur']\n\n# return args\n# #END FROM JAY\n", "sub_path": "dataanalytics/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 18758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.conf.settings.DB_ADDR", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.settings.DB_PORT", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.DB_NAME", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.settings.GTW_HOST", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.settings.GTW_PORT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.settings.HRA_HOST", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.settings.HRA_PORT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 21, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 131, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 148, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 150, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 155, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 161, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 163, "usage_type": "call"}, {"api_name": "django.shortcuts.render", 
"line_number": 170, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 173, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 192, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 194, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 198, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 226, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 228, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 232, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 237, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 242, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 244, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 282, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 284, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 291, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 376, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 424, "usage_type": "call"}]} +{"seq_id": "128986388", "text": "import pygame\nfrom pygame.locals import *\nfrom pygame.sprite import Group, groupcollide, spritecollide\n\nfrom random import randint\n\nfrom game.soundplayer import *\nfrom game import *\nimport json\n\nMAX_STAGE = 10\nMONSTERS_SPEED = 2\n\nclass Stage:\n def __init__(self, screen, fps = 60, key = 1):\n self.key = key\n self.json_data = None\n self.screen = screen\n self.map = None\n self.temp_effects = []\n self.bg = None\n self.FPS = fps\n self.done = False\n self.CLOCK = pygame.time.Clock()\n \n self.player = None\n self.health_bar = None\n self.monsters = Group()\n\n playBackgroundMusic()\n \n def start(self):\n config = json.loads(open('definitions/stages.json').read())\n config = config['stage' + str(self.key)]\n\n self.bg = pygame.image.load(config['background']).convert_alpha()\n self.screen.blit(self.bg, (0, 0))\n\n entities_config = json.loads(open('definitions/entities.json').read())\n conf = entities_config['player']\n\n #gambiarra\n if self.player == None:\n self.player = Player('player', tuple(config['player_position']),\n conf['shot'], conf['shot_speed'], conf['life'], conf['damage'])\n else:\n self.player.shots = Group()\n\n if self.health_bar == None:\n self.health_bar = HealthBar(self.player)\n #fim gambiarra\n \n #botando os monstros a partir do y = 0\n y = 0\n for i in range(1, 5):\n line = config['line' + str(i)]\n x = config['monsters_position'][0]\n y_M = 0\n for c in line:\n conf = entities_config['monster' + c] \n self.monsters.add(Monster('monster' + c, (x, y), conf['shot'],\n conf['shot_speed'], conf['life'], conf['damage'], conf['value'], MONSTERS_SPEED))\n x += conf['size'][0]\n if conf['size'][1] > y_M:\n y_M = conf['size'][1]\n y += y_M\n\n #subindo os monstros para fazer animacao de descida, onde y = altura das 4 linhas\n for monster in self.monsters:\n monster.rect.y -= y\n\n #descendo eles com velocidade normal para ficarem em sua posicao padrao\n yMovToPos = 0\n while True:\n if yMovToPos < (y + config['monsters_position'][1])/MONSTERS_SPEED:\n for monster in self.monsters:\n monster.setSpeed((0,1))\n monster.do()\n yMovToPos += 1\n else:\n break\n self.renderObjects(True)\n self.CLOCK.tick(self.FPS)\n self.run()\n\n def 
run(self):\n framesSinceLastEnemyShot = 0\n lastMove = \"right\"\n\n while not self.done:\n \n #Keyboard press and exit events\n for event in pygame.event.get():\n if event.type == QUIT:\n exit()\n\n #Keyboard hold\n keys = pygame.key.get_pressed() \n if keys[K_LEFT] or keys[K_a]:\n if(not self.player.touchingLeftBorder()):\n self.player.setSpeed((-1, 0))\n if keys[K_RIGHT] or keys[K_d]:\n if(not self.player.touchingRightBorder()):\n self.player.setSpeed((1, 0))\n if keys[K_SPACE]:\n if(self.player.attempt_shoot(self.CLOCK)):\n playSoundPlayerShot()\n self.player.do()\n\n #Checking if monsters can be moved\n canMoveLeft = canMoveRight = True\n for monster in self.monsters:\n if(monster.touchingLeftBorder()):\n lastMove = \"left\"\n canMoveLeft = False\n if(monster.touchingRightBorder()):\n lastMove = \"right\"\n canMoveRight = False\n\n if(canMoveRight and lastMove == \"left\"):\n monsSpeed = (1,0)\n elif(canMoveLeft and lastMove == \"right\"):\n monsSpeed = (-1,0)\n else:\n monsSpeed = (0,0)\n\n for monster in self.monsters:\n monster.setSpeed(monsSpeed)\n monster.do()\n\n #Player-Shot collision with monster\n collided = groupcollide(self.player.shots, self.monsters, True, False)\n for key, values in collided.items():\n for value in values:\n value.life -= self.player.damage\n if value.life <= 0:\n self.player.score += value.value\n collision_pos = value.getPosition()\n self.temp_effects.append(TempEffect(\"hit_blue\", \"effects\", collision_pos))\n\n #Monsters-Shots collision with player\n for monster in self.monsters:\n collided = spritecollide(self.player, monster.shots, True)\n if(len(collided)):\n self.player.life -= monster.damage\n collision_pos = self.player.getPosition()\n self.temp_effects.append(TempEffect(\"hit_blue\", \"effects\", collision_pos))\n\n #Select random enemy to shot and control time between shots\n indexMonShooting = randint(0, len(self.monsters))\n if((framesSinceLastEnemyShot > 30 and len(self.monsters) > 4) or \\\n (framesSinceLastEnemyShot > 60)):\n for i, monster in enumerate(self.monsters):\n if(i == indexMonShooting):\n monster.shoot()\n framesSinceLastEnemyShot = 0\n else:\n framesSinceLastEnemyShot += 1\n\n if self.player.life <= 0:\n self.done = True\n\n text = \"You lost! Score: \" + str(self.player.score)\n self.showText(text)\n playSoundDeath()\n elif len(self.monsters) <= 0:\n self.done = True\n\n #Updating and rendering objects\n self.renderObjects()\n\n self.CLOCK.tick(self.FPS)\n\n if(len(self.monsters) <= 0):\n if(self.key < MAX_STAGE):\n self.done = False\n self.key += 1\n self.start()\n else:\n text = \"You won! 
Score: \" + str(self.player.score)\n self.showText(text)\n pygame.time.delay(2000)\n \n\n def showText(self, text):\n font = pygame.font.Font('freesansbold.ttf', 40)\n textSurface = font.render(text, True, (0,0,0))\n dest = textSurface.get_rect()\n dest.center = (320,240)\n self.screen.blit(textSurface, dest)\n pygame.display.flip()\n\n \n def renderObjects(self, starting = False):\n self.map = pygame.sprite.RenderUpdates()\n \n self.screen.blit(self.bg, (0, 0)) \n\n self.map.update()\n self.map.draw(self.screen)\n\n if not starting:\n self.health_bar.draw(self.screen)\n\n self.player.update()\n self.player.update_attack_clock(self.CLOCK)\n if(self.player.life > 0):\n self.player.draw(self.screen)\n\n\n for monster in self.monsters:\n monster.update()\n monster.draw(self.screen)\n\n for i, tmp in enumerate(self.temp_effects):\n if(tmp.is_dead()):\n self.temp_effects.pop(i)\n else:\n tmp.update_time(self.CLOCK)\n tmp.draw(self.screen)\n\n pygame.display.flip()\n", "sub_path": "game/stage.py", "file_name": "stage.py", "file_ext": "py", "file_size_in_byte": 7551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pygame.time.Clock", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 28, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 36, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 143, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 191, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pygame.sprite.RenderUpdates", "line_number": 195, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 222, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 222, "usage_type": "attribute"}]} +{"seq_id": "412018048", "text": "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom functools import partial\n\nimport numpy as np\nimport paddle.fluid as fluid\nimport paddle.fluid.layers as layers\n\nfrom paddlerec.core.utils import envs\nfrom paddlerec.core.model import ModelBase\n\n\ndef positionwise_feed_forward(x, d_inner_hid, d_hid, dropout_rate):\n \"\"\"\n Position-wise Feed-Forward Networks.\n This module consists of two linear transformations with a ReLU activation\n in between, which is applied to each position separately and identically.\n \"\"\"\n hidden = layers.fc(input=x,\n size=d_inner_hid,\n num_flatten_dims=2,\n act=\"relu\")\n if dropout_rate:\n hidden = layers.dropout(\n hidden,\n dropout_prob=dropout_rate,\n seed=dropout_seed,\n is_test=False)\n out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)\n return out\n\n\ndef pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):\n \"\"\"\n Add residual connection, layer normalization and droput to the out tensor\n optionally according to the value of process_cmd.\n This will be used before or after multi-head attention and position-wise\n feed-forward networks.\n \"\"\"\n for cmd in process_cmd:\n if cmd == \"a\": # add residual connection\n out = out + prev_out if prev_out else out\n elif cmd == \"n\": # add layer normalization\n out = layers.layer_norm(\n out,\n begin_norm_axis=len(out.shape) - 1,\n param_attr=fluid.initializer.Constant(1.),\n bias_attr=fluid.initializer.Constant(0.))\n elif cmd == \"d\": # add dropout\n if dropout_rate:\n out = layers.dropout(\n out,\n dropout_prob=dropout_rate,\n seed=dropout_seed,\n is_test=False)\n return out\n\n\npre_process_layer = partial(pre_post_process_layer, None)\npost_process_layer = pre_post_process_layer\n\n\nclass Model(ModelBase):\n def __init__(self, config):\n ModelBase.__init__(self, config)\n\n def _init_hyper_parameters(self):\n self.item_emb_size = envs.get_global_env(\n \"hyper_parameters.item_emb_size\", 64)\n self.cat_emb_size = envs.get_global_env(\n \"hyper_parameters.cat_emb_size\", 64)\n self.position_emb_size = envs.get_global_env(\n \"hyper_parameters.position_emb_size\", 64)\n self.act = envs.get_global_env(\"hyper_parameters.act\", \"sigmoid\")\n self.is_sparse = envs.get_global_env(\"hyper_parameters.is_sparse\",\n False)\n # significant for speeding up the training process\n self.use_DataLoader = envs.get_global_env(\n \"hyper_parameters.use_DataLoader\", False)\n self.item_count = envs.get_global_env(\"hyper_parameters.item_count\",\n 63001)\n self.cat_count = envs.get_global_env(\"hyper_parameters.cat_count\", 801)\n self.position_count = envs.get_global_env(\n \"hyper_parameters.position_count\", 5001)\n self.n_encoder_layers = envs.get_global_env(\n \"hyper_parameters.n_encoder_layers\", 1)\n self.d_model = envs.get_global_env(\"hyper_parameters.d_model\", 96)\n self.d_key = envs.get_global_env(\"hyper_parameters.d_key\", None)\n self.d_value = envs.get_global_env(\"hyper_parameters.d_value\", None)\n self.n_head = 
envs.get_global_env(\"hyper_parameters.n_head\", None)\n self.dropout_rate = envs.get_global_env(\n \"hyper_parameters.dropout_rate\", 0.0)\n self.postprocess_cmd = envs.get_global_env(\n \"hyper_parameters.postprocess_cmd\", \"da\")\n self.preprocess_cmd = envs.get_global_env(\n \"hyper_parameters.postprocess_cmd\", \"n\")\n self.prepostprocess_dropout = envs.get_global_env(\n \"hyper_parameters.prepostprocess_dropout\", 0.0)\n self.d_inner_hid = envs.get_global_env(\"hyper_parameters.d_inner_hid\",\n 512)\n self.relu_dropout = envs.get_global_env(\n \"hyper_parameters.relu_dropout\", 0.0)\n self.layer_sizes = envs.get_global_env(\"hyper_parameters.fc_sizes\",\n None)\n\n def multi_head_attention(self, queries, keys, values, d_key, d_value,\n d_model, n_head, dropout_rate):\n keys = queries if keys is None else keys\n values = keys if values is None else values\n if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3\n ):\n raise ValueError(\n \"Inputs: quries, keys and values should all be 3-D tensors.\")\n\n def __compute_qkv(queries, keys, values, n_head, d_key, d_value):\n \"\"\"\n Add linear projection to queries, keys, and values.\n \"\"\"\n q = fluid.layers.fc(input=queries,\n size=d_key * n_head,\n bias_attr=False,\n num_flatten_dims=2)\n k = fluid.layers.fc(input=keys,\n size=d_key * n_head,\n bias_attr=False,\n num_flatten_dims=2)\n v = fluid.layers.fc(input=values,\n size=d_value * n_head,\n bias_attr=False,\n num_flatten_dims=2)\n return q, k, v\n\n def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value):\n \"\"\"\n Reshape input tensors at the last dimension to split multi-heads \n and then transpose. Specifically, transform the input tensor with shape\n [bs, max_sequence_length, n_head * hidden_dim] to the output tensor\n with shape [bs, n_head, max_sequence_length, hidden_dim].\n \"\"\"\n # The value 0 in shape attr means copying the corresponding dimension\n # size of the input as the output dimension size.\n reshaped_q = fluid.layers.reshape(\n x=queries, shape=[0, 0, n_head, d_key], inplace=True)\n # permuate the dimensions into:\n # [batch_size, n_head, max_sequence_len, hidden_size_per_head]\n q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])\n # For encoder-decoder attention in inference, insert the ops and vars\n # into global block to use as cache among beam search.\n reshaped_k = fluid.layers.reshape(\n x=keys, shape=[0, 0, n_head, d_key], inplace=True)\n k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3])\n reshaped_v = fluid.layers.reshape(\n x=values, shape=[0, 0, n_head, d_value], inplace=True)\n v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3])\n\n return q, k, v\n\n def scaled_dot_product_attention(q, k, v, d_key, dropout_rate):\n \"\"\"\n Scaled Dot-Product Attention\n \"\"\"\n product = fluid.layers.matmul(\n x=q, y=k, transpose_y=True, alpha=d_key**-0.5)\n\n weights = fluid.layers.softmax(product)\n if dropout_rate:\n weights = fluid.layers.dropout(\n weights,\n dropout_prob=dropout_rate,\n seed=None,\n is_test=False)\n out = fluid.layers.matmul(weights, v)\n return out\n\n def __combine_heads(x):\n \"\"\"\n Transpose and then reshape the last two dimensions of inpunt tensor x\n so that it becomes one dimension, which is reverse to __split_heads.\n \"\"\"\n if len(x.shape) != 4:\n raise ValueError(\"Input(x) should be a 4-D Tensor.\")\n\n trans_x = fluid.layers.transpose(x, perm=[0, 2, 1, 3])\n # The value 0 in shape attr means copying the corresponding dimension\n # size of the input as the output 
dimension size.\n return fluid.layers.reshape(\n x=trans_x,\n shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],\n inplace=True)\n\n q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)\n q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)\n\n ctx_multiheads = scaled_dot_product_attention(q, k, v, d_model,\n dropout_rate)\n\n out = __combine_heads(ctx_multiheads)\n\n proj_out = fluid.layers.fc(input=out,\n size=d_model,\n bias_attr=False,\n num_flatten_dims=2)\n\n return proj_out\n\n def encoder_layer(self, x):\n attention_out = self.multi_head_attention(\n pre_process_layer(x, self.preprocess_cmd,\n self.prepostprocess_dropout), None, None,\n self.d_key, self.d_value, self.d_model, self.n_head,\n self.dropout_rate)\n attn_output = post_process_layer(x, attention_out,\n self.postprocess_cmd,\n self.prepostprocess_dropout)\n ffd_output = positionwise_feed_forward(\n pre_process_layer(attn_output, self.preprocess_cmd,\n self.prepostprocess_dropout), self.d_inner_hid,\n self.d_model, self.relu_dropout)\n return post_process_layer(attn_output, ffd_output,\n self.postprocess_cmd,\n self.prepostprocess_dropout)\n\n def net(self, inputs, is_infer=False):\n\n init_value_ = 0.1\n\n hist_item_seq = self._sparse_data_var[1]\n hist_cat_seq = self._sparse_data_var[2]\n position_seq = self._sparse_data_var[3]\n target_item = self._sparse_data_var[4]\n target_cat = self._sparse_data_var[5]\n target_position = self._sparse_data_var[6]\n self.label = self._sparse_data_var[0]\n\n item_emb_attr = fluid.ParamAttr(name=\"item_emb\")\n cat_emb_attr = fluid.ParamAttr(name=\"cat_emb\")\n position_emb_attr = fluid.ParamAttr(name=\"position_emb\")\n\n hist_item_emb = fluid.embedding(\n input=hist_item_seq,\n size=[self.item_count, self.item_emb_size],\n param_attr=item_emb_attr,\n is_sparse=self.is_sparse)\n\n hist_cat_emb = fluid.embedding(\n input=hist_cat_seq,\n size=[self.cat_count, self.cat_emb_size],\n param_attr=cat_emb_attr,\n is_sparse=self.is_sparse)\n\n hist_position_emb = fluid.embedding(\n input=hist_cat_seq,\n size=[self.position_count, self.position_emb_size],\n param_attr=position_emb_attr,\n is_sparse=self.is_sparse)\n\n target_item_emb = fluid.embedding(\n input=target_item,\n size=[self.item_count, self.item_emb_size],\n param_attr=item_emb_attr,\n is_sparse=self.is_sparse)\n\n target_cat_emb = fluid.embedding(\n input=target_cat,\n size=[self.cat_count, self.cat_emb_size],\n param_attr=cat_emb_attr,\n is_sparse=self.is_sparse)\n\n target_position_emb = fluid.embedding(\n input=target_position,\n size=[self.position_count, self.position_emb_size],\n param_attr=position_emb_attr,\n is_sparse=self.is_sparse)\n\n item_sequence_target = fluid.layers.reduce_sum(\n fluid.layers.sequence_concat([hist_item_emb, target_item_emb]),\n dim=1)\n cat_sequence_target = fluid.layers.reduce_sum(\n fluid.layers.sequence_concat([hist_cat_emb, target_cat_emb]),\n dim=1)\n position_sequence_target = fluid.layers.reduce_sum(\n fluid.layers.sequence_concat(\n [hist_position_emb, target_position_emb]),\n dim=1)\n\n whole_embedding_withlod = fluid.layers.concat(\n [\n item_sequence_target, cat_sequence_target,\n position_sequence_target\n ],\n axis=1)\n pad_value = fluid.layers.assign(input=np.array(\n [0.0], dtype=np.float32))\n whole_embedding, _ = fluid.layers.sequence_pad(whole_embedding_withlod,\n pad_value)\n\n for _ in range(self.n_encoder_layers):\n enc_output = self.encoder_layer(whole_embedding)\n enc_input = enc_output\n enc_output = pre_process_layer(enc_output, 
self.preprocess_cmd,\n self.prepostprocess_dropout)\n\n dnn_input = fluid.layers.reduce_sum(enc_output, dim=1)\n\n for s in self.layer_sizes:\n dnn_input = fluid.layers.fc(\n input=dnn_input,\n size=s,\n act=self.act,\n param_attr=fluid.ParamAttr(\n initializer=fluid.initializer.TruncatedNormalInitializer(\n loc=0.0, scale=init_value_ / math.sqrt(float(10)))),\n bias_attr=fluid.ParamAttr(\n initializer=fluid.initializer.TruncatedNormalInitializer(\n loc=0.0, scale=init_value_)))\n\n y_dnn = fluid.layers.fc(input=dnn_input, size=1, act=None)\n\n self.predict = fluid.layers.sigmoid(y_dnn)\n cost = fluid.layers.log_loss(\n input=self.predict, label=fluid.layers.cast(self.label, \"float32\"))\n avg_cost = fluid.layers.reduce_sum(cost)\n\n self._cost = avg_cost\n\n predict_2d = fluid.layers.concat([1 - self.predict, self.predict], 1)\n label_int = fluid.layers.cast(self.label, 'int64')\n auc_var, batch_auc_var, _ = fluid.layers.auc(input=predict_2d,\n label=label_int,\n slide_steps=0)\n self._metrics[\"AUC\"] = auc_var\n self._metrics[\"BATCH_AUC\"] = batch_auc_var\n if is_infer:\n self._infer_results[\"AUC\"] = auc_var\n", "sub_path": "models/rank/BST/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 14874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "paddle.fluid.layers.fc", "line_number": 32, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 32, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.dropout", "line_number": 37, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 37, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 42, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 42, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.layer_norm", "line_number": 57, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 57, "usage_type": "name"}, {"api_name": "paddle.fluid.initializer.Constant", "line_number": 60, "usage_type": "call"}, {"api_name": "paddle.fluid.initializer", "line_number": 60, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 60, "usage_type": "name"}, {"api_name": "paddle.fluid.initializer.Constant", "line_number": 61, "usage_type": "call"}, {"api_name": "paddle.fluid.initializer", "line_number": 61, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 61, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.dropout", "line_number": 64, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 64, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 72, "usage_type": "call"}, {"api_name": "paddlerec.core.model.ModelBase", "line_number": 76, "usage_type": "name"}, {"api_name": "paddlerec.core.model.ModelBase.__init__", "line_number": 78, "usage_type": "call"}, {"api_name": "paddlerec.core.model.ModelBase", "line_number": 78, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 81, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 81, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 83, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 83, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 85, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 85, 
"usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 87, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 87, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 88, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 88, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 91, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 91, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 93, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 93, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 95, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 95, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 96, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 96, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 98, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 98, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 100, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 100, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 101, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 101, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 102, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 102, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 103, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 103, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 104, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 104, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 106, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 106, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 108, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 108, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 110, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 110, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 112, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 112, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 114, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 114, "usage_type": "name"}, {"api_name": "paddlerec.core.utils.envs.get_global_env", "line_number": 116, "usage_type": "call"}, {"api_name": "paddlerec.core.utils.envs", "line_number": 116, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 132, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 132, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 132, 
"usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 136, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 136, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 136, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 140, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 140, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 140, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 155, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 155, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 155, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.transpose", "line_number": 159, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 159, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 159, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 162, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 162, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 162, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.transpose", "line_number": 164, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 164, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 164, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 165, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 165, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 165, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.transpose", "line_number": 167, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 167, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 167, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.matmul", "line_number": 175, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 175, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 175, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.softmax", "line_number": 178, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 178, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 178, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.dropout", "line_number": 180, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 180, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 180, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.matmul", "line_number": 185, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 185, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 185, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.transpose", "line_number": 196, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 196, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 196, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 199, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 199, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 199, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 212, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 
212, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 212, "usage_type": "name"}, {"api_name": "paddle.fluid.ParamAttr", "line_number": 248, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 248, "usage_type": "name"}, {"api_name": "paddle.fluid.ParamAttr", "line_number": 249, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 249, "usage_type": "name"}, {"api_name": "paddle.fluid.ParamAttr", "line_number": 250, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 250, "usage_type": "name"}, {"api_name": "paddle.fluid.embedding", "line_number": 252, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 252, "usage_type": "name"}, {"api_name": "paddle.fluid.embedding", "line_number": 258, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 258, "usage_type": "name"}, {"api_name": "paddle.fluid.embedding", "line_number": 264, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 264, "usage_type": "name"}, {"api_name": "paddle.fluid.embedding", "line_number": 270, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 270, "usage_type": "name"}, {"api_name": "paddle.fluid.embedding", "line_number": 276, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 276, "usage_type": "name"}, {"api_name": "paddle.fluid.embedding", "line_number": 282, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 282, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 288, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 288, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 288, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.sequence_concat", "line_number": 289, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 289, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 289, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 291, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 291, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 291, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.sequence_concat", "line_number": 292, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 292, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 292, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 294, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 294, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 294, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.sequence_concat", "line_number": 295, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 295, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 295, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 299, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 299, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 299, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.assign", "line_number": 305, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 305, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 305, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 305, "usage_type": "call"}, {"api_name": 
"numpy.float32", "line_number": 306, "usage_type": "attribute"}, {"api_name": "paddle.fluid.layers.sequence_pad", "line_number": 307, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 307, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 307, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 316, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 316, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 316, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 319, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 319, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 319, "usage_type": "name"}, {"api_name": "paddle.fluid.ParamAttr", "line_number": 323, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 323, "usage_type": "name"}, {"api_name": "paddle.fluid.initializer.TruncatedNormalInitializer", "line_number": 324, "usage_type": "call"}, {"api_name": "paddle.fluid.initializer", "line_number": 324, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 324, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 325, "usage_type": "call"}, {"api_name": "paddle.fluid.ParamAttr", "line_number": 326, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 326, "usage_type": "name"}, {"api_name": "paddle.fluid.initializer.TruncatedNormalInitializer", "line_number": 327, "usage_type": "call"}, {"api_name": "paddle.fluid.initializer", "line_number": 327, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 327, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fc", "line_number": 330, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 330, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 330, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.sigmoid", "line_number": 332, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 332, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 332, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.log_loss", "line_number": 333, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 333, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 333, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.cast", "line_number": 334, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 334, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 334, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 335, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 335, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 335, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 339, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 339, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 339, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.cast", "line_number": 340, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 340, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 340, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.auc", "line_number": 341, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 341, 
"usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 341, "usage_type": "name"}]} +{"seq_id": "28167402", "text": "import os\nimport maya.cmds as cmds\nfrom PySide import QtCore\nimport dev.python.lib.ui as ui\nreload(ui)\n\n# This file's path\nuiCodePath = os.path.realpath(__file__)\n\n# UI path\ndevDirectory = uiCodePath.rpartition('maya')[0]\nuiFile = os.path.join(devDirectory, 'ui', 'rigCreator.ui')\n\nlistForm, listBase = ui.loadUI(uiFile)\nmayaWindow = ui.getMayaWindow()\n\n\nclass RigModuleUI(listForm, listBase):\n\n def __init__(self, parent=mayaWindow):\n self.windowName = 'rigCreator'\n\n # Using an env variable makes the path more generic\n self.settings_path = os.path.join(os.getenv('HOME'), \"settingsFile.ini\")\n\n if cmds.window(self.windowName, exists=True):\n cmds.deleteUI(self.windowName)\n\n super(RigModuleUI, self).__init__(parent)\n\n self.setupUi(self)\n\n\n def create(self):\n # Restore window's previous geometry from file\n if os.path.exists(self.settings_path):\n settings_obj = QtCore.QSettings(self.settings_path, QtCore.QSettings.IniFormat)\n self.restoreGeometry(settings_obj.value(\"windowGeometry\"))\n\n\n def closeEvent(self, event):\n # Save window's geometry\n settings_obj = QtCore.QSettings(self.settings_path, QtCore.QSettings.IniFormat)\n settings_obj.setValue(\"windowGeometry\", self.saveGeometry())\n\n\ndef run():\n rigui = RigModuleUI()\n rigui.create()\n rigui.show()\n\n\"\"\"\nimport sys\nimport os.path\n\n# code path\n# ---------------\npath = os.path.join(\"D:\", os.path.sep,\n \"all_works\",\n \"MAYA_DEV\",\n \"EHM_tools\",\n \"MAYA\",\n \"codes\")\n\nif path not in sys.path:\n sys.path.append(path)\n\nfrom python import *\nimport python.rig.ui.rigModule as rigModule\nreload(rigModule)\nrigModule.run()\n\n\"\"\"", "sub_path": "dev/maya/python/rig/ui/delete/rigModule.py", "file_name": "rigModule.py", "file_ext": "py", "file_size_in_byte": 1797, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "dev.python.lib.ui", "line_number": 5, "usage_type": "argument"}, {"api_name": "os.path.realpath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "dev.python.lib.ui.loadUI", "line_number": 14, "usage_type": "call"}, {"api_name": "dev.python.lib.ui", "line_number": 14, "usage_type": "name"}, {"api_name": "dev.python.lib.ui.getMayaWindow", "line_number": 15, "usage_type": "call"}, {"api_name": "dev.python.lib.ui", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 24, "usage_type": "call"}, {"api_name": "maya.cmds.window", "line_number": 26, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 26, "usage_type": "name"}, {"api_name": "maya.cmds.deleteUI", "line_number": 27, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "PySide.QtCore.QSettings", "line_number": 37, "usage_type": "call"}, {"api_name": "PySide.QtCore", "line_number": 37, "usage_type": "name"}, {"api_name": 
"PySide.QtCore.QSettings", "line_number": 43, "usage_type": "call"}, {"api_name": "PySide.QtCore", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "706939", "text": "import logging\n\nimport click\n\nfrom scout.commands.utils import builds_option\n\nfrom scout.export.transcript import export_transcripts\n\nLOG = logging.getLogger(__name__)\n\n\n@click.command('transcripts', short_help='Export transcripts')\n@builds_option\n@click.pass_context\ndef transcripts(context, build):\n \"\"\"Export all transcripts to .bed like format\"\"\"\n LOG.info(\"Running scout export transcripts\")\n adapter = context.obj['adapter']\n \n header = [\"#Chrom\\tStart\\tEnd\\tTranscript\\tRefSeq\\tHgncID\"]\n\n for line in header:\n click.echo(line)\n\n transcript_string = (\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\")\n\n for tx_obj in export_transcripts(adapter):\n click.echo(transcript_string.format(\n tx_obj['chrom'],\n tx_obj['start'],\n tx_obj['end'],\n tx_obj['ensembl_transcript_id'],\n tx_obj.get('refseq_id',''),\n tx_obj['hgnc_id'],\n )\n )\n", "sub_path": "scout/commands/export/transcript.py", "file_name": "transcript.py", "file_ext": "py", "file_size_in_byte": 935, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 23, "usage_type": "call"}, {"api_name": "scout.export.transcript.export_transcripts", "line_number": 27, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 28, "usage_type": "call"}, {"api_name": "click.command", "line_number": 12, "usage_type": "call"}, {"api_name": "scout.commands.utils.builds_option", "line_number": 13, "usage_type": "name"}, {"api_name": "click.pass_context", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "481044809", "text": "from urlparams import *\nimport urllib, urllib2\nimport json\n\nclass CustomService(object):\n \"\"\"docstring for CustomService\"\"\"\n def __init__(self, arg):\n super(CustomService, self).__init__()\n self.arg = arg\n\n\nclass AboutApi(object):\n \"\"\"docstring for AboutApi\"\"\"\n def __init__(self, arg):\n super(AboutApi, self).__init__()\n self.access_token = arg\n\n def getIpList(self):\n data = {'access_token': self.access_token}\n try:\n req = urllib2.urlopen(AccessToken('').ip_list, urllib.urlencode(data))\n except urllib2.URLError as e:\n print(e)\n except urllib2.HTTPError as e:\n print(e)\n try:\n res = json.loads(req.read())\n except Exception as e:\n print(e)\n return res\n\nclass Menus(object):\n \"\"\"docstring for Menus\"\"\"\n def __init__(self, arg):\n super(Menus, self).__init__()\n self.access_token = arg\n def makeMenu(self, menus):\n if isinstance(menus, dict):\n return\n", "sub_path": "modules.py", "file_name": "modules.py", "file_ext": "py", "file_size_in_byte": 1040, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "urllib2.urlopen", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib2.URLError", "line_number": 22, "usage_type": "attribute"}, {"api_name": "urllib2.HTTPError", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "18803292", "text": "\n# coding: utf-8\n\n# Traffic Light Detection\n\n\nimport numpy as np\n# import matplotlib.pyplot as plt\nfrom PIL import Image\nfrom time 
import time, sleep\nimport tensorflow as tf\nfrom tensorflow.python.keras.models import Sequential\n# from keras.models import Sequential\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\n# from keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n\nfrom tensorflow.python.keras.optimizers import Adam\n# from keras.optimizers import Adam\n# from keras.preprocessing.image import ImageDataGenerator, load_img\n# from keras.preprocessing.image import array_to_img, img_to_array\n\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator, load_img\nfrom tensorflow.python.keras.preprocessing.image import array_to_img, img_to_array\n\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.models import model_from_json\nimport pygame\nimport pygame.camera\n\n\n\n\ndef capture():\n returnCode = 200\n pygame.camera.init()\n cam = pygame.camera.Camera(\"/dev/video0\")\n cam.start()\n while(returnCode==200):\n img1 = cam.get_image()\n print(\"click!\")\n pic1 = pygame.transform.scale(img1, (150, 150))\n pygame.image.save(pic1,\"images/red/warn1.jpg\")\n returnCode = process(\"images/red/warn1.jpg\")\n sleep(2)  # 'time' was imported as the time() function, so call sleep() directly\n\n\ndef process(image_name):\n img_width = 150\n img_height = 150\n batch_size = 16\n epochs = 100\n\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=10,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n train_generator = train_datagen.flow_from_directory(\n directory='images',\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical')\n\n model_name = 'model_keras_v2.0.8_8840_16_100_1507793881247232'\n\n test_datagen = ImageDataGenerator(\n rescale=1./255)\n\n test_generator = train_datagen.flow_from_directory(\n 'images',\n target_size=(150, 150),\n batch_size=batch_size,\n class_mode='categorical')\n\n json_file = open('model_keras_v2.0.8_11195_16_100_1508741543103608.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.load_weights(\"model_keras_v2.0.8_11195_16_100_1508741543103608.h5\")\n print(\"Loaded model from disk\")\n\n i = 0\n for images in test_generator:\n print(images[0].shape)\n predictions = loaded_model.predict(images[0])\n i += 1\n if i > 0:\n break\n\n class_to_idx = train_generator.class_indices\n\n\n images = [\n load_img(image_name)\n ]\n\n for image in images:\n\n start = time()\n\n # Resize to 150x150\n img = image.resize((img_width, img_height))\n\n # Normalized numpy array with shape (150, 150, 3)\n x = img_to_array(img) / 255.\n #, dim_ordering='tf'\n # Numpy array with shape (1, 150, 150, 3)\n x = x.reshape((1,) + x.shape)\n\n prediction = loaded_model.predict(x)\n\n pred_idx = np.argmax(prediction[0])\n\n if(pred_idx==0):\n print(\"GREEN GO\")\n return 100\n\n if(pred_idx==1):\n print(\"RED STOP\")\n return 200\n\n if(pred_idx==2):\n print(\"UNKNOWN STOP\")\n return 200\n\n\n\nif __name__ == '__main__':\n capture()\n", "sub_path": "traffic-light-classification/detect.py", "file_name": "detect.py", "file_ext": "py", "file_size_in_byte": 3481, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pygame.camera.init", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.camera", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.camera.Camera", "line_number": 37, "usage_type": "call"}, {"api_name": 
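[Editor's sketch] The detect.py record above restores its classifier with model_from_json() plus load_weights() but never shows the save side. For orientation, here is a minimal sketch of the matching persistence step, using only standard Keras APIs; the file names are placeholders, not the record's real artifacts:

# Sketch: persist a Keras model as architecture JSON plus HDF5 weights,
# the counterpart of the loading done in detect.py above; paths are illustrative.
from tensorflow.python.keras.models import model_from_json

def save_model(model, json_path='model.json', weights_path='model.h5'):
    with open(json_path, 'w') as f:
        f.write(model.to_json())       # architecture only
    model.save_weights(weights_path)   # learned parameters only

def load_model(json_path='model.json', weights_path='model.h5'):
    with open(json_path) as f:
        model = model_from_json(f.read())
    model.load_weights(weights_path)
    return model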
"pygame.camera", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.image.save", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "name"}, {"api_name": "tensorflow.python.keras.preprocessing.image.ImageDataGenerator", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.preprocessing.image.ImageDataGenerator", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.model_from_json", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.preprocessing.image.load_img", "line_number": 97, "usage_type": "call"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.preprocessing.image.img_to_array", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "12648654", "text": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nfrom glitter import block_admin\n\n\nurlpatterns = [\n # Admin\n url(r'^admin/', include(admin.site.urls)),\n url(r'^blockadmin/', include(block_admin.site.urls)),\n\n url(r'^accounts/', include('django.contrib.auth.urls')),\n url(r'^news/', include('glitter_news.urls', namespace='glitter-news')),\n url(r'^events/', include('glitter_events.urls', namespace='glitter-events')),\n url(\n r'^vacancies/',\n include('digitaldiocese.vacancies.urls', namespace='digitaldiocese-vacancies')\n ),\n url(r'^documents/', include('glitter_documents.urls', namespace='glitter-documents')),\n url(r'^acny/', include('digitaldiocese.acny.urls', namespace='digitaldiocese-acny')),\n url(r'^search/', include('digitaldiocese.search.urls', namespace='search')),\n]\n\n# Make it easier to see a 404 page under debug\nif settings.DEBUG:\n from django.views.defaults import page_not_found\n\n urlpatterns += [\n url(r'^404/$', page_not_found),\n ]\n\n# Serving static/media under debug\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "project_name/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 15, "usage_type": "call"}, {"api_name": "glitter.block_admin.site", "line_number": 15, "usage_type": "attribute"}, {"api_name": "glitter.block_admin", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": 
"call"}, {"api_name": "django.conf.urls.include", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.views.defaults.page_not_found", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.contrib.staticfiles.urls.staticfiles_urlpatterns", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "622680565", "text": "import os\nfrom sqlalchemy import or_\nfrom PIL import Image\nfrom flask import url_for, render_template, flash, redirect, request, abort, Blueprint\nfrom FlaskSite import app, bcrypt, db, mail\nfrom FlaskSite.categories.Forms import AddCategoryForm, EditCategoryForm\nfrom FlaskSite.Models import (UserType, User, Item, Category, Cart, Transaction, TransactionDetail, History, HistoryDetail,\n Status, Category, Chat, ChatDetail, ShippingRecord, Shipping)\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom FlaskSite.Variables import *\n\ncategories = Blueprint('categories', __name__)\n\n@categories.route('/add_category', methods=['GET', 'POST'])\n@login_required\ndef AddCategory():\n if not current_user.is_authenticated or current_user.usertype.name in restrictedUser:\n return redirect(url_for('main.Home'))\n form = AddCategoryForm()\n if form.validate_on_submit():\n category = Category(name = form.name.data, description = form.description.data)\n db.session.add(category)\n db.session.commit()\n flash('Category Added!', 'success')\n return redirect(url_for('categories.AddCategory'))\n return render_template('addCategory.html', title=title+' - Add Category', form=form)\n\n@categories.route('/category/edit', methods=['GET', 'POST'])\n@login_required\ndef EditCategory():\n if not current_user.is_authenticated or current_user.usertype.name in restrictedUser:\n return redirect(url_for('main.Home'))\n form = EditCategoryForm()\n category_id = request.args.get('category_id', 1, type=int)\n category = Category.query.get(category_id)\n if form.validate_on_submit():\n category.name = 
form.name.data\n category.description = form.description.data\n db.session.commit()\n flash('Category Changed!', 'success')\n return redirect(url_for('categories.ViewCategory', category_id=category_id))\n elif request.method == 'GET':\n form.name.data = category.name\n form.description.data = category.description\n return render_template('editCategory.html', title=title+' - Edit Category', form=form, category=category)\n\n@categories.route(\"/category/\")\ndef ViewCategory(category_id):\n category = Category.query.get(category_id)\n items = Item.query.filter_by(category_id=category_id).all()\n return render_template('viewCategory.html', title=title+' - '+category.name, category=category, items=items)\n\n", "sub_path": "FlaskSite/categories/Routes.py", "file_name": "Routes.py", "file_ext": "py", "file_size_in_byte": 2474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Blueprint", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 17, "usage_type": "name"}, {"api_name": "flask_login.current_user.usertype", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 18, "usage_type": "call"}, {"api_name": "FlaskSite.categories.Forms.AddCategoryForm", "line_number": 19, "usage_type": "call"}, {"api_name": "FlaskSite.Models.Category", "line_number": 21, "usage_type": "call"}, {"api_name": "FlaskSite.db.session.add", "line_number": 22, "usage_type": "call"}, {"api_name": "FlaskSite.db.session", "line_number": 22, "usage_type": "attribute"}, {"api_name": "FlaskSite.db", "line_number": 22, "usage_type": "name"}, {"api_name": "FlaskSite.db.session.commit", "line_number": 23, "usage_type": "call"}, {"api_name": "FlaskSite.db.session", "line_number": 23, "usage_type": "attribute"}, {"api_name": "FlaskSite.db", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 15, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_login.current_user.usertype", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 32, "usage_type": "call"}, {"api_name": "FlaskSite.categories.Forms.EditCategoryForm", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "FlaskSite.Models.Category.query.get", "line_number": 35, "usage_type": "call"}, {"api_name": "FlaskSite.Models.Category.query", "line_number": 35, "usage_type": "attribute"}, {"api_name": "FlaskSite.Models.Category", "line_number": 35, "usage_type": "name"}, {"api_name": 
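[Editor's sketch] The Routes.py record above defines its views on a Flask Blueprint named categories, and its `from FlaskSite import app, ...` line implies the application object lives in the package's __init__. A minimal sketch of how such a blueprint is typically wired up there; the import locations are inferred from the record, not confirmed:

# Sketch: registering the categories blueprint on the application object,
# roughly what FlaskSite/__init__.py would contain (inferred, hypothetical).
from flask import Flask

app = Flask(__name__)

# ... extensions such as db, bcrypt, and mail would be initialized here ...

from FlaskSite.categories.Routes import categories
app.register_blueprint(categories)  # routes like /add_category become live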
"FlaskSite.db.session.commit", "line_number": 39, "usage_type": "call"}, {"api_name": "FlaskSite.db.session", "line_number": 39, "usage_type": "attribute"}, {"api_name": "FlaskSite.db", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 29, "usage_type": "name"}, {"api_name": "FlaskSite.Models.Category.query.get", "line_number": 49, "usage_type": "call"}, {"api_name": "FlaskSite.Models.Category.query", "line_number": 49, "usage_type": "attribute"}, {"api_name": "FlaskSite.Models.Category", "line_number": 49, "usage_type": "name"}, {"api_name": "FlaskSite.Models.Item.query.filter_by", "line_number": 50, "usage_type": "call"}, {"api_name": "FlaskSite.Models.Item.query", "line_number": 50, "usage_type": "attribute"}, {"api_name": "FlaskSite.Models.Item", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "153632155", "text": "import webapp2\nfrom webapp2_extras import jinja2\nfrom google.appengine.api import users\nfrom models.users import User\nfrom webapp2 import uri_for\n\n\nclass BaseHandler(webapp2.RequestHandler):\n def __init__(self, request=None, response=None):\n super(BaseHandler, self).__init__(request, response)\n\n @webapp2.cached_property\n def jinja2(self):\n return jinja2.get_jinja2(app=self.app)\n\n def render_response(self, template, **context):\n ctx = {\n 'logout_url': users.create_logout_url(uri_for('index')),\n 'account_url': uri_for('account')\n }\n ctx.update(context)\n\n # Render\n view = self.jinja2.render_template(template, **ctx)\n self.response.write(view)\n\n def render_template(self, template, **context):\n \"\"\"\n Renders a template and returns the rendered content\n \"\"\"\n return self.jinja2.render_template(template, **context)\n\n def get_user(self):\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(uri_for('account')))\n return False\n else:\n user_query = User.query(User.user_id == user.user_id())\n db_user_list = user_query.fetch()\n if len(db_user_list) > 0:\n db_user = db_user_list[0]\n else:\n db_user = User(user_id=user.user_id(), nickname='', connection_code='', favorite_color='')\n db_user.put()\n return db_user\n", "sub_path": "views/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "webapp2.RequestHandler", "line_number": 8, "usage_type": "attribute"}, {"api_name": "webapp2_extras.jinja2.get_jinja2", "line_number": 14, "usage_type": "call"}, {"api_name": "webapp2_extras.jinja2", "line_number": 14, "usage_type": "name"}, {"api_name": "webapp2.cached_property", "line_number": 12, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 18, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 18, "usage_type": "name"}, {"api_name": "webapp2.uri_for", "line_number": 18, "usage_type": "call"}, {"api_name": 
"webapp2.uri_for", "line_number": 19, "usage_type": "call"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 34, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 34, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 36, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 36, "usage_type": "name"}, {"api_name": "webapp2.uri_for", "line_number": 36, "usage_type": "call"}, {"api_name": "models.users.User.query", "line_number": 39, "usage_type": "call"}, {"api_name": "models.users.User", "line_number": 39, "usage_type": "name"}, {"api_name": "models.users.User.user_id", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.users.User", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "640206422", "text": "import distribute_setup\ndistribute_setup.use_setuptools()\n\nfrom setuptools import setup\nimport sys\n\n# Python 3 conversion\nextra = {}\nif sys.version_info >= (3,):\n extra['use_2to3'] = True\n\nsetup(\n name = 'Keen-SoftLayer',\n version = '1.1',\n description = \"A library to contact SoftLayer's backend services\",\n author = 'SoftLayer Technologies, Inc.',\n author_email = 'dan@keen.io',\n packages = [\n 'SoftLayer',\n ],\n license = 'The BSD License',\n url = 'http://github.com/softlayer/softlayer-api-python-client',\n classifiers = [],\n **extra\n)\n", "sub_path": "pypi_install_script/Keen-SoftLayer-1.1.tar/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "distribute_setup.use_setuptools", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 9, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "4385861", "text": "import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport time\nimport IPython.display as display\nimport sys\nimport numpy as np\nimport torch.nn as nn\nimport collections\nimport torch.nn.init as init\n\n'''1.获取和读取数据集'''\n# 训练集\nmnist_train = torchvision.datasets.FashionMNIST(\n root='./Datasets/FashionMNIST/', train=True, download=True, transform=transforms.ToTensor())\n# 测试集\nmnist_test = torchvision.datasets.FashionMNIST(\n root='./Datasets/FashionMNIST/', train=False, download=True, transform=transforms.ToTensor())\n\nbatch_size = 256\n\nif sys.platform.startswith('win'):\n # 0表示不用额外的进程来加速读取数据\n num_workers = 0\nelse:\n num_workers = 4\n# 返回数据集迭代器\ntrain_iter = torch.utils.data.DataLoader(\n mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)\ntest_iter = torch.utils.data.DataLoader(\n mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n'''2.定义模型'''\n# 将x形状转换为(batch_size,784)\nclass FlattenLayer(nn.Module):\n def __init__(self):\n super(FlattenLayer, self).__init__()\n # x shape: (batch, *, *, ...)\n\n def forward(self, x):\n return x.view(x.shape[0], -1)\n\n\n# 特征维数(输入层神经元数量)\nnum_inputs = 784\n# 输出维数(输出层神经元数量)\nnum_outputs = 10\n# 隐藏层神经元数量\nnum_hiddens = 256\n# 网络结构\nnet = nn.Sequential(\n # (batch_size,1,28,28)修改为(batch_size,784)\n FlattenLayer(),\n # 隐藏层的输入\n nn.Linear(num_inputs, num_hiddens),\n # 隐藏层的输出\n nn.ReLU(),\n # 输出层的输入\n nn.Linear(num_hiddens, num_outputs),\n)\n# 参数每个元素初始化为随机采样于均值为0、标准差为0.01的正态分布\nfor params in net.parameters():\n 
init.normal_(params, mean=0, std=0.01)\n\n\n'''3. Classification accuracy function'''\ndef evaluate_accuracy(data_iter, net):\n acc_sum, n = 0.0, 0\n for X, y in data_iter:\n acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()\n n += len(y)\n return acc_sum / n\n\n\n'''4. Train the model'''\n# Softmax on the output layer combined with the cross-entropy loss\nloss = torch.nn.CrossEntropyLoss()\n# Define the optimization algorithm (updates the weight parameters)\noptimizer = torch.optim.SGD(net.parameters(), lr=0.5)\n# Or define it by hand\ndef sgd(params, lr, batch_size):\n for param in params:\n param.data -= lr * param.grad / batch_size\n\n\n# Number of training epochs\nnum_epochs = 5\n# Training function\ndef train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n params=None, lr=None, optimizer=None):\n # Training loop\n for epoch in range(num_epochs):\n # Running loss and accuracy\n train_l_sum, train_acc_sum, n = 0.0, 0.0, 0\n for X, y in train_iter:\n # Prediction vector\n y_hat = net(X)\n # Loss\n l = loss(y_hat, y).sum()\n # Zero the gradients before backpropagation\n if optimizer is not None:\n optimizer.zero_grad()\n elif params is not None and params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n # Backpropagate to compute the gradient of the loss w.r.t. the weights\n l.backward()\n # Update the weight parameters with mini-batch stochastic gradient descent\n if optimizer is None:\n sgd(params, lr, batch_size)\n else:\n optimizer.step()\n # Sum of training losses\n train_l_sum += l.item()\n # Sum of correct training predictions\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()\n # Number of training samples seen\n n += y.shape[0]\n # Test-set accuracy\n test_acc = evaluate_accuracy(test_iter, net)\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'\n % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))\n\n\n# Train (network, train iterator, test iterator, cross-entropy loss, number of epochs, batch_size, weight parameters, learning rate)\ntrain_ch3(net, train_iter, test_iter, loss, num_epochs,\n batch_size, None, None, optimizer)\n\n\n'''\nepoch 1, loss 0.0031, train acc 0.704, test acc 0.790\nepoch 2, loss 0.0019, train acc 0.821, test acc 0.764\nepoch 3, loss 0.0017, train acc 0.844, test acc 0.790\nepoch 4, loss 0.0015, train acc 0.857, test acc 0.835\nepoch 5, loss 0.0014, train acc 0.865, test acc 0.859\n'''", "sub_path": "深度学习/动手学深度学习/3.深度学习基础/3.10.py", "file_name": "3.10.py", "file_ext": "py", "file_size_in_byte": 4459, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "torchvision.datasets.FashionMNIST", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.datasets.FashionMNIST", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.platform.startswith", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 52, "usage_type": "call"}, 
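[Editor's note] One caveat in the FashionMNIST record above: evaluate_accuracy() runs its forward passes with autograd enabled, so PyTorch builds a computation graph that is never used. A hedged variant, not part of the original record, that disables gradient tracking during evaluation:

# Sketch: the same accuracy computation wrapped in torch.no_grad() to skip
# graph construction and save memory; for models with dropout or batch norm
# you would also toggle net.eval() / net.train(), which this plain MLP omits.
def evaluate_accuracy_no_grad(data_iter, net):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n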
{"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "300548549", "text": "# Digitransit Routing API DEMO\n\n#%%\nimport geopandas as gpd\nimport utils.dt_routing as routing\nfrom datetime import datetime\n\n# route params for testing\ncoords_from = {'lat': 60.168992, 'lon': 24.932366 }\ncoords_to = {'lat': 60.175294, 'lon': 24.684855 }\nwalkSpeed = '1.33'\nmaxWalkDistance = 6000\n\n#%%\n# build and run routing query\nitins = routing.get_route_itineraries(coords_from, coords_to, walkSpeed, maxWalkDistance, 3, datetime.now())\n\n# parse geometry from Google Encoded Polyline Algorithm Format\nitins_geom = routing.parse_itin_geom(itins)\n\n#%%\n# print route geometry (line) of the first itinerary\nitin = itins_geom[0]\nitin['line_geom']\n\n#%%\n# get only travel time\ntt = routing.get_mean_travel_time(coords_from, coords_to, walkSpeed, maxWalkDistance, 3, True, datetime.now())\nprint('Avg. 
travel time:', tt)\n\n#%%\n", "sub_path": "demo/.ipynb_checkpoints/dt_routing-checkpoint.py", "file_name": "dt_routing-checkpoint.py", "file_ext": "py", "file_size_in_byte": 821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "utils.dt_routing.get_route_itineraries", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.dt_routing", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.dt_routing.parse_itin_geom", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.dt_routing", "line_number": 19, "usage_type": "name"}, {"api_name": "utils.dt_routing.get_mean_travel_time", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.dt_routing", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "299442785", "text": "# Bouncer\n# Written by aquova, 2018-2019\n# https://github.com/aquova/bouncer\n\nimport discord, json, sqlite3, datetime, asyncio, os, subprocess, sys\nimport Utils\nfrom User import User\nfrom Utils import DATABASE_PATH\nfrom Hunt import Hunter\n\n# Reading values from config file\nwith open('private/config.json') as config_file:\n cfg = json.load(config_file)\n\n# Configuring preferences\ndiscordKey = cfg['discord']\n# The first entry in validInputChannels is the one DMs and censor warnings are sent\nvalidInputChannels = cfg['channels']['listening']\n# Channel to save notes/warns/etc\nlogChannel = cfg['channels']['log']\n# Channel to save system logs - leaves, bans, joins, etc\nsystemLog = cfg['channels']['syslog']\nvalidRoles = cfg['roles']\n\nsendBanDM = (cfg['DM']['ban'].upper() == \"ON\")\nsendWarnDM = (cfg['DM']['warn'].upper() == \"ON\")\n\n# Determine if this is a debugging instance\ndebugBot = (cfg['debug'].upper() == \"TRUE\")\ndebugging = False\n\nclient = discord.Client()\nstartTime = 0\n\ncharLimit = 2000\n\n# Event hunt object\nhunter = Hunter()\n\n# Notes on database structure:\n# Most of the columns are self explanitory\n# num column is the category of the infraction\n# 0: Ban\n# >0: The number of the warning\n# -1: Note\n# -2: Kick\n# -3: Unban\n\nsqlconn = sqlite3.connect(DATABASE_PATH)\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS badeggs (dbid INT PRIMARY KEY, id INT, username TEXT, num INT, date DATE, message TEXT, staff TEXT, post INT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS blocks (id TEXT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS staffLogs (staff TEXT PRIMARY KEY, bans INT, warns INT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS monthLogs (month TEXT PRIMARY KEY, bans INT, warns INT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS hunters (id INT PRIMARY KEY, username TEXT, count INT);\")\nsqlconn.commit()\nsqlconn.close()\n\nwarnThreshold = 3\nreviewThreshold = 6 # In months\n\n# Containers to store needed information in memory\nrecentBans = {}\nblockList = []\nrecentReply = None\n\nhelpInfo = {\n '$WARN': '`$warn USER reason`',\n '$BAN': '`$ban USER reason`',\n '$UNBAN': '`$unban USER reason`',\n '$KICK': '`$kick USER reason`',\n '$SEARCH': '`$search USER`',\n '$NOTE': '`$note USER message`',\n '$REMOVE': '`$remove USER [num]`',\n '$BLOCK': '`$block USER`',\n '$UNBLOCK': '`$unblock USER`',\n '$REPLY': '`$reply 
USER`',\n '$EDIT': '`$edit USER [num] new_message`'\n}\n\n# This is basically a makeshift enum\nclass LogTypes:\n UNBAN = -3\n KICK = -2\n NOTE = -1\n BAN = 0\n WARN = 1\n\n# Searches the database for the specified user, given a message\n# m: Discord message object\nasync def userSearch(m):\n try:\n user = User(m, recentBans)\n except User.MessageError:\n await m.channel.send(\"I wasn't able to find a user anywhere based on that message. `$search USER`\")\n return\n\n searchResults = user.search()\n try:\n username = user.getName(recentBans)\n if searchResults == []:\n await m.channel.send(\"User {} was not found in the database\\n\".format(username))\n return\n except User.MessageError:\n await m.channel.send(\"That user was not found in the database or the server\\n\")\n return\n\n noteTotal = 0\n criticizeNotes = True\n out = \"User {} was found with the following infractions\\n\".format(username)\n for index, item in enumerate(searchResults):\n n = \"{}. \".format(index+1)\n if item[1] == LogTypes.BAN:\n n += \"[{}] **{}** - Banned by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n elif item[1] == LogTypes.NOTE:\n n += \"[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n noteTotal += 1\n elif item[1] == LogTypes.KICK:\n n += \"[{}] **{}** - Kicked by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n elif item[1] == LogTypes.UNBAN:\n n += \"[{}] **{}** - Unbanned by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n else: # LogTypes.WARN\n n += \"[{}] **{}** - Warning #{} by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[1], item[4], item[3])\n criticizeNotes = False\n\n if item[1] >= warnThreshold:\n n += \"They have received {} warnings, it is recommended that they be banned.\\n\".format(warnThreshold)\n\n if len(out) + len(n) < charLimit:\n out += n\n else:\n await m.channel.send(out)\n out = n\n\n await m.channel.send(out)\n\n# Note a warn or ban for a user\n# m: Discord message object\nasync def logUser(m, state):\n try:\n user = User(m, recentBans)\n except User.MessageError:\n if state == LogTypes.NOTE:\n await m.channel.send(\"I wasn't able to understand that message: `$note USER`\")\n else:\n await m.channel.send(\"I wasn't able to understand that message: `$log USER`\")\n return\n\n sqlconn = sqlite3.connect(DATABASE_PATH)\n if state == LogTypes.WARN:\n count = sqlconn.execute(\"SELECT COUNT(*) FROM badeggs WHERE id=? 
AND num > 0\", [user.id]).fetchone()[0] + 1\n else:\n count = state\n globalcount = sqlconn.execute(\"SELECT COUNT(*) FROM badeggs\").fetchone()[0]\n currentTime = datetime.datetime.utcnow()\n\n try:\n username = user.getName(recentBans)\n except User.MessageError:\n username = \"ID: \" + str(user.id)\n await m.channel.send(\"I wasn't able to find a username for that user, but whatever, I'll do it anyway.\")\n\n mes = Utils.parseMessage(m.content, username)\n if len(m.attachments) != 0:\n for item in m.attachments:\n mes += '\\n{}'.format(item.url)\n\n if mes == \"\":\n await m.channel.send(\"Please give a reason for why you want to log them.\")\n return\n\n params = [globalcount + 1, user.id, username, count, currentTime, mes, m.author.name]\n\n # Generate message for log channel\n import Visualize\n if state == LogTypes.BAN:\n logMessage = \"[{}] **{}** - Banned by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], m.author.name, mes)\n Visualize.updateCache(sqlconn, m.author.name, (1, 0), Utils.formatTime(currentTime))\n elif state == LogTypes.WARN:\n logMessage = \"[{}] **{}** - Warning #{} by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], count, m.author.name, mes)\n Visualize.updateCache(sqlconn, m.author.name, (0, 1), Utils.formatTime(currentTime))\n elif state == LogTypes.KICK:\n logMessage = \"[{}] **{}** - Kicked by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], m.author.name, mes)\n elif state == LogTypes.UNBAN:\n def unban_check(check_mes):\n if check_mes.author == m.author and check_mes.channel == m.channel:\n # The API is stupid, returning a boolean will keep the check open, you have to return something non-false\n if check_mes.content.upper() == 'YES' or check_mes.content.upper() == 'Y':\n return 'Y'\n else:\n return 'N'\n\n # In the event of an unban, we need to first\n # A. Ask if they are sure they meant to do this\n await m.channel.send(\"In order to log an unban, all old logs will be removed. Are you sure? Y/[N]\")\n check = await client.wait_for('message', check=unban_check, timeout=10.0)\n # I have no idea why this returns a message and not just 'Y'\n if check.content.upper() == 'Y':\n # B. If so, clear out all previous logs\n await m.channel.send(\"Very well, removing all old logs to unban\")\n logs = user.search()\n for log in logs:\n sqlconn.execute(\"REPLACE INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\", [log[5]])\n\n # C. Proceed with the unbanning\n logMessage = \"[{}] **{}** - Unbanned by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], m.author.name, mes)\n Visualize.updateCache(sqlconn, m.author.name, (-1, 0), Utils.formatTime(currentTime))\n else:\n await m.channel.send(\"Unban aborted.\")\n sqlconn.close()\n return\n else: # LogTypes.NOTE\n noteCount = sqlconn.execute(\"SELECT COUNT(*) FROM badeggs WHERE id=? AND num = -1\", [user.id]).fetchone()[0] + 1\n logMessage = \"Note #{} made for {}\".format(noteCount, username)\n\n await m.channel.send(logMessage)\n\n # Send ban recommendation, if needed\n if (state == LogTypes.WARN and count >= warnThreshold):\n await m.channel.send(\"This user has received {} warnings or more. 
It is recommended that they be banned.\".format(warnThreshold))\n\n logMesID = 0\n if state != LogTypes.NOTE:\n # Send message to log channel\n try:\n chan = client.get_channel(logChannel)\n logMes = await chan.send(logMessage)\n logMesID = logMes.id\n except discord.errors.InvalidArgument:\n await m.channel.send(\"The logging channel has not been set up in `config.json`. In order to have a visual record, please specify a channel ID.\")\n\n # Send a DM to the user\n try:\n u = user.getMember()\n if u != None:\n DMchan = u.dm_channel\n if DMchan == None:\n DMchan = await u.create_dm()\n\n if state == LogTypes.BAN and sendBanDM:\n await DMchan.send(\"Hi there! You've been banned from the Stardew Valley Discord for violating the rules: `{}`. If you have any questions, you can send a message to the moderators via the sidebar at , and they'll forward it to us.\".format(mes))\n elif state == LogTypes.WARN and sendWarnDM:\n await DMchan.send(\"Hi there! You received warning #{} in the Stardew Valley Discord for violating the rules: `{}`. Please review <#445729591533764620> and <#445729663885639680> for more info. If you have any questions, you can reply directly to this message to contact the staff.\".format(count, mes))\n elif state == LogTypes.KICK and sendBanDM:\n await DMchan.send(\"Hi there! You've been kicked from the Stardew Valley Discord for violating the following reason: `{}`. If you have any questions, you can send a message to the moderators via the sidebar at , and they'll forward it to us.\".format(mes))\n\n # I don't know if any of these are ever getting tripped\n except discord.errors.HTTPException as e:\n await m.channel.send(\"ERROR: While attempting to DM, there was an unexpected error. Tell aquova this: {}\".format(e))\n except discord.errors.Forbidden:\n await m.channel.send( \"ERROR: I am not allowed to DM the user. It is likely that they are not accepting DM's from me.\")\n except discord.errors.NotFound:\n await m.channel.send(\"ERROR: I was unable to find the user to DM. 
I'm unsure how this can be the case, unless their account was deleted\")\n\n # Update database\n params.append(logMesID)\n sqlconn.execute(\"INSERT INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", params)\n sqlconn.commit()\n sqlconn.close()\n\n# Removes last database entry for specified user\n# m: Discord message object\n# edit: Boolean, signifies if this is a deletion or an edit\nasync def removeError(m, edit):\n try:\n user = User(m, recentBans)\n except User.MessageError:\n if edit:\n await m.channel.send(\"I wasn't able to understand that message: `$remove USER [num] new_message`\")\n else:\n await m.channel.send(\"I wasn't able to understand that message: `$remove USER [num]`\")\n return\n\n # Needed for multi-word usernames\n try:\n username = user.getName(recentBans)\n except User.MessageError:\n username = str(user.id)\n\n mes = Utils.parseMessage(m.content, username)\n if mes == \"\":\n if edit:\n await m.channel.send(\"You need to specify an edit message\")\n return\n else:\n mes = \"0\"\n\n try:\n index = int(mes.split(\" \")[0]) - 1\n mes = Utils.strip(mes)\n except (IndexError, ValueError):\n index = -1\n\n # Find most recent entry in database for specified user\n sqlconn = sqlite3.connect(DATABASE_PATH)\n searchResults = sqlconn.execute(\"SELECT dbid, id, username, num, date, message, staff, post FROM badeggs WHERE id=?\", [user.id]).fetchall()\n\n if searchResults == []:\n await m.channel.send(\"I couldn't find that user in the database\")\n elif (index > len(searchResults) - 1) or index < -1:\n await m.channel.send(\"I can't modify item number {}, there aren't that many for this user\".format(index+1))\n else:\n item = searchResults[index]\n import Visualize\n if edit:\n if item[3] == LogTypes.NOTE:\n currentTime = datetime.datetime.utcnow()\n # Make a copy of the original log, then modify a few fields\n params = list(item)\n params[4] = currentTime\n params[5] = mes\n params[6] = m.author.name\n sqlconn.execute(\"REPLACE INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", params)\n out = \"The following log was edited:\\n[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n out += \"The log now reads as follows:\\n[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(params[4]), params[2], params[6], params[5])\n await m.channel.send(out)\n\n sqlconn.commit()\n sqlconn.close()\n return\n else:\n await m.channel.send(\"You can only edit notes for now\")\n sqlconn.close()\n return\n\n # Everything after here is deletion\n sqlconn.execute(\"REPLACE INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\", [item[0]])\n out = \"The following log was deleted:\\n\"\n\n if item[3] == LogTypes.BAN:\n out += \"[{}] **{}** - Banned by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n Visualize.updateCache(sqlconn, item[6], (-1, 0), Utils.formatTime(item[4]))\n elif item[3] == LogTypes.NOTE:\n out += \"[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n elif item[3] == LogTypes.UNBAN:\n out += \"[{}] **{}** - Unbanned by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n Visualize.updateCache(sqlconn, item[6], (1, 0), Utils.formatTime(item[4]))\n elif item[3] == LogTypes.KICK:\n out += \"[{}] **{}** - Kicked by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], 
item[5])\n else: # LogTypes.WARN\n out += \"[{}] **{}** - Warning #{} by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[3], item[6], item[5])\n Visualize.updateCache(sqlconn, item[6], (0, -1), Utils.formatTime(item[4]))\n await m.channel.send(out)\n\n # Search logging channel for matching post, and remove it\n if item[7] != 0:\n chan = client.get_channel(logChannel)\n m = await chan.fetch_message(item[7])\n await m.delete()\n sqlconn.commit()\n sqlconn.close()\n\n# Prevents DM from a specific user from being forwarded\n# message: Discord message object\n# block: Boolean, true for block, false for unblock\nasync def blockUser(m, block):\n global blockList\n try:\n user = User(m, recentBans)\n except User.MessageError:\n await m.channel.send(\"I wasn't able to understand that message: `$block USER`\")\n return\n\n sqlconn = sqlite3.connect(DATABASE_PATH)\n if block:\n if user.id in blockList:\n await m.channel.send(\"Um... That user was already blocked...\")\n else:\n sqlconn.execute(\"INSERT INTO blocks (id) VALUES (?)\", [user.id])\n blockList.append(user.id)\n await m.channel.send(\"I have now blocked {}. Their messages will no longer display in chat, but they will be logged for later review.\".format(user.id))\n else:\n if user.id not in blockList:\n await m.channel.send(\"That user hasn't been blocked...\")\n else:\n sqlconn.execute(\"DELETE FROM blocks WHERE id=?\", [user.id])\n blockList.remove(user.id)\n await m.channel.send(\"I have now unblocked {}. You will once again be able to hear their dumb bullshit in chat.\".format(user.id))\n sqlconn.commit()\n sqlconn.close()\n\n# Sends a private message to the specified user\nasync def reply(m):\n if m.content.split(\" \")[1] == \"^\":\n if recentReply != None:\n u = recentReply\n else:\n await m.channel.send(\"Sorry, I have no previous user stored. Gotta do it the old fashioned way.\")\n return\n else:\n try:\n user = User(m, recentBans)\n except User.MessageError:\n await m.channel.send(\"I wasn't able to understand that message: `$reply USER`\")\n return\n\n u = user.getMember()\n if u == None:\n await m.channel.send(\"Sorry, but they need to be in the server for me to message them\")\n return\n try:\n mes = Utils.removeCommand(m.content)\n if len(m.attachments) != 0:\n for item in m.attachments:\n mes += '\\n{}'.format(item.url)\n ts = m.created_at.strftime('%Y-%m-%d %H:%M:%S')\n uname = \"{}#{}\".format(u.name, u.discriminator)\n with open(\"private/DMs.txt\", 'a', encoding='utf-8') as openFile:\n openFile.write(\"{} - {} sent a DM to {}: {}\\n\".format(ts, m.author.name, uname, mes))\n\n DMchan = u.dm_channel\n if DMchan == None:\n DMchan = await u.create_dm()\n await DMchan.send(\"A message from the SDV staff: {}\".format(mes))\n await m.channel.send(\"Message sent to {}.\".format(uname))\n\n # I don't know if any of these are ever getting tripped\n except discord.errors.HTTPException as e:\n await m.channel.send(\"ERROR: While attempting to DM, there was an unexpected error. Tell aquova this: {}\".format(e))\n except discord.errors.Forbidden:\n await m.channel.send(\"ERROR: I am not allowed to DM the user. It is likely that they are not accepting DM's from me.\")\n except discord.errors.NotFound:\n await m.channel.send(\"ERROR: I was unable to find the user to DM. 
I'm unsure how this can be the case, unless their account was deleted\")\n\nasync def notebook(m):\n sqlconn = sqlite3.connect(DATABASE_PATH)\n allNotes = sqlconn.execute(\"SELECT * FROM badeggs WHERE num=-1\").fetchall()\n sqlconn.commit()\n sqlconn.close()\n\n with open(\"private/notes.txt\", \"w\") as f:\n for item in allNotes:\n note = \"[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n f.write(note)\n\n await m.channel.send(\"Your notes, as requested.\")\n with open(\"./private/notes.txt\", \"r\") as f:\n await m.channel.send(file=discord.File(f))\n\n\n# Posts the usernames of all users whose oldest logs are older than reviewThreshold\nasync def userReview(channel):\n # There's probably a clever way to have these first two arrays merged\n usernames = []\n ids = []\n tooNew = []\n sqlconn = sqlite3.connect(DATABASE_PATH)\n # Reverse order so newest logs are checked/eliminated first\n allLogs = sqlconn.execute(\"SELECT id, username, date, num FROM badeggs WHERE num > -1\").fetchall()[::-1]\n\n now = datetime.datetime.now()\n for log in allLogs:\n # Don't want to list users who have been banned\n if log[3] == 0:\n tooNew.append(log[0])\n if log[0] not in ids and log[0] not in tooNew:\n day = log[2].split(\" \")[0]\n dateval = datetime.datetime.strptime(day, \"%Y-%m-%d\")\n testDate = dateval + datetime.timedelta(days=30*reviewThreshold)\n if testDate < now:\n ids.append(log[0])\n usernames.append(log[1])\n else:\n tooNew.append(log[0])\n\n sqlconn.close()\n\n mes = \"These users had their most recent log greater than {} months ago.\\n\".format(reviewThreshold)\n # Reverse order so oldest are first\n for user in usernames[::-1]:\n # This gets past Discord's 2000 char limit\n if len(mes) + len(user) + 2 < charLimit:\n mes += \"`{}`, \".format(user)\n else:\n await channel.send(mes)\n mes = \"`{}`, \".format(user)\n\n await channel.send(mes)\n\nasync def uptime(channel):\n currTime = datetime.datetime.now()\n delta = currTime - startTime\n hours, remainder = divmod(delta.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n mes = \"I have been running for {} days, {} hours, and {} minutes\".format(delta.days, hours, minutes)\n\n await channel.send(mes)\n\n@client.event\nasync def on_ready():\n global blockList\n global startTime\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n\n startTime = datetime.datetime.now()\n\n sqlconn = sqlite3.connect(DATABASE_PATH)\n blockDB = sqlconn.execute(\"SELECT * FROM blocks\").fetchall()\n blockList = [str(x[0]) for x in blockDB]\n sqlconn.close()\n\n activity_object = discord.Activity(name=\"for your reports!\", type=discord.ActivityType.watching)\n await client.change_presence(activity=activity_object)\n\n@client.event\nasync def on_member_update(before, after):\n if debugBot:\n return\n if before.nick != after.nick:\n if after.nick == None:\n mes = \"**{}#{}** has reset their username\".format(after.name, after.discriminator)\n else:\n new = after.nick\n mes = \"**{}#{}** is now known as `{}`\".format(after.name, after.discriminator, after.nick)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n elif before.roles != after.roles:\n # Temporary debugging\n try:\n if len(before.roles) > len(after.roles):\n missing = [r for r in before.roles if r not in after.roles]\n mes = \"**{}#{}** had the role `{}` removed.\".format(after.name, after.discriminator, missing[0])\n else:\n newRoles = [r for r in after.roles if r not in before.roles]\n mes = \"**{}#{}** had the 
role `{}` added.\".format(after.name, after.discriminator, newRoles[0])\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n except IndexError as e:\n print(\"Error: Same role indexing issue as before.\")\n print(\"Old roles: {}\".format(before.roles))\n print(\"New roles: {}\".format(after.roles))\n print(\"Error message: {}\".format(e))\n\n@client.event\nasync def on_member_ban(server, member):\n global recentBans\n if debugBot:\n return\n recentBans[member.id] = \"{}#{} : {}\".format(member.name, member.discriminator, member.id)\n mes = \"**{}#{} ({})** has been banned.\".format(member.name, member.discriminator, member.id)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_member_remove(member):\n # I know they aren't banned, but still we may want to log someone after they leave\n global recentBans\n if debugBot:\n return\n recentBans[member.id] = \"{}#{} : {}\".format(member.name, member.discriminator, member.id)\n mes = \"**{}#{} ({})** has left\".format(member.name, member.discriminator, member.id)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\n# Needs to be raw reaction so it can still get reactions after reboot\nasync def on_raw_reaction_add(payload):\n if debugBot:\n return\n if payload.message_id == cfg[\"gatekeeper\"][\"message\"] and payload.emoji.name == cfg[\"gatekeeper\"][\"emoji\"]:\n # Raw payload just returns IDs, so need to iterate through connected servers to get server object\n # Since each bouncer instance will only be in one server, it should be quick.\n # If bouncer becomes general purpose (god forbid), may need to rethink this\n try:\n server = [x for x in client.guilds if x.id == payload.guild_id][0]\n new_role = discord.utils.get(server.roles, id=cfg[\"gatekeeper\"][\"role\"])\n target_user = discord.utils.get(server.members, id=payload.user_id)\n await target_user.add_roles(new_role)\n except IndexError as e:\n print(\"Something has seriously gone wrong.\")\n print(\"Error: {}\".format(e))\n\n@client.event\nasync def on_message_delete(message):\n if debugBot:\n return\n # Don't allow bouncer to react to its own deleted messages\n if message.author.id == client.user.id:\n return\n mes = \"**{}#{}** deleted in <#{}>: `{}`\".format(message.author.name, message.author.discriminator, message.channel.id, message.content)\n if message.attachments != []:\n for item in message.attachments:\n mes += '\\n' + item.url\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_message_edit(before, after):\n if debugBot:\n return\n # This is to prevent embedding of content from triggering the log\n if before.content == after.content:\n return\n try:\n if len(before.content) + len(after.content) > 200:\n mes1 = \"**{}#{}** modified in <#{}>: `{}`\".format(before.author.name, before.author.discriminator, before.channel.id, before.content)\n mes2 = \"to `{}`\".format(after.content)\n if before.attachments != []:\n for item in before.attachments:\n mes1 += '\\n' + item.url\n if after.attachments != []:\n for item in after.attachments:\n mes2 += '\\n' + item.url\n chan = client.get_channel(systemLog)\n await chan.send(mes1)\n await chan.send(mes2)\n else:\n mes = \"**{}#{}** modified in <#{}>: `{}` to `{}`\".format(before.author.name, before.author.discriminator, before.channel.id, before.content, after.content)\n if after.attachments != []:\n for item in after.attachments:\n mes += '\\n' + item.url\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n 
except discord.errors.HTTPException as e:\n print(\"Unknown error with editing message. This message was unable to post for this reason: {}\\n\".format(e))\n\n@client.event\nasync def on_member_join(member):\n if debugBot:\n return\n mes = \"**{}#{} ({})** has joined\".format(member.name, member.discriminator, member.id)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_voice_state_update(member, before, after):\n if debugBot:\n return\n if (after.channel == None):\n mes = \"**{}#{}** has left voice channel {}\".format(member.name, member.discriminator, before.channel.name)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n elif (before.channel == None):\n mes = \"**{}#{}** has joined voice channel {}\".format(member.name, member.discriminator, after.channel.name)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_reaction_add(reaction, user):\n if user.id == client.user.id:\n return\n\n if hunter.getWatchedChannel() == reaction.message.channel.id:\n hunter.addReaction(user)\n\n@client.event\nasync def on_message(message):\n global recentReply\n global debugging\n if message.author.id == client.user.id:\n return\n try:\n # Enable debugging\n if message.content.startswith(\"$debug\") and message.author.id == cfg['owner']:\n if not debugBot:\n debugging = not debugging\n await message.channel.send(\"Debugging {}\".format(\"enabled\" if debugging else \"disabled\"))\n return\n\n # If debugging, the real bot should ignore the owner\n if debugging and message.author.id == cfg['owner']:\n return\n # The debug bot should only ever obey the owner\n elif debugBot and message.author.id != cfg['owner']:\n return\n\n # If they sent a private DM to bouncer\n if type(message.channel) is discord.channel.DMChannel:\n # Regardless of blocklist or not, log their messages\n ts = message.created_at.strftime('%Y-%m-%d %H:%M:%S')\n\n # Store who the most recent user was, for $reply ^\n recentReply = message.author\n\n mes = \"**{}#{}** (ID: {}): {}\".format(message.author.name, message.author.discriminator, message.author.id, message.content)\n if message.attachments != []:\n for item in message.attachments:\n mes += '\\n' + item.url\n\n with open(\"private/DMs.txt\", 'a', encoding='utf-8') as openFile:\n openFile.write(\"{} - {}\\n\".format(ts, mes))\n\n if str(message.author.id) not in blockList:\n chan = client.get_channel(validInputChannels[0])\n await chan.send(mes)\n\n # Temporary - notify if UB3R-BOT has removed something on its word censor\n elif (message.author.id == 85614143951892480 and message.channel.id == 233039273207529472) and (\"Word Censor Triggered\" in message.content) and not debugBot:\n mes = \"Uh oh, looks like the censor might've been tripped.\\nhttps://discordapp.com/channels/{}/{}/{}\".format(message.guild.id, message.channel.id, message.id)\n chan = client.get_channel(validInputChannels[0])\n await chan.send(mes)\n\n # If a user pings bouncer\n elif client.user in message.mentions:\n mes = \"**{}#{}** (ID: {}) pinged me in <#{}>: {}\".format(message.author.name, message.author.discriminator, message.author.id, message.channel.id, message.content)\n if message.attachments != []:\n for item in message.attachments:\n mes += '\\n' + item.url\n mes += \"\\nhttps://discordapp.com/channels/{}/{}/{}\".format(message.guild.id, message.channel.id, message.id)\n chan = client.get_channel(validInputChannels[0])\n await chan.send(mes)\n\n elif Utils.checkRoles(message.author, validRoles):\n # Special 
case for the egg hunt functions. We want only permitted roles to access them,\n # but their channel will always be new, so allow any channel access\n if message.content.startswith(\"$starthunt\"):\n words = message.clean_content.split(\" \")\n if len(words) != 2:\n await message.channel.send(\"Invalid command. `$starthunt EMOJI`\")\n return\n hunter.setWatchedChannel(message.channel)\n mes = await message.channel.send(\"{}\".format(words[1]))\n try:\n emoji = words[1].split(\":\")[1]\n emojiObject = [x for x in message.guild.emojis if x.name == emoji][0]\n await mes.add_reaction(emojiObject)\n except IndexError:\n emoji = words[1].replace(\":\", \"\")\n await mes.add_reaction(emoji)\n await message.delete()\n elif message.content.startswith(\"$endhunt\"):\n hunter.stopWatching()\n await message.channel.send(\"I hope your hunt has been victorious!\")\n elif message.content.startswith(\"$gethunt\"):\n hunter.export()\n with open(\"./private/hunters.csv\", \"r\") as f:\n await message.channel.send(file=discord.File(f))\n\n # If they have privileges to access bouncer functions\n elif message.channel.id in validInputChannels:\n # This if/elif thing isn't ideal, but it's by far the simplest way\n if message.content.upper() == \"$HELP\":\n helpMes = (\n \"Issue a warning: `$warn USER message`\\n\"\n \"Log a ban: `$ban USER reason`\\n\"\n \"Log an unbanning: `$unban USER reason`\\n\"\n \"Log a kick: `$kick USER reason`\\n\"\n \"Search for a user: `$search USER`\\n\"\n \"Create a note about a user: `$note USER message`\\n\"\n \"Show all notes: `$notebook`\\n\"\n \"Remove a user's log: `$remove USER index(optional)`\\n\"\n \"Edit a user's note: `$edit USER index(optional) new_message`\\n\"\n \"Stop a user from sending DMs to us: `$block/$unblock USERID`\\n\"\n \"Reply to a user in DMs: `$reply USERID` - To reply to the most recent DM: `$reply ^`\\n\"\n \"Plot warn/ban stats: `$graph`\\nReview which users have old logs: `$review`\\n\"\n \"View bot uptime: `$uptime`\\n\"\n \"DMing users when they are banned is `{}`\\n\"\n \"DMing users when they are warned is `{}`\".format(sendBanDM, sendWarnDM)\n )\n await message.channel.send(helpMes)\n elif message.content.upper() == \"$NOTEBOOK\":\n await notebook(message)\n elif message.content.upper() in helpInfo.keys():\n await message.channel.send(helpInfo[message.content.upper()])\n elif message.content.upper() == \"$UPDATE\":\n if message.author.id == cfg[\"owner\"]:\n await message.channel.send(\"Updating and restarting...\")\n subprocess.call([\"git\", \"pull\"])\n sys.exit()\n else:\n await message.channel.send(\"Who do you think you are.\")\n return\n elif message.content.upper() == \"$GRAPH\":\n import Visualize # Import here to avoid debugger crashing from matplotlib issue\n Visualize.genUserPlot()\n Visualize.genMonthlyPlot()\n with open(\"./private/user_plot.png\", 'rb') as f:\n await message.channel.send(file=discord.File(f))\n\n with open(\"./private/month_plot.png\", 'rb') as f:\n await message.channel.send(file=discord.File(f))\n elif message.content.upper() == \"$REVIEW\":\n await userReview(message.channel)\n elif message.content.upper() == \"$UPTIME\":\n await uptime(message.channel)\n elif message.content.upper() == \"$GETROLES\":\n output = await Utils.fetchRoleList(message.guild)\n await message.channel.send(output)\n elif message.content.startswith(\"$search\"):\n await userSearch(message)\n elif message.content.startswith(\"$warn\"):\n await logUser(message, LogTypes.WARN)\n elif message.content.startswith(\"$ban\"):\n await
 logUser(message, LogTypes.BAN)\n elif message.content.startswith(\"$kick\"):\n await logUser(message, LogTypes.KICK)\n elif message.content.startswith(\"$unban\"):\n await logUser(message, LogTypes.UNBAN)\n elif message.content.startswith(\"$remove\"):\n await removeError(message, False)\n elif message.content.startswith(\"$block\"):\n await blockUser(message, True)\n elif message.content.startswith(\"$unblock\"):\n await blockUser(message, False)\n elif message.content.startswith(\"$reply\"):\n await reply(message)\n elif message.content.startswith(\"$note\"):\n await logUser(message, LogTypes.NOTE)\n elif message.content.startswith(\"$edit\"):\n await removeError(message, True)\n\n # Debug functions only to be executed by the owner\n elif message.content.upper() == \"$DUMPBANS\" and message.author.id == cfg[\"owner\"]:\n output = await Utils.dumpbans(recentBans)\n await message.channel.send(output)\n\n except discord.errors.HTTPException as e:\n # .format() was missing, so the \"{}\" placeholder was printed verbatim instead of the error\n print(\"HTTPException: {}\".format(e))\n\nclient.run(discordKey)\n", "sub_path": "bouncer.py", "file_name": "bouncer.py", "file_ext": "py", "file_size_in_byte": 36892, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.Client", "line_number": 32, "usage_type": "call"}, {"api_name": "Hunt.Hunter", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 49, "usage_type": "call"}, {"api_name": "Utils.DATABASE_PATH", "line_number": 49, "usage_type": "argument"}, {"api_name": "User.User", "line_number": 92, "usage_type": "call"}, {"api_name": "User.User.MessageError", "line_number": 93, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 93, "usage_type": "name"}, {"api_name": "User.User.MessageError", "line_number": 103, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 103, "usage_type": "name"}, {"api_name": "Utils.formatTime", "line_number": 113, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 115, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 118, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 120, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 122, "usage_type": "call"}, {"api_name": "User.User", "line_number": 140, "usage_type": "call"}, {"api_name": "User.User.MessageError", "line_number": 141, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 141, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 148, "usage_type": "call"}, {"api_name": "Utils.DATABASE_PATH", "line_number": 148, "usage_type": "argument"}, {"api_name": "datetime.datetime.utcnow", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 154, "usage_type": "attribute"}, {"api_name": "User.User.MessageError", "line_number": 158, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 158, "usage_type": "name"}, {"api_name": "Utils.parseMessage", "line_number": 162, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 176, "usage_type": "call"}, {"api_name": "Visualize.updateCache", "line_number": 177, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 177, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 179, "usage_type": "call"}, {"api_name": "Visualize.updateCache", "line_number": 180, "usage_type": "call"}, {"api_name": 
"Utils.formatTime", "line_number": 180, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 182, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 205, "usage_type": "call"}, {"api_name": "Visualize.updateCache", "line_number": 206, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 206, "usage_type": "call"}, {"api_name": "discord.errors", "line_number": 228, "usage_type": "attribute"}, {"api_name": "discord.errors", "line_number": 247, "usage_type": "attribute"}, {"api_name": "discord.errors", "line_number": 249, "usage_type": "attribute"}, {"api_name": "discord.errors", "line_number": 251, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 265, "usage_type": "call"}, {"api_name": "User.User.MessageError", "line_number": 266, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 266, "usage_type": "name"}, {"api_name": "User.User.MessageError", "line_number": 276, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 276, "usage_type": "name"}, {"api_name": "Utils.parseMessage", "line_number": 279, "usage_type": "call"}, {"api_name": "Utils.strip", "line_number": 289, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 294, "usage_type": "call"}, {"api_name": "Utils.DATABASE_PATH", "line_number": 294, "usage_type": "argument"}, {"api_name": "datetime.datetime.utcnow", "line_number": 306, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 306, "usage_type": "attribute"}, {"api_name": "Utils.formatTime", "line_number": 313, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 314, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 330, "usage_type": "call"}, {"api_name": "Visualize.updateCache", "line_number": 331, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 331, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 333, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 335, "usage_type": "call"}, {"api_name": "Visualize.updateCache", "line_number": 336, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 336, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 338, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 340, "usage_type": "call"}, {"api_name": "Visualize.updateCache", "line_number": 341, "usage_type": "call"}, {"api_name": "Utils.formatTime", "line_number": 341, "usage_type": "call"}, {"api_name": "User.User", "line_number": 358, "usage_type": "call"}, {"api_name": "User.User.MessageError", "line_number": 359, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 359, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 363, "usage_type": "call"}, {"api_name": "Utils.DATABASE_PATH", "line_number": 363, "usage_type": "argument"}, {"api_name": "User.User", "line_number": 391, "usage_type": "call"}, {"api_name": "User.User.MessageError", "line_number": 392, "usage_type": "attribute"}, {"api_name": "User.User", "line_number": 392, "usage_type": "name"}, {"api_name": "Utils.removeCommand", "line_number": 401, "usage_type": "call"}, {"api_name": "discord.errors", "line_number": 417, "usage_type": "attribute"}, {"api_name": "discord.errors", "line_number": 419, "usage_type": "attribute"}, {"api_name": "discord.errors", "line_number": 421, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 425, "usage_type": "call"}, 
{"api_name": "Utils.DATABASE_PATH", "line_number": 425, "usage_type": "argument"}, {"api_name": "Utils.formatTime", "line_number": 432, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 437, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 446, "usage_type": "call"}, {"api_name": "Utils.DATABASE_PATH", "line_number": 446, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 450, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 450, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 457, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 457, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 458, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 480, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 480, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 496, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 496, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 498, "usage_type": "call"}, {"api_name": "Utils.DATABASE_PATH", "line_number": 498, "usage_type": "argument"}, {"api_name": "discord.Activity", "line_number": 503, "usage_type": "call"}, {"api_name": "discord.ActivityType", "line_number": 503, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 567, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 567, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 568, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 568, "usage_type": "attribute"}, {"api_name": "discord.errors", "line_number": 615, "usage_type": "attribute"}, {"api_name": "discord.channel", "line_number": 669, "usage_type": "attribute"}, {"api_name": "Utils.checkRoles", "line_number": 704, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 728, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 759, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 760, "usage_type": "call"}, {"api_name": "Visualize.genUserPlot", "line_number": 766, "usage_type": "call"}, {"api_name": "Visualize.genMonthlyPlot", "line_number": 767, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 769, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 772, "usage_type": "call"}, {"api_name": "Utils.fetchRoleList", "line_number": 778, "usage_type": "call"}, {"api_name": "Utils.dumpbans", "line_number": 805, "usage_type": "call"}, {"api_name": "discord.errors", "line_number": 808, "usage_type": "attribute"}]} +{"seq_id": "573303021", "text": "import itertools as it\r\nimport time\r\n\r\nREPEATS = 7\r\nDIGIT = '2'\r\nSIGNS = ('+', '-', '*', '/')\r\nRESULTS_FROM = 100\r\nRESULTS_TO = 2000\r\n\r\ndeclension = {\r\n 1: 'вариант',\r\n 2: 'варианта',\r\n 3: 'варианта',\r\n 4: 'варианта',\r\n 5: 'вариантов',\r\n 6: 'вариантов',\r\n 7: 'вариантов',\r\n 8: 'вариантов',\r\n 9: 'вариантов',\r\n 0: 'вариантов',\r\n}\r\n\r\n\r\ndef find_combinations():\r\n \"\"\"Перебирает комбинации, проверяет их на условие.\"\"\"\r\n correct_variants = {}\r\n expand_signs = it.chain(SIGNS, ('',))\r\n signs_sets = it.combinations_with_replacement(expand_signs, REPEATS)\r\n copy_set, signs_sets = it.tee(signs_sets, 2)\r\n count_of_items = len(list(copy_set))\r\n counter = it.count()\r\n for signs_set in signs_sets:\r\n permut_sings 
= it.permutations(signs_set)\r\n for _ in permut_sings:\r\n test = ''.join(it.chain.from_iterable(it.product(_, DIGIT)))\r\n if test[0] not in ('*', '/'):\r\n res = eval(test)\r\n if res % 1 == 0 and RESULTS_FROM <= res <= RESULTS_TO: # условие\r\n res_s = str(int(res))\r\n if not correct_variants.get(res_s):\r\n correct_variants[res_s] = set()\r\n correct_variants[res_s].add(test)\r\n print(next(counter), 'из', count_of_items) # прогресс\r\n return correct_variants\r\n\r\n\r\ndef default_format(expression):\r\n \"\"\"Приводит вариант к универсальному формату, чтобы отсечь повторения.\"\"\"\r\n if expression[0] == '+':\r\n expression = expression[1:]\r\n prev, split_exp = 0, []\r\n for _num, _symbol in enumerate(expression[1:]):\r\n if _symbol == '+':\r\n split_exp.append(expression[prev:_num + 1])\r\n prev = _num + 2\r\n elif _symbol == '-':\r\n split_exp.append(expression[prev:_num + 1])\r\n prev = _num + 1\r\n split_exp.append(expression[prev:])\r\n for n, sub_split_exp in enumerate(split_exp):\r\n if sub_split_exp[0] == '-':\r\n minus = '-'\r\n sub_split_exp = sub_split_exp[1:]\r\n else:\r\n minus = ''\r\n prev, sub_list = 0, []\r\n for _num, _symbol in enumerate(sub_split_exp[1:]):\r\n if _symbol == '*' or _symbol == '/':\r\n if prev == 0:\r\n sub_list.append(f'*{sub_split_exp[prev:_num + 1]}')\r\n prev = _num + 1\r\n else:\r\n sub_list.append(sub_split_exp[prev:_num + 1])\r\n prev = _num + 1\r\n sub_list.append(sub_split_exp[prev:])\r\n sub_list.sort(key=lambda _: _.count('/'))\r\n sub_list.sort(key=len, reverse=True)\r\n split_exp[n] = f\"{minus}{''.join(sub_list)}\".replace('-*', '-')\r\n if split_exp[n][0] == '*':\r\n split_exp[n] = split_exp[n][1:]\r\n split_exp.sort(key=lambda _: _.count('/'))\r\n split_exp.sort(key=lambda _: _.count('-'))\r\n split_exp.sort(key=len, reverse=True)\r\n return '+'.join(split_exp).replace('+-', '-')\r\n\r\n\r\ndef delete_duplicates(variants):\r\n for _value in variants:\r\n new_value = set()\r\n for _variant in variants[_value]:\r\n new_value.add(default_format(_variant))\r\n variants[_value] = new_value\r\n return variants\r\n\r\n\r\ndef print_results(variants):\r\n for _value in variants:\r\n variants_count = len(variants[_value])\r\n print(f'{_value} имеет {variants_count} {declension[variants_count % 10]}')\r\n for _variant in variants[_value]:\r\n print(_variant)\r\n\r\n\r\nstart_moment = time.perf_counter()\r\nstart_time = time.process_time()\r\n\r\ngood_variants = find_combinations()\r\nperfect_variants = delete_duplicates(good_variants)\r\nprint_results(perfect_variants)\r\n\r\nstop_moment = time.perf_counter()\r\nstop_time = time.process_time()\r\nprint(f'Выполнилось за {stop_moment-start_moment} с по perf_counter')\r\nprint(f'Выполнилось за {(stop_time-start_time)} с по process_time')\r\n", "sub_path": "permutations.py", "file_name": "permutations.py", "file_ext": "py", "file_size_in_byte": 4163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "itertools.chain", "line_number": 27, "usage_type": "call"}, {"api_name": "itertools.combinations_with_replacement", "line_number": 28, "usage_type": "call"}, {"api_name": "itertools.tee", "line_number": 29, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 31, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 33, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 35, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 35, 
"usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 35, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 104, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 105, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 111, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "397899138", "text": "import gym\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom irlc.ex08.agent import Agent\nfrom irlc.ex13.buffer import FixSizeBuffer, BasicBuffer\nfrom irlc.ex13.keras_networks import KerasNetwork\nfrom irlc import cache_write, cache_read, cache_exists\nimport os\n\ndef linear_interp(maxval, minval, delay, miniter):\n \"\"\"\n maxval if episodes < minval\n interpolate between maxval to minval over a period of miniter\n return miniter else.\n \"\"\"\n return lambda steps, episodes: min(max([maxval- ((steps-delay)/miniter)*(maxval-minval), minval]), maxval)\n\n\n\"\"\" Test. not for serious use right now. \"\"\"\nclass DeepQAgent(Agent): #!f\n def __init__(self, env, network=None, buffer=None, gamma=0.99, epsilon=None, alpha=0.001, batch_size=32,\n replay_buffer_size=2000, replay_buffer_minreplay=500):\n epsilon = epsilon if callable(epsilon) else lambda steps, episodes: 0.2\n super().__init__(env, gamma=gamma, epsilon=epsilon)\n self.memory = BasicBuffer(replay_buffer_size) if buffer is None else buffer\n self.Q = network(env, trainable=True) if network else KerasNetwork(env, trainable=True, learning_rate=alpha)\n self.batch_size = batch_size\n self.replay_buffer_minreplay = replay_buffer_minreplay\n self.steps, self.episodes = 0, 0\n\n def pi(self, s):\n eps_ = self.epsilon(self.steps, self.episodes)\n return self.env.action_space.sample() if np.random.rand() < eps_ else np.argmax(self.Q(s[np.newaxis,...]))\n\n def train(self, s, a, r, sp, done=False):\n self.memory.push(s, a, r, sp, done)\n if len(self.memory) > self.replay_buffer_minreplay:\n self.experience_replay()\n self.steps, self.episodes = self.steps + 1, self.episodes + done\n\n def experience_replay(self):\n s,a,r,sp,done = self.memory.sample(self.batch_size)\n y = r[:,0] + self.gamma * np.max(self.Q(sp), axis=1) * (1-done)\n target = self.Q(s)\n target[range(len(a)), a] = y\n self.Q.fit(s, None, target)\n\n def save(self, path):\n self.Q.save(os.path.join(path, \"Q\"))\n cache_write(dict(steps=self.steps, episodes=self.episodes), os.path.join(path, \"agent.pkl\"))\n self.memory.save(os.path.join(path, \"memory.pkl\"))\n\n def load(self, path):\n if not cache_exists(os.path.join(path, \"agent.pkl\")):\n return False\n for k, v in cache_read(os.path.join(path, \"agent.pkl\")).items():\n self.__dict__[k] = v\n self.Q.load(os.path.join(path, \"Q\"))\n self.memory.load(os.path.join(path, \"memory.pkl\"))\n return True\n\n def __str__(self):\n return f\"basic_DQN{self.gamma}\"\n\n# epsilon_cartpole = lambda steps, episodes: max(.5 * .99**episodes, 0.01)\n# MAX_EPISODES = 200\n# from irlc.ex13.keras_dqn_agent import epsilon_cartpole\n# MAX_EPSILON = 1\n# MIN_EPSILON = 0.01\n# EPSILON_MIN_ITER = 5000\n# GAMMA = 0.95\n# BATCH_SIZE = 32\n# TAU = 0.08\n# POST_PROCESS_IMAGE_SIZE = (105, 80, 1)\n# DELAY_TRAINING = 300\n\ncartpole_dqn_options = dict(gamma=0.95, epsilon=linear_interp(maxval=1,minval=0.01,delay=300,miniter=5000), replay_buffer_minreplay=300,\n replay_buffer_size=500000)\n\ndef mk_cartpole():\n env = gym.make(\"CartPole-v0\")\n agent = DeepQAgent(env, 
**cartpole_dqn_options)\n return env, agent\n\nif __name__ == \"__main__\":\n env_id = \"CartPole-v0\"\n MAX_EPISODES = 100\n for j in range(1):\n # env = gym.make(env_id)\n # agent = DeepQAgent(env, epsilon=cartpole_dqn_options['epsilon'])\n # e2, a2 = mk_cartpole()\n env, agent = mk_cartpole()\n from irlc.ex08.agent import train\n ex = \"exps/dqn_sl\"\n MAX_EPISODES = 20\n train(env, agent, experiment_name=ex, num_episodes=MAX_EPISODES, saveload_model=True)\n from irlc import main_plot\n main_plot([\"dqnC\", ex], units=\"Unit\", estimator=None, smoothing_window=1)\n plt.show()\n # episode_rewards = mini_batch_train(env, agent, MAX_EPISODES, MAX_STEPS, BATCH_SIZE)\n", "sub_path": "irlc/ex13/deepq_agent.py", "file_name": "deepq_agent.py", "file_ext": "py", "file_size_in_byte": 4017, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "irlc.ex08.agent.Agent", "line_number": 20, "usage_type": "name"}, {"api_name": "irlc.ex13.buffer.BasicBuffer", "line_number": 25, "usage_type": "call"}, {"api_name": "irlc.ex13.keras_networks.KerasNetwork", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "irlc.cache_write", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "irlc.cache_exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "irlc.cache_read", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "gym.make", "line_number": 81, "usage_type": "call"}, {"api_name": "irlc.ex08.agent.train", "line_number": 96, "usage_type": "call"}, {"api_name": "irlc.main_plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "85117537", "text": "# -*- coding: UTF8 -*-\n'''\nCreated on 2012-10-24\n\n@author: hongchenzai\n'''\nfrom common import cache_file\nfrom component.multi_lang_name import *\nfrom component.ta.multi_lang_name_ta import MultiLangNameTa\nfrom component.ta.dictionary_ta import ShieldTA\nfrom component.guideinfo_signpost_uc import comp_guideinfo_signpost_uc\nSIGN_POST_TYPE_EXIT_NO = 1\nSIGN_POST_TYPE_SHIELD_ID = 
2\nSIGN_POST_TYPE_SHIELD_NUM = 3\nSIGN_POST_TYPE_NAME = 4\n\n\nclass comp_guideinfo_signpost_uc_ta(comp_guideinfo_signpost_uc):\n '''TomTom方面\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n comp_guideinfo_signpost_uc.__init__(self)\n\n def _Do(self):\n from component.ta.dictionary_ta import comp_dictionary_ta\n dict_ta = comp_dictionary_ta()\n dict_ta.set_language_code()\n # 名称、番号、出口番号\n self._make_signpost_element()\n # SignPost的link序\n self._make_path_link()\n # 合并以上所有的信息\n self._merge_all_info()\n return 0\n\n def _make_signpost_element(self):\n self.log.info('Start Make SignPost element.')\n # 注:有TTS音素的名称:4G, 6T, 4I, 9D, 4E, RN\n # 前五个种别做成名称,4E做成出口番号,RN,不收录\n sqlcmd = \"\"\"\n SELECT id::bigint,\n array_agg(seqnr) as seqnrs,\n array_agg(destseq) as destseqs,\n array_agg(infotyp) as infotyps,\n array_agg(rnpart) as rnparts,\n array_agg(txtcont) as names,\n array_agg(txtcontlc) as lang_codes,\n array_agg(phoneme_info) as phoneme_infos\n FROM (\n SELECT id, seqnr, destseq, infotyp,\n rnpart, txtcont, txtcontlc,\n mid_get_phoneme(id::bigint, 2128, 0, -- 2128: SignPost\n txtcont, txtcontlc, infotyp) as phoneme_info\n FROM org_si\n order by id, seqnr, destseq, rnpart\n ) AS A\n GROUP BY id;\n \"\"\"\n self.CreateTable2('mid_temp_signpost_element')\n temp_file_obj = cache_file.open('signpost_element') # 创建临时文件\n shield_obj = ShieldTA()\n signs = self.get_batch_data(sqlcmd)\n for sign_info in signs:\n sign_id = sign_info[0] # 看板id\n seqnr = sign_info[1] # 方向番号\n destseqs = sign_info[2] # 同个方向内顺序号\n infotyps = sign_info[3] # 种别\n # rnparts = sign_info[3] # 道路番号内部无素的序号\n names = sign_info[5] # 名称、番号\n lang_codes = sign_info[6] # 语种\n phoneme_infos = sign_info[7] # 音素\n sign_post = SignPostElementTa(sign_id, seqnr)\n shield_id = None\n exit_no = None\n route_no = None\n signpost_name = None\n for destseq, infotyp, name, lang_code, phoneme_info in \\\n zip(destseqs, infotyps, names, lang_codes, phoneme_infos):\n sign_type = self._get_sign_type(infotyp)\n if not sign_type: # 空不收录\n continue\n name_type = self._cvt_name_type(sign_type)\n # ## 处理各种种别的数据\n if SIGN_POST_TYPE_SHIELD_ID == sign_type: # shield id\n # 取得shield id\n shield_id = shield_obj.convert_shield_id(int(name))\n elif SIGN_POST_TYPE_SHIELD_NUM == sign_type: # shield number\n if shield_id: # 盾牌号存在\n shield_number = name\n if not lang_code or lang_code == 'UND':\n lang_code = MultiLangNameTa.get_lang_code_by_id(sign_id)\n route_no = MultiLangShield(shield_id,\n shield_number,\n lang_code\n )\n sign_post.add_route_no(route_no)\n shield_id = None\n else:\n # 盾牌和番号是成对出现的,而且盾牌在前\n self.log.error('No Shield id. 
sign_id=%d, seqnr=%d, destseq=%d'\n % (sign_id, seqnr, destseq))\n elif SIGN_POST_TYPE_EXIT_NO == sign_type: # 出口番号\n if exit_no:\n # 存在多个出口番号,其他出口番号做出口番号的别名\n alter_exit_no = MultiLangNameTa(sign_id,\n lang_code,\n name,\n name_type\n )\n # ## TTS音素\n phoneme_list, language_list = \\\n alter_exit_no.split_phoneme_info(phoneme_info)\n alter_exit_no.add_all_tts(phoneme_list,\n language_list,\n lang_code\n )\n exit_no.add_alter(alter_exit_no)\n else:\n # 第一个出口番号\n exit_no = MultiLangNameTa(sign_id,\n lang_code,\n name,\n name_type\n )\n # ## TTS音素\n phoneme_list, language_list = \\\n exit_no.split_phoneme_info(phoneme_info)\n exit_no.add_all_tts(phoneme_list,\n language_list,\n lang_code\n )\n elif SIGN_POST_TYPE_NAME == sign_type: # 方面名称\n if signpost_name:\n # 其他名称番号\n alter_sign_name = MultiLangNameTa(sign_id,\n lang_code,\n name,\n name_type\n )\n # ## TTS音素\n phoneme_list, language_list = \\\n alter_sign_name.split_phoneme_info(phoneme_info)\n alter_sign_name.add_all_tts(phoneme_list,\n language_list,\n lang_code\n )\n signpost_name.add_alter(alter_sign_name)\n else: # 第一个名称\n signpost_name = MultiLangNameTa(sign_id,\n lang_code,\n name,\n name_type\n )\n # ## TTS音素\n phoneme_list, language_list = \\\n signpost_name.split_phoneme_info(phoneme_info)\n signpost_name.add_all_tts(phoneme_list,\n language_list,\n lang_code\n )\n else:\n continue\n if route_no or exit_no or signpost_name:\n sign_post.set_exit_no(exit_no)\n sign_post.set_signpost_name(signpost_name)\n str_info = sign_post.to_string()\n if str_info:\n self._store_name_to_temp_file(temp_file_obj, str_info)\n else:\n self.log.error('No SignPost Info. sign_id=%d' % sign_id)\n else:\n self.log.warning('No route_no/exit_no/signpost_name.sign_id=%d'\n % (sign_id))\n # ## 把名称导入数据库\n temp_file_obj.seek(0)\n self.pg.copy_from2(temp_file_obj, 'mid_temp_signpost_element')\n self.pg.commit2()\n # close file\n #temp_file_obj.close()\n cache_file.close(temp_file_obj,True)\n self.log.info('End Make SignPost element.')\n return 0\n\n def _store_name_to_temp_file(self, file_obj, str_info):\n if file_obj:\n file_obj.write('%s\\n' % str_info)\n return 0\n\n def _get_sign_type(self, infotyp):\n d = {\"4E\": SIGN_POST_TYPE_EXIT_NO, # 出口番号\n \"6W\": SIGN_POST_TYPE_SHIELD_ID,\n \"RV\": SIGN_POST_TYPE_SHIELD_NUM,\n \"RN\": None, # route num(当前不收录)\n \"7G\": None, # direction(当前不收录)\n \"7V\": None, # Validity Direction(不收录)\n \"RJ\": None, # Route Name(不收录)\n \"7A\": None, # Street Name Type(不收录)\n \"4G\": SIGN_POST_TYPE_NAME, # Exit Name(做成名称)\n \"4I\": SIGN_POST_TYPE_NAME, # Other(做成名称)\n \"6T\": SIGN_POST_TYPE_NAME, # Street Name(做成名称)\n \"9D\": SIGN_POST_TYPE_NAME, # Place Name(做成名称)\n \"4H\": None # Pictogram(不收录)\n }\n # 值为空,不收录\n return d.get(infotyp)\n\n def _cvt_name_type(self, sign_type):\n if SIGN_POST_TYPE_EXIT_NO == sign_type:\n return 'office_name'\n if (SIGN_POST_TYPE_SHIELD_ID == sign_type or\n SIGN_POST_TYPE_SHIELD_NUM == sign_type):\n return 'shield'\n if SIGN_POST_TYPE_NAME == sign_type:\n return 'office_name'\n\n def _make_path_link(self):\n \"找到SignPost对应的link序 (如:in_link_id, out_link_id, passlid, passlid_cnt)\"\n # 同一个id中,\n # seqnr = 1的, 作为in_link_id\n # seqnr最大的,作为out_link_id\n # 处于以上两者之间的,作为pass link\n self.CreateTable2('mid_temp_signpost_passlink')\n # self.CreateFunction2('mid_make_signpost_path_links')\n temp_file_obj = cache_file.open('signpost_passlink') # 创建临时文件\n sqlcmd = \"\"\"\n SELECT id, array_agg(link_id) as link_ids\n from (\n SELECT id, seqnr, trpelid::bigint as link_id\n FROM org_sp\n order by id, seqnr\n ) as 
p\n group by id;\n \"\"\"\n pathes = self.get_batch_data(sqlcmd)\n for path_info in pathes:\n sign_id = path_info[0]\n link_ids = path_info[1]\n if not sign_id:\n self.log.error('Error Sign ID.')\n continue\n if link_ids:\n in_link_id = link_ids[0]\n out_link_id = link_ids[-1]\n pass_link = '|'.join([str(p) for p in link_ids[1:-1]])\n pass_link_cnt = len(link_ids[1:-1])\n\n str_info = '%d\\t%d\\t%d\\t%s\\t%d' % (sign_id,\n in_link_id,\n out_link_id,\n pass_link,\n pass_link_cnt\n )\n self._store_name_to_temp_file(temp_file_obj, str_info)\n\n # ## 把名称导入数据库\n temp_file_obj.seek(0)\n self.pg.copy_from2(temp_file_obj, 'mid_temp_signpost_passlink')\n self.pg.commit2()\n # close file\n #temp_file_obj.close()\n cache_file.close(temp_file_obj,True)\n return 0\n\n def _merge_all_info(self):\n \"把SignPost的数据合并起来(Name, Route_NO, Exit_No, Path_Link)\"\n # 注:这里要把mid_temp_signpost_element放最前面, 因为有看板的无素没有收录。\n sqlcmd = \"\"\"\n INSERT INTO signpost_uc_tbl(\n id, nodeid, inlinkid,\n outlinkid, passlid, passlink_cnt,\n sp_name, route_no1, route_no2,\n route_no3, route_no4, exit_no)\n (\n SELECT e.sign_id, org_sg.jnctid as nodeid, in_link_id,\n out_link_id, passlid, passlink_cnt,\n signpost_name, route_no1, route_no2,\n route_no3, route_no4, exit_no\n FROM mid_temp_signpost_element AS e\n LEFT JOIN mid_temp_signpost_passlink as p\n ON e.sign_id = p.sign_id\n LEFT JOIN org_sg -- NODE ID\n ON e.sign_id = org_sg.id\n order by e.sign_id\n );\n \"\"\"\n if self.pg.execute2(sqlcmd) == -1:\n return -1\n else:\n self.pg.commit2()\n return 0\n\n\nfrom component.rdf.guideinfo_singpost_uc_rdf import SignPostElement\n\n\n#######################################################################\n# ## SignPostElement\n#######################################################################\nclass SignPostElementTa(SignPostElement):\n def __init__(self, sign_id, dest_number):\n '''\n Constructor\n '''\n SignPostElement.__init__(self, sign_id, None)\n self._sign_id = sign_id\n self._dest_number = dest_number\n self._exit_no = None # 出口番号(MultLangName对象)\n\n def set_exit_no(self, exit_no_obj):\n if exit_no_obj:\n self._exit_no = exit_no_obj\n\n def to_string(self):\n if self._sign_id:\n rst_str = '%d' % self._sign_id\n else:\n return None\n if self._signpost_name:\n json_sp_name = self._signpost_name.json_format_dump()\n else:\n json_sp_name = ''\n rst_str += '\\t%s' % json_sp_name\n\n rn_cnt = 0\n while rn_cnt < len(self._route_no) and rn_cnt < self.MAX_ROUTE_NO:\n rst_str += '\\t%s' % self._route_no[rn_cnt].json_format_dump()\n rn_cnt += 1\n\n while rn_cnt < self.MAX_ROUTE_NO:\n rst_str += '\\t%s' % ''\n rn_cnt += 1\n # 加上出口番号\n if self._exit_no:\n json_exit_no = self._exit_no.json_format_dump()\n if not json_exit_no:\n json_exit_no = ''\n else:\n json_exit_no = ''\n rst_str += '\\t%s' % json_exit_no\n return rst_str\n", "sub_path": "Suntec/Road_Format13IDDN/source/V13/iDDN/Org2Middle/src/component/ta/guideinfo_signpost_uc_ta.py", "file_name": "guideinfo_signpost_uc_ta.py", "file_ext": "py", "file_size_in_byte": 14777, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "component.guideinfo_signpost_uc.comp_guideinfo_signpost_uc", "line_number": 18, "usage_type": "name"}, {"api_name": "component.guideinfo_signpost_uc.comp_guideinfo_signpost_uc.__init__", "line_number": 26, "usage_type": "call"}, {"api_name": "component.guideinfo_signpost_uc.comp_guideinfo_signpost_uc", "line_number": 26, "usage_type": "name"}, {"api_name": 
"component.ta.dictionary_ta.comp_dictionary_ta", "line_number": 30, "usage_type": "call"}, {"api_name": "common.cache_file.open", "line_number": 64, "usage_type": "call"}, {"api_name": "common.cache_file", "line_number": 64, "usage_type": "name"}, {"api_name": "component.ta.dictionary_ta.ShieldTA", "line_number": 65, "usage_type": "call"}, {"api_name": "component.ta.multi_lang_name_ta.MultiLangNameTa.get_lang_code_by_id", "line_number": 95, "usage_type": "call"}, {"api_name": "component.ta.multi_lang_name_ta.MultiLangNameTa", "line_number": 95, "usage_type": "name"}, {"api_name": "component.ta.multi_lang_name_ta.MultiLangNameTa", "line_number": 109, "usage_type": "call"}, {"api_name": "component.ta.multi_lang_name_ta.MultiLangNameTa", "line_number": 124, "usage_type": "call"}, {"api_name": "component.ta.multi_lang_name_ta.MultiLangNameTa", "line_number": 139, "usage_type": "call"}, {"api_name": "component.ta.multi_lang_name_ta.MultiLangNameTa", "line_number": 153, "usage_type": "call"}, {"api_name": "common.cache_file.close", "line_number": 184, "usage_type": "call"}, {"api_name": "common.cache_file", "line_number": 184, "usage_type": "name"}, {"api_name": "common.cache_file.open", "line_number": 228, "usage_type": "call"}, {"api_name": "common.cache_file", "line_number": 228, "usage_type": "name"}, {"api_name": "common.cache_file.close", "line_number": 265, "usage_type": "call"}, {"api_name": "common.cache_file", "line_number": 265, "usage_type": "name"}, {"api_name": "component.rdf.guideinfo_singpost_uc_rdf.SignPostElement", "line_number": 303, "usage_type": "name"}, {"api_name": "component.rdf.guideinfo_singpost_uc_rdf.SignPostElement.__init__", "line_number": 308, "usage_type": "call"}, {"api_name": "component.rdf.guideinfo_singpost_uc_rdf.SignPostElement", "line_number": 308, "usage_type": "name"}]} +{"seq_id": "409510818", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRead and plot EPA and Purple site\n\n\"\"\"\n__author__ = \"Nastaran Moghimi\"\n__copyright__ = \"Copyright 2017, UCAR/NOAA\"\n__license__ = \"GPL\"\n__version__ = \"1.0\"\n__email__ = \"nastarann.moghimi@gmail.com\"\n\n\n# Updated\n# Thu 13 Jun 2019 10:12:35 PM EDT read new json file and find stations in Cali\n#\n#\n\n\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os,sys\nimport datetime\nimport string\nimport pandas as pd\n#import geopandas as gpd\n#import fiona\n\nfrom collections import defaultdict\n\n#live maps\nimport folium\n#import mplleaflet\n\n#static maps\nimport cartopy.crs as ccrs\nfrom cartopy.mpl.gridliner import (LONGITUDE_FORMATTER,\n LATITUDE_FORMATTER)\nimport cartopy.feature as cfeature \nfrom matplotlib.offsetbox import AnchoredText\n\nimport pl_tools\n\nfrom matplotlib.path import Path\n\n###functions\ndef find_purple():\n\n #### JSON ######\n json_file = inp_dir + 'purpleair_13june2019.json'\n purp_jsonall = pd.read_json(json_file)\n\n purp = defaultdict(dict)\n for il in range (len( purp_jsonall)):\n res = purp_jsonall.iloc[il]['results']\n try:\n purp [res['ID']]['lat' ] = float(res['Lat'])\n purp [res['ID']]['lon' ] = float(res['Lon'])\n purp [res['ID']]['pm2_5'] = res['PM2_5Value']\n purp [res['ID']]['last_seen'] = datetime.datetime(1970,1,1) + datetime.timedelta(seconds = res['LastSeen'] )\n purp [res['ID']]['label' ] = res['Label']\n purp [res['ID']]['id' ] = res['ID']\n except:\n print (il, '===None')\n\n purp_df = pd.DataFrame.from_dict(purp)\n #purp_df.dropna(inplace=True)\n\n return purp_df\n\n\n\n\ndef make_map(projection=ccrs.PlateCarree()): \n \n 
\"\"\" \n Generate fig and ax using cartopy \n input: projection \n output: fig and ax \n \"\"\" \n alpha = 0.5 \n subplot_kw = dict(projection=projection) \n fig, ax = plt.subplots(figsize=(9, 13), \n subplot_kw=subplot_kw) \n gl = ax.gridlines(draw_labels=True) \n gl.xlabels_top = gl.ylabels_right = False \n gl.xformatter = LONGITUDE_FORMATTER \n gl.yformatter = LATITUDE_FORMATTER \n \n # Put a background image on for nice sea rendering. \n ax.stock_img() \n \n # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth\n states_provinces = cfeature.NaturalEarthFeature( \n category='cultural', \n name='admin_1_states_provinces_lines',\n scale='10m', \n facecolor='none') \n\n #coast = cfeature.NaturalEarthFeature(\n # category='physical', \n # name='coastline',\n # scale='10m',\n # facecolor='none') \n \n SOURCE = 'Natural Earth'\n LICENSE = 'public domain'\n \n ax.add_feature(cfeature.LAND,zorder=0,alpha=alpha) \n ax.add_feature(cfeature.COASTLINE,zorder=1,alpha=alpha)\n ax.add_feature(cfeature.BORDERS,zorder=1,alpha=2*alpha)\n \n ax.add_feature(states_provinces, edgecolor='gray',zorder=1)\n \n # Add a text annotation for the license information to the\n # the bottom right corner. \n text = AnchoredText(r'$\\mathcircled{{c}}$ {}; license: {}'\n ''.format(SOURCE, LICENSE),\n loc=4, prop={'size': 9}, frameon=True) \n ax.add_artist(text) \n \n ax.set_xlim(-132,-65) #lon limits \n ax.set_ylim( 20 , 55) #lat limits \n return fig, ax\n\n\n\n\ndef wait():\n \"\"\"\n \n \"\"\"\n while True:\n choice = input(\"Enter 1 when you are done .. > \")\n if choice == 1 :\n break\n\n\ndef create_boundary(name = '', region = {}):\n \"\"\"\n \n \"\"\"\n lim = region[name]\n filename = '../inp/'+name + '_bou.txt'\n\n if (not os.path.exists(filename)):\n fig,ax = make_map() \n ax.set_xlim(lim['xmin'],lim['xmax']) \n ax.set_ylim(lim['ymin'],lim['ymax'])\n\n poly = pl_tools.InteractiveLine(type='cblin')\n plt.show()\n wait()\n \n # Open filename\n f = open(filename,'w')\n for i in range(len(poly.x)):\n f.write( str(poly.x[i])+ ' ' +str(poly.y[i]) + ' \\n' )\n \n f.close() \n \n lons = poly.x\n lats = poly.y\n else:\n bou = np.loadtxt(filename)\n lons = bou[:,0]\n lats = bou[:,1] \n\n return lons,lats \n\n\n\ndef get_ind_poly(lons_bound,lats_bound,lons,lats):\n \"\"\"\n Return index of points inside a polygon\n \n Input:\n bou_lons,bou_lats: \n lons and lats: data poits coordinates\n \n Output:\n ind: index of points inside rectangle\n \n \n \"\"\"\n\n vertices = np.c_[lons_bound,lats_bound]\n p = Path(vertices)\n\n data = np.c_[lons, lats]\n mask = p.contains_points(data)\n\n ind = np.where(mask==True)\n return ind[0]\n\n\ninp_dir = '../inp/'\n\n\n#####\nregion = defaultdict(dict)\n#####\n#name = 'us'\n#region[name]['xmin'] = -125.0\n#region[name]['xmax'] = -55.\n#region[name]['ymin'] = 15.0\n#region[name]['ymax'] = 46.3\n\nname = 'california'\nregion[name]['xmin'] = -125.2\nregion[name]['xmax'] = -112.8\nregion[name]['ymin'] = 31.\nregion[name]['ymax'] = 43.1\n\n\n\n#### MAIN\nbou_type = 'poly'\n\nif bou_type == 'poly':\n lons_bou,lats_bou = create_boundary(name = name, region = region)\n\n\n\npurp = find_purple()\n\n\n####\nlons = []\nlats = []\nids = []\nlbs = []\n\nfor key in purp.keys():\n #get purple located outside \n try:\n \n if not purp [key]['label'].endswith(' B'):\n lons.append (purp [key]['lon'] )\n lats.append (purp [key]['lat'] )\n lbs.append (purp [key]['label'])\n ids.append (purp [key]['id'] )\n except:\n print (key)\n pass\n\nlons = np.array(lons)\nlats = np.array(lats)\nlbs = 
np.array(lbs)\nids = np.array(ids)\n\npurp_ind = get_ind_poly(lons_bound = lons_bou,lats_bound=lats_bou,lons =lons ,lats=lats)\n\n\nids_inside = ids [purp_ind]\nlons_inside = lons [purp_ind]\nlats_inside = lats [purp_ind]\nlbs_inside = lbs [purp_ind]\n#\ndata_sta = np.c_[ids_inside,lons_inside,lats_inside,lbs_inside]\n\ndf = pd.DataFrame(data=data_sta,columns=['id','lon','lat','label'])\ndf = df.dropna()\ndf.to_csv(name + '_purple_air_june2019.csv', sep='\\t', encoding = 'utf-8',index=False)\n\n\n\nlim =region[name]\n\n#plot stations\nprint ('Static Cartopy map ...')\nfig,ax = make_map() \nax.set_title( name.capitalize() + ' Stations')\nax.scatter(x = lons_inside , y =lats_inside ,s=20,lw=0, c= 'purple',alpha=0.85,label = 'Purple air')\nax.legend(ncol=4)\nax.set_xlim(lim['xmin'],lim['xmax'])\nax.set_ylim(lim['ymin'],lim['ymax'])\n\n\nplt.savefig(name +'_all.png',dpi=450)\nplt.show()\n#plt.close('all')\n#######################################\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "02-jun2019/10_read_epa_purple_bou_poly.py", "file_name": "10_read_epa_purple_bou_poly.py", "file_ext": "py", "file_size_in_byte": 8097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pandas.read_json", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 76, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LONGITUDE_FORMATTER", "line_number": 89, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LATITUDE_FORMATTER", "line_number": 90, "usage_type": "name"}, {"api_name": "cartopy.feature.NaturalEarthFeature", "line_number": 96, "usage_type": "call"}, {"api_name": "cartopy.feature", "line_number": 96, "usage_type": "name"}, {"api_name": "cartopy.feature.LAND", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cartopy.feature", "line_number": 111, "usage_type": "name"}, {"api_name": "cartopy.feature.COASTLINE", "line_number": 112, "usage_type": "attribute"}, {"api_name": "cartopy.feature", "line_number": 112, "usage_type": "name"}, {"api_name": "cartopy.feature.BORDERS", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cartopy.feature", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.offsetbox.AnchoredText", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pl_tools.InteractiveLine", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 189, "usage_type": 
"attribute"}, {"api_name": "matplotlib.path.Path", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 192, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 195, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 262, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}]} +{"seq_id": "312964556", "text": "# Histogram equalization\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\n\nimage = cv2.imread('path/to/image')\nimage = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\ncv2.imshow(\"image\",image)\ncv2.waitKey(0)\n\neq = cv2.equalizeHist(image)\n##hist = cv2.calcHist([image],[0],None,[256],[0,256])\nhist2 = cv2.calcHist([eq],[0],None,[256],[0,256])\ncv2.imshow(\"Histogram Equalization\", np.hstack([eq]))\ncv2.waitKey(0)\nplt.figure()\nplt.title(\"Grayscale Histogram\")\nplt.xlabel(\"Bins\")\nplt.ylabel(\"# of pixels\")\nplt.plot(hist2)\nplt.xlim([0,256])\nplt.show()\ncv2.waitKey(0)\n", "sub_path": "histogram_eq.py", "file_name": "histogram_eq.py", "file_ext": "py", "file_size_in_byte": 589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.calcHist", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 24, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "cv2.waitKey", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "418209555", "text": "import cv2 as cv\r\nimport numpy as np\r\n\r\nimg_path = \"./hand_sample.jpg\"\r\nimg = cv.imread(img_path)\r\n#img = cv.VideoCapture(0)\r\nimg = cv.resize(img,dsize=(800,800),interpolation=cv.INTER_AREA)\r\n\r\n###YCrCb 변환\r\n##ycrcb = cv2.cvtColor(img,cv2.COLOR_BGR2YCrCb)\r\n###Cr:133~173, Cb:77~127\r\n##mask_hand = cv2.inRange(ycrcb,np.array([0,133,77]),np.array([255,173,127]))\r\n## \r\n##cv2.imshow(\"Hands\",mask_hand)\r\n##cv2.imshow(\"Origin\",img)\r\n## \r\n##cap.release()\r\n##cv2.destroyAllWindows()\r\n## \r\n\r\nhsvim = cv.cvtColor(img, cv.COLOR_BGR2HSV)\r\nlower = np.array([0, 48, 80], dtype = \"uint8\")\r\nupper = np.array([20, 255, 255], dtype = \"uint8\")\r\nskinRegionHSV = cv.inRange(hsvim, lower, upper)\r\nblurred = cv.blur(skinRegionHSV, (2,2))\r\nret,thresh = cv.threshold(blurred,0,255,cv.THRESH_BINARY)\r\ncv.imshow(\"thresh\", thresh)\r\n\r\ncontours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\r\ncontours = max(contours, key=lambda x: cv.contourArea(x))\r\ncv.drawContours(img, [contours], -1, (255,255,0), 2)\r\ncv.imshow(\"contours\", img)\r\n\r\nhull = cv.convexHull(contours)\r\ncv.drawContours(img, [hull], -1, (0, 255, 255), 2)\r\ncv.imshow(\"hull\", img)\r\n\r\nhull = cv.convexHull(contours, returnPoints=False)\r\ndefects = cv.convexityDefects(contours, hull)\r\n\r\nif defects is not None:\r\n cnt = 0\r\nfor i in range(defects.shape[0]): # calculate the angle\r\n s, e, f, d = defects[i][0]\r\n start = tuple(contours[s][0])\r\n end = tuple(contours[e][0])\r\n far = tuple(contours[f][0])\r\n a = np.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)\r\n b = np.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)\r\n c = np.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)\r\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) # cosine theorem\r\n if angle <= np.pi / 2: # angle less than 90 degree, treat as fingers\r\n cnt += 1\r\n cv.circle(img, far, 4, [0, 0, 255], -1)\r\nif cnt > 0:\r\n cnt = cnt+1\r\ncv.putText(img, str(cnt), (0, 50), cv.FONT_HERSHEY_SIMPLEX,1, (255, 0, 0) , 2, cv.LINE_AA)\r\n\r\ncv.imshow('final_result',img)\r\n", "sub_path": "손모양검출.py", "file_name": "손모양검출.py", "file_ext": "py", "file_size_in_byte": 2039, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.blur", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 27, "usage_type": "call"}, {"api_name": 
"cv2.findContours", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.convexHull", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.convexHull", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.convexityDefects", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 57, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 57, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "79574013", "text": "import bs4\n\nfrom selenium import webdriver\n\nimport sys\nimport time\nimport os\nimport re\nimport pickle\nfrom time import gmtime, strftime\n\nmax_not_available = 10\n\ndef get_now():\n return strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\ndef getWFSlot(productUrl):\n driver = webdriver.Firefox()\n driver.get(productUrl) \n html = driver.page_source\n soup = bs4.BeautifulSoup(html)\n for i in reversed(range(0, 45)):\n time.sleep(1)\n print(f\"Waiting {i} more seconds before continuing...\")\n # time.sleep(60)\n no_open_slots = True\n\n while no_open_slots:\n driver.refresh()\n html = driver.page_source\n soup = bs4.BeautifulSoup(html, features=\"html.parser\")\n time.sleep(4)\n\n slot_patterns = ['Next available', '1-hour delivery windows', '2-hour delivery windows']\n try:\n next_slot_text = soup.find('h4', class_ ='ufss-slotgroup-heading-text a-text-normal').text\n if any(next_slot_text in slot_pattern for slot_pattern in slot_patterns):\n print('SLOTS OPEN!')\n os.system('say \"Slots for delivery opened!\"')\n no_open_slots = False\n time.sleep(1400)\n except AttributeError:\n pass\n\n try:\n slot_opened_text = \"Not available\"\n all_dates = soup.findAll(\"div\", {\"class\": \"ufss-date-select-toggle-text-availability\"})\n for each_date in all_dates:\n if slot_opened_text not in each_date.text:\n print('SLOTS OPEN!')\n os.system('say \"Slots for delivery opened!\"')\n no_open_slots = False\n time.sleep(1400)\n except AttributeError:\n pass\n\n try:\n regex = re.compile('([0-9]+) items not available')\n button_text = soup.find('button', class_='slotButton').text\n soup.find('button', class_='a-spacing-micro')\n matches = regex.search(button_text)\n sentence = ' '.join(button_text.split())\n os.system(f'say \"Slots are opened! 
{sentence}\"')\n print(f\"Regex Match: {re.search('([0-9]+) items not available', button_text)}\")\n if (matches is not None) and (int(matches.group(1)) < max_not_available):\n no_open_slots = False\n if (int(matches.group(1)) > 0):\n li_items = soup.find_all('li', class_='a-spacing-micro')\n for item in li_items:\n print(f\"Not available: {item.text.strip()}\")\n os.system(f'say \"{item.text.strip()} is not available.\"')\n else:\n os.system(f'say \"All items are available!\"')\n \n print(f'-----------------------------------------------------')\n print(f'-----------------------------------------------------')\n print(f'{get_now()} SLOT AVAILABLE!!!!')\n print(f'-----------------------------------------------------')\n print(f'-----------------------------------------------------')\n \n for i in reversed(range(0, 240)):\n time.sleep(1)\n print(f\"Waiting {i} more seconds before restarting. Complete your order before a refresh!\")\n os.system(f'say \"Timed out. Restarting slot search.\"')\n except AttributeError: \n print(f' {get_now()} NO SLOTS OPEN!')\n\n\ngetWFSlot('https://www.amazon.co.uk/gp/buy/shipoptionselect/handlers/display.html?hasWorkingJavascript=1')\n\n\n", "sub_path": "whole_foods_delivery_slot_firefox.py", "file_name": "whole_foods_delivery_slot_firefox.py", "file_ext": "py", "file_size_in_byte": 3390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "time.strftime", "line_number": 15, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 18, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "os.system", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "os.system", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.system", "line_number": 63, "usage_type": "call"}, {"api_name": "re.search", "line_number": 64, "usage_type": "call"}, {"api_name": "os.system", "line_number": 71, "usage_type": "call"}, {"api_name": "os.system", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "os.system", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "131265998", "text": "from datetime import datetime\n\nfrom discord import Color, Embed\nfrom discord.ext.commands import Bot as Base_Bot\n\nfrom bot import config\n\n\nclass Bot(Base_Bot):\n def __init__(self, extensions: list, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.extension_list = extensions\n self.initial_call = True\n\n async def on_ready(self) -> None:\n if self.initial_call:\n self.initial_call = False\n\n # Log new connection\n self.log_channel = self.get_channel(config.log_channel)\n embed = Embed(\n title=\"Bot Connection\",\n description=\"New connection initialized.\",\n timestamp=datetime.utcnow(),\n color=Color.dark_teal(),\n )\n await self.log_channel.send(embed=embed)\n\n # Load all 
extensions\n for extension in self.extension_list:\n try:\n self.load_extension(extension)\n print(f\"Cog {extension} loaded.\")\n except Exception as e:\n print(\n f\"Cog {extension} failed to load with {type(e)}: {e}\"\n )\n else:\n embed = Embed(\n title=\"Bot Connection\",\n description=\"Connection re-initialized.\",\n timestamp=datetime.utcnow(),\n color=Color.dark_teal(),\n )\n await self.log_channel.send(embed=embed)\n\n print(\"Bot is ready\")\n\n async def close(self) -> None:\n await super().close()\n", "sub_path": "bot/core/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 1597, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 9, "usage_type": "name"}, {"api_name": "bot.config.log_channel", "line_number": 20, "usage_type": "attribute"}, {"api_name": "bot.config", "line_number": 20, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "discord.Color.dark_teal", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 25, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}, {"api_name": "discord.Color.dark_teal", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "306809772", "text": "import csv\nimport datetime\n\n#---MAIN CLASS---#\nclass Employee:\n\n num_of_emp = 0# Counting how many employees there are.\n raise_amount = 1.04\n\n def __init__(self, firstname, lastname):\n self.firstname = firstname\n self.lastname = lastname\n self.mail = '{}.{}@yh.nackademin.se'.format(firstname, lastname)\n\n Employee.num_of_emp += 1# Keeping track of the employees\n\n def set_name(self, firstname):\n self.firstname = firstname# If you want to change name you can set the name.\n def get_name(self):\n return self.firstname# Getting the new name\n\n def set_lastname(self, lastname):\n self.lastname = lastname\n def get_lastname(self):\n return self.lastname\n\n#---Subclass to the mainclass---#\n#---Two new attributes from the mainclass---#\nclass Manager(Employee):\n def __init__(self, firstname, lastname):\n super().__init__(firstname, lastname)\n self.profession = 'Manager'\n self.paygrade = 45000\n\n #---Function to return the fullname of any employee---#\n def employee_fullname(self):\n return '{} {} {} ({}kr/month)'.format(self.firstname, self.lastname, self.profession, self.paygrade)\n\n #---Function to return the e-mail address of any employee---#\n def employee_mail(self):\n return self.mail\n\n def apply_raise_amount(self):\n self.paygrade = int(self.paygrade * self.raise_amount)\n\n#---Another subclass to the mainclass---#\n#---Two new attributes from the mainclass---#\nclass Developer(Employee):\n def __init__(self, firstname, lastname):\n super().__init__(firstname, lastname)\n self.profession = 'Developer'\n self.paygrade = 35000\n\n #---Function to return the fullname of any employee---#\n def employee_fullname(self):\n return '{} {} {} ({}kr/month)'.format(self.firstname, self.lastname, self.profession, self.paygrade)\n\n #---Function 
to return the e-mail address of any employee---#\n def employee_mail(self):\n return self.mail\n\n def apply_raise_amount(self):\n self.paygrade = int(self.paygrade * self.raise_amount)\n\n#---Another subclass to the mainclass---#\n#---Two new attributes from the mainclass---#\nclass WebDeveloper(Employee):\n def __init__(self, firstname, lastname):\n super().__init__(firstname, lastname)\n self.profession = 'WebDeveloper'\n self.paygrade = 30000\n\n #---Function to return the fullname of any employee---#\n def employee_fullname(self):\n return '{} {} {} ({}kr/month)'.format(self.firstname, self.lastname, self.profession, self.paygrade)\n\n #---Function to return the e-mail address of any employee---#\n def employee_mail(self):\n return self.mail\n\n def apply_raise_amount(self):\n self.paygrade = int(self.paygrade * self.raise_amount)\n\n#---The list where all the new employees will be saved---#\nlist_emp = []\ndt_now = datetime.date.today()\n#---The loop for the menu---#\nwhile True:\n print('\\n', dt_now)\n try:\n user_choice = int(input(2*'\\n' + '\\tVälkommen\\n[1] Ladda upp csv fil med anställda\\n'\n '[2] Visa anställda\\n[3] Sök anställd\\n'\n '[4] Sök anställds email\\n[5] Lägg till ny anställd'\n '\\n[6] Visa total månadskostnad\\n[7] Radera anställd\\n'\n '[8] Ändra namn på anställd\\n[9] Avsluta\\nVälj: ' + '\\n'))\n\n #---The menu choice for the user to enter a csv file, and csv files only---#\n if user_choice == 1:\n user_file = input('Ange sökväg till csv fil: ')\n csv_inp = 'csv'\n #---A try and except block in case the file is not found---#\n try:\n if user_file.endswith(csv_inp):\n with open(user_file, 'r') as csv_file:\n csv_file = csv.reader(csv_file, delimiter='-')\n for line in csv_file:\n if line[2] == 'Manager':\n new_emp = Manager(line[0], line[1])\n list_emp.append(new_emp)\n elif line[2] == 'Developer':\n new_emp = Developer(line[0], line[1])\n list_emp.append(new_emp)\n elif line[2] == 'Webdeveloper':\n new_emp = WebDeveloper(line[0], line[1])\n list_emp.append(new_emp)\n print('{} har sparats i listan'.format(Employee.num_of_emp))\n else:\n print('Fel filformat, kan bara ladda in csv filer.')\n except FileNotFoundError as e:\n print('Error, filen kunde inte hittas', e)\n\n #---The menu choice to show all the employees who are saved---#\n elif user_choice == 2:\n for person in list_emp:\n print(person.employee_fullname())\n print('Totalt finns det {} stycken anställda'.format(Employee.num_of_emp))\n\n\n #---The menu choice to search for any employees, by both firstname and lastname---#\n elif user_choice == 3:\n save_name = []# Saving the search result in case there is more than one\n count = 0# Count how many times the for loop runs.\n search_inp = input('Ange förnamn eller efternamn: ')\n correct_name = 'Hittade ingen anställd med det namnet!'\n for name in list_emp:\n if search_inp == name.firstname or search_inp == name.lastname:\n count += 1\n correct_name = name.employee_fullname()\n save_name.append(name.employee_fullname())\n else:\n continue\n if count <= 0:# If count is smaller than or equal to zero then return the variable.\n print(correct_name)\n else:#Otherwise return the list of saved search results\n for item in save_name:\n print(item)\n\n #---Similar to searching for employees, but here you get the email---#\n elif user_choice == 4:\n save_email = []# Saving the search result in case there is more than one\n count = 0# Count how many times the for loop runs.\n search_mail = input('Sök förnamn eller efternamn: ')\n correct_mail = 'Hittade ingen mail med det 
namnet'\n for mail in list_emp:\n if search_mail == mail.firstname or search_mail == mail.lastname:\n count += 1\n correct_mail = mail.employee_mail()\n save_email.append(correct_mail)\n else:\n continue\n if count <= 0:# If count is smaller than or equal to zero then return the variable correct_mail.\n print(correct_mail)\n else:#Otherwise return the list of saved search results\n for item in save_email:\n print(item)\n\n #---Menu choice to add a new employee---#\n elif user_choice == 5:\n firstname_inp = input('Förnamn: ')\n lastname_inp = input('Efternamn: ')\n profession_inp = int(input('[1] Manager [2] Developer [3] Webdeveloper\\nVälj: '))\n if profession_inp == 1:\n new_emp = Manager(firstname_inp, lastname_inp)\n list_emp.append(new_emp)# Appending the new employee to list\n print(new_emp.employee_fullname(), 'har nu sparats!')\n with open('New_Employe.txt', 'a') as w_employe:\n w_employe.write(new_emp.employee_fullname() + '\\n')#Saving the new employee to a txt file.\n elif profession_inp == 2:\n new_emp = Developer(firstname_inp, lastname_inp)\n list_emp.append(new_emp)# Appending the new employee to list\n print(new_emp.employee_fullname(), 'har nu sparats!')\n with open('New_Employe.txt', 'a') as w_employe:\n w_employe.write(new_emp.employee_fullname() + '\\n')#Saving the new employee to a txt file.\n elif profession_inp == 3:\n new_emp = WebDeveloper(firstname_inp, lastname_inp)\n list_emp.append(new_emp)# Appending the new employee to list\n print(new_emp.employee_fullname(), 'har nu sparats!')\n with open('New_Employe.txt', 'a') as w_employe:\n w_employe.write(new_emp.employee_fullname() + '\\n')#Saving the new employee to a txt file.\n else:\n print('Fel inmatning, försök igen!')\n\n #---The menu choice that allows the user to get the monthly cost of all employees---#\n elif user_choice == 6:\n x = 0\n for sallery in list_emp:\n x += sallery.paygrade# Summing all the pays in the list into variable x\n print('---------------------------------------------------------------------------')\n print(3*'\\n' + 'Månadskostnaden för alla anställda är i dagsläget {}kr'.format(x) + '\\n'*3)\n print('---------------------------------------------------------------------------')\n\n #---The menu choice that allows the user to delete an employee, and that employee will be saved in a text file.---#\n elif user_choice == 7:\n for item in list_emp:\n print(item.employee_fullname())\n deleted_employee = input('\\nAnge namn på anställd som ska tas bort: ')\n for person in list_emp:\n if person.firstname == deleted_employee:\n print('Är du säker på att du vill ta bort {}'. 
format(person.employee_fullname()))\n inp_choice = input('Y/N: ').lower()\n if inp_choice == 'y':\n try:\n with open('deleted_employees.txt', 'a') as del_emp:\n del_emp.write(person.employee_fullname() + '\\n')# Writing the deleted employee to a txt file.\n list_emp.remove(person)# Removing the chosen employee.\n Employee.num_of_emp -= 1# The count of how many employees there are in the list.\n deleted_employee = '{} {} har nu tagits bort från listan'.format(person.firstname, person.lastname)\n print(deleted_employee)\n except FileNotFoundError:\n print('Error, filen saknas')\n elif inp_choice == 'n':\n deleted_employee = '{} {} har INTE tagits bort från listan'.format(person.firstname, person.lastname)\n print(deleted_employee)\n continue\n else:\n print('Error, ange \"y\" eller \"n\"')\n #---Here you can change a person's first or last name---#\n elif user_choice == 8:\n incorrect = 'Ingen anställd med det namnet!'\n change_name = input('Vilken anställd vill du ändra namn på:')\n for person in list_emp:\n if change_name == person.firstname + ' ' + person.lastname:\n change_first_or_last = input('Vill du ändra [F]örnamn eller [E]fternamn\\n ').lower()\n if change_first_or_last == 'f':\n change_firstname = input('Ange nya förnamnet:')\n person.set_name(change_firstname)# Setting the chosen person's firstname to a new name.\n incorrect = person.employee_fullname()\n elif change_first_or_last == 'e':\n change_lastname = input('Ange nya efternamnet:')\n person.set_lastname(change_lastname)# Setting the chosen person's lastname to a new name.\n incorrect = person.employee_fullname()\n else:\n print('Ange [F] eller [E] tack.')\n print(incorrect)\n #---Quitting the program---#\n elif user_choice == 9:# Exiting the program\n print('Välkommen åter!')\n break\n else:# Exception in case the user picks a number that doesn't exist in the menu.\n print('Ange siffra mellan 1-8 tack!')\n\n except Exception as e:# Exception in case the user doesn't enter a number\n print('Error, ange siffra')\n continue\n\n\n\n\n\n\n\n\n", "sub_path": "Projektarbete/first_projekt.py", "file_name": "first_projekt.py", "file_ext": "py", "file_size_in_byte": 12531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "datetime.date.today", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 86, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "600183897", "text": "import pytest\n\nimport ray\nfrom ray.data.tests.util import column_udf, extract_values\nfrom ray.tests.conftest import * # noqa\n\nNUM_REPEATS = 10\nNUM_TASKS = 10\n\n\n# This test can be flaky if there is resource deadlock between the pipeline\n# stages. 
Run it a lot to ensure no regressions.\ndef test_basic_actors(shutdown_only):\n ray.init(num_cpus=2)\n for _ in range(NUM_REPEATS):\n ds = ray.data.range(NUM_TASKS)\n ds = ds.window(blocks_per_window=1)\n assert sorted(\n extract_values(\n \"id\",\n ds.map(\n column_udf(\"id\", lambda x: x + 1),\n compute=ray.data.ActorPoolStrategy(),\n ).take(),\n )\n ) == list(range(1, NUM_TASKS + 1))\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__]))\n", "sub_path": "python/ray/data/tests/test_pipeline_nohang.py", "file_name": "test_pipeline_nohang.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "ray.init", "line_number": 14, "usage_type": "call"}, {"api_name": "ray.data.range", "line_number": 16, "usage_type": "call"}, {"api_name": "ray.data", "line_number": 16, "usage_type": "attribute"}, {"api_name": "ray.data.tests.util.extract_values", "line_number": 19, "usage_type": "call"}, {"api_name": "ray.data.tests.util.column_udf", "line_number": 22, "usage_type": "call"}, {"api_name": "ray.data.ActorPoolStrategy", "line_number": 23, "usage_type": "call"}, {"api_name": "ray.data", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.main", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "147297599", "text": "# -*- coding: utf-8 -*-\n\nfrom kivy.app import App\nfrom kivy.clock import Clock\nfrom kivy.logger import Logger\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.button import Button\n\nimport time\n\nclass CustomPopup(Popup):\n def on_open(self):\n Logger.info('on_open()')\n self.event = Clock.schedule_once(self.on_press_b, 10)\n\n def on_press_a(self, args):\n Logger.info('A')\n Logger.info(args)\n self.dismiss()\n\n def on_press_b(self, args):\n Logger.info('B')\n Logger.info(args)\n self.dismiss()\n\n def on_dismiss(self):\n Logger.info('on_dismiss()')\n self.event.cancel()\n\n\nclass MainApp(App):\n def build(self):\n b = Button(on_press=self.show_popup, text=\"Show Popup\")\n return b\n\n def show_popup(self, b):\n p = CustomPopup()\n p.open()\n\n\nif __name__ == '__main__':\n MainApp().run()\n", "sub_path": "Popup/MainApp.py", "file_name": "MainApp.py", "file_ext": "py", "file_size_in_byte": 887, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "kivy.uix.popup.Popup", "line_number": 11, "usage_type": "name"}, {"api_name": "kivy.logger.Logger.info", "line_number": 13, "usage_type": "call"}, {"api_name": "kivy.logger.Logger", "line_number": 13, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 14, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 14, "usage_type": "name"}, {"api_name": "kivy.logger.Logger.info", "line_number": 17, "usage_type": "call"}, {"api_name": "kivy.logger.Logger", "line_number": 17, "usage_type": "name"}, {"api_name": "kivy.logger.Logger.info", "line_number": 18, "usage_type": "call"}, {"api_name": "kivy.logger.Logger", "line_number": 18, "usage_type": "name"}, {"api_name": "kivy.logger.Logger.info", "line_number": 22, "usage_type": "call"}, {"api_name": "kivy.logger.Logger", "line_number": 22, "usage_type": "name"}, {"api_name": "kivy.logger.Logger.info", "line_number": 23, "usage_type": "call"}, {"api_name": "kivy.logger.Logger", "line_number": 23, "usage_type": "name"}, {"api_name": 
"kivy.logger.Logger.info", "line_number": 27, "usage_type": "call"}, {"api_name": "kivy.logger.Logger", "line_number": 27, "usage_type": "name"}, {"api_name": "kivy.app.App", "line_number": 31, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "17067864", "text": "import models\nimport yaml\nimport logging\ntry:\n from google.appengine.ext import ndb\nexcept:\n logging.info(\"google.appengine.ext not found. \"\n \"We must be running in a unit test.\")\n import fake_ndb\n ndb = fake_ndb.FakeNdb()\n\n\ndef FindStudent(student_email, students):\n # is students iterable?\n try:\n _ = (e for e in students)\n except TypeError:\n return None\n for student in students:\n if student_email == student['email']:\n return student\n return None\n\n\ndef EligibleClassIdsForStudent(student, classes):\n \"\"\"Return the set of class ids that the student is eligible to take.\"\"\"\n class_ids = []\n for c in classes:\n class_id = str(c['id'])\n if not c['prerequisites']:\n class_ids.append(class_id)\n for prereq in c['prerequisites']:\n if 'email' in prereq:\n if student['email'] == prereq['email']:\n class_ids.append(class_id)\n if 'current_grade' in prereq:\n if student['current_grade'] == prereq['current_grade']:\n class_ids.append(class_id)\n if 'group' in prereq:\n #TODO: fix me so student groups work\n class_ids.append(class_id)\n return class_ids\n\n\nclass _ClassRoster(object):\n\n def __init__(self, institution, session, class_obj):\n self.institution = institution\n self.session = session\n self.class_obj = class_obj\n new_class_id = class_obj['id']\n roster = models.ClassRoster.FetchEntity(institution, session, new_class_id)\n self.emails = roster['emails']\n\n def add(self, student_email):\n self.emails.append(student_email)\n self.emails = list(set(self.emails))\n emails = ','.join(self.emails)\n logging.info(\"new emails in [%s]: %s\" % (self.class_obj['id'], emails))\n models.ClassRoster.Store(\n self.institution, self.session, self.class_obj, emails)\n\n def remove(self, student_email):\n self.emails = [ e for e in self.emails if e != student_email ]\n emails = ','.join(self.emails)\n logging.info(\"remaining emails in [%s]: %s\" % (self.class_obj['id'], emails))\n models.ClassRoster.Store(\n self.institution, self.session, self.class_obj, emails)\n\n\nclass _ClassInfo(object):\n\n def __init__(self, institution, session):\n classes = models.Classes.Fetch(institution, session)\n classes = yaml.load(classes)\n self.dayparts_by_class_id = {}\n self.classes_by_id = {}\n for c in classes:\n class_id = str(c['id'])\n self.classes_by_id[class_id] = c\n self.dayparts_by_class_id[class_id] = [s['daypart'] for s in c['schedule']]\n\n def getClassObj(self, class_id):\n class_obj = self.classes_by_id[class_id]\n if not class_obj:\n logging.fatal('no class_obj')\n if not 'id' in class_obj:\n logging.fatal('class_obj has no id')\n return class_obj\n\n def RemoveConflicts(self, class_ids, new_class_id):\n \"\"\"return new_class_id and non-conflicting old class_ids\"\"\" \n if new_class_id in self.dayparts_by_class_id:\n new_dayparts = self.dayparts_by_class_id[new_class_id]\n else:\n new_dayparts = []\n new_class_ids = [new_class_id]\n for c_id in class_ids:\n if c_id == '':\n continue\n remove = False\n for daypart in self.dayparts_by_class_id[c_id]:\n if daypart in new_dayparts:\n remove = True\n if not remove:\n new_class_ids.append(c_id)\n return new_class_ids\n\n\nclass _StudentSchedule(object):\n\n def __init__(self, institution, 
session, student_email, class_info):\n \"\"\"Args:\n class_info is a _ClassInfo object\n \"\"\"\n self.institution = institution\n self.session = session\n self.student_email = student_email\n class_ids = models.Schedule.Fetch(institution, session, student_email)\n self.class_ids = class_ids.split(\",\")\n self.class_info = class_info\n\n def add(self, new_class_id):\n class_ids = self.class_ids\n class_ids = self.class_info.RemoveConflicts(class_ids, new_class_id)\n self.class_ids = class_ids\n self.store()\n\n def remove(self, old_class_id):\n class_ids = self.class_ids\n class_ids = [ i for i in class_ids if i != old_class_id ]\n self.class_ids = class_ids\n self.store()\n\n def store(self):\n class_ids = \",\".join(self.class_ids)\n models.Schedule.Store(\n self.institution, self.session, self.student_email, class_ids)\n\n\n@ndb.transactional(retries=3, xg=True)\ndef AddStudentToClass(institution, session, student_email, new_class_id):\n class_info = _ClassInfo(institution, session)\n s = _StudentSchedule(institution, session, student_email, class_info)\n s.add(new_class_id)\n class_obj = class_info.getClassObj(new_class_id)\n r = _ClassRoster(institution, session, class_obj)\n r.add(student_email)\n\n\n@ndb.transactional(retries=3, xg=True)\ndef RemoveStudentFromClass(institution, session, student_email, old_class_id):\n class_info = _ClassInfo(institution, session)\n class_obj = class_info.getClassObj(old_class_id)\n r = _ClassRoster(institution, session, class_obj)\n r.remove(student_email)\n s = _StudentSchedule(institution, session, student_email)\n s.remove(class_obj['id'])\n", "sub_path": "logic.py", "file_name": "logic.py", "file_ext": "py", "file_size_in_byte": 5043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.info", "line_number": 7, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 10, "usage_type": "name"}, {"api_name": "fake_ndb.FakeNdb", "line_number": 10, "usage_type": "call"}, {"api_name": "models.ClassRoster.FetchEntity", "line_number": 52, "usage_type": "call"}, {"api_name": "models.ClassRoster", "line_number": 52, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 59, "usage_type": "call"}, {"api_name": "models.ClassRoster.Store", "line_number": 60, "usage_type": "call"}, {"api_name": "models.ClassRoster", "line_number": 60, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 66, "usage_type": "call"}, {"api_name": "models.ClassRoster.Store", "line_number": 67, "usage_type": "call"}, {"api_name": "models.ClassRoster", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Classes.Fetch", "line_number": 74, "usage_type": "call"}, {"api_name": "models.Classes", "line_number": 74, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.fatal", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.fatal", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Schedule.Fetch", "line_number": 119, "usage_type": "call"}, {"api_name": "models.Schedule", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.Schedule.Store", "line_number": 137, "usage_type": "call"}, {"api_name": "models.Schedule", "line_number": 137, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 141, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 141, "usage_type": 
"name"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 151, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "388691632", "text": "#!/usr/bin/env python\n\"\"\" Query MongoDB indexed HSPs, save results in PivotTable.js html files.\n Current version supports search results against UniProt sequences only\n \"\"\"\nfrom __future__ import division\nimport json\n\nimport argh\nimport pandas as pd\nfrom argh import arg\nfrom nosqlbiosets.dbutils import DBconnection\nfrom nosqlbiosets.uniprot.query import QueryUniProt\nfrom pivottablejs import pivot_ui\nimport logging\n\n\nINDEX = \"biosets\"\nqryuniprot = QueryUniProt(\"MongoDB\", INDEX, \"uniprot\")\nmdbc = DBconnection(\"MongoDB\", INDEX)\nlog = logging.getLogger(__name__)\n\nclass QueryHSPs():\n\n def _topmatches_qc(self, bitscore, mismatch):\n qc = {\"bitscore\": {\"$gte\": bitscore},\n \"mismatch\": {\"$lte\": mismatch}\n }\n return qc\n\n # Check whether HSP ids are UniProt names or accessions\n def _is_id_name(self, collection):\n r = mdbc.mdbi[collection].find({}, limit=1)\n r = list(r)\n assert 1 == len(r)\n id = r[0]['sseqid']\n return True if '_' in id else False\n\n\n def _topmatches_linked2uniprot_qc(self, collection, bitscore, mismatch):\n qc = self._topmatches_qc(bitscore, mismatch)\n lookupfield = '_id' if self._is_id_name(collection) else 'accession'\n aggqc = [\n {\"$match\": qc},\n {\"$lookup\": {\n \"from\": 'uniprot',\n \"localField\": \"sseqid\",\n \"foreignField\": lookupfield,\n \"as\": \"uniprot\"\n }}\n ]\n return aggqc\n\n def topmatches_linked2UniProt(self, collection, bitscore,\n mismatch, limit):\n aggqc = self._topmatches_linked2uniprot_qc(collection, bitscore,\n mismatch)\n aggqc += [\n {\"$project\": {\"uniprot.gene\":1, \"uniprot.dbReference\": 1,\n \"uniprot.organism\": 1}},\n {\"$unwind\": \"$uniprot\"},\n {\"$unwind\": \"$uniprot.gene\"},\n {\"$unwind\": \"$uniprot.gene.name\"},\n {\"$match\": {\"uniprot.gene.name.type\":\n {\"$in\": [\"primary\"]}}},\n {\"$unwind\": \"$uniprot.dbReference\"},\n {\"$match\": {\"uniprot.dbReference.type\": {\"$in\": ['GO']}}},\n {\"$group\": {\n \"_id\": {\n \"sample\": \"$_id.sample\",\n \"goannot\": {\"$arrayElemAt\": [\n \"$uniprot.dbReference.property\", 0]},\n \"gene\": \"$uniprot.gene.name.#text\",\n \"organism\": \"$uniprot.organism.name.#text\"\n },\n \"abundance\": {\"$sum\": 1},\n # TODO: normalized abundance values\n \"bitscore\": {\"$sum\": \"$bitscore\"}\n }},\n {\"$sort\": {\"abundance\": -1}},\n {\"$limit\": limit}\n ]\n cr = mdbc.mdbi[collection].aggregate(aggqc, allowDiskUse=True)\n log.info('topmatches_linked2UniProt query returned')\n r = []\n nsamples = {}\n for i in cr:\n goterm = i['_id']['goannot']['value'][2:]\n goclass = i['_id']['goannot']['value'][:1]\n if goclass == 'C':\n goclass = 'Cellular component'\n elif goclass == 'P':\n goclass = 'Biological process'\n else:\n goclass = 'Molecular function'\n sample = i['_id']['sample']\n gene = i['_id']['gene']\n organism = i['_id']['organism']\n abundance = i['abundance']\n bitscore = i['bitscore']\n r.append((sample, organism, goclass, goterm, gene,\n abundance, bitscore))\n if sample in nsamples:\n nsamples[sample] += 1\n else:\n nsamples[sample] = 1\n print(nsamples)\n n = sum(nsamples.values())\n r = [row +\n (row[5] * n // nsamples[row[0]],)\n for row in r]\n return r\n\n @staticmethod\n def save_topmatches_linked2UniProt(r, outfile,\n rows=['GO group', 'GO term', 'Gene']):\n json.dump(r, 
open(outfile+'.json', 'w'), indent=4)\n df = pd.DataFrame(r,\n columns=['Sample', 'Organism', 'GO group', 'GO term',\n 'Gene', 'Abundance', 'Bitscore',\n 'Normalized abundance'])\n if not outfile.endswith('.html'):\n outfile += '.html'\n pivot_ui(df, outfile_path=outfile,\n rows=rows,\n cols=['Sample'],\n rendererName=\"Heatmap\",\n aggregatorName=\"Integer Sum\",\n rowOrder='value_z_to_a',\n vals=[\"Normalized abundance\"])\n print('Pivot table of query results saved in '+ outfile)\n\n\n@arg('study', help='Name of the MongoDB collection for HSPs of a study')\n@arg('outfile', help='Name for the pivot table html file to be generated')\n@arg('--bitscore', help='Minimum bitscore of HSPs')\n@arg('--mismatch', help='Maximum mismatch in HSPs')\n@arg('--limit', help='Maximum number of groups in the aggregated results')\n@arg('--rows', help='Default rows for the output pivot table')\ndef topgenes(study, outfile, bitscore=100, mismatch=1, limit=14000,\n rows='Gene'):\n \"\"\"\n Abundance of HSPs grouped by organisms, genes, and GO annotations.\n Query results are saved in a json file and as PivotTable.js html files\n \"\"\"\n qry = QueryHSPs()\n r = qry.topmatches_linked2UniProt(study, bitscore, mismatch, limit)\n rows = [r.strip() for r in rows.split(\",\")]\n qry.save_topmatches_linked2UniProt(r, outfile, rows)\n\n\nif __name__ == \"__main__\":\n argh.dispatch_commands([\n topgenes\n ])\n", "sub_path": "hspsdb/queryHSPs.py", "file_name": "queryHSPs.py", "file_ext": "py", "file_size_in_byte": 5752, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "nosqlbiosets.uniprot.query.QueryUniProt", "line_number": 18, "usage_type": "call"}, {"api_name": "nosqlbiosets.dbutils.DBconnection", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 117, "usage_type": "call"}, {"api_name": "pivottablejs.pivot_ui", "line_number": 123, "usage_type": "call"}, {"api_name": "argh.arg", "line_number": 133, "usage_type": "call"}, {"api_name": "argh.arg", "line_number": 134, "usage_type": "call"}, {"api_name": "argh.arg", "line_number": 135, "usage_type": "call"}, {"api_name": "argh.arg", "line_number": 136, "usage_type": "call"}, {"api_name": "argh.arg", "line_number": 137, "usage_type": "call"}, {"api_name": "argh.arg", "line_number": 138, "usage_type": "call"}, {"api_name": "argh.dispatch_commands", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "487980459", "text": "from django.shortcuts import render,redirect,get_object_or_404\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom django.db.models import Count,Q\nfrom marketing.models import Signup\nfrom django.urls import reverse\nfrom .models import*\nfrom .forms import *\n\n\ndef get_author(user):\n\tqs = Author.objects.filter(user=user)\n\tif qs.exists():\n\t\treturn qs[0]\n\treturn None\n\ndef get_category_count():\n\tqueryset=Post.objects.values('categories__title').annotate(Count('categories__title'))\n\treturn queryset\n\ndef search(request):\n\tqueryset=Post.objects.all()\n\tquery=request.GET.get('q')\n\tif query:\n\t\tqueryset=queryset.filter(\n\t\t\tQ(title__icontains=query)|\n\t\t\tQ(overview__icontains=query)\n\t\t\t).distinct()\n\tcontext={'queryset':queryset}\n\treturn render(request,'search_results.html',context)\n\n\n\ndef 
index(request):\n\tobject_list=Post.objects.filter(featured=True)\n\tlatest=Post.objects.order_by('-timestamp')[:3]\n\tif request.POST:\n\t\temail=request.POST['email']\n\t\tnew_signup=Signup()\n\t\tnew_signup.email=email\n\t\tnew_signup.save()\n\tcontext={'object_list':object_list,\n\t\t\t'latest':latest,\n\t}\n\treturn render(request,'index.html',context)\n\ndef Blog(request):\n\tcategory_count=get_category_count()\n\tmost_recent=Post.objects.order_by('-timestamp')[:3]\n\tobject_list=Post.objects.all()\n\tpaginator = Paginator(object_list, 4)\n\tpage_request_var = 'page'\n\tpage = request.GET.get(page_request_var)\n\ttry:\n\t\tpaginated_queryset = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpaginated_queryset = paginator.page(1)\n\texcept EmptyPage:\n\t\tpaginated_queryset = paginator.page(paginator.num_pages)\n\tcontext={'object_list': paginated_queryset,\n\t\t\t'page_request_var': page_request_var,\n\t\t\t\"most_recent\":most_recent,\n\t\t\t'category_count':category_count\n\t}\n\treturn render(request,'blog.html',context)\ndef PostDetail(request,pk):\n\tcategory_count=get_category_count()\n\tmost_recent=Post.objects.order_by('-timestamp')[:3]\n\tpost=get_object_or_404(Post,pk=pk)\n\n\tform = CommentForm(request.POST or None)\n\tif request.method == \"POST\":\n\t\tif form.is_valid():\n\t\t\tform.instance.user = request.user\n\t\t\tform.instance.post = post\n\t\t\tform.save()\n\t\t\treturn redirect(reverse(\"detail\", kwargs={\n\t\t\t\t\t\t\t'pk': post.pk\n\t\t\t\t\t\t\t}))\n\tcontext={'post':post,\n\t\"most_recent\":most_recent,\n\t'category_count':category_count,\n\t'form':form\n\t}\n\treturn render(request,'post.html',context)\n\n\ndef PostCreate(request):\n\ttitle='Create'\n\tform=PostForm(request.POST or None, request.FILES or None)\n\tauthor=get_author(request.user)\n\tif request.POST:\n\t\tif form.is_valid():\n\t\t\tform.instance.author=author\n\t\t\tform.save()\n\t\t\treturn redirect(reverse('detail',kwargs={'pk':form.instance.pk}))\n\tcontext={'form':form,'title':title}\n\treturn render(request,'post_create.html',context)\n\ndef PostDelete(request,pk):\n\tpost=get_object_or_404(Post,pk=pk)\n\tpost.delete()\n\treturn redirect(reverse('index'))\n\ndef PostUpdate(request,pk):\n\ttitle='Update'\n\tpost=get_object_or_404(Post,pk=pk)\n\tform=PostForm(request.POST or None, request.FILES or None,instance=post)\n\tauthor=get_author(request.user)\n\tif request.POST:\n\t\tif form.is_valid():\n\t\t\tform.instance.author=author\n\t\t\tform.save()\n\t\t\treturn redirect(reverse('detail',kwargs={'pk':form.instance.pk}))\n\tcontext={'form':form,'title':title}\n\treturn render(request,'post_create.html',context)", "sub_path": "posts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.db.models.Count", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "marketing.models.Signup", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 50, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 55, "usage_type": "name"}, 
{"api_name": "django.core.paginator.EmptyPage", "line_number": 57, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 76, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 100, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 102, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 106, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 113, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 113, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "209921950", "text": "# -*- coding: utf-8 -*-\n\nimport Searcher\nimport bs4\n\n\ndef main():\n url_template = r'http://www.alib.ru/find3.php4?tfind='\n s = Searcher.Searcher()\n s.set_url(url_template)\n\n example = s.get_page('Пушкин')\n with open(r'alib.html', 'w', encoding='cp1251') as output_file:\n output_file.write(example)\n\n soup = bs4.BeautifulSoup(example, 'lxml')\n # print(soup.prettify())\n h3 = [s.text for s in soup.find_all('h3')] # titles\n print(h3)\n\n l = [s for s in soup.find_all('p')]\n print(l) # parse it with scikit-learn\n\n\n\"\"\"\n example = s.get_page('Python')\n with open(r'alib2.html', 'w', encoding='cp1251') as output_file:\n output_file.write(example)\n\"\"\"\n\nif __name__ == '__main__':\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "Searcher.Searcher", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "453308863", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n \n# data to plot\nn_groups = 9\nprecision = (75.27, 75.00, 58.38, 58.25, 62.5, 74.78, 76.37, 77.15)\nrecall = (58.33, 63.75, 72.50, 75, 73.75, 72.91, 75.41, 74.54)\nf1_score = ('USDA', 'Twitter', 'WordNet',\n 'Pluralization', 'Phrase Machine', 'Banned Words',\n 'Span Merging', 'WSD')\nfig, ax = plt.subplots()\nax.scatter(precision, recall)\n\nfor index, text in enumerate(f1_score):\n ax.annotate(text, (precision[index], recall[index]))\nplt.title('Precision Vs Recall')\nplt.xlabel('Precision')\nplt.ylabel('Recall')\n# plt.legend()\nplt.tight_layout()\nplt.show() \n\n# # create plot\n# fig, ax = plt.subplots()\n# index = np.arange(n_groups) * 2.0\n# bar_width = 0.35\n# opacity = 0.8\n \n# rects1 = plt.bar(index, precision, bar_width,\n# alpha=opacity,\n# color='b',\n# label='Precision')\n \n# rects2 = plt.bar(index + bar_width, recall, bar_width,\n# alpha=opacity,\n# color='g',\n# label='Recall')\n\n# rects3 = plt.bar(index + bar_width + bar_width, recall, bar_width ,\n# alpha=opacity,\n# 
color='r',\n# label='F1_score')\n \n\n# plt.title('Precision Vs Recall')\n# plt.xticks(index + bar_width, ('A', 'B', 'C', 'D'))\n# plt.xticks(index + bar_width, ('USDA', 'Twitter', 'WordNet',\n# \t\t\t\t\t\t\t\t'Pluralization', 'Phrase Machine', 'Banned Words',\n# \t\t\t\t\t\t\t\t'Span Merging', 'WSD', 'Levenshtein'))\n# plt.legend()\n \n# # plt.tight_layout()\n# plt.show()", "sub_path": "bar_charts/scripts/precision_recall_curve.py", "file_name": "precision_recall_curve.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "269744526", "text": "# -*- coding:utf-8 -*-\n# @Desc : \n# @Date : 2020-12-17 9:27\n\nimport itertools\n\n## itertools.count(start[, step])\n# start: the number the count starts from; step: the interval between values\n\n\n## itertools.cycle(iterable)\n# Cyclic iteration: loops over the elements of the iterable forever, similar to while True\nn = itertools.cycle('abcd')\nfor i in n:\n print(i)\n# Output: a b c d a b c d a b ...\n\n\n## itertools.repeat(object[, times])\n# Repeated iteration: yields the object over and over; unless times is set, it loops forever\n# n = itertools.repeat('hello')\nn = itertools.repeat('hello', 5)\nfor i in n:\n print(i)\n# Output 1: hello hello hello ...\n# Output 2: hello hello hello hello hello\n\n\n## itertools.chain(*iterables)\n# Chained iteration: concatenates several iterable objects\nn = itertools.chain('abc', [2, 1, 3],{30, 20, 10})\nfor i in n:\n print(i)\n# Output: a b c 2 1 3 10 20 30\n\n\n## itertools.product(*iterables, repeat=1)\nfor temp in itertools.product([1, 2, 3], repeat=2):\n print(temp)\n# Output: (1, 1) (1, 2) (1, 3) (2, 1) (2, 2) (2, 3) (3, 1) (3, 2) (3, 3)\n\n\n## itertools.permutations(iterable[, r])\n# Permutations: no repeated elements, order matters\nfor temp in itertools.permutations([1, 2, 3], 3):\n print(temp)\n# Output: (1, 2, 3) (1, 3, 2) (2, 1, 3) (2, 3, 1) (3, 1, 2) (3, 2, 1)\n\n\n## itertools.combinations(iterable, r)\n# Combinations: no repeated elements, order does not matter\nfor temp in itertools.combinations([1, 2, 3, 4], 3):\n print(temp)\n# Output: (1, 2, 3) (1, 2, 4) (1, 3, 4) (2, 3, 4)\n\n\n", "sub_path": "[06]Python-内置函数与模块/Python内置模块部分/09内置模块-itertools.py", "file_name": "09内置模块-itertools.py", "file_ext": "py", "file_size_in_byte": 1468, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "itertools.cycle", "line_number": 13, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 22, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 31, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 38, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 45, "usage_type": "call"}, 
{"api_name": "itertools.combinations", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "230503509", "text": "#!/usr/bin/env python3\n# agent.py\n#This program is realised by evaluation and tree. First, it search all the possibility, such as, searching the mist,\n#blowing up the wall, make aboat, blow up wall,blow up door. Every possible step ,it will find out.\n#Second, it is to pruning the tree, for instance, record a harbour for a island, and this could reduce the possiblity of\n#tree.\n#Third, it is to find out wich wall need to explode,if we consider every wall, it will be exponential.\n#We recorded the searched place and try to search the unknown place.\n#The evaluation part is to customized the score by state. For example, if I have dynamites, it will be high score. if I\n#used a dynamites wronly, it will score low.\n#\n#This program can be invoke by some hidden parament, [-print] could show everystep , --w [float] could show the algorithm\n#between greegy and uniform search, when the float value is one, then it is A* search\n#We finished this assignment by 7 days after the assinment has released.\n#Huang,Wei z5119435 ChengWen, Peng z5103407\n#\n#\nimport socket\nfrom argparse import ArgumentParser\nfrom collections import deque\nimport os\nimport sys\nimport math\n\nview=[[str('m') for i in range(5) ] for _ in range(5)]\nview[2][2]='I'\nimap=15\nw=1\nbest_path=[]\ntemp_path=[]\nused_wall=[]\nused_tree=[]\nused_ground_mist=[]\nused_ocean_mist=[]\nisland_ocean_index=[]\ncenter_x=round(imap/2)\ncenter_y=round(imap/2)\noff_x=0\noff_y=0\nreduce_mark=0\nsys.setrecursionlimit(2147483640)\n\nparser = ArgumentParser()\nparser.add_argument('-p', type=int,dest = 'port', required = True)\nparser.add_argument('--imap', type=int,dest = 'imap', required = False)\nparser.add_argument('--w', type=float,dest = 'w', required = False)\nparser.add_argument('-print', dest = 'print', action='store_true',required = False)\nargs = parser.parse_args()\nport = args.port\nif(args.imap):\n imap=args.imap\nif(args.w):\n w=args.w\n#port=12344\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect(('localhost',port))\n\nirow=round(imap/2)-1\nicol=round(imap/2)\nrow=round(imap/2)\ncol=round(imap/2)\neast = 2\nnorth = 1\nwest = 0\nsouth = 3\ndirn = 1\nmap=[['m' for _ in range(imap+1)] for _ in range(imap+1)]\nmap[round(imap/2)][round(imap/2)]=' '\nhave_axe = 0\nhave_key = 0\nhave_raft = 0\ngame_won = False\ngame_lost = False\nhave_treasure = 0\nnum_dynamites_held = 0\nsimulate_mark=0\n\n\n\n\ndef action(a):\n if(a):\n #print('take action:',a)\n sock.send(bytes(a,encoding='utf-8'))\n\ndef print_view():\n global view\n for i in view:\n print(i)\n\ndef manhattan_dist(x, y, dx, dy):\n return abs(x - dx) + abs(y - dy)\n\ndef read_view():\n global view\n for i in range(5):\n for j in range(5):\n if ((i == 2) and (j == 2)):\n continue\n k = str(sock.recv(1), encoding='utf-8')\n view[i][j]=k\ndef draw_map():\n global map\n global view\n global row\n global col\n temp=[[str(i) for i in range(5) ] for _ in range(5)]\n for i in range(5):\n for j in range(5):\n if(dirn==0):\n temp[4 - j][ i] = view[i][j]\n if (dirn == 1):\n temp[i][j] = view[i][j]\n if (dirn == 2):\n temp[j][4 - i] = view[i][j]\n if (dirn == 3):\n temp[4 - i][4 - j] = view[i][j]\n for i in range(-2,3):\n for j in range(-2,3):\n if(i!=0 or j!=0):\n map[row + i][col + j] = temp[2+i][2+j]\n\ndef forward_step():\n global irow\n global icol\n if(dirn==0):\n irow = row;\n icol = col - 1;\n if (dirn == 1):\n irow = row - 1;\n icol = col;\n if 
(dirn == 2):\n irow = row;\n icol = col + 1;\n if (dirn == 3):\n irow = row + 1;\n icol = col;\n\ndef judge_move(x,y,dx,dy,action=None):\n dd=0\n global dirn\n op = [['', 'R', 'RR', 'L'],\n ['L', '', 'R', 'RR'],\n ['RR', 'L', '', 'R'],\n ['R', 'RR', 'L', '']]\n if(x==dx and y==dy ):\n return ''\n if(x-1==dx and y==dy):#1\n dd=1\n if(x+1==dx and y==dy):#3\n dd=3\n if(x==dx and y+1==dy):#2\n dd=2\n if(x==dx and y-1==dy):#0\n dd=0\n if(action in {'C','U','B'}):\n return op[dirn][dd]+action+'F'\n return op[dirn][dd]+'F'\n\ndef change_dirn(a):\n global dirn\n if(a=='L'):\n dirn = (dirn-1)%4\n if(a=='R'):\n dirn = (dirn+1)%4\n\n################################################################################\ndef find_all_continents(x,y):\n options = find_ocean_options(x,y)\n temp_all_continents =[]\n all_continents=[]\n def looking_for_land(x,y):\n nonlocal temp_all_continents\n nonlocal all_continents\n new_land=[]\n temp=[]\n for i in range(len(temp_all_continents)):\n for j in range(len(temp_all_continents[i])):\n if(temp_all_continents[i][j]==[x,y]):\n all_continents[i].append([[x,y],' '])\n return\n new_land = find_ground_options(x,y)\n for n in new_land:\n temp.append(n[0])\n temp_all_continents.append(temp)\n all_continents.append([[[x,y],' ']])\n for opt in options:\n for o in opt[1:]:\n if(o[2]==' '):\n looking_for_land(o[0],o[1])\n return all_continents\n\ndef find_all_oceans(x,y):\n options = find_ground_options(x,y)\n temp_all_ocean = []\n all_oceans=[]\n def looking_for_ocean(gx,gy,x, y):\n nonlocal all_oceans\n nonlocal temp_all_ocean\n new_ocean = []\n temp = []\n for i in range(len(temp_all_ocean)):\n for j in range(len(temp_all_ocean[i])):\n if (temp_all_ocean[i][j] == [x, y]):\n all_oceans[i].append([[gx,gy],[x, y],'~'])\n return\n new_ocean = find_ocean_options(x, y)\n for n in new_ocean:\n temp.append(n[0])\n temp_all_ocean.append(temp)\n all_oceans.append([[[gx,gy],[x, y],'~']])\n\n for opt in options:\n for o in opt[1:]:\n if (o[2] == '~'):\n looking_for_ocean(opt[0][0],opt[0][1],o[0], o[1])\n return all_oceans\n################################################################################\ndef ground_neighbor(x,y):\n neighbor=[[x,y]]\n if (map[x - 1][y] not in {' ','.'}):\n neighbor.append([x - 1,y,map[x - 1][y]])\n if (map[x][y + 1] not in {' ','.'}):\n neighbor.append([x, y+1, map[x][y+1]])\n if (map[x + 1][y] not in {' ','.'}):\n neighbor.append([x+1, y, map[x+1][y]])\n if (map[x][y - 1] not in {' ','.'}):\n neighbor.append([x, y-1, map[x][y-1]])\n\n for i in range(-2,3):\n for j in range(-2,3):\n if (map[x + i][y +j] == 'm' and [x + i, y + j, map[x + i][y + j]] not in neighbor) and (i!=0 or j!=0):\n neighbor.append([x + i, y + j, map[x + i][y + j]])\n if len(neighbor)>1:\n return neighbor\n else:\n return []\n\ndef find_ground_options(x,y):\n u = deque()\n temp=[]\n option=[]\n used=[]\n global map\n if(map[x][y] ==' '):\n u.append([x,y])\n else :\n return []\n\n while(u):\n [x, y]=u.popleft()\n temp=ground_neighbor(x,y)\n if (temp):\n option.append(temp)\n if([x,y]==[center_x,center_y] and [x,y] not in used and [x,y] not in u):\n option.append([[x,y]])\n used.append([x, y])\n if (map[x-1][y] == ' ' and [x-1,y] not in used and [x-1,y] not in u):\n u.append([x-1, y])\n if (map[x][y+1] == ' ' and [x,y+1] not in used and [x,y+1] not in u):\n u.append([x, y+1])\n if (map[x+1][y] == ' ' and [x+1,y] not in used and [x+1,y] not in u):\n u.append([x+1, y])\n if (map[x][y-1] == ' ' and [x,y-1] not in used and [x,y-1] not in u):\n u.append([x, y-1])\n return option\n\ndef 
ocean_neighbor(x,y):\n neighbor=[[x,y]]\n if (map[x - 1][y] not in {'~','.'}):\n neighbor.append([x - 1, y, map[x - 1][y]])\n if (map[x][y + 1] not in {'~','.'}):\n neighbor.append([x, y + 1, map[x][y + 1]])\n if (map[x + 1][y] not in {'~','.'}):\n neighbor.append([x + 1, y, map[x + 1][y]])\n if (map[x][y - 1] not in {'~','.'}):\n neighbor.append([x, y - 1, map[x][y - 1]])\n\n for i in range(-2,3):\n for j in range(-2,3):\n if (map[x + i][y +j] == 'm' and [x + i, y + j, map[x + i][y + j]] not in neighbor)and (i!=0 or j!=0):\n neighbor.append([x + i, y + j, map[x + i][y + j]])\n if len(neighbor)>1:\n return neighbor\n else:\n return []\n\ndef find_ocean_options(x,y):\n u = deque()\n option=[]\n temp=[]\n used=[]\n global map\n\n if(map[x][y] =='~'):\n u.append([x,y])\n else :\n return []\n\n while(u):\n [x, y]=u.popleft()\n temp=ocean_neighbor(x,y)\n if (temp ):\n option.append(temp)\n used.append([x,y])\n if (map[x-1][y] =='~' and [x-1,y] not in used and [x-1,y] not in u):\n u.append([x-1,y])\n if (map[x][y+1] =='~' and [x,y+1] not in used and [x,y+1] not in u):\n u.append([x,y+1])\n if (map[x+1][y] =='~' and [x+1,y] not in used and [x+1,y] not in u):\n u.append([x+1, y])\n if (map[x][y-1] =='~' and [x,y-1] not in used and [x,y-1] not in u):\n u.append([x, y-1])\n return option\n\n\n\ndef find_path(x,y,dx,dy):\n\n used_path = []\n path1 = []\n global map\n temp1=''\n temp2=''\n if (map[x][y] in {'d', 'k', 'a', '$'}):\n temp1 = map[x][y]\n map[x][y] = ' '\n if (map[dx][dy] in {'d', 'k', 'a', '$'}):\n temp2 = map[dx][dy]\n map[dx][dy] = ' '\n current_state = map[x][y]\n\n if (current_state != map[dx][dy] and current_state in {' ','~'} and map[dx][dy] in {' ','~'} and\n ((dx - 1 == x and dy == y) or (dx + 1 == x and dy == y) or (dx == x and dy - 1 == y)\n or (dx == x and dy + 1 == y))):\n return [[x, y], [dx, dy]]\n def recursive_path(cx,cy):\n nonlocal path1\n nonlocal used_path\n nonlocal current_state\n nonlocal x\n nonlocal y\n nonlocal dx\n nonlocal dy\n global w\n path1.append([cx,cy])\n used_path.append([cx,cy])\n if([cx-1,cy]==[dx,dy]):\n path1.append([cx-1, cy])\n return True\n if([cx,cy+1]==[dx,dy]):\n path1.append([cx,cy+1])\n return True\n if([cx+1,cy]==[dx,dy]):\n path1.append([cx+1,cy])\n return True\n if([cx,cy-1]==[dx,dy]):\n path1.append([cx,cy-1])\n return True\n for _ in range(4):\n a_star = []\n temp = []\n if(map[cx-1][cy] == current_state and [cx-1,cy] not in used_path):\n a_star.append([cx-1,cy,w*manhattan_dist(x,y,cx-1,cy)+(2-w)*manhattan_dist(dx,dy,cx-1,cy)])\n if(map[cx][cy+1] == current_state and [cx,cy+1] not in used_path):\n a_star.append([cx,cy+1,w*manhattan_dist(x,y,cx,cy+1)+(2-w)*manhattan_dist(dx,dy,cx,cy+1)])\n if(map[cx+1][cy] == current_state and [cx+1,cy] not in used_path):\n a_star.append([cx+1,cy,w*manhattan_dist(x,y,cx+1,cy)+(2-w)*manhattan_dist(dx,dy,cx+1,cy)])\n if(map[cx][cy-1] == current_state and [cx,cy-1] not in used_path):\n a_star.append([cx,cy-1,w*manhattan_dist(x,y,cx,cy-1)+(2-w)*manhattan_dist(dx,dy,cx,cy-1)])\n if(a_star):\n temp=min(a_star,key=lambda x:x[2])\n if(temp):\n if( recursive_path(temp[0],temp[1])):\n return True\n else:\n path1.pop()\n return False\n\n if(recursive_path(x,y)):\n if (temp1):\n map[x][y]=temp1\n if (temp2):\n map[dx][dy]=temp2\n return path1\n else:\n if (temp1):\n map[x][y]=temp1\n if (temp2):\n map[dx][dy]=temp2\n return []\n\ndef ground_evaluate(x,y):\n option=[]\n global used1\n global num_dynamites_held#mark=100\n global have_raft#mark=20\n global have_axe#mark=5\n global have_key#mark=10\n global 
have_treasure # mark=99999\n global game_won\n global reduce_mark\n mist=[]\n mark=0\n explored_mist=[]\n dynamites=[]\n tree=[]\n key=False\n axe=False\n treasure=[]\n temp_raft=0\n if(map[x][y]==' '):\n option=find_ground_options(x,y)\n for o in option:\n for op in o[1:]:\n if (op[2]=='m' and [op[0],op[1]] not in mist):\n mist.append([op[0],op[1]])\n if (op[2] == 'd' and [op[0], op[1]] not in dynamites):\n dynamites.append([op[0], op[1]])\n if (op[2] == 'T' and [op[0], op[1]] not in tree):\n tree.append([op[0], op[1]])\n if (op[2] == '$' and [op[0], op[1]] not in treasure):\n treasure.append([op[0], op[1]])\n if (op[2] == 'X' and [op[0], op[1]] not in explored_mist):\n explored_mist.append([op[0], op[1]])\n if (op[2] == 'k'):\n key=True\n if(op[2]=='a' ):\n axe=True\n #if([[center_x,center_y]] in option and have_treasure ):\n # mark=99999\n # return mark\n mark=len(dynamites)*100+num_dynamites_held*200+((axe or have_axe) and len(tree)>=1)*20\\\n +axe*5+ bool(have_axe)*20+bool(have_raft)*4+bool(have_key)*20\\\n +key*10+len(explored_mist)*3+len(mist)*2+reduce_mark\n return mark\n\ndef ocean_evaluate(x,y):\n global used1\n global num_dynamites_held#mark=100\n global have_raft#mark=20\n global have_axe#mark=5\n global have_key#mark=10\n global reduce_mark\n used1=[]\n explored_mist=[]\n mist=[]\n mark = 0\n if(map[x][y]=='~'):\n option = find_ocean_options(x,y)\n for o in option:\n for op in o[1:]:\n if (op[2] == 'm' and [op[0],op[1]] not in mist):\n mist.append([op[0],op[1]])\n if (op[2] == 'X' and [op[0], op[1]] not in explored_mist):\n explored_mist.append([op[0], op[1]])\n mark = len(mist) + len(explored_mist) * 1.2 + num_dynamites_held * 200 +bool(have_axe) * 20 \\\n + bool(have_raft) * 4 + len(mist)*2 + len(explored_mist) * 3 + bool(have_key) * 20 +reduce_mark\n return mark\ndef is_value_tree(x,y):\n global map\n for i in range(-2,3):\n for j in range(-2, 3):\n if(map[x+i][y+j] in {'d', 'k', 'a', '$','m'}):\n return True\n return False\ndef is_value(x1,y1):\n used_wall1 = []\n def is_v(x,y,x0,y0,depth=num_dynamites_held):\n global num_dynamites_held\n global map\n nonlocal used_wall1\n used_wall1.append([x, y])\n if (map[x][y] in {'d', 'k', 'a', '$','m'}):\n return True\n if(depth<1):\n return False\n if(manhattan_dist(x,y,x0,y0)>4):\n return False\n for i in {-1,1}:\n if ([x + i,y] not in used_wall1 and map[x + i][y] not in {'.','~'}):\n if(map[x + i][y]=='*' and num_dynamites_held>1):\n num_dynamites_held-=1\n if(is_v(x + i,y,x0,y0)):\n num_dynamites_held+=1\n return True\n num_dynamites_held += 1\n elif(map[x + i][y]!='*' and is_v(x + i, y,x0,y0)):\n return True\n if ([x,y + i] not in used_wall1 and map[x][y + i] not in {'.','~'}):\n if(map[x][y + i]=='*' and num_dynamites_held>1):\n num_dynamites_held-=1\n if(is_v(x, y + i,x0,y0)):\n num_dynamites_held+=1\n return True\n num_dynamites_held += 1\n elif(map[x][y + i]!='*' and is_v(x, y + i,x0,y0)):\n return True\n used_wall1.pop()\n return False\n return is_v(x1,y1,x1,y1)\n\ndef init_value():\n global simulate_mark\n global best_path\n global used_wall\n global used_tree\n global used_ground_mist\n global used_ocean_mist\n simulate_mark=0\n best_path = []\n used_wall = []\n used_tree = []\n used_ground_mist=[]\n used_ocean_mist=[]\n\ndef clear_mist(x,y):\n global map\n temp=[]\n for i in range(-2, 3):\n for j in range(-2, 3):\n if (map[x + i][y + j] == 'm'):\n temp.append([x + i,y + j])\n map[x + i][y + j] = 'X'\n return temp\n\ndef recover_mist(mist):\n global map\n for temp in mist:\n if (map[temp[0]][temp[1]] == 'X'):\n 
map[temp[0]][temp[1]]='m'\n\ndef simulate(x,y):\n global temp_path\n global best_path\n global used_tree\n global used_wall\n global used_ground_mist\n global used_ocean_mist\n global simulate_mark\n global have_axe\n global have_key\n global have_raft\n global game_won\n global have_treasure\n global num_dynamites_held\n global island_ocean_index\n global reduce_mark\n option=[]\n if(map[x][y] == ' '):\n g_option=find_ground_options(x,y)\n if (map[x][y] == '~'):\n o_option=find_ocean_options(x,y)\n if( best_path == []):# do nothing ,just waiting for death\n simulate_mark=-99999\n if (game_won):\n return\n if (map[x][y] == ' '): # find the treasure and go home\n if(have_treasure and [[center_x,center_y]] in g_option):\n temp1=[[center_x,center_y],' ']\n ##print('[center_x,center_y]:',[[center_x,center_y],' '])\n temp_path.append(temp1)\n #print('game_won:',game_won)\n game_won = True\n temp_mark = 99999\n if (temp_mark>simulate_mark or(temp_mark==simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1):\n temp1=opt[0]\n if(temp1):\n used_ground_mist.append(temp1)\n mist=clear_mist(temp1[0],temp1[1])\n temp_path.append([temp1,'m'])\n temp_mark = ground_evaluate(temp1[0],temp1[1])\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1):\n temp1=opt[0]\n temp2=[o[0],o[1]]\n if(temp1 and temp2 ):\n mist1=clear_mist(temp1[0], temp1[1])\n mist2=clear_mist(temp2[0], temp2[1])\n if(map[temp2[0]][temp2[1]]=='-'):\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp1,temp2,'U'])\n temp_mark = ground_evaluate(x,y)\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))or not temp2):\n temp2=o\n if(temp2):\n mist=clear_mist(temp2[0], temp2[1])\n if(map[temp2[0]][temp2[1]]=='d'):\n num_dynamites_held+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'d'])\n if(map[temp2[0]][temp2[1]]=='k'):\n have_key+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'k'])\n if(map[temp2[0]][temp2[1]]=='a'):\n have_axe+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'a'])\n if(map[temp2[0]][temp2[1]]=='$'):\n have_treasure+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'$'])\n temp_mark = ground_evaluate(temp2[0],temp2[1])\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1):\n temp1=opt[0]\n if(temp1 ):\n used_ocean_mist.append(temp1)\n mist=clear_mist(temp1[0], temp1[1])\n temp_path.append([temp1,'m'])\n temp_mark = ocean_evaluate(x,y)\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))or not temp1):\n temp1=opt[0]\n temp2=[o[0],o[1]]\n if (temp1 and temp2):\n mist1=clear_mist(temp1[0], temp1[1])\n mist2=clear_mist(temp2[0], temp2[1])\n if(map[temp2[0]][temp2[1]]=='-'):\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp1,temp2,'U'])\n temp_mark = ground_evaluate(x,y)\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) 
simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1)):\n temp1 = opt[0]\n temp2 = [o[0], o[1]]\n if(temp1):\n used_tree.append([temp2[0],temp2[1]])\n mist1=clear_mist(temp1[0], temp1[1])\n mist2=clear_mist(temp2[0], temp2[1])\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp1,temp2,'C'])\n have_raft += 1\n temp_mark = ground_evaluate(temp2[0],temp2[1])\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)= len(map)):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n elif (row - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i + 2][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_x += 2\n off_x += 2\n elif (col + 6 >= len(map[0])):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n elif (col - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j + 2] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_y += 2\n off_y += 2\n [row, col] = [p[0]+off_x, p[1]+off_y]\n #print('off_x,off_y:',off_x,off_y)\n action(a_s)\n change_dirn(a_s)\n read_view()\n draw_map()\n forward_step()\n #print_view()#################\n if(args.print):\n print_map()###################\n [x, y] = [x + off_x, y + off_y]\n [dx, dy] = [dx + off_x, dy + off_y]\n if bool(act):\n #print('row,col,dx,dy,act:',row,col,dx,dy,act)\n action_str=judge_move(row,col,dx,dy,action=act)\n #print('action_str:', action_str)\n for a_s in action_str:\n #print('current_direction:',dirn)\n if(a_s == 'F'):\n if (row + 6 > len(map)):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n elif (row - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i + 2][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_x += 2\n off_x += 2\n row+=1\n elif (col + 6 > len(map[0])):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n elif (col - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j + 2] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_y += 2\n off_y += 2\n col+=1\n else:\n [row,col]=[dx,dy]\n action(a_s)\n change_dirn(a_s)\n read_view()\n draw_map()\n forward_step()\n 
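The repeated blocks above grow the map in place whenever the agent approaches an edge: two rows or columns of unexplored 'm' cells are added on the relevant side, and the stored offsets shift so existing coordinates stay valid. A hedged, generic version of that padding step (function and variable names are illustrative):

# Illustrative grid-padding helper: add `amount` rows/columns of unexplored
# cells ('m') on one side and return the resulting coordinate shift.
def pad_grid(grid, side, fill='m', amount=2):
    cols = len(grid[0])
    if side in ('top', 'bottom'):
        pad = [[fill] * cols for _ in range(amount)]
        grid = pad + grid if side == 'top' else grid + pad
        return grid, (amount if side == 'top' else 0), 0
    grid = [([fill] * amount + row) if side == 'left' else (row + [fill] * amount)
            for row in grid]
    return grid, 0, (amount if side == 'left' else 0)

grid, off_x, off_y = pad_grid([[' ']], 'top')  # row indices now shift by off_x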
#print_view()###############\n #print_map()#############\n #print('find path~:',temp)\n if(map[row][col]=='d'):\n map[row][col]=' '\n num_dynamites_held += 1\n if(map[row][col]=='k'):\n map[row][col]=' '\n have_key += 1\n if(map[row][col]=='a'):\n map[row][col]=' '\n have_axe += 1\n if(map[row][col]=='$'):\n map[row][col]=' '\n have_treasure += 1\n #print('have boat:',have_raft)\n return\nmain_option=[]\nmain_path=[]\nmain_temp1=[]\ng_think = False\no_think = False\nread_view()\ndraw_map()\ntry:\n while(not game_won):\n #print('direction: ',dirn)\n #print_view()\n #print_map()\n #print('boat##############################',have_raft)\n #print('num_dynamites_held:',num_dynamites_held)\n main_temp1=[]\n main_temp2=[]\n off_x = 0\n off_y = 0\n #print('current location:',map[row][col])\n if (map[row][col] ==' '):\n main_option=find_ground_options(row,col)\n #print('options:',main_option)\n #print(main_option)\n #print('current_location:'[row,col])\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] == 'm'):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not main_temp1):\n main_temp1 = opt[0]\n g_think=True\n break\n if(main_temp1):\n break\n if(main_temp1):\n move(row,col,main_temp1[0],main_temp1[1])\n continue\n elif(g_think):\n g_think=False\n for i in find_all_oceans(row, col):\n if (i[0] not in island_ocean_index):\n island_ocean_index.append(i[0])\n if(have_axe):\n main_temp1 = []\n main_temp2 = []\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] == 'T' and is_value_tree(o[0],o[1])):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not main_temp1):\n main_temp1 = opt[0]\n main_temp2 = [o[0],o[1]]\n break\n if (main_temp1):\n break\n if(main_temp1):\n move(main_temp1[0], main_temp1[1], main_temp2[0], main_temp2[1],act='C')\n have_raft=1\n continue\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] in { 'd','a','$','k' }):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not main_temp1):\n main_temp1 = [o[0],o[1]]\n break\n if (main_temp1):\n move(row, col,main_temp1[0], main_temp1[1])\n continue\n if (map[row][col] == '~'):\n main_option=find_ocean_options(row,col)\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] == 'm'):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not main_temp1):\n main_temp1 = opt[0]\n o_think=True\n break\n if(main_temp1):\n move(row,col,main_temp1[0],main_temp1[1])\n continue\n elif(o_think):\n o_think=False\n for i in find_all_continents(row, col):\n if (i[0] not in island_ocean_index):\n island_ocean_index.append(i[0])\n\n if (not main_temp1):\n #print('have_axe:',str(have_axe))\n #print(find_ground_options(row,col))\n init_value()\n temp_off_x=0\n temp_off_y=0\n simulate(row,col)\n #print('best_path:', best_path, 'simulate_mark:', simulate_mark)\n for i in range(len(best_path)):\n off_x=0\n off_y=0\n path=best_path[i]\n if(path[-1] not in {'U','B','C'}):\n #print('path[' + str(i) + ']:', path[0], path[1])\n move(row,col,path[0][0]+temp_off_x,path[0][1]+temp_off_y)\n temp_off_x += off_x\n temp_off_y += off_y\n if(path[-1]=='~'):\n move(row, col, path[1][0]+temp_off_x, path[1][1]+temp_off_y)\n temp_off_x += off_x\n temp_off_y += off_y\n if(path[0]==[center_x,center_y]):\n sock.close()\n if(path[-1] in 
{'U','B','C'}):\n #print('path[0][0], path[0][1],path[1][0],path[1][1],a=path[2]:',path[0][0], path[0][1], path[1][0], path[1][1],path[-1])\n move(path[0][0]+temp_off_x, path[0][1]+temp_off_y, path[1][0]+temp_off_x, path[1][1]+temp_off_y,act=path[-1])\n temp_off_x += off_x\n temp_off_y += off_y\n #print('main_best_path:', best_path)\nexcept ConnectionResetError:\n os.system('clear')\n sys.exit()\n\n\n\n", "sub_path": "all/agent1.py", "file_name": "agent1.py", "file_ext": "py", "file_size_in_byte": 50686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 41, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 43, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 55, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 55, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 55, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 240, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 289, "usage_type": "call"}, {"api_name": "os.system", "line_number": 1027, "usage_type": "call"}, {"api_name": "os.system", "line_number": 1291, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1292, "usage_type": "call"}]} +{"seq_id": "240958625", "text": "# \n#========================================================= \n# newmark.py\n#=========================================================\n# Coded by Luis Bedriñana,\n# Universidad de Ingenieria y Tecnologia - UTEC \n# Jul, 2020\n# Version: 1.02\n# \n# Script to analyze the dynamic linear response of 1-DOF subjected to GM record by the\n# Newmark beta method\n#\n# INPUT:\n# per: period of the analyzed system\n# damp: Equivalent damping ratio of the analyzed system\n# GM_acc: Ground motion record read form a text file \n#\n# OUTPUT:\n# Out_acce: Time history of the acceleration response\n# Out_disp: Time history of the displacement response \n# peak_acc: Peak acceleration\n# peak_disp: peak displacement\n#\n# Imports \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n#\n# FUNCTIONS\n# ---------------------------------------------------------------\n# Reading GM record from file\n# be careful with the format of the input file\ndef readGM (fname):\n #fname should include the path\n fhand = open(fname,'r')\n GMlist=list()\n PGA = None\n print('Reading from input file...', fhand)\n ii = 0\n for line in fhand:\n line = line.rstrip()\n if ii == 0:\n npoints = line\n elif ii == 1:\n dtime = line\n else :\n GMlist.append(line)\n peaktemp = float(line)\n if PGA is None or abs(peaktemp) > PGA:\n PGA = abs(peaktemp)\n ii = ii + 1 \n return npoints, dtime, PGA, GMlist \n#\n# ------------------------------------------------------------------\n#\n# ------------------------------ Newmark Method ---------------\n# This formulation only works for linear systems\ndef newbet(acc_s, damp, ww, dtime, gamma, beta, dd, vv, aa):\n #\n cc = 2.*damp*ww\n # constants\n #\n bdv1 = gamma/(beta*dtime)\n bdv2 = 1.-(gamma/beta)\n bdv3 = 1.- (0.5*gamma/beta)\n bda1 = 1./(beta*dtime*dtime)\n bda2 = 1./(beta*dtime)\n bda3 = (0.5/beta)-1.\n p1 = bda1+(bdv1)*cc\n p2 = bda2 + ((gamma/beta) - 1.)*cc\n p3 = bda3 + ((0.5*gamma/beta) - 1.)*dtime*cc\n #equivalent linear stiffness\n kk = ww*ww+bdv1*cc+bda1\n ppn = -acc_s+p1*dd+p2*vv+p3*aa\n # New displacement \n ddn=ppn/kk\n # New velocity\n vn=bdv1*(ddn-dd)+bdv2*vv+dtime*bdv3*aa\n # 
New in acceleration\n an=bda1*(ddn-dd)-bda2*vv-bda3*aa\n return ddn, vn, an\n#\n# ========================== MAIN ============================\n# Setting initial values for input record\n# Input motion path\nfpath = '.\\\\Input_GM\\\\'\n# GM text file name\nfname = 'centro_NS.txt'\nfname_t = fpath + fname\noutRecord = readGM(fname_t)\n# Input data for the system\nper = 1.0\ndamp = 0.02\n#\nnpoints = outRecord[0]\ndtime = float(outRecord[1])\nPGA = float(outRecord[2])\nGMlist = outRecord[3]\nGM_acc = np.array(GMlist, dtype='float32')\n# Checking the input data\nprint('Number of points:', npoints)\nprint('Time interval of data:', dtime)\nprint (GM_acc)\nprint('Peak Ground acceleration:',PGA)\n#\n# Preparing the data for calculations\ngamma = 0.5\nbeta = 0.25\npi = 3.14159265359\n# initial response\ndd = 0.\nvv = 0.\naa = -GM_acc[0]\n#\nww = 2.*pi/per\nprint(ww)\n#\nXd = list()\nXat = list()\n#\nXd_peak = None\nXat_peak = None\n# looping in time\nfor xwave in GM_acc:\n acc_s = xwave\n # Calling the function\n #outNewbet = newbet(acc_s, damp, ww, dtime, gamma, beta, dd, vv, aa)\n dd, vv, aa = newbet(acc_s, damp, ww, dtime, gamma, beta, dd, vv, aa)\n # tuplet output ddn, vn, an, ant\n # total acceleration\n aat = aa + acc_s\n # saving output \n Xd.append(dd)\n #Xa.append(aa)\n Xat.append(aat)\n if Xd_peak is None or abs(dd) > Xd_peak:\n Xd_peak = abs(dd)\n #\n if Xat_peak is None or abs(aat) > Xat_peak:\n Xat_peak = abs(aat)\n#\n# Peak values\nprint ('Peak displacement: ', Xd_peak)\nprint ('Peak total acceleration: ', Xat_peak)\n# Output and figures \n#\n# Path for output\nfpath2 = '.\\\\Output_Newmark\\\\'\nif not os.path.exists(fpath2):\n os.makedirs(fpath2)\n#\ntime = np.arange(0.0, dtime*int(npoints), dtime)\n# \nplt.title(\"Displacement response\", fontsize = 20)\nplt.ylabel(\"Displacement\", fontsize=14)\nplt.xlabel(\"Time\", fontsize=14)\nplt.plot(time,Xd)\nplt.savefig(fpath2 + \"TopDisp.png\")\n#\nfig, axarr=plt.subplots(2,1,sharex=True)\naxarr[0].plot(time,Xat)\naxarr[0].set_title('Acceleration Response')\naxarr[1].plot(time,GM_acc)\naxarr[1].set_title('Ground Motion')\n#\nplt.savefig(fpath2 +\"Acce.png\")\n# Writing csv output file\nfname2 = fpath2 + \"outwave.csv\"\nwith open(fname2,'w') as fhand2:\n for i in range(len(Xat)):\n out_string = \"\"\n out_string += str(time[i])\n out_string += \",\" + str(Xat[i])\n out_string += \",\" + str(Xd[i])\n out_string += \"\\n\"\n fhand2.write(out_string) \nfhand2.close()\nplt.show()\n#\n", "sub_path": "newmark.py", "file_name": "newmark.py", "file_ext": "py", "file_size_in_byte": 4767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": 
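For reference, newbet above implements the standard Newmark-beta recurrence; with gamma = 1/2 and beta = 1/4 (the average-acceleration method, the defaults hard-coded in the main script) it is unconditionally stable for linear systems. A compact standalone step for an undamped, unit-mass oscillator, derived the same way — solve the implicit update for the new displacement, then back out acceleration and velocity:

# One Newmark-beta step for an undamped unit-mass SDOF system (a + w^2 d = p).
# Derived by solving the implicit Newmark update for d_{n+1} first.
def newmark_step(d, v, a, p_next, w, dt, gamma=0.5, beta=0.25):
    k_eff = w * w + 1.0 / (beta * dt * dt)            # effective stiffness
    p_eff = (p_next + d / (beta * dt * dt)
             + v / (beta * dt) + (0.5 / beta - 1.0) * a)
    d_new = p_eff / k_eff
    a_new = (d_new - d) / (beta * dt * dt) - v / (beta * dt) - (0.5 / beta - 1.0) * a
    v_new = v + dt * ((1.0 - gamma) * a + gamma * a_new)
    return d_new, v_new, a_new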
"matplotlib.pyplot.plot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}]} +{"seq_id": "468599104", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nURL=\"https://www.instagram.com/{}/\"\r\n\r\ndef scrape(username):\r\n full_url=URL.format(username) #making url full concat with URL\r\n r=requests.get(full_url)\r\n s=BeautifulSoup(r.text,'html.parser')\r\n\r\n tag=s.find(\"meta\", attrs={\"name\" : \"description\"})\r\n text=tag.attrs['content']\r\n main_text=text.split(\"-\")[0]\r\n\r\n return main_text\r\n\r\nUSERNAME=\"tusharchamoli_15\"\r\ndata = scrape(USERNAME)\r\nprint(data)\r\n", "sub_path": "instagramscrapper.py", "file_name": "instagramscrapper.py", "file_ext": "py", "file_size_in_byte": 469, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "422672320", "text": "# -*- coding:utf-8 -*-\nimport os\nimport time\nimport hashlib\n\nfrom functools import partial\nfrom scrapy.loader.processors import MapCompose\nfrom toolkit import strip, rid\n\nfrom . 
import JaySpider\nfrom ..items.kenzo_item import KenzoItem, KenzoColorItem\nfrom ..utils import CustomLoader, enrich_wrapper, ItemCollectorPath\n\n\nclass KenzoSpider(JaySpider):\n name = \"kenzo\"\n item_xpath = ('//a[@class=\"product\"]/@href',)\n page_xpath = ('//a[@id=\"lazyloadTemplate\"]/@data-href',)\n\n custom_settings = {\n \"ITEM_PIPELINES\": {\n 'crawling.pipelines.%s_pipeline.%sKafkaPipeline' % (name, name.capitalize()): None if JaySpider.debug else 100,\n 'crawling.pipelines.%s_pipeline.%sFilePipeline' % (name, name.capitalize()): 100 if JaySpider.debug else None,\n },\n }\n\n @staticmethod\n def get_base_loader(response):\n return CustomLoader(item=KenzoItem())\n\n @enrich_wrapper\n def enrich_data(self, item_loader, response):\n item_loader.add_re(\"product_id\", r\"id: '(\\w+)'\")\n item_loader.add_xpath(\"part_number\", '//span[@class=\"MFC\"]/text()')\n item_loader.add_xpath(\"title\", \"//h1/text()\")\n item_loader.add_xpath(\"description\", '//div[@itemprop=\"description\"]/div/text()')\n item_loader.add_value(\"color_images\", \"\")\n color_urls = response.xpath('//div[contains(@class, \"kz-pp-fiche\")]//ul[@id=\"va-color\"]/li/@data-value').extract()\n for color_url in color_urls:\n color_url = color_url + \"&format=ajax\"\n response.meta[\"item_collector\"].item_loaders[ItemCollectorPath(os.path.join(\n str(response.meta[\"path\"]), \"colors#%s\" % hashlib.sha1(\n color_url.encode(\"utf-8\")).hexdigest()))] = CustomLoader(item=KenzoColorItem())\n return [{\"url\": color_url + \"&format=ajax\",\n \"meta\": {\"path\": ItemCollectorPath(os.path.join(\n str(response.meta[\"path\"]), \"colors#%s\" % hashlib.sha1(\n (color_url + \"&format=ajax\").encode(\"utf-8\")).hexdigest()))}} for color_url in color_urls]\n\n @enrich_wrapper\n def enrich_colors(self, item_loader, response):\n item_loader.add_value(\"status_code\", response.status)\n item_loader.add_value(\"url\", response.url)\n item_loader.add_value(\"timestamp\", time.strftime(\"%Y%m%d%H%M%S\"))\n item_loader.add_xpath(\"image_urls\", '//div[@class=\"row\"]//img[@itemprop=\"image\"]/@src')\n item_loader.add_xpath(\"color\", '//div[contains(@class, \"kz-pp-fiche\")]//ul[@id=\"va-color\"]/li[@class=\"selected\"]/span/text()')\n item_loader.add_xpath(\"color_id\", '//div[contains(@class, \"kz-pp-fiche\")]//ul[@id=\"va-color\"]/li[@class=\"selected\"]/@data-color3dobject')\n item_loader.add_xpath(\"sizes\", '//div[contains(@class, \"kz-pp-fiche\")]//select[@id=\"va-size\"]/option/text()')\n item_loader.add_xpath(\"availabilities\", '//div[contains(@class, \"kz-pp-fiche\")]//select[@id=\"va-size\"]/option/@data-custom-class')\n item_loader.add_xpath(\"price\", '//div[@class=\"kz-pp-fiche-price\"]/div[@class=\"price\"]//text()', MapCompose(strip), MapCompose(partial(rid, old=\"\\u20ac\", new=\"$\")))", "sub_path": "jay-scraper/crawling/spiders/kenzo_spider.py", "file_name": "kenzo_spider.py", "file_ext": "py", "file_size_in_byte": 3145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "utils.CustomLoader", "line_number": 29, "usage_type": "call"}, {"api_name": "items.kenzo_item.KenzoItem", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.ItemCollectorPath", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "hashlib.sha1", "line_number": 42, "usage_type": "call"}, {"api_name": 
"utils.CustomLoader", "line_number": 43, "usage_type": "call"}, {"api_name": "items.kenzo_item.KenzoColorItem", "line_number": 43, "usage_type": "call"}, {"api_name": "utils.ItemCollectorPath", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "hashlib.sha1", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.enrich_wrapper", "line_number": 31, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 53, "usage_type": "call"}, {"api_name": "scrapy.loader.processors.MapCompose", "line_number": 59, "usage_type": "call"}, {"api_name": "toolkit.strip", "line_number": 59, "usage_type": "argument"}, {"api_name": "functools.partial", "line_number": 59, "usage_type": "call"}, {"api_name": "toolkit.rid", "line_number": 59, "usage_type": "argument"}, {"api_name": "utils.enrich_wrapper", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "522182198", "text": "\"\"\"Run the bagging process on the local machine (i.e., not from a queue)\n\nUSAGE\nlocal_bag_all.py \ne.g., local_bag_all.py x/1/2 no-bag\n\nThe filter limits the b numbers returned to a filtered set, based on keys.\nThe spread of b numbers is fairly even:\n\n> local_bag_all.py 0/ no-bag\n...yields 1/11 of the total b numbers (because of the additional x checksum)\n...processes mets only.\n\n> local_bag_all.py 0/3/4/2 bag\n...yields about 0.01% of all the b numbers\n\n> local_bag_all.py 0/3/4 bag\n...yields about 0.1% of all the b numbers\n\n\"\"\"\n\nimport sys\nimport time\nimport logging\nimport bagger_processor\nimport aws\nfrom mets_filesource import b_numbers_from_s3\n\n\ndef main():\n if len(sys.argv) != 3:\n print(\"usage: local_bag_all.py \")\n print(\"e.g., local_bag_all.py x/1/2 no-bag\")\n else:\n filter = sys.argv[1]\n skip = sys.argv[2] != \"bag\"\n start = time.time()\n counter = 0\n for b_number in b_numbers_from_s3(filter):\n logging.info(\"processing \" + b_number)\n message = {\"identifier\": b_number, \"do_not_bag\": skip}\n result = bagger_processor.process_bagging_message(message)\n error = result.get(\"error\", None)\n if error is not None:\n aws.log_processing_error(result)\n counter = counter + 1\n time_taken = time.time() - start\n print(\"----------------\")\n print(\"{0} items in {1} seconds.\".format(counter, time_taken))\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "archive/bagger/src/local_bag_all.py", "file_name": "local_bag_all.py", "file_ext": "py", "file_size_in_byte": 1545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "mets_filesource.b_numbers_from_s3", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "bagger_processor.process_bagging_message", "line_number": 42, "usage_type": "call"}, {"api_name": "aws.log_processing_error", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "653993809", "text": "from django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom apps.core.models.mixins import 
TimestampMixin\n\n\nclass Application(TimestampMixin, models.Model):\n title = models.CharField(\n max_length=255,\n verbose_name=_('VN__TITLE'),\n help_text=_('HT__TITLE'),\n )\n url = models.URLField(\n null=True,\n blank=True,\n verbose_name=_('VN__URL'),\n help_text=_('HT__URL'),\n )\n last_stored_snapshots_count = models.PositiveIntegerField(\n null=True,\n blank=True,\n )\n\n class Meta:\n verbose_name = _('VN__APPLICATION')\n verbose_name_plural = _('VN__APPLICATIONS')\n\n def __str__(self):\n return self.title\n", "sub_path": "server/apps/application/models/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 728, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "apps.core.models.mixins.TimestampMixin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 10, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models.URLField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "44451799", "text": "from confluent_kafka import Producer\nimport time\n\np = Producer({'bootstrap.servers': 'localhost:9092,localhost:9192,localhost:9292'})\n\ndef delivery_report(err, msg):\n \"\"\" Called once for each message produced to indicate delivery result.\n Triggered by poll() or flush(). 
\"\"\"\n if err is not None:\n print('Message delivery failed: {}'.format(err))\n else:\n print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))\n\n\nimport json\n\nprint(\"Start producer...\")\ndata = {\"_t\": \"pv\",\"title\":\"Die Hard\",\"ticket_total_value\":12}\nwhile True:\n\n p.poll(0)\n \n p.produce('streams-numbers-input', key=\"\", value=json.dumps(data))\n \n time.sleep(1)\n\np.flush()", "sub_path": "Week4/part2/sum/producer-sum.py", "file_name": "producer-sum.py", "file_ext": "py", "file_size_in_byte": 700, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "confluent_kafka.Producer", "line_number": 4, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "380365952", "text": "# coding=utf-8\n\nimport re\nimport asyncio\nimport uuid\nimport os\nimport json\n\nimport aiohttp\nimport aiofiles\nfrom bs4 import BeautifulSoup, element\nfrom tenacity import retry\n\nurl = 'http://stdweb2.korean.go.kr/section/north_list.jsp'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'\n ' Chrome/70.0.3538.77 Safari/537.36'}\npattern_word = re.compile(\n '
.*?(?P.+?).*?.*?'\n    '((?P.*?).*?)?(?P.+?)
',\n re.DOTALL)\npattern_num = re.compile(r'
.*?[(](\\d+?)건[)].*?
', re.DOTALL)\n\n\nclass EmptyPage(Exception):\n pass\n\n\n@retry\nasync def http_request(page=1, letter='ㄱ', num_per_page=10):\n _page_div, _page_mod = divmod(page, 10)\n if _page_mod == 1:\n gogroup = _page_div + 1\n page = ''\n else:\n gogroup = ''\n\n data = dict(go=page, gogroup=gogroup, TableTemp='WORD', GubunTemp=0, Letter=letter, PageRow=num_per_page)\n\n async with aiohttp.ClientSession() as session:\n async with session.post(url, data=data, headers=headers, timeout=300) as response:\n if not response.status == 200:\n raise Exception('bad status code: {}'.format(response.status))\n return await response.text()\n\n\ndef _get_word_num(html):\n rex = pattern_num.search(html)\n if rex:\n return int(rex.group(1))\n\n\ndef parse(html):\n result = []\n rex = pattern_word.findall(html)\n for k in rex:\n word = BeautifulSoup(k[0], 'html.parser').text\n part = k[2]\n meaning = BeautifulSoup(k[3], 'html.parser').text\n result.append((word, part, meaning))\n\n return result\n\n\nasync def async_parse(html):\n loop = asyncio.get_event_loop()\n data = await loop.run_in_executor(None, parse, html)\n if not data:\n raise EmptyPage()\n return json.dumps(data)\n\n\nasync def async_dump_json(data: str, target_dir='tmp'):\n if not os.path.isdir(target_dir):\n os.mkdir(target_dir)\n filename = '{}.json'.format(uuid.uuid4().hex)\n file_path = os.path.join(target_dir, filename)\n\n async with aiofiles.open(file_path, mode='w') as f:\n await f.write(data)\n\n return file_path\n\n\nasync def request(semaphore: asyncio.Semaphore, target_dir, letter, num_per_page=1000):\n loop = asyncio.get_event_loop()\n async with semaphore:\n print('request {}\\t({}/?) ...'.format(letter, 1))\n html = await http_request(1, letter, num_per_page)\n\n word_num = await loop.run_in_executor(None, _get_word_num, html)\n if word_num:\n print('{} 개수 {} 개'.format(letter, word_num))\n page_cnt, page_mod = divmod(word_num, num_per_page)\n if page_mod:\n page_cnt += 1\n\n data = await async_parse(html)\n await async_dump_json(data, target_dir)\n\n for page in range(2, page_cnt + 1):\n async with semaphore:\n print('request {}\\t({}/{}) ...'.format(letter, page, page_cnt))\n html = await http_request(page, letter, num_per_page)\n\n try:\n data = await async_parse(html)\n await async_dump_json(data, target_dir)\n except EmptyPage:\n print('단어 없음: {}'.format(letter))\n\n\nasync def run(target_dir):\n semaphore = asyncio.Semaphore(25)\n\n jobs = []\n for letter in 'ㄱㄴㄷㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎ':\n jobs.append(request(semaphore, target_dir, letter, 1000))\n\n await asyncio.gather(*jobs)\n\n\ndef merge_json(target_dir):\n if os.path.isdir(target_dir):\n data = []\n\n for filename in os.listdir(target_dir):\n file_path = os.path.join(target_dir, filename)\n with open(file_path, 'r') as f:\n data.extend(json.load(f))\n\n with open('async_output.json', 'w') as f:\n json.dump(data, f)\n\n\nif __name__ == '__main__':\n target_dir = 'tmp'\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run(target_dir))\n merge_json(target_dir)\n", "sub_path": "kp_stdweb2_with_async.py", "file_name": "kp_stdweb2_with_async.py", "file_ext": "py", "file_size_in_byte": 4088, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 22, "usage_type": "attribute"}, 
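The crawler above bounds concurrency with a single asyncio.Semaphore shared by one task per letter, then joins the tasks with asyncio.gather. The skeleton of that pattern, independent of the site-specific parsing (the URL list and limit are placeholders):

# Skeleton of semaphore-bounded concurrent fetching with aiohttp.
import asyncio
import aiohttp

async def fetch(sem, session, url):
    async with sem:                      # at most `limit` requests in flight
        async with session.get(url) as resp:
            return await resp.text()

async def crawl(urls, limit=25):
    sem = asyncio.Semaphore(limit)
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(fetch(sem, session, u) for u in urls))

# results = asyncio.run(crawl(['https://example.com/']))

Sharing one session across all tasks is the intended aiohttp usage; the per-request session in the original http_request works but re-opens a connection pool on every call.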
{"api_name": "aiohttp.ClientSession", "line_number": 40, "usage_type": "call"}, {"api_name": "tenacity.retry", "line_number": 29, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 57, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 59, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 66, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 75, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "aiofiles.open", "line_number": 79, "usage_type": "call"}, {"api_name": "asyncio.Semaphore", "line_number": 85, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 86, "usage_type": "call"}, {"api_name": "asyncio.Semaphore", "line_number": 114, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 130, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 133, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "116800898", "text": "#!/usr/bin/env python3\n\n\"\"\"\nThis file contains code to extract quiz question from Django controlled db.\n\nFunctions:\n read_questions()\n write_questions()\n main()\n\"\"\"\n\nimport os\nimport sys\nimport django\nfrom django.apps import AppConfig\n\n\nclass QexportConfig(AppConfig):\n name = 'qexport'\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\n\nSECRET_KEY = os.getenv('SECRET_KEY')\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings') # setting DB variables from mysite/settings.py\ndjango.setup()\nfrom devops.models import Question # noqa E402\n\n\ndef read_questions(mod_nm):\n \"\"\"\n reads questions for module 'mod_nm'\n \"\"\"\n recs = None\n if mod_nm is None:\n recs = Question.objects.values()\n else:\n recs = Question.objects.filter(module=mod_nm).values()\n return recs\n\n\ndef write_questions(recs):\n \"\"\"\n Args:\n recs: the data to output\n Returns:\n None (for now: we probably want success or error codes)\n \"\"\"\n i = 1\n for question in recs:\n print(str(i) + \". 
(1 point)\")\n print(question[\"text\"])\n print()\n\n # list of answer options\n answers = ['answerA', 'answerB', 'answerC', 'answerD', 'answerE']\n ans_options = [question[i] for i in answers]\n\n # separate list for answer option bullets\n options = [\"a.\", \"b.\", \"c.\", \"d.\", \"e.\"]\n\n # marking the correct answer by '*'\n correct = question[\"correct\"].lower() + \".\"\n options[options.index(correct)] = \"*\" + options[options.index(correct)]\n for option in ans_options:\n if option:\n # matching the index for 'options' &\n # 'ans_options' to get correct alphabet\n print(options[ans_options.index(option)] + \" \" + option)\n else:\n break\n i += 1\n print()\n\n\ndef main():\n mod_nm = None\n if len(sys.argv) > 1:\n mod_nm = sys.argv[1]\n\n recs = read_questions(mod_nm)\n write_questions(recs)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "qexport.py", "file_name": "qexport.py", "file_ext": "py", "file_size_in_byte": 2108, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.apps.AppConfig", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ.setdefault", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 27, "usage_type": "call"}, {"api_name": "devops.models.Question.objects.values", "line_number": 37, "usage_type": "call"}, {"api_name": "devops.models.Question.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "devops.models.Question", "line_number": 37, "usage_type": "name"}, {"api_name": "devops.models.Question.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "devops.models.Question.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "devops.models.Question", "line_number": 39, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 79, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "71019341", "text": "# encoding: utf8\n\n# Copyright (c) 2020 Kenneth S. Kundert and Kale Kundert\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
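The export script above uses the standard recipe for driving Django's ORM from a plain script: put the project on sys.path if needed, point DJANGO_SETTINGS_MODULE at the settings module, call django.setup(), and only then import models. Reduced to its essentials, with the module paths following the script above ('mysite' and the Question model stand in for the real project):

# Standalone Django ORM bootstrap (sketch); 'mysite' and the Question import
# mirror the script above and are placeholders for the real project and app.
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
django.setup()                       # must run before any model import

from devops.models import Question  # noqa: E402 (import only after setup)
print(Question.objects.count())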
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n\"\"\"\nNestedText: A Human Readable and Writable Data Format\n\"\"\"\n\n__version__ = \"0.0.4\"\n\n__all__ = (\n \"load\",\n \"loads\",\n \"dump\",\n \"dumps\",\n \"DuplicateFieldBehaviour\",\n \"NestedtextError\",\n \"NestedtextType\",\n)\n\nimport collections\nimport enum\nimport io\nimport json\nimport re\nfrom typing import Any, Dict, Iterable, Iterator, List, NoReturn, Optional, Tuple, Union\n\n\nNestedtextType = Union[str, List[\"NestedtextType\"], Dict[str, \"NestedTextType\"]]\nNestedtextContainerType = Union[List[NestedtextType], Dict[str, NestedtextType]]\n\n\nclass NestedtextError(Exception):\n def __init__(\n self, message: str, lineno: Optional[int] = None, colno: Optional[int] = None\n ):\n if lineno is not None:\n message += f\": {lineno}\"\n if colno is not None:\n message += f\":{colno}\"\n super().__init__(message)\n self.lineno = lineno\n self.colno = colno\n\n\ndef _report(message, line, *args, colno=None, **kwargs) -> NoReturn:\n raise NestedtextError(message, line.lineno, colno)\n\n\ndef _indentation_error(line, depth) -> NoReturn:\n _report(\"invalid indentation\", line, colno=depth)\n\n\n# ------------------------------------------------------------------------------\n# Parsing logic\n# ------------------------------------------------------------------------------\n\n\nclass DuplicateFieldBehaviour(str, enum.Enum):\n USE_FIRST = \"use_first\"\n USE_LAST = \"use_last\"\n ERROR = \"error\"\n\n def __repr__(self):\n return str(self)\n\n\nclass _LineType(enum.Enum):\n BLANK = enum.auto()\n COMMENT = enum.auto()\n STRING = enum.auto()\n LIST_ITEM = enum.auto()\n OBJECT_ITEM = enum.auto()\n OBJECT_KEY = enum.auto()\n INLINE_CONTAINER = enum.auto()\n\n def __repr__(self):\n return str(self)\n\n def is_ignorable(self) -> bool:\n return self in [self.BLANK, self.COMMENT]\n\n\nclass _Line(collections.namedtuple(\"_Line\", \"text, lineno, kind, depth, value\")):\n def __new__(\n cls,\n text: str,\n lineno: int,\n kind: _LineType,\n depth: int,\n value: Union[None, str, Tuple[str, Optional[str]]],\n ):\n return super().__new__(cls, text, lineno, kind, depth, value)\n\n\nclass _InvalidLineType(enum.Enum):\n NON_SPACE_INDENT = enum.auto()\n UNRECOGNISED = enum.auto()\n\n def __repr__(self):\n return str(self)\n\n\nclass _InvalidLine(collections.namedtuple(\"_InvalidLine\", \"text, lineno, kind, colno\")):\n def __new__(\n cls,\n text: str,\n lineno: int,\n kind: _InvalidLineType,\n colno: int,\n ):\n return super().__new__(cls, text, lineno, kind, colno)\n\n\nclass _LinesIter(Iterable[_Line]):\n def __init__(self, lines: Iterable[str]):\n self._generator = self._read_lines(lines)\n self._next_line: Optional[_Line] = self._advance_to_next_content_line()\n\n def __iter__(self):\n return self\n\n def __next__(self) -> _Line:\n if self._next_line is None:\n raise StopIteration\n\n this_line = self._next_line\n self._next_line = self._advance_to_next_content_line()\n return this_line\n\n def _read_lines(self, lines: Iterable[str]) -> Iterator[Union[_Line, _InvalidLine]]:\n for idx, line in enumerate(lines):\n yield self._read_line(line, idx + 1)\n\n def _read_line(self, line: str, lineno: int) -> Union[_Line, _InvalidLine]:\n if not line.strip():\n return _Line(line, lineno, _LineType.BLANK, 0, None)\n\n text = 
line.rstrip(\"\\r\\n\")\n\n # Comments can have any leading whitespace.\n if text.lstrip()[0] == \"#\":\n return _Line(line, lineno, _LineType.COMMENT, 0, text.lstrip()[1:])\n\n stripped = text.lstrip(\" \")\n depth = len(text) - len(stripped)\n\n # Otherwise check leading whitespace consists only of spaces.\n if len(stripped.lstrip()) < len(stripped):\n return _InvalidLine(line, lineno, _InvalidLineType.NON_SPACE_INDENT, depth)\n\n def _read_content_line() -> Optional[Tuple[_LineType, Any]]:\n # Now handle normal content lines!\n if stripped == \"-\" or stripped.startswith(\"- \"):\n return _LineType.LIST_ITEM, stripped[2:] or None\n elif stripped == \">\" or stripped.startswith(\"> \"):\n # Include end-of-line characters.\n value = re.sub(r\"> ?\", \"\", line.lstrip(\" \"), count=1)\n return _LineType.STRING, value\n elif stripped == \":\" or stripped.startswith(\": \"):\n # Include end-of-line characters.\n value = re.sub(r\": ?\", \"\", line.lstrip(\" \"), count=1)\n return _LineType.OBJECT_KEY, value\n elif stripped[0] in \"[{\":\n return _LineType.INLINE_CONTAINER, stripped\n\n # Object item?\n match = re.fullmatch(r\"(?P.+?)\\s*:(?: (?P.*))?\", stripped)\n if match:\n return _LineType.OBJECT_ITEM, tuple(match.groups())\n\n return None\n\n result = _read_content_line()\n if result:\n return _Line(line, lineno, result[0], depth, result[1])\n else:\n return _InvalidLine(line, lineno, _InvalidLineType.UNRECOGNISED, depth)\n\n def _advance_to_next_content_line(self) -> Optional[_Line]:\n \"\"\"Advance the generator the next useful line and return it.\"\"\"\n while True:\n next_line = next(self._generator, None)\n if isinstance(next_line, _InvalidLine):\n _report(\"invalid line\", next_line, colno=next_line.colno)\n if next_line is None or not next_line.kind.is_ignorable():\n break\n return next_line\n\n def peek_next(self) -> Optional[_Line]:\n return self._next_line\n\n\nclass _Parser:\n def __init__(self, *, on_dup=DuplicateFieldBehaviour.ERROR):\n self.on_dup = on_dup\n\n def parse(self, lines: Iterable[str]):\n lines = _LinesIter(lines)\n if lines.peek_next() is None:\n return None\n return self._read_value(lines, 0)\n\n def _read_value(self, lines: _LinesIter, depth: int) -> Union[str, List, Dict]:\n if lines.peek_next().kind is _LineType.STRING:\n return self._read_string(lines, depth)\n elif lines.peek_next().kind is _LineType.LIST_ITEM:\n return self._read_list(lines, depth)\n elif lines.peek_next().kind in [_LineType.OBJECT_ITEM, _LineType.OBJECT_KEY]:\n return self._read_object(lines, depth)\n elif lines.peek_next().kind is _LineType.INLINE_CONTAINER:\n return self._read_inline_container(lines, depth)\n _report(\"unrecognized line\", next(lines))\n\n def _read_string(self, lines: _LinesIter, depth: int) -> str:\n data = []\n while (\n lines.peek_next()\n and lines.peek_next().kind is _LineType.STRING\n and lines.peek_next().depth >= depth\n ):\n line = next(lines)\n data.append(line.value)\n if line.depth != depth:\n _indentation_error(line, depth)\n data[-1] = data[-1].rstrip(\"\\r\\n\")\n return \"\".join(data)\n\n def _read_list(self, lines: _LinesIter, depth: int) -> List[NestedtextType]:\n data = []\n while lines.peek_next() and lines.peek_next().depth >= depth:\n line = next(lines)\n if line.depth != depth:\n _indentation_error(line, depth)\n if line.kind is not _LineType.LIST_ITEM:\n _report(\"expected list item\", line, colno=depth)\n if line.value:\n data.append(line.value)\n else:\n # Value may simply be empty, or it may be on next line, in which\n # case it must be 
indented.\n if lines.peek_next() is None:\n value = \"\"\n else:\n depth_of_next = lines.peek_next().depth\n if depth_of_next > depth:\n value = self._read_value(lines, depth_of_next)\n else:\n value = \"\"\n data.append(value)\n return data\n\n def _read_object(self, lines: _LinesIter, depth: int) -> Dict[str, NestedtextType]:\n data = {}\n while lines.peek_next() and lines.peek_next().depth >= depth:\n line = lines.peek_next()\n if line.depth != depth:\n _indentation_error(line, depth)\n if line.kind is _LineType.OBJECT_ITEM:\n next(lines) # Advance the iterator\n key, value = line.value\n elif line.kind is _LineType.OBJECT_KEY:\n key = self._read_object_key(lines, depth)\n value = None\n else:\n _report(\"expected object item\", line, colno=depth)\n if not value:\n if lines.peek_next() is None:\n if line.kind is _LineType.OBJECT_KEY:\n raise NestedtextError(\n \"expected value after multiline object key\"\n )\n value = \"\"\n else:\n depth_of_next = lines.peek_next().depth\n if depth_of_next > depth:\n value = self._read_value(lines, depth_of_next)\n elif line.kind is _LineType.OBJECT_KEY:\n raise NestedtextError(\n \"expected value after multiline object key\"\n )\n else:\n value = \"\"\n if key in data:\n # Found duplicate key.\n if self.on_dup == DuplicateFieldBehaviour.USE_FIRST:\n continue\n elif self.on_dup == DuplicateFieldBehaviour.USE_LAST:\n pass\n elif self.on_dup == DuplicateFieldBehaviour.ERROR:\n _report(\"duplicate key\", line, colno=depth)\n data[key] = value\n return data\n\n def _read_object_key(self, lines: _LinesIter, depth: int) -> str:\n data = []\n while (\n lines.peek_next()\n and lines.peek_next().kind is _LineType.OBJECT_KEY\n and lines.peek_next().depth == depth\n ):\n line = next(lines)\n data.append(line.value)\n data[-1] = data[-1].rstrip(\"\\r\\n\")\n return \"\".join(data)\n\n def _read_inline_container(\n self, lines: _LinesIter, depth: int\n ) -> NestedtextContainerType:\n line = next(lines)\n assert line.kind is _LineType.INLINE_CONTAINER\n line_text = line.value\n\n # Convert into valid JSON!\n\n # Escape quotes and tabs.\n line_text.replace('\"', '\\\\\"')\n line_text.replace(\"\\t\", \"\\\\t\")\n # Quote list items.\n line_text = re.sub(\n r\"([\\[,])\\s*(?P[^\\[\\]\\{\\}]+?)\\s*(?=[,\\]])\", r'\\1\"\\2\"', line_text\n )\n # Quote dict keys.\n line_text = re.sub(\n r\"([\\{,])\\s*(?P[^\\[\\]\\{\\}:,]+?)\\s*(?=:)\", r'\\1\"\\2\"', line_text\n )\n # Quote dict values.\n line_text = re.sub(\n r\"([\\{,][^\\[\\]]+?):\\s*(?P[^\\[\\]\\{\\}:]+?)\\s*(?=[,\\}])\",\n r'\\1:\"\\2\"',\n line_text,\n )\n try:\n return json.loads(line_text)\n except json.JSONDecodeError:\n _report(\"Invalid inline list/object\", line)\n\n\ndef loads(\n content: str, *, on_dup=DuplicateFieldBehaviour.ERROR\n) -> Optional[NestedtextType]:\n \"\"\"\n Deserialize 'content' (a NestedText document) to a Python object.\n \"\"\"\n return load(io.StringIO(content), on_dup=on_dup)\n\n\ndef load(\n stream: Iterable, *, on_dup=DuplicateFieldBehaviour.ERROR\n) -> Optional[NestedtextType]:\n \"\"\"\n Deserialize 'stream' (an iterable of lines corresponding to a NestedText\n document) to a Python object.\n \"\"\"\n return _Parser(on_dup=on_dup).parse(stream)\n\n\n# ------------------------------------------------------------------------------\n# Dumping logic\n# ------------------------------------------------------------------------------\n\n\nclass _Dumper:\n def __init__(self, sort_keys: bool, indent: int):\n self.sort_keys = sort_keys\n self.indent_size = indent\n\n def dump(self, obj, 
writer):\n if isinstance(obj, str):\n self._dump_multiline_str(obj, writer, 0)\n elif isinstance(obj, list):\n self._dump_list(obj, writer, 0)\n elif isinstance(obj, dict):\n self._dump_object(obj, writer, 0)\n else:\n raise NestedtextError(\n \"Unsupported type to dump {!r}\".format(type(obj).__name__)\n )\n\n def _dump_multiline_str(self, string: str, writer, indent: int):\n lines = string.splitlines(keepends=True)\n for line in lines:\n writer.write(\" \" * indent)\n writer.write(\"> \" if line.strip(\"\\r\\n\") else \">\")\n writer.write(line)\n if string == \"\" or string[-1] in \"\\r\\n\":\n writer.write(\" \" * indent)\n writer.write(\">\")\n\n def _dump_list(self, values: list, writer, indent: int):\n if len(values) == 0:\n writer.write(\" \" * indent)\n writer.write(\"[]\\n\")\n return\n for value in values:\n writer.write(\" \" * indent)\n writer.write(\"-\")\n if isinstance(value, str):\n if \"\\r\" in value or \"\\n\" in value:\n writer.write(\"\\n\")\n self._dump_multiline_str(value, writer, indent + self.indent_size)\n elif value:\n writer.write(\" \")\n writer.write(value)\n writer.write(\"\\n\")\n elif isinstance(value, list):\n writer.write(\"\\n\")\n self._dump_list(value, writer, indent + self.indent_size)\n elif isinstance(value, dict):\n writer.write(\"\\n\")\n self._dump_object(value, writer, indent + self.indent_size)\n else:\n raise NestedtextError(\n \"Unsupported type to dump {!r}\".format(type(value).__name__)\n )\n\n def _dump_object(self, obj: dict, writer, indent: int):\n if len(obj) == 0:\n writer.write(\" \" * indent)\n writer.write(\"{}\\n\")\n return\n for key, value in obj.items():\n if not isinstance(key, str):\n raise NestedtextError(\"Unsupported object key type to dump\")\n if not re.fullmatch(r\"[^\\[\\{:\\->\\s][^:\\r\\n]*\", key):\n self._dump_multiline_object_key(key, writer, indent)\n force_multiline = True\n else:\n writer.write(\" \" * indent)\n writer.write(key)\n writer.write(\":\")\n force_multiline = False\n\n if isinstance(value, str):\n if \"\\r\" in value or \"\\n\" in value or force_multiline:\n writer.write(\"\\n\")\n self._dump_multiline_str(value, writer, indent + self.indent_size)\n elif value:\n writer.write(\" \")\n writer.write(value)\n writer.write(\"\\n\")\n elif isinstance(value, list):\n writer.write(\"\\n\")\n self._dump_list(value, writer, indent + self.indent_size)\n elif isinstance(value, dict):\n writer.write(\"\\n\")\n self._dump_object(value, writer, indent + self.indent_size)\n else:\n raise NestedtextError(\n \"Unsupported type to dump {!r}\".format(type(value).__name__)\n )\n\n def _dump_multiline_object_key(self, key: str, writer, indent: int):\n lines = key.splitlines(keepends=True)\n for line in lines:\n writer.write(\" \" * indent)\n writer.write(\": \" if line.strip(\"\\r\\n\") else \":\")\n writer.write(line)\n if key == \"\" or key[-1] in \"\\r\\n\":\n writer.write(\" \" * indent)\n writer.write(\":\")\n\n\ndef dumps(obj, *, sort_keys: bool = False, indent: int = 4) -> str:\n \"\"\"\n Serialize 'obj' to NestedText and return the result.\n \"\"\"\n string = io.StringIO()\n dump(obj, string, sort_keys=sort_keys, indent=indent)\n return string.getvalue()\n\n\ndef dump(obj, writer, *, sort_keys: bool = False, indent: int = 4) -> None:\n \"\"\"\n Serialize 'obj' to NestedText and write out to 'writer' (an object that\n supports the '.write()' method).\n \"\"\"\n _Dumper(sort_keys, indent).dump(obj, writer)\n", "sub_path": "src/nestedtext.py", "file_name": "nestedtext.py", "file_ext": "py", "file_size_in_byte": 
17729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "typing.Union", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.NoReturn", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.NoReturn", "line_number": 69, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 78, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 87, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 88, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 89, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 90, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 91, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 92, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 93, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 94, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 103, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 110, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 115, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 116, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 117, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 123, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 134, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 150, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 177, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 181, "usage_type": "call"}, {"api_name": "re.fullmatch", "line_number": 187, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 154, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 199, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 209, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 217, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 248, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 272, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 339, 
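Internally, _LinesIter above is a one-item-lookahead iterator: it pre-reads the next meaningful line so the parser can branch on peek_next() without consuming input. The pattern in miniature — it assumes, as the parser does, that the underlying stream never yields None:

# One-item lookahead iterator, the pattern behind _LinesIter.peek_next().
class Peekable:
    def __init__(self, iterable):
        self._it = iter(iterable)
        self._next = next(self._it, None)   # None doubles as the sentinel

    def peek_next(self):
        return self._next

    def __iter__(self):
        return self

    def __next__(self):
        if self._next is None:
            raise StopIteration
        item, self._next = self._next, next(self._it, None)
        return item

lines = Peekable(['a', 'b'])
assert lines.peek_next() == 'a' and next(lines) == 'a' and lines.peek_next() == 'b'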
"usage_type": "call"}, {"api_name": "re.sub", "line_number": 343, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 347, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 353, "usage_type": "call"}, {"api_name": "json.JSONDecodeError", "line_number": 354, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 364, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 360, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 368, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 369, "usage_type": "name"}, {"api_name": "re.fullmatch", "line_number": 444, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 487, "usage_type": "call"}]} +{"seq_id": "334230230", "text": "# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nfrom functools import wraps\nfrom flask import request, jsonify\n\nimport auth\nimport views\nfrom app import app\nfrom models import db, Stand, user_to_team\n\n\nwith app.app_context():\n db.create_all()\n\napp.some_map = {}\n\n\ndef my_route(action_name):\n def wrapper(func):\n @wraps(func)\n def wrapped_func(*args, **kwargs):\n return func(*args, **kwargs)\n app.some_map[action_name] = wrapped_func\n return wrapped_func\n return wrapper\n\n\n@app.errorhandler(Exception)\ndef handle_exceptions(error):\n logging.exception(error)\n return jsonify({'text': 'unhandled exception ({})'.format(type(error))}), 500\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return app.send_static_file('index.html')\n\n\n@app.route('/slack/auth', methods=['GET'])\ndef after_grant():\n code = request.args.get('code')\n\n token_info = auth.get_token_by_code(code, app)\n\n return jsonify({'Wow': token_info})\n\n\n@app.route('/stand/message', methods=['GET', 'POST'])\ndef response_to_message():\n data = json.loads(request.form.get('payload'))\n user = data.get('user', {}).get('name')\n\n if user not in user_to_team:\n return jsonify({'text': 'Вы не можете брать стенды', 'replace_original': False})\n\n stand_name = data.get('actions', [])[0].get('name')\n\n if not stand_name:\n return jsonify({'text': 'Что-то пошло не так', 'replace_original': False})\n\n stand = Stand.by_name(stand_name)\n\n if stand is None:\n return jsonify({'text': 'Не могу найти стенд', 'replace_original': False})\n\n if not stand.is_free and stand.owner != user:\n return jsonify({'text': 'Не могу забрать чужой стенд', 'replace_original': False})\n\n if stand.is_free:\n team = user_to_team.get(user)\n if team != stand.team:\n return jsonify({'text': 'Этот стенд принадлежит чужой команде', 'replace_original': False})\n stand.take(user)\n else:\n stand.release()\n\n return stands_list({'user_name': user})\n\n\n@app.route('/stand', methods=['GET', 'POST'])\ndef stand_slash_command():\n data = request.form\n\n if not data:\n return jsonify({'text': 'not well-formed request - empty data'}), 400\n\n if data.get('token') != app.config.get('HHRU_SLACK_TOKEN'):\n return jsonify({'text': 'bad credentials ({})'.format(data)}), 403\n\n text = data.get('text')\n\n if text is None:\n return jsonify({'text': 'not well-formed request - empty action'}), 400\n\n action = text.strip().split(' ')[0].strip()\n handler = app.some_map.get(action)\n\n if handler is None:\n return jsonify({'text': 'incorrect action'}), 404\n\n return handler(data)\n\n\n@my_route('list-all')\ndef all_stands_list(_):\n stands = Stand.get_list()\n\n if not stands:\n return jsonify({'text': 'Что-то нет стендов'})\n\n return jsonify({\n 'response_type': 
@app.route('/stand', methods=['GET', 'POST'])\ndef stand_slash_command():\n data = request.form\n\n if not data:\n return jsonify({'text': 'not well-formed request - empty data'}), 400\n\n if data.get('token') != app.config.get('HHRU_SLACK_TOKEN'):\n return jsonify({'text': 'bad credentials ({})'.format(data)}), 403\n\n text = data.get('text')\n\n if text is None:\n return jsonify({'text': 'not well-formed request - empty action'}), 400\n\n action = text.strip().split(' ')[0].strip()\n handler = app.some_map.get(action)\n\n if handler is None:\n return jsonify({'text': 'incorrect action'}), 404\n\n return handler(data)\n\n\n@my_route('list-all')\ndef all_stands_list(_):\n stands = Stand.get_list()\n\n if not stands:\n return jsonify({'text': 'There seem to be no stands'})\n\n return jsonify({\n 'response_type': 'in_channel',\n 'text': 'Stands',\n 'color': '#36a64f',\n 'attachments': [\n {\n 'fields': [\n {\n 'title': stand.name,\n 'value': '({})'.format(stand.team),\n 'short': False\n }\n for stand in stands\n ]\n }\n ]\n })\n\n\n@my_route('')\n@my_route('list')\ndef stands_list(data):\n user = data.get('user_name')\n if user not in user_to_team:\n return jsonify({'text': 'Sorry, you are not on the list, {}'.format(user)}), 403\n\n stands = Stand.list_by_team(user_to_team.get(user))\n\n if not stands:\n return jsonify({'text': 'Your team has no stands'})\n\n return jsonify(views.get_stands_info(stands))\n\n\nif __name__ == '__main__':\n app.run(port=app.config['PORT'])\n", "sub_path": "pystand/pystand.py", "file_name": "pystand.py", "file_ext": "py", "file_size_in_byte": 3948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "app.app.app_context", "line_number": 14, "usage_type": "call"}, {"api_name": "app.app", "line_number": 14, "usage_type": "name"}, {"api_name": "models.db.create_all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.db", "line_number": 15, "usage_type": "name"}, {"api_name": "app.app.some_map", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 17, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 22, "usage_type": "call"}, {"api_name": "app.app.some_map", "line_number": 25, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 25, "usage_type": "name"}, {"api_name": "logging.exception", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "app.app.errorhandler", "line_number": 30, "usage_type": "call"}, {"api_name": "app.app", "line_number": 30, "usage_type": "name"}, {"api_name": "app.app.send_static_file", "line_number": 38, "usage_type": "call"}, {"api_name": "app.app", "line_number": 38, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 36, "usage_type": "call"}, {"api_name": "app.app", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "auth.get_token_by_code", "line_number": 45, "usage_type": "call"}, {"api_name": "app.app", "line_number": 45, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 47, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 41, "usage_type": "call"}, {"api_name": "app.app", "line_number": 41, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "models.user_to_team", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Stand.by_name", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Stand", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 69, "usage_type": "call"}, 
{"api_name": "models.user_to_team.get", "line_number": 72, "usage_type": "call"}, {"api_name": "models.user_to_team", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 50, "usage_type": "call"}, {"api_name": "app.app", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 87, "usage_type": "call"}, {"api_name": "app.app.config.get", "line_number": 89, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 89, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 95, "usage_type": "call"}, {"api_name": "app.app.some_map.get", "line_number": 98, "usage_type": "call"}, {"api_name": "app.app.some_map", "line_number": 98, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 101, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 82, "usage_type": "call"}, {"api_name": "app.app", "line_number": 82, "usage_type": "name"}, {"api_name": "models.Stand.get_list", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Stand", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 113, "usage_type": "call"}, {"api_name": "models.user_to_team", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 137, "usage_type": "call"}, {"api_name": "models.Stand.list_by_team", "line_number": 139, "usage_type": "call"}, {"api_name": "models.Stand", "line_number": 139, "usage_type": "name"}, {"api_name": "models.user_to_team.get", "line_number": 139, "usage_type": "call"}, {"api_name": "models.user_to_team", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 144, "usage_type": "call"}, {"api_name": "views.get_stands_info", "line_number": 144, "usage_type": "call"}, {"api_name": "app.app.run", "line_number": 148, "usage_type": "call"}, {"api_name": "app.app", "line_number": 148, "usage_type": "name"}, {"api_name": "app.app.config", "line_number": 148, "usage_type": "attribute"}]} +{"seq_id": "304320504", "text": "\"\"\"The tamuro API server\"\"\"\nimport logging\nimport logging.config\nfrom flask import Flask, Blueprint, request, session, make_response, g, jsonify\nfrom tamuro.database import create_pool, init_db\nfrom tamuro.models import sesses\nfrom tamuro import service\n\nclass ServiceContext(object):\n \"\"\"The service context\"\"\"\n\n def __init__(self, conn, sch, sess):\n self.conn = conn\n self.sch = sch\n self.sess = sess\n\nclass Api(object):\n \"\"\"The tamuro API server\"\"\"\n\n def __init__(self, conf, pool, name):\n \"\"\"Constructor\"\"\"\n self.conf = conf\n self.pool = pool\n self.name = name\n self.blueprint = Blueprint(name, __name__)\n logging.info('initialized: HTTP server')\n\n def init_api(self):\n \"\"\"Initialize API environment.\"\"\"\n init_db(self.pool, self.name)\n logging.info('initialized: database schema')\n\n # registers handlers\n self.blueprint.add_url_rule(\n 
'/',\n methods=['GET'],\n view_func=self.route_top\n )\n self.blueprint.add_url_rule(\n '/setup',\n methods=['GET'],\n view_func=self.route_setup\n )\n self.blueprint.add_url_rule(\n '/groups/<group_id>',\n methods=['GET', 'PUT', 'DELETE'],\n view_func=self.route_groups\n )\n self.blueprint.add_url_rule(\n '/groups/<group_id>/groups',\n methods=['PUT'],\n view_func=self.route_groups_groups\n )\n self.blueprint.add_url_rule(\n '/groups/<group_id>/sub_groups',\n methods=['POST', 'PUT'],\n view_func=self.route_groups_sub_groups\n )\n self.blueprint.add_url_rule(\n '/groups/<group_id>/owners',\n methods=['PUT'],\n view_func=self.route_groups_owners\n )\n self.blueprint.add_url_rule(\n '/groups/<group_id>/members',\n methods=['POST', 'PUT'],\n view_func=self.route_groups_members\n )\n self.blueprint.add_url_rule(\n '/users/<user_id>',\n methods=['GET', 'PUT', 'DELETE'],\n view_func=self.route_users\n )\n self.blueprint.add_url_rule(\n '/users/<user_id>/groups',\n methods=['PUT'],\n view_func=self.route_users_groups\n )\n self.blueprint.add_url_rule(\n '/users/<user_id>/own_groups',\n methods=['PUT'],\n view_func=self.route_users_own_groups\n )\n self.blueprint.add_url_rule(\n '/users/<user_id>/profs',\n methods=['GET', 'PUT'],\n view_func=self.route_users_profs\n )\n self.blueprint.add_url_rule(\n '/users/<user_id>/certs',\n methods=['GET', 'PUT', 'DELETE'],\n view_func=self.route_users_certs\n )\n self.blueprint.add_url_rule(\n '/sessions',\n methods=['POST'],\n view_func=self.route_sessions\n )\n self.blueprint.add_url_rule(\n '/sessions/<from_ts>/to/<to_ts>',\n methods=['GET'],\n view_func=self.route_sessions_from_to\n )\n self.blueprint.add_url_rule(\n '/sessions/<sess_id>',\n methods=['DELETE'],\n view_func=self.route_sessions_sess_id\n )\n self.blueprint.add_url_rule(\n '/tokens',\n methods=['GET', 'POST'],\n view_func=self.route_tokens\n )\n self.blueprint.add_url_rule(\n '/tokens/<sess_id>',\n methods=['GET'],\n view_func=self.route_my_token\n )\n self.blueprint.add_url_rule(\n '/my_session',\n methods=['GET', 'DELETE'],\n view_func=self.route_my_session\n )\n logging.info('initialized: HTTP request routes')\n\n def route_top(self):\n \"\"\"Route: /\"\"\"\n ret = service.get_top(self.ctx())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_setup(self):\n \"\"\"Route: /setup\"\"\"\n return self.set_cookies(service.get_setup(self.ctx()))\n\n def route_groups(self, group_id):\n \"\"\"Route: /groups/<group_id>\"\"\"\n if request.method == 'GET':\n ret = service.get_groups(self.ctx(), to_array(group_id))\n elif request.method == 'PUT':\n ret = service.put_group(self.ctx(), group_id, request.get_json())\n elif request.method == 'DELETE':\n ret = service.delete_group(self.ctx(), group_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_groups_groups(self, group_id):\n \"\"\"Route: /groups/<group_id>/groups\"\"\"\n ret = service.put_group_groups(self.ctx(), group_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_groups_sub_groups(self, group_id):\n \"\"\"Route: /groups/<group_id>/sub_groups\"\"\"\n if request.method == 'POST':\n ret = service.post_sub_group(self.ctx(), group_id, request.get_json())\n elif request.method == 'PUT':\n ret = service.put_sub_groups(self.ctx(), group_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_groups_owners(self, group_id):\n \"\"\"Route: /groups/<group_id>/owners\"\"\"\n ret = service.put_owners(self.ctx(), group_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_groups_members(self, group_id):\n \"\"\"Route: /groups/<group_id>/members\"\"\"\n if request.method 
== 'POST':\n ret = service.post_member(self.ctx(), group_id, request.get_json())\n elif request.method == 'PUT':\n ret = service.put_members(self.ctx(), group_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_users(self, user_id):\n \"\"\"Route: /users/<user_id>\"\"\"\n if request.method == 'GET':\n ret = service.get_users(self.ctx(), to_array(user_id))\n elif request.method == 'PUT':\n ret = service.put_user(self.ctx(), user_id, request.get_json())\n elif request.method == 'DELETE':\n ret = service.delete_user(self.ctx(), user_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_users_groups(self, user_id):\n \"\"\"Route: /users/<user_id>/groups\"\"\"\n ret = service.put_user_groups(self.ctx(), user_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_users_own_groups(self, user_id):\n \"\"\"Route: /users/<user_id>/own_groups\"\"\"\n ret = service.put_own_groups(self.ctx(), user_id, request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_users_profs(self, user_id):\n \"\"\"Route: /users/<user_id>/profs\"\"\"\n if request.method == 'GET':\n ret = service.get_user_profs(self.ctx(), user_id)\n elif request.method == 'PUT':\n ret = service.put_user_profs(self.ctx(), user_id, request.get_json())\n return jsonify(ret) if ret or ret == [] else (jsonify({}), 403)\n\n def route_users_certs(self, user_id):\n \"\"\"Route: /users/<user_id>/certs\"\"\"\n if request.method == 'GET':\n ret = service.get_certs(self.ctx(), user_id)\n elif request.method == 'PUT':\n ret = service.put_cert(self.ctx(), user_id, request.get_json(),\n self.conf['auth']['seed'])\n elif request.method == 'DELETE':\n ret = service.delete_cert(self.ctx(), user_id, request.get_json())\n return jsonify(ret) if ret or ret == [] else (jsonify({}), 403)\n\n def route_sessions(self):\n \"\"\"Route: /sessions\"\"\"\n sess = service.post_session(self.ctx(), request.get_json(), self.conf['auth']['seed'])\n if sess:\n session['tamuro_sess_id'] = sess['id']\n del sess['id']\n return jsonify(sess)\n else:\n return jsonify({}), 403\n\n def route_sessions_from_to(self, from_ts, to_ts):\n \"\"\"Route: /sessions/<from_ts>/to/<to_ts>\"\"\"\n ret = service.get_sessions(self.ctx(), from_ts, to_ts)\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_sessions_sess_id(self, sess_id):\n \"\"\"Route: /sessions/<sess_id>\"\"\"\n ret = service.delete_session(self.ctx(), sess_id)\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_tokens(self):\n \"\"\"Route: /tokens\"\"\"\n if request.method == 'GET':\n ret = service.get_token_status(self.ctx())\n return jsonify(ret) if ret else (jsonify({}), 403)\n elif request.method == 'POST':\n ret = service.post_token(self.ctx(), request.get_json())\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def route_my_token(self, sess_id):\n \"\"\"Route: /tokens/<sess_id>\"\"\"\n sess = sesses.touch(self.ctx().conn, self.name, sess_id)\n if not sess or sess['provider'] != 'token':\n g.conn.rollback()\n return (jsonify({}), 404)\n else:\n g.conn.commit()\n return self.set_cookies(sess['id'])\n\n def route_my_session(self):\n \"\"\"Route: /my_session\"\"\"\n sess = self.ctx().sess\n if not sess:\n return jsonify({}), 403\n if request.method == 'GET':\n del sess['id']\n return jsonify(sess)\n elif request.method == 'DELETE':\n ret = service.delete_session(self.ctx(), sess['id'])\n session.pop('tamuro_sess_id', None)\n return jsonify(ret) if ret else (jsonify({}), 403)\n\n def ctx(self):\n \"\"\"Returns the service 
context.\"\"\"\n if not hasattr(g, 'conn'):\n g.conn = self.pool.getconn()\n g.pool = self.pool\n sess = None\n if 'tamuro_sess_id' in session:\n sess = sesses.touch(g.conn, self.name, session['tamuro_sess_id'])\n g.conn.commit()\n return ServiceContext(g.conn, self.name, sess)\n\n def set_cookies(self, sess_id):\n \"\"\"Sets session id in cookies\"\"\"\n if sess_id:\n session['tamuro_sess_id'] = sess_id\n resp = make_response(jsonify({}), 302)\n resp.headers['Location'] = self.conf['instances'][self.name]['app_url']\n return resp\n else:\n resp = make_response(jsonify({}), 403)\n resp.set_cookie('tamuro_sess_id', '')\n return resp\n\ndef to_array(value):\n \"\"\"Convert a string value to array if the value starts with '+'\"\"\"\n return value[1:].split('+') if value[0] == '+' else value\n\ndef close_db(error):\n \"\"\"Closes the database connection.\"\"\"\n if hasattr(g, 'conn'):\n g.pool.putconn(g.conn)\n\ndef init_app(conf):\n \"\"\"Initializes the server.\"\"\"\n logging.config.dictConfig(conf['logging'])\n logging.info('initialized: logger')\n pool = create_pool(conf['database'])\n logging.info('initialized: database connection pool')\n app = Flask(__name__)\n app.config.update(conf['flask'])\n logging.info('configured: Flask app')\n for name in conf['instances']:\n api = Api(conf, pool, name)\n api.init_api()\n logging.info('initialized: blueprint instance: %s', name)\n app.register_blueprint(api.blueprint, url_prefix=conf['instances'][name]['path_prefix'])\n logging.info('registered: blueprint instance: %s', name)\n app.teardown_appcontext(close_db)\n logging.info('initialized: Flask app')\n return app, pool\n", "sub_path": "tamuro/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 11595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Blueprint", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 26, "usage_type": "call"}, {"api_name": "tamuro.database.init_db", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 124, "usage_type": "call"}, {"api_name": "tamuro.service.get_top", "line_number": 128, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 129, "usage_type": "call"}, {"api_name": "tamuro.service.get_setup", "line_number": 133, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 137, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 137, "usage_type": "name"}, {"api_name": "tamuro.service.get_groups", "line_number": 138, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "tamuro.service.put_group", "line_number": 140, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": 
"name"}, {"api_name": "tamuro.service.delete_group", "line_number": 142, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 143, "usage_type": "call"}, {"api_name": "tamuro.service.put_group_groups", "line_number": 147, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 147, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 147, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 152, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 152, "usage_type": "name"}, {"api_name": "tamuro.service.post_sub_group", "line_number": 153, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": "tamuro.service.put_sub_groups", "line_number": 155, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 156, "usage_type": "call"}, {"api_name": "tamuro.service.put_owners", "line_number": 160, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 160, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 161, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 165, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 165, "usage_type": "name"}, {"api_name": "tamuro.service.post_member", "line_number": 166, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 167, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 167, "usage_type": "name"}, {"api_name": "tamuro.service.put_members", "line_number": 168, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 168, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 168, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 173, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 173, "usage_type": "name"}, {"api_name": "tamuro.service.get_users", "line_number": 174, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 174, "usage_type": "name"}, {"api_name": 
"flask.request.method", "line_number": 175, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 175, "usage_type": "name"}, {"api_name": "tamuro.service.put_user", "line_number": 176, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 176, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "tamuro.service.delete_user", "line_number": 178, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 178, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 178, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 178, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 179, "usage_type": "call"}, {"api_name": "tamuro.service.put_user_groups", "line_number": 183, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 183, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 183, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 183, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 184, "usage_type": "call"}, {"api_name": "tamuro.service.put_own_groups", "line_number": 188, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 188, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 188, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 188, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 193, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 193, "usage_type": "name"}, {"api_name": "tamuro.service.get_user_profs", "line_number": 194, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 194, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 195, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 195, "usage_type": "name"}, {"api_name": "tamuro.service.put_user_profs", "line_number": 196, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 197, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 201, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 201, "usage_type": "name"}, {"api_name": "tamuro.service.get_certs", "line_number": 202, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 202, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 203, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 203, "usage_type": "name"}, {"api_name": "tamuro.service.put_cert", "line_number": 204, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 204, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 204, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 204, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 206, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 206, "usage_type": "name"}, {"api_name": "tamuro.service.delete_cert", "line_number": 207, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 207, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 207, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 207, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 208, "usage_type": "call"}, {"api_name": "tamuro.service.post_session", "line_number": 212, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 212, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 216, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 218, "usage_type": "call"}, {"api_name": "tamuro.service.get_sessions", "line_number": 222, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 223, "usage_type": "call"}, {"api_name": "tamuro.service.delete_session", "line_number": 227, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 227, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 232, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": "tamuro.service.get_token_status", "line_number": 233, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 233, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 234, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "tamuro.service.post_token", "line_number": 236, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 236, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 236, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 237, "usage_type": "call"}, {"api_name": "tamuro.models.sesses.touch", "line_number": 241, "usage_type": "call"}, {"api_name": "tamuro.models.sesses", "line_number": 241, "usage_type": "name"}, {"api_name": "flask.g.conn.rollback", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.g.conn", "line_number": 243, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 244, "usage_type": "call"}, {"api_name": "flask.g.conn.commit", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.g.conn", "line_number": 246, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 254, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 254, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 257, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 257, "usage_type": "name"}, 
{"api_name": "tamuro.service.delete_session", "line_number": 258, "usage_type": "call"}, {"api_name": "tamuro.service", "line_number": 258, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 259, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 259, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 260, "usage_type": "call"}, {"api_name": "flask.g", "line_number": 264, "usage_type": "argument"}, {"api_name": "flask.g.conn", "line_number": 265, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 265, "usage_type": "name"}, {"api_name": "flask.g.pool", "line_number": 266, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 266, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 268, "usage_type": "name"}, {"api_name": "tamuro.models.sesses.touch", "line_number": 269, "usage_type": "call"}, {"api_name": "tamuro.models.sesses", "line_number": 269, "usage_type": "name"}, {"api_name": "flask.g.conn", "line_number": 269, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 269, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 269, "usage_type": "name"}, {"api_name": "flask.g.conn.commit", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.g.conn", "line_number": 270, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 270, "usage_type": "name"}, {"api_name": "flask.g.conn", "line_number": 271, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 271, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 276, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 277, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 277, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.g", "line_number": 291, "usage_type": "argument"}, {"api_name": "flask.g.pool.putconn", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.g.pool", "line_number": 292, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 292, "usage_type": "name"}, {"api_name": "flask.g.conn", "line_number": 292, "usage_type": "attribute"}, {"api_name": "logging.config.dictConfig", "line_number": 296, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 296, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 297, "usage_type": "call"}, {"api_name": "tamuro.database.create_pool", "line_number": 298, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 299, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 300, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 302, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 306, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 308, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 310, "usage_type": "call"}]} +{"seq_id": "99149476", "text": "from machine import reset, Timer\nfrom HTTPServer import parse_querystring, failure, success, http_daemon\nfrom PyComFunctions import get_device_sid, log_message\nimport json\n\n\ndef landing_endpoint(**kwargs):\n with open(\"configuration.html\", \"r\") as f:\n html = f.read()\n\n if 'message' in kwargs:\n message = kwargs['message']\n else:\n message = ''\n\n html = html.format(device_sid=get_device_sid(), message=message)\n\n return 
html\n\n\ndef reset_handler(*args, **kwargs):\n log_message(\"Resetting device\")\n reset()\n\n\ndef configure_form_endpoint(**kwargs):\n try:\n log_message(\"Decoding credentials\")\n body = parse_querystring(kwargs['body'])\n ssid = body['ssid']\n password = body['password']\n except Exception as e:\n log_message(\"Malformed request: {}\".format(e))\n return failure\n\n try:\n configuration = {}\n configuration['ssid'] = ssid\n configuration['password'] = password\n log_message(\"Storing configuration: {}\".format(configuration))\n with open(\"configuration.json\", \"w\") as f:\n f.write(json.dumps(configuration))\n\n log_message(\"Configuration successful. Returning successful response and resetting in 5 seconds\")\n # Schedule reset\n Timer.Alarm(handler=reset_handler, s=5)\n return success\n\n except Exception as e:\n log_message(\"Failed to configure network settings: {}\".format(e))\n return landing_endpoint(message=\"Provided configuration failed: {}\".format(e))\n\n\npath_to_handler = {\n \"/\": landing_endpoint,\n \"/configure_form\": configure_form_endpoint\n}\n\n\ndef jormunitor_server_daemon():\n return http_daemon(ssid=\"Jormunitor-{}\".format(get_device_sid()),\n path_to_handler=path_to_handler)\n", "sub_path": "WiPy/ConfigurationWebServer.py", "file_name": "ConfigurationWebServer.py", "file_ext": "py", "file_size_in_byte": 1795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "PyComFunctions.get_device_sid", "line_number": 16, "usage_type": "call"}, {"api_name": "PyComFunctions.log_message", "line_number": 22, "usage_type": "call"}, {"api_name": "machine.reset", "line_number": 23, "usage_type": "call"}, {"api_name": "PyComFunctions.log_message", "line_number": 28, "usage_type": "call"}, {"api_name": "HTTPServer.parse_querystring", "line_number": 29, "usage_type": "call"}, {"api_name": "PyComFunctions.log_message", "line_number": 33, "usage_type": "call"}, {"api_name": "HTTPServer.failure", "line_number": 34, "usage_type": "name"}, {"api_name": "PyComFunctions.log_message", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "PyComFunctions.log_message", "line_number": 44, "usage_type": "call"}, {"api_name": "machine.Timer.Alarm", "line_number": 46, "usage_type": "call"}, {"api_name": "machine.Timer", "line_number": 46, "usage_type": "name"}, {"api_name": "HTTPServer.success", "line_number": 47, "usage_type": "name"}, {"api_name": "PyComFunctions.log_message", "line_number": 50, "usage_type": "call"}, {"api_name": "HTTPServer.http_daemon", "line_number": 61, "usage_type": "call"}, {"api_name": "PyComFunctions.get_device_sid", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "353147633", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nPresence analyzer unit tests.\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport datetime\nimport json\nimport os.path\nimport unittest\nfrom collections import defaultdict\n\nfrom presence_analyzer import main, utils\n\nTEST_DATA_CSV = os.path.join(\n os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'\n)\nUSERS_TEST_XML_FILE = os.path.join(\n os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'users_test.xml'\n)\n\n\n# pylint: disable=maybe-no-member, too-many-public-methods\nclass PresenceAnalyzerViewsTestCase(unittest.TestCase):\n \"\"\"\n Views tests.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Before each test, set up an environment.\n \"\"\"\n
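 # Point the app at the bundled test fixtures and zero the get_data\n # cache timestamp so every test re-parses test_data.csv.\n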
 main.app.config.update(\n {\n 'DATA_CSV': TEST_DATA_CSV,\n 'USERS_XML_FILE': USERS_TEST_XML_FILE,\n }\n )\n self.client = main.app.test_client()\n utils.TIMESTAMPS['get_data'] = 0\n\n def tearDown(self):\n \"\"\"\n Get rid of unused objects after each test.\n \"\"\"\n pass\n\n def test_mainpage(self):\n \"\"\"\n Test main page redirect.\n \"\"\"\n resp = self.client.get('/')\n self.assertEqual(resp.status_code, 302)\n assert resp.headers['Location'].endswith('/presence_weekday')\n\n def test_api_users_v1(self):\n \"\"\"\n Test users listing.\n \"\"\"\n resp = self.client.get('/api/v1/users')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)\n self.assertDictEqual(data[0], {'user_id': 10, 'name': 'User 10'})\n\n def test_api_users_v2(self):\n \"\"\"\n Test users listing.\n \"\"\"\n resp = self.client.get('/api/v2/users')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)\n\n expected_data = [\n {\n 'user_id': '26',\n 'name': 'Andrzej S.',\n 'avatar': 'https://intranet.stxnext.pl/api/images/users/26',\n },\n {\n 'user_id': '165',\n 'name': 'Anna D.',\n 'avatar': 'https://intranet.stxnext.pl/api/images/users/165',\n },\n ]\n self.assertListEqual(json.loads(resp.data), expected_data)\n\n def test_mean_time_weekday_view(self):\n \"\"\"\n Test correctness of presenting data of mean presence\n time of a given user, grouped by weekday.\n \"\"\"\n resp = self.client.get('/api/v1/mean_time_weekday/10')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n\n data = json.loads(resp.data)\n self.assertEqual(len(data), 7)\n\n expected_res = [\n [\"Mon\", 0],\n [\"Tue\", 30047.0],\n [\"Wed\", 24465.0],\n [\"Thu\", 23705.0],\n [\"Fri\", 0],\n [\"Sat\", 0],\n [\"Sun\", 0],\n ]\n self.assertEqual(expected_res, data)\n\n resp = self.client.get('/api/v1/mean_time_weekday/1000')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(json.loads(resp.data), \"NO_USER_DATA\")\n\n def test_presence_weekday_view(self):\n \"\"\"\n Test correctness of presenting data of presence by weekday.\n \"\"\"\n resp = self.client.get('/api/v1/presence_weekday/10')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n\n data = json.loads(resp.data)\n self.assertEqual(len(data), 8)\n\n expected_res = [\n [\"Weekday\", \"Presence (s)\"],\n [\"Mon\", 0],\n [\"Tue\", 30047],\n [\"Wed\", 24465],\n [\"Thu\", 23705],\n [\"Fri\", 0],\n [\"Sat\", 0],\n [\"Sun\", 0],\n ]\n self.assertEqual(expected_res, data)\n\n resp = self.client.get('/api/v1/presence_weekday/1000')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(json.loads(resp.data), \"NO_USER_DATA\")\n\n def test_presence_start_end(self):\n \"\"\"\n Test if function has returned correct values of user average\n working start time and user average working end time.\n \"\"\"\n resp = self.client.get('/api/v1/presence_start_end/10')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n\n data = json.loads(resp.data)\n self.assertDictEqual(\n {\n '0': {\n 'start': [0, 0, 0],\n 'end': [0, 0, 0],\n },\n '1': {\n 'start': [9, 39, 5],\n 'end': [17, 59, 52],\n },\n '2': {\n 'start': [9, 19, 52],\n 'end': [16, 7, 37],\n },\n '3': {\n 'start': [10, 48, 46],\n 'end': [17, 23, 51],\n },\n '4': {\n 'start': [0, 
0, 0],\n 'end': [0, 0, 0],\n },\n '5': {\n 'end': [0, 0, 0],\n 'start': [0, 0, 0],\n },\n '6': {\n 'end': [0, 0, 0],\n 'start': [0, 0, 0],\n },\n },\n data,\n )\n resp = self.client.get('/api/v1/presence_start_end/1000')\n self.assertEqual(json.loads(resp.data), 'NO_USER_DATA')\n self.assertEqual(resp.status_code, 200)\n\n def test_standard_deviation(self):\n \"\"\"\n Test if function has returned correct standard deviation\n value of starting and ending work time.\n \"\"\"\n resp = self.client.get('/api/v1/standard_deviation/10')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n expected_data = {\n 'start_variation': [[9, 39, 5], [9, 39, 5]],\n 'end_variation': [[17, 59, 52], [17, 59, 52]],\n }\n self.assertDictEqual(expected_data, json.loads(resp.data)['1'])\n\n resp = self.client.get('/api/v1/presence_start_end/1000')\n self.assertEqual(json.loads(resp.data), 'NO_USER_DATA')\n self.assertEqual(resp.status_code, 200)\n\n def test_render_html(self):\n \"\"\"\n Test if function operates template rendering correctly.\n \"\"\"\n resp = self.client.get('/presence_weekday')\n self.assertNotEqual(\n resp.data.find('Presence by weekday'),\n -1,\n )\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'text/html; charset=utf-8')\n\n resp = self.client.get('/presence_weekday_that_not_exists')\n self.assertEqual(resp.status_code, 404)\n\n resp = self.client.get('/template_with_errors')\n self.assertEqual(resp.status_code, 404)\n\n\nclass PresenceAnalyzerUtilsTestCase(unittest.TestCase):\n \"\"\"\n Utility functions tests.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Before each test, set up an environment.\n \"\"\"\n main.app.config.update({'DATA_CSV': TEST_DATA_CSV})\n utils.TIMESTAMPS['get_data'] = 0\n self.client = main.app.test_client()\n\n def tearDown(self):\n \"\"\"\n Get rid of unused objects after each test.\n \"\"\"\n pass\n\n def test_get_data(self):\n \"\"\"\n Test parsing of CSV file.\n \"\"\"\n data = utils.get_data()\n self.assertIsInstance(data, dict)\n self.assertItemsEqual(data.keys(), [10, 11])\n sample_date = datetime.date(2013, 9, 10)\n self.assertIn(sample_date, data[10])\n self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])\n self.assertEqual(\n data[10][sample_date]['start'],\n datetime.time(9, 39, 5)\n )\n\n def test_jsonify(self):\n \"\"\"\n Test if function creates a response result in JSON representation.\n \"\"\"\n @utils.jsonify\n def return_dict():\n \"\"\"\n Simple function returning dictionary for further tests.\n \"\"\"\n return {1: 1, 2: 2}\n res = return_dict()\n self.assertEqual({'1': 1, '2': 2}, json.loads(res.data))\n\n def test_group_by_weekday(self):\n \"\"\"\n Test if function groups presence entries by weekday.\n \"\"\"\n example_date = {\n datetime.date(2013, 9, 10): {\n 'start': datetime.time(9, 39, 5),\n 'end': datetime.time(17, 59, 52),\n },\n datetime.date(2013, 9, 12): {\n 'start': datetime.time(10, 48, 46),\n 'end': datetime.time(17, 23, 51),\n },\n datetime.date(2013, 9, 11): {\n 'start': datetime.time(9, 19, 52),\n 'end': datetime.time(16, 7, 37),\n },\n }\n tested_res = utils.group_by_weekday(example_date)\n self.assertListEqual(\n [\n [],\n [30047],\n [24465],\n [23705],\n [],\n [],\n [],\n ],\n tested_res,\n )\n\n def test_seconds_since_midnight(self):\n \"\"\"\n Test if function correctly calculates the amount\n of seconds since midnight.\n \"\"\"\n res = utils.seconds_since_midnight(datetime.time(22, 33, 11))\n self.assertEqual(81191, res)\n
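\n # utils.interval(t1, t2) is expected to return\n # seconds_since_midnight(t2) - seconds_since_midnight(t1), so a t2\n # earlier than t1 yields a negative number, as asserted below.\n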
 def test_interval(self):\n \"\"\"\n Test if interval between two datetime.time objects is correct.\n \"\"\"\n tested_interval = utils.interval(\n datetime.time(11, 22, 33),\n datetime.time(8, 11, 31)\n )\n self.assertEqual(-11462, tested_interval)\n\n def test_mean(self):\n \"\"\"\n Test if arithmetic mean returned by function is correct.\n \"\"\"\n self.assertEqual(0, utils.mean([]))\n self.assertEqual(2., utils.mean([1, 2, 3]))\n self.assertEqual(-2., utils.mean([-1, -2, -3]))\n self.assertEqual(2., utils.mean([1., 2., 3.]))\n\n def test_memorize(self):\n \"\"\"\n Test caching of get_data method.\n \"\"\"\n expected_data = {\n datetime.date(2013, 9, 10): {\n 'start': datetime.time(9, 39, 5),\n 'end': datetime.time(17, 59, 52),\n },\n datetime.date(2013, 9, 12): {\n 'start': datetime.time(10, 48, 46),\n 'end': datetime.time(17, 23, 51),\n },\n datetime.date(2013, 9, 11): {\n 'start': datetime.time(9, 19, 52),\n 'end': datetime.time(16, 7, 37),\n },\n }\n self.assertDictEqual(expected_data, utils.get_data()[10])\n utils.CACHE = {'get_data': {10: 'rubbish'}}\n self.assertNotEqual(expected_data, utils.get_data()[10])\n utils.TIMESTAMPS['get_data'] = 0\n self.assertDictEqual(expected_data, utils.get_data()[10])\n\n def test_mean_start_end_for_sv(self):\n \"\"\"\n Test if function correctly counts mean time of\n starting and ending work time.\n \"\"\"\n expected_data = {\n 'start': 34745.0,\n 'data_examples_num': 1,\n 'end': 64792.0\n }\n self.assertDictEqual(\n expected_data,\n utils.get_mean_start_end(\n utils.get_data()[10]\n )[1],\n )\n\n def test_variation_start_end(self):\n \"\"\"\n Test if function correctly counts variation from\n given data.\n \"\"\"\n data = utils.get_data()\n day_start_end = {}\n for day_idx in range(7):\n day_start_end[day_idx] = defaultdict(lambda: 0)\n\n expected_data = {\n 'end_variation': 2247001.0,\n 'start_variation': 2292196.0,\n }\n weekdays = utils.get_mean_start_end(\n data[11]\n )\n day_start_end = utils.variation_for_day_start_end(\n day_start_end,\n data[11],\n weekdays,\n )\n self.assertDictEqual(day_start_end[3], expected_data)\n\n def t_standard_deviation_from_data(self):\n \"\"\"\n Test if function returns standard deviation\n from given variations.\n \"\"\"\n data = utils.get_data()\n day_start_end = {}\n for day_idx in range(7):\n day_start_end[day_idx] = defaultdict(lambda: 0)\n weekdays = utils.get_mean_start_end(\n data[11]\n )\n day_start_end = utils.variation_for_day_start_end(\n day_start_end,\n data[11],\n weekdays,\n )\n expected_data = {\n 'end_variation': [[15, 51, 27], [16, 41, 25]],\n 'start_variation': [[9, 28, 8], [10, 18, 36]],\n }\n self.assertEqual(\n expected_data,\n utils.standard_deviation_from_data(\n day_start_end,\n weekdays)[3],\n )\n\n def test_equation_for_day(self):\n \"\"\"\n Test if function properly counts a particular\n part of variance equation.\n \"\"\"\n self.assertAlmostEqual(\n 22739.259661982247, utils.equation_for_day(\n datetime.time(8, 54, 29),\n 30553.52475247525, 101\n )\n )\n\n\ndef suite():\n \"\"\"\n Default test suite.\n \"\"\"\n base_suite = unittest.TestSuite()\n base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))\n base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))\n return base_suite\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "src/presence_analyzer/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 13863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": 
[{"api_name": "os.path.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "presence_analyzer.main.app.config.update", "line_number": 33, "usage_type": "call"}, {"api_name": "presence_analyzer.main.app", "line_number": 33, "usage_type": "attribute"}, {"api_name": "presence_analyzer.main", "line_number": 33, "usage_type": "name"}, {"api_name": "presence_analyzer.main.app.test_client", "line_number": 39, "usage_type": "call"}, {"api_name": "presence_analyzer.main.app", "line_number": 39, "usage_type": "attribute"}, {"api_name": "presence_analyzer.main", "line_number": 39, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.TIMESTAMPS", "line_number": 40, "usage_type": "attribute"}, {"api_name": "presence_analyzer.utils", "line_number": 40, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 101, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 117, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 127, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 144, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 155, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 190, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 205, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 208, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 230, "usage_type": "attribute"}, {"api_name": "presence_analyzer.main.app.config.update", "line_number": 239, "usage_type": "call"}, {"api_name": "presence_analyzer.main.app", "line_number": 239, "usage_type": "attribute"}, {"api_name": "presence_analyzer.main", "line_number": 239, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.TIMESTAMPS", "line_number": 240, "usage_type": "attribute"}, {"api_name": "presence_analyzer.utils", "line_number": 240, "usage_type": "name"}, {"api_name": "presence_analyzer.main.app.test_client", "line_number": 241, "usage_type": "call"}, {"api_name": "presence_analyzer.main.app", "line_number": 241, "usage_type": "attribute"}, {"api_name": "presence_analyzer.main", "line_number": 241, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.get_data", "line_number": 253, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 253, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 256, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 261, 
"usage_type": "call"}, {"api_name": "presence_analyzer.utils.jsonify", "line_number": 268, "usage_type": "attribute"}, {"api_name": "presence_analyzer.utils", "line_number": 268, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 275, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 282, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 286, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 290, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 283, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 284, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 288, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 292, "usage_type": "call"}, {"api_name": "presence_analyzer.utils.group_by_weekday", "line_number": 295, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 295, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.seconds_since_midnight", "line_number": 314, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 314, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 314, "usage_type": "call"}, {"api_name": "presence_analyzer.utils.interval", "line_number": 321, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 321, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 322, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 323, "usage_type": "call"}, {"api_name": "presence_analyzer.utils.mean", "line_number": 331, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 331, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.mean", "line_number": 332, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 332, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.mean", "line_number": 333, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 333, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.mean", "line_number": 334, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 334, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 341, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 345, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 349, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 342, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 343, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 346, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 347, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 350, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 351, "usage_type": "call"}, {"api_name": "presence_analyzer.utils.get_data", "line_number": 354, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 354, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.CACHE", "line_number": 355, "usage_type": "attribute"}, {"api_name": "presence_analyzer.utils", "line_number": 355, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.get_data", "line_number": 356, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 356, "usage_type": 
"name"}, {"api_name": "presence_analyzer.utils.TIMESTAMPS", "line_number": 357, "usage_type": "attribute"}, {"api_name": "presence_analyzer.utils", "line_number": 357, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.get_data", "line_number": 358, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 358, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.get_mean_start_end", "line_number": 372, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 372, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.get_data", "line_number": 373, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 373, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.get_data", "line_number": 382, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 382, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 385, "usage_type": "call"}, {"api_name": "presence_analyzer.utils.get_mean_start_end", "line_number": 391, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 391, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.variation_for_day_start_end", "line_number": 394, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 394, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.get_data", "line_number": 406, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 406, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 409, "usage_type": "call"}, {"api_name": "presence_analyzer.utils.get_mean_start_end", "line_number": 410, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 410, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.variation_for_day_start_end", "line_number": 413, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 413, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.standard_deviation_from_data", "line_number": 424, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 424, "usage_type": "name"}, {"api_name": "presence_analyzer.utils.equation_for_day", "line_number": 435, "usage_type": "call"}, {"api_name": "presence_analyzer.utils", "line_number": 435, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 436, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 446, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 447, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 448, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 453, "usage_type": "call"}]} +{"seq_id": "492578113", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport scipy.linalg\nfrom tqdm import tqdm\nfrom scipy.linalg import toeplitz\n\nimport tkinter as tk\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\n\n__version__ = '0.18'\n\nclass lscf():\n \"\"\"\n Least-Squares Complex Frequency-domain estimate.\n \"\"\"\n\n def __init__(self, \n frf=None, \n freq=None,\n dt=None,\n lower=50, \n upper=10000, \n pol_order_high=100, \n pyfrf=False):\n \"\"\"The LSCF method is an frequency-domain Linear Least Squares\n estimator optimized for modal parameter estimation. 
The choice of\n the most important algorithm characteristics is based on the\n results in [1] (Section 5.3.3.) and can be summarized as:\n\n - Formulation: the normal equations [1]\n (Eq. 5.26: [sum(Tk - Sk.H * Rk^-1 * Sk)]*ThetaA=D*ThetaA = 0)\n are constructed for the common denominator discrete-time\n model in the Z-domain. Consequently, by looping over the\n outputs and inputs, the submatrices Rk, Sk, and Tk are\n formulated through the use of the FFT algorithm as Toeplitz\n structured (n+1) square matrices. Using complex coefficients,\n the FRF data within the frequency band of interest (FRF-zoom)\n is projected in the Z-domain in the interval of [0, 2*pi] in\n order to improve numerical conditioning. (In the case that\n real coefficients are used, the data is projected in the\n interval of [0, pi].) Projecting on an interval that does\n not completely describe the unity circle, say [0, alpha*2*pi]\n where alpha is typically 0.9-0.95, together with deliberate\n over-modeling, is best applied to cope with discontinuities.\n This is justified by the use of a discrete time model in the\n Z-domain, which is much more robust for a high order of the\n transfer function polynomials.\n\n - Solver: the normal equations can be solved for the\n denominator coefficients ThetaA by computing the Least-Squares\n (LS) or mixed Total-Least-Squares (TLS) solution. The inverse\n of the square matrix D for the LS solution is computed by\n means of a pseudo inverse operation for reasons of numerical\n stability, while the mixed LS-TLS solution is computed using\n an SVD (Singular Value Decomposition).\n\n Literature:\n [1] Verboven, P., Frequency-domain System Identification for\n Modal Analysis, Ph. D. thesis, Mechanical Engineering Dept.\n (WERK), Vrije Universiteit Brussel, Brussel, (Belgium),\n May 2002, (http://mech.vub.ac.be/avrg/PhD/thesis_PV_web.pdf)\n [2] Verboven, P., Guillaume, P., Cauberghe, B., Parloo, E. and\n Vanlanduit S., Stabilization Charts and Uncertainty Bounds\n For Frequency-Domain Linear Least Squares Estimators, Vrije\n Universiteit Brussel(VUB), Mechanical Engineering Dept.\n (WERK), Acoustic and Vibration Research Group (AVRG),\n Pleinlaan 2, B-1050 Brussels, Belgium,\n e-mail: Peter.Verboven@vub.ac.be, url:\n (http://sem-proceedings.com/21i/sem.org-IMAC-XXI-Conf-s02p01\n -Stabilization-Charts-Uncertainty-Bounds-Frequency-Domain-\n Linear-Least.pdf)\n [3] P. Guillaume, P. Verboven, S. Vanlanduit, H. Van der\n Auweraer, B. Peeters, A Poly-Reference Implementation of the\n Least-Squares Complex Frequency-Domain Estimator, Vrije\n Universiteit Brussel, LMS International\n\n\n :param frf: Frequency response function matrix (must be receptance!)\n :type frf: ndarray\n :param freq: Frequency array\n :type freq: array\n :param dt: Sampling period; if None, it is derived from freq\n :type dt: float, optional\n :param lower: Lower limit for pole determination [Hz]\n :type lower: int, float\n :param upper: Upper limit for pole determination [Hz]\n :type upper: int, float\n :param pol_order_high: Highest order of the polynomial\n :type pol_order_high: int\n :param pyfrf: If True, FRF data is supplied later via add_frf()\n :type pyfrf: bool\n
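\n A minimal usage sketch (FRF and freq below are assumed example\n arrays holding receptance data and its frequency vector; they are\n not names defined by this module):\n\n >>> a = lscf(frf=FRF, freq=freq, lower=10, upper=5000, pol_order_high=60)\n >>> a.get_poles()\n >>> a.stab_chart()\n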
Peeters, A Poly-Reference Implementation of the\n Least-Squares Complex Frequency-Domain Estimator, Vrije\n Universiteit Brussel, LMS International\n\n\n :param frf: Frequency response function matrix (must be receptance!)\n :type frf: ndarray\n :param freq: Frequency array\n :type freq: array\n :param lower: Lower limit for pole determination [Hz]\n :type lower: int, float\n :param upper: Upper limit for pole determination [Hz]\n :type upper: int, float\n :param pol_order_high: Highest order of the polynomial\n :type pol_order_high: int\n \"\"\"\n if pyfrf:\n self.frf = 0\n elif not pyfrf and frf is not None and freq is not None:\n try:\n self.frf = np.asarray(frf)\n except:\n raise Exception('cannot convert frf to numpy ndarray')\n if self.frf.ndim == 1:\n self.frf = np.array([self.frf])\n\n try:\n self.freq = np.asarray(freq)\n except:\n raise Exception('cannot convert freq to numpy array')\n if self.freq.ndim != 1:\n raise Exception(\n f'ndim of freq is not equal to 1 ({self.freq.ndim})')\n else:\n raise Exception('input arguments are not defined')\n\n try:\n self.lower = float(lower)\n except:\n raise Exception('lower must be float or integer')\n if self.lower < 0:\n raise Exception('lower must be positive or equal to zero')\n\n try:\n self.upper = float(upper)\n except:\n raise Exception('upper must be float or integer')\n if self.upper < self.lower:\n raise Exception('upper must be greater than lower')\n\n try:\n self.pol_order_high = int(pol_order_high)\n except:\n raise Exception('cannot convert pol_order_high to integer')\n if self.pol_order_high <= 0:\n raise Exception('pol_order_high must be positive')\n\n if not pyfrf:\n self.omega = 2 * np.pi * self.freq\n if dt is None:\n self.sampling_time = 1/(2*self.freq[-1])\n else:\n self.sampling_time = dt\n\n def add_frf(self, pyfrf_object):\n \"\"\"Add an FRF at the next location.\n\n This method can be used in relation to pyFRF from Open Modal (https://github.com/openmodal)\n\n >>> for file in files:\n >>> lvm_data = lvm\n \n :param pyfrf_object: FRF object from pyFRF\n :type pyfrf_object: object\n \"\"\"\n freq = pyfrf_object.get_f_axis()\n sel = (freq >= 1.0e-1)\n\n self.freq = freq[sel]\n self.omega = 2 * np.pi * self.freq\n self.sampling_time = 1/(2*self.freq[-1])\n\n new_frf = np.vstack(pyfrf_object.get_FRF(form='receptance')[sel])\n\n if isinstance(self.frf, int):\n self.frf = new_frf.T\n else:\n self.frf = np.concatenate((self.frf, new_frf.T), axis=0)\n \n def get_poles(self, show_progress=False):\n \"\"\"Compute poles.\n\n Source: https://github.com/openmodal/OpenModal/blob/master/OpenModal/analysis/lscf.py\n \"\"\"\n if show_progress:\n tqdm_range = tqdm\n else:\n tqdm_range = lambda x:x\n\n self.all_poles = []\n self.pole_freq = []\n self.pole_xi = []\n\n lower_ind = np.argmin(np.abs(self.freq - self.lower))\n n = self.pol_order_high * 2\n nf = 2 * (self.frf.shape[1] - 1)\n nr = self.frf.shape[0]\n\n indices_s = np.arange(-n, n+1)\n indices_t = np.arange(n+1)\n\n sk = -irfft_adjusted_lower_limit(self.frf, lower_ind, indices_s)\n t = irfft_adjusted_lower_limit(\n self.frf.real**2 + self.frf.imag**2, lower_ind, indices_t)\n r = -(np.fft.irfft(np.ones(lower_ind), n=nf))[indices_t]*nf\n r[0] += nf\n\n s = []\n for i in range(nr):\n s.append(toeplitz(sk[i, n:], sk[i, :n+1][::-1]))\n t = toeplitz(np.sum(t[:, :n+1], axis=0))\n r = toeplitz(r)\n\n sr_list = []\n for j in tqdm_range(range(2, n+1, 2)):\n d = 0\n for i in range(nr):\n rinv = np.linalg.inv(r[:j+1, :j+1])\n snew = s[i][:j+1, :j+1]\n d -= np.dot(np.dot(snew[:j+1, 
:j+1].T,\n rinv), snew[:j+1, :j+1]) # sum\n d += t[:j+1, :j+1]\n\n a0an1 = np.linalg.solve(-d[0:j, 0:j], d[0:j, j])\n # roots of the denominator polynomial\n sr = np.roots(np.append(a0an1, 1)[::-1])\n\n # Z-domain (for discrete-time domain model)\n poles = -np.log(sr) / self.sampling_time\n f_pole, ceta = complex_freq_to_freq_and_damp(poles)\n\n self.all_poles.append(poles)\n self.pole_freq.append(f_pole)\n self.pole_xi.append(ceta)\n\n def stab_chart(self, poles='all', fn_temp=0.001, xi_temp=0.05, legend=False, latex_render=False, title=None):\n \"\"\"Render stability chart.\n\n Interactive pole selection is possible. Identification of natural \n frequency and damping coefficients is executed on-the-fly,\n as well as computing reconstructed FRF and modal constants.\n\n The identification can be done in two ways:\n 1.\n >>> a.stab_chart() # pick poles\n >>> a.nat_freq # natural frequencies\n >>> a.nat_xi # damping coefficients\n >>> a.H # reconstructed FRF matrix\n >>> a.A # modal constants (a.A[:, -2:] are Lower and Upper residual)\n\n 2.\n >>> approx_nat_freq = [234, 545]\n >>> a.select_closest_poles(approx_nat_freq)\n >>> a.nat_freq # natural frequencies\n >>> a.nat_xi # damping coefficients\n >>> H, A = a.lsfd(whose_poles='own', FRF_ind='all') # reconstruction\n \"\"\"\n if poles == 'all':\n poles = self.all_poles\n\n def replot(init=False):\n \"\"\"Replot the measured and reconstructed FRF based on new selected poles.\"\"\"\n ax2.clear()\n ax2.semilogy(self.freq, np.average(\n np.abs(self.frf), axis=0), alpha=0.7, color='k')\n\n if not init:\n self.H, self.A = self.lsfd(whose_poles='own', FRF_ind='all')\n ax2.semilogy(self.freq, np.average(\n np.abs(self.H), axis=0), color='r', lw=2)\n\n ax1.set_xlim([self.lower, self.upper])\n ax1.set_ylim([0, self.pol_order_high+5])\n\n Nmax = self.pol_order_high\n fn_temp, xi_temp, test_fn, test_xi = stabilisation(\n poles, Nmax, err_fn=fn_temp, err_xi=xi_temp)\n\n\n root = tk.Tk() # Tkinter\n root.title('Stability Chart') # Tkinter\n fig = Figure(figsize=(20, 8)) # Tkinter\n ax2 = fig.add_subplot(111) # Tkinter\n\n ax1 = ax2.twinx()\n ax1.grid(True)\n replot(init=True)\n\n ax1.set_xlabel(r'$f$ [Hz]', fontsize=12)\n ax1.set_ylabel(r'Polynomial order', fontsize=12)\n ax2.set_ylabel(r'$|\\alpha|$', fontsize=12)\n\n if latex_render is True:\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n ax1.tick_params(axis='both', which='major', labelsize=12)\n ax2.tick_params(axis='both', which='major', labelsize=12)\n ax1.set_xlabel(r'$f$ [Hz]', fontsize=12)\n ax1.set_ylabel(r'Polynomial order', fontsize=12)\n ax2.set_ylabel(r'$|\\alpha|_{log}$', fontsize=12)\n ax1.set_xlim([self.lower, self.upper])\n\n # stable eigenfrequencies, unstable damping ratios\n a = np.argwhere((test_fn > 0) & ((test_xi == 0) | (xi_temp <= 0)))\n # stable eigenfrequencies, stable damping ratios\n b = np.argwhere((test_fn > 0) & ((test_xi > 0) & (xi_temp > 0)))\n # unstable eigenfrequencies, unstable damping ratios\n c = np.argwhere((test_fn == 0) & ((test_xi == 0) | (xi_temp <= 0)))\n # unstable eigenfrequencies, stable damping ratios\n d = np.argwhere((test_fn == 0) & ((test_xi > 0) & (xi_temp > 0)))\n\n p1 = ax1.plot(fn_temp[a[:, 0], a[:, 1]], 1+a[:, 1], 'bx',\n markersize=4, label=\"stable frequency, unstable damping\")\n p2 = ax1.plot(fn_temp[b[:, 0], b[:, 1]], 1+b[:, 1], 'gx',\n markersize=7, label=\"stable frequency, stable damping\")\n p3 = ax1.plot(fn_temp[c[:, 0], c[:, 1]], 1+c[:, 1], 'r.',\n markersize=4, label=\"unstable frequency, unstable damping\")\n p4 = 
ax1.plot(fn_temp[d[:, 0], d[:, 1]], 1+d[:, 1], 'r*',\n markersize=4, label=\"unstable frequency, stable damping\")\n\n if legend:\n ax1.legend(loc='upper center', ncol=2, frameon=True)\n plt.tight_layout()\n\n print('SHIFT + LEFT mouse button to pick a pole.\\nSHIFT + RIGHT mouse button to erase the last pick.')\n self.nat_freq = []\n self.nat_xi = []\n self.pole_ind = []\n\n line, = ax1.plot(self.nat_freq, np.repeat(\n self.pol_order_high, len(self.nat_freq)), 'kv', markersize=8)\n\n # Mark selected poles\n selected, = ax1.plot([],[], 'ko')\n\n self.shift_is_held = False\n def on_key_press(event):\n \"\"\"Function triggered on key press (shift).\"\"\"\n if event.key == 'shift':\n self.shift_is_held = True\n \n def on_key_release(event):\n \"\"\"Function triggered on key release (shift).\"\"\"\n if event.key == 'shift':\n self.shift_is_held = False\n\n def onclick(event):\n # on button 1 press (left mouse button) + shift is held\n if event.button == 1 and self.shift_is_held:\n self.y_data_pole = [event.ydata]\n self.x_data_pole = event.xdata\n self._select_closest_poles_on_the_fly()\n\n replot()\n \n print(f'{len(self.nat_freq)}. Frequency: ~{int(np.round(event.xdata))} -->\\t{self.nat_freq[-1]} Hz\\t(xi = {self.nat_xi[-1]:.4f})')\n\n # On button 3 press (right mouse button) + shift is held\n elif event.button == 3 and self.shift_is_held:\n try:\n del self.nat_freq[-1] # delete last point\n del self.nat_xi[-1]\n del self.pole_ind[-1]\n replot()\n print('Deleting the last pick...')\n except:\n pass\n\n line.set_xdata(np.asarray(self.nat_freq)) # update data\n line.set_ydata(np.repeat(Nmax*1.04, len(self.nat_freq)))\n\n selected.set_xdata([self.pole_freq[p[0]][p[1]] for p in self.pole_ind]) # update data\n selected.set_ydata([p[0] for p in self.pole_ind])\n fig.canvas.draw()\n\n canvas = FigureCanvasTkAgg(fig, root) # Tkinter\n canvas.get_tk_widget().pack(side='top', fill='both', expand=1) # Tkinter\n NavigationToolbar2Tk(canvas, root) # Tkinter\n \n # Connecting functions to event manager\n fig.canvas.mpl_connect('key_press_event', on_key_press)\n fig.canvas.mpl_connect('key_release_event', on_key_release)\n fig.canvas.mpl_connect('button_press_event', onclick)\n\n if title is not None:\n plt.savefig(title)\n\n root.mainloop() # Tkinter\n\n def _select_closest_poles_on_the_fly(self):\n \"\"\"On-the-fly selection of the closest poles. 
\n \"\"\"\n y_ind = int(np.argmin(np.abs(np.arange(0, len(self.pole_freq))-self.y_data_pole))) # Find closest pole order\n sel = np.argmin(np.abs(self.pole_freq[y_ind] - self.x_data_pole)) # Find closest frequency\n \n self.pole_ind.append([y_ind, sel])\n self.nat_freq.append(self.pole_freq[y_ind][sel])\n self.nat_xi.append(self.pole_xi[y_ind][sel])\n\n def select_closest_poles(self, approx_nat_freq):\n \"\"\"Identification of natural frequency and damping.\n\n If `approx_nat_freq` is used, the method finds the closest poles of the polynomial with\n the highest order.\n\n :param approx_nat_freq: Approximate natural frequency value\n :type approx_nat_freq: list\n \"\"\"\n y_ind = -1\n pole_ind = []\n for i, fr in enumerate(approx_nat_freq):\n sel = np.argmin(np.abs(self.pole_freq[y_ind] - fr))\n pole_ind.append(\n [y_ind, np.argmin(np.abs(self.pole_freq[y_ind] - self.pole_freq[y_ind][sel]))])\n \n pole_ind = np.asarray(pole_ind)\n self.nat_freq = self.pole_freq[y_ind][pole_ind[:, 1]]\n self.nat_xi = self.pole_xi[y_ind][pole_ind[:, 1]]\n self.pole_ind = pole_ind \n\n def lsfd(self, whose_poles='own', FRF_ind=None):\n \"\"\"\n Modal constants and FRF reconstruction based on LSFD method.\n\n :param whose_poles: Use own poles or poles from another object (object)\n :param FRF_ind: Reconstruct FRF on location (int) with this index or \n reconstruct all ('all') or reconstruct None, defaults to None\n :return: modal constants or reconstructed FRF, modal constants\n \"\"\"\n ndim = self.frf.ndim\n if whose_poles == 'own':\n whose_poles = self\n\n pole_ind = np.asarray(whose_poles.pole_ind, dtype=int)\n n_poles = pole_ind.shape[0]\n poles = []\n for i in range(n_poles):\n poles.append(whose_poles.all_poles[pole_ind[i, 0]][pole_ind[i, 1]])\n poles = np.asarray(poles)\n\n\n w = np.append(-self.omega[1:][::-1], self.omega[1:])\n alpha = np.append(self.frf[:, 1:].conjugate()[\n :, ::-1], self.frf[:, 1:], ndim-1)\n TA = np.ones([len(w), n_poles+2], complex)\n\n for n in range(n_poles):\n TA[:, n] = 1/(1j*w - poles[n])\n TA[:, -2] = -1/w**2\n TA[:, -1] = np.ones_like(w)\n AT = np.linalg.pinv(TA)\n\n if ndim == 1:\n A_LSFD = np.dot(AT, self.frf)\n elif ndim == 2:\n IO = self.frf.shape[0]\n A_LSFD = np.zeros([IO, n_poles+2], complex)\n for v in range(IO):\n A_LSFD[v, :] = np.dot(AT, alpha[v, :])\n self.A = A_LSFD\n self.poles = poles\n\n # FRF reconstruction\n if FRF_ind is None:\n return A_LSFD\n elif FRF_ind == 'all':\n n = self.frf.shape[0]\n frf_ = np.zeros((n, len(self.omega)), complex)\n for i in range(n):\n frf_[i] = self.FRF_reconstruct(i)\n return frf_, A_LSFD\n elif isinstance(FRF_ind, int):\n frf_ = self.FRF_reconstruct(FRF_ind)\n return frf_, A_LSFD\n else:\n raise Exception('FRF_ind must be None, \"all\" or int')\n\n def FRF_reconstruct(self, FRF_ind):\n \"\"\"Reconstruct FRF based on modal constants.\n\n :param FRF_ind: Reconstruct FRF on location with this index, int\n :return: Reconstructed FRF\n \"\"\"\n\n FRF_true = np.zeros(len(self.omega), complex)\n for n in range(self.A.shape[1]-2):\n FRF_true += (self.A[FRF_ind, n] /\n (1j*self.omega - self.poles[n])) + \\\n (np.conjugate(self.A[FRF_ind, n]) /\n (1j*self.omega - np.conjugate(self.poles[n])))\n\n FRF_true += -self.A[FRF_ind, -2] / \\\n (self.omega**2) + self.A[FRF_ind, -1]\n return FRF_true\n\n def print_modal_data(self):\n \"\"\"Show modal data in a table-like structure.\"\"\"\n print(' Nat. f. 
Damping')\n print(23*'-')\n for i, f in enumerate(self.nat_freq):\n print(f'{i+1}) {f:6.1f}\\t{self.nat_xi[i]:5.4f}')\n\n\ndef complex_freq_to_freq_and_damp(sr):\n \"\"\"\n Convert the complex natural frequencies to natural frequencies and the\n corresponding dampings.\n\n :param sr: complex natural frequencies\n :return: natural frequency and damping\n \"\"\"\n\n fr = np.sign(np.imag(sr)) * np.abs(sr)\n xir = -sr.real/fr\n fr /= (2 * np.pi)\n\n return fr, xir\n\n\ndef redundant_values(omega, xi, prec):\n \"\"\"\n This function suppresses the redundant values of frequency and damping\n vectors, which are the consequence of conjugate values\n\n :param omega: eigenfrequencies vector\n :param xi: damping ratios vector\n :param prec: absolute precision in order to distinguish between two values\n \"\"\"\n\n N = len(omega)\n test_omega = np.zeros((N, N), dtype='int')\n for i in range(1, N):\n for j in range(0, i):\n if np.abs((omega[i] - omega[j])) < prec:\n test_omega[i, j] = 1\n else:\n test_omega[i, j] = 0\n\n test = np.zeros(N, dtype='int')\n\n for i in range(0, N):\n test[i] = np.sum(test_omega[i, :])\n\n omega_mod = omega[np.argwhere(test < 1)]\n xi_mod = xi[np.argwhere(test < 1)]\n\n return omega_mod, xi_mod\n\n\ndef stabilisation(sr, nmax, err_fn, err_xi):\n \"\"\"\n A function that computes the stabilisation matrices needed for the\n stabilisation chart. The computation is focused on comparison of\n eigenfrequencies and damping ratios in the present step \n (N-th model order) with the previous step ((N-1)-th model order). \n\n :param sr: list of lists of complex natural frequencies\n :param nmax: maximum number of degrees of freedom\n :param err_fn: relative error in frequency\n :param err_xi: relative error in damping\n\n :return fn_temp: updated eigenfrequencies matrix\n :return xi_temp: updated damping matrix\n :return test_fn: updated eigenfrequencies stabilisation test matrix\n :return test_xi: updated damping stabilisation test matrix\n \"\"\"\n\n # TODO: check this later for optimisation # this differs between LSCE and LSCF\n fn_temp = np.zeros((2*nmax, nmax), dtype='double')\n xi_temp = np.zeros((2*nmax, nmax), dtype='double')\n test_fn = np.zeros((2*nmax, nmax), dtype='int')\n test_xi = np.zeros((2*nmax, nmax), dtype='int')\n\n for nr, n in enumerate(range(nmax)):\n fn, xi = complex_freq_to_freq_and_damp(sr[nr])\n # elimination of conjugate values in\n fn, xi = redundant_values(fn, xi, 1e-3)\n # order to decrease computation time\n if n == 1:\n # first step\n fn_temp[0:len(fn), 0:1] = fn\n xi_temp[0:len(fn), 0:1] = xi\n\n else:\n # Matrix test is created for comparison between present (N-th) and\n # previous (N-1-th) data (eigenfrequencies). 
If the value equals:\n # --> 1, the data is within relative tolerance err_fn\n # --> 0, the data is outside the relative tolerance err_fn\n fn_test = np.zeros((len(fn), len(fn_temp[:, n - 1])), dtype='int')\n for i in range(0, len(fn)):\n for j in range(0, len(fn_temp[0:2*(n), n-1])):\n if fn_temp[j, n-2] == 0:\n fn_test[i, j] = 0\n else:\n if np.abs((fn[i] - fn_temp[j, n-2])/fn_temp[j, n-2]) < err_fn:\n fn_test[i, j] = 1\n else:\n fn_test[i, j] = 0\n\n for i in range(0, len(fn)):\n # all rows are summed together\n test_fn[i, n - 1] = np.sum(fn_test[i, :])\n\n # The same procedure as for eigenfrequencies is applied for damping\n xi_test = np.zeros((len(xi), len(xi_temp[:, n - 1])), dtype='int')\n for i in range(0, len(xi)):\n for j in range(0, len(xi_temp[0:2*(n), n-1])):\n if xi_temp[j, n-2] == 0:\n xi_test[i, j] = 0\n else:\n if np.abs((xi[i] - xi_temp[j, n-2])/xi_temp[j, n-2]) < err_xi:\n xi_test[i, j] = 1\n else:\n xi_test[i, j] = 0\n for i in range(0, len(xi)):\n test_xi[i, n - 1] = np.sum(xi_test[i, :])\n\n # If the frequency/damping values corresponded to the previous iteration,\n # a mean of the two values is computed, otherwise the value stays the same\n for i in range(0, len(fn)):\n for j in range(0, len(fn_temp[0:2*(n), n-1])):\n if fn_test[i, j] == 1:\n fn_temp[i, n - 1] = (fn[i] + fn_temp[j, n - 2]) / 2\n elif fn_test[i, j] == 0:\n fn_temp[i, n - 1] = fn[i]\n for i in range(0, len(fn)):\n for j in range(0, len(fn_temp[0:2*(n), n-1])):\n if xi_test[i, j] == 1:\n xi_temp[i, n - 1] = (xi[i] + xi_temp[j, n - 2]) / 2\n elif xi_test[i, j] == 0:\n xi_temp[i, n - 1] = xi[i]\n\n return fn_temp, xi_temp, test_fn, test_xi\n\n\ndef irfft_adjusted_lower_limit(x, low_lim, indices):\n \"\"\"\n Compute the ifft of real matrix x with adjusted summation limits:\n y(j) = sum[k=-n-2, ... , -low_lim-1, low_lim, low_lim+1, ... n-2,\n n-1] x[k] * exp(sqrt(-1)*j*k* 2*pi/n),\n j =-n-2, ..., -low_limit-1, low_limit, low_limit+1, ... 
n-2, n-1\n\n :param x: Single-sided real array to Fourier transform.\n :param low_lim: lower limit index of the array x.\n :param indices: list of indices of interest\n :return: Fourier transformed two-sided array x with adjusted lower limit.\n Returns values.\n\n Source: https://github.com/openmodal/OpenModal/blob/master/OpenModal/fft_tools.py\n \"\"\"\n\n nf = 2 * (x.shape[1] - 1)\n a = (np.fft.irfft(x, n=nf)[:, indices]) * nf\n b = (np.fft.irfft(x[:, :low_lim], n=nf)[:, indices]) * nf\n\n return a - b\n", "sub_path": "pyEMA.py", "file_name": "pyEMA.py", "file_ext": "py", "file_size_in_byte": 25330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.asarray", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 162, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 170, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.fft.irfft", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 189, "usage_type": "call"}, {"api_name": "scipy.linalg.toeplitz", "line_number": 194, "usage_type": "call"}, {"api_name": "scipy.linalg.toeplitz", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 195, "usage_type": "call"}, {"api_name": "scipy.linalg.toeplitz", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.roots", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 254, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "numpy.argwhere", "line_number": 288, "usage_type": "call"}, {"api_name": 
"numpy.argwhere", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "numpy.repeat", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 354, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.NavigationToolbar2Tk", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 426, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 435, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 452, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 474, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 497, "usage_type": "call"}, {"api_name": "numpy.imag", "line_number": 497, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 497, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 499, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 518, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 529, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 553, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 555, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 556, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 573, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 579, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 586, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 600, "usage_type": "call"}, {"api_name": "numpy.fft.irfft", "line_number": 637, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 637, "usage_type": "attribute"}, {"api_name": "numpy.fft.irfft", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 638, "usage_type": "attribute"}]} +{"seq_id": "276145316", "text": "import torch\nfrom .base_model import BaseModel\nfrom . import networks\nfrom copy import deepcopy\nfrom models import create_model\n\n\nclass Pix2PixBrainModel(BaseModel):\n \"\"\" This class implements the pix2pix_brain model, for learning a mapping from input images to output images given paired data.\n It provides the option to use a weighted L1 loss to weight the bright tumour pixels more than the rest of the brain.\n It also allows the option to enable time prediction (TPN), which converts this architecture to the Pix2Pix with Time Labels\n \n Example of training a pix2pix_brain model:\n python train.py --dataroot #DATASET_LOCATION# --name #EXP_NAME# --model pix2pix_brain --direction AtoB --TPN time_prediction_10 --lambda_L2 0.15\n\n The model training requires '--dataset_mode aligned' dataset.\n By default, it uses a '--netG unet256' U-Net generator,\n a '--netD basic' discriminator (PatchGAN),\n and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).\n\n pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf\n \"\"\"\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For pix2pix, we do not use image buffer\n The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1\n By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.\n \"\"\"\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')\n parser.add_argument('--TPN', type=str, default=None, help='Use the Time Prediction Network (TPN), and load specified model')\n\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')\n parser.add_argument('--lambda_L2', type=float, default=0.0, help='weight for tumour tissue over rest of brain. 
Range [0,1]')\n parser.add_argument('--gamma', type=float, default=1.0, help='weight for time loss, when TPN is set to True')\n return parser\n\n def __init__(self, opt):\n \"\"\"Initialize the pix2pix_brain class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n\n # Set TPN_enabled to true if opt.TPN is defined\n if opt.TPN:\n self.TPN_enabled = True\n else:\n self.TPN_enabled = False\n\n # Conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc\n discr_input_nc = opt.input_nc + opt.output_nc\n\n # If TPN is enabled, switch to the U-Net with TPN architecture\n if self.TPN_enabled:\n opt.netG = 'unet_256_TPN'\n discr_input_nc +=1 # Additional Channel for Time Input\n\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; \n self.netD = networks.define_D(discr_input_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.TPN_enabled:\n self.loss_names = ['G_GAN', 'G_L1', 'G_TPN', 'D_real', 'D_fake']\n\n # Store final gamma value and then set it to 0\n self.final_gamma = deepcopy(opt.gamma)\n opt.gamma = 0\n\n # Initialize m and c to None\n self.update_m = None\n self.update_c = None\n\n # Setup TPN if set to True\n print(\"\\nSetting up TPN\\n\")\n opt_TPN = deepcopy(opt) # copy train options and change later\n opt_TPN.model = 'time_predictor'\n opt_TPN.name = opt.TPN\n opt_TPN.netD = 'time_input'\n opt_TPN.ndf = 16 # Change depending on the ndf size used with the TPN model specified\n # hard-code some parameters for TPN test phase\n opt_TPN.display_id = -1 # no visdom display;\n opt_TPN.isTrain = False\n print(\"Options TPN: {}\\n\\n\".format(opt_TPN))\n self.TPN = create_model(opt_TPN) # create a model given opt_TPN.model and other options\n self.TPN.setup(opt_TPN) # regular setup: load\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n # Check if lambda_L2 is in range [0,1]\n assert (0 <= self.opt.lambda_L2 <= 1)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n AtoB = 
self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.true_time = input['time_period'][0]\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions and .\"\"\"\n if self.TPN_enabled:\n self.fake_B = self.netG(self.real_A, torch.ones((1,1)) * self.true_time) # Pass the image and time\n\n if self.isTrain:\n # Predict the time between real image A and generated image B\n self.TPN.real_A = self.real_A\n self.TPN.real_B = self.fake_B\n self.TPN.forward()\n self.fake_time = self.TPN.prediction\n else:\n self.fake_B = self.netG(self.real_A) # G(A)\n\n def backward_D(self):\n \"\"\"Calculate GAN loss for the discriminator\"\"\"\n # Fake; stop backprop to the generator by detaching fake_B\n if self.TPN_enabled:\n self.true_time_layer = (torch.ones(self.real_A.shape) * self.true_time).to(self.device)\n fake_AB = torch.cat((self.true_time_layer, self.real_A, self.fake_B), 1) # we use conditional GANs with TPN; we need to feed both time, input and output to the discriminator\n else:\n fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator\n pred_fake = self.netD(fake_AB.detach())\n self.loss_D_fake = self.criterionGAN(pred_fake, False)\n\n # Real\n if self.TPN_enabled:\n real_AB = torch.cat((self.true_time_layer, self.real_A, self.real_B), 1)\n else:\n real_AB = torch.cat((self.real_A, self.real_B), 1)\n pred_real = self.netD(real_AB)\n self.loss_D_real = self.criterionGAN(pred_real, True)\n\n # combine loss and calculate gradients\n self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5\n self.loss_D.backward()\n\n def backward_G(self):\n \"\"\"Calculate GAN and L1 loss for the generator\"\"\"\n # First, G(A) should fake the discriminator\n if self.TPN_enabled:\n fake_AB = torch.cat((self.true_time_layer, self.real_A, self.fake_B), 1)\n else:\n fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n pred_fake = self.netD(fake_AB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, True)\n \n # Second, G(A) = B\n # Weighted L1 Loss\n if self.opt.lambda_L2 > 0: # If lambda_L2 is not > 0, no need to perform extra computation\n fake_B_tumour = self.fake_B.clone().detach()\n real_B_tumour = self.real_B.clone().detach()\n fake_B_tumour[fake_B_tumour < 0.5] = 0\n real_B_tumour[fake_B_tumour < 0.5] = 0\n self.loss_G_L1 = self.opt.lambda_L1 * (self.criterionL1(self.fake_B, self.real_B) * (1 - self.opt.lambda_L2) + \\\n self.criterionL1(fake_B_tumour, real_B_tumour) * self.opt.lambda_L2)\n else:\n ### ORIGINAL ###\n self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1\n\n # TPN Loss\n if self.TPN_enabled:\n true_time_tensor = torch.ones(self.fake_time.shape) * self.true_time\n self.loss_G_TPN = self.criterionL1(true_time_tensor, self.fake_time.cpu()) * self.opt.gamma\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1 + self.loss_G_TPN.to(self.device)\n else:\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1\n\n self.loss_G.backward()\n\n def optimize_parameters(self):\n self.forward() # compute fake images: G(A)\n # update D\n self.set_requires_grad(self.netD, True) # enable backprop for D\n self.optimizer_D.zero_grad() # set D's gradients to zero\n self.backward_D() # calculate gradients for D\n self.optimizer_D.step() # update D's weights\n # update 
G\n self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G\n self.optimizer_G.zero_grad() # set G's gradients to zero\n self.backward_G() # calculate gradients for G\n self.optimizer_G.step() # update G's weights\n\n def update_current_gamma(self, epoch):\n ''' Update gamma value for TPN from opt, depending on the epoch '''\n start_epoch = 50\n end_epoch = 100\n\n # Values should be None only at the first call\n if self.update_m is None and self.update_c is None:\n self.update_m = self.final_gamma / (end_epoch - start_epoch)\n self.update_c = -self.update_m * start_epoch\n\n if epoch < start_epoch:\n self.opt.gamma = 0\n elif start_epoch <= epoch < end_epoch:\n # Linearly update gamma\n self.opt.gamma = self.update_m * epoch + self.update_c\n else: # epoch >= end_epoch\n self.opt.gamma = self.final_gamma\n\n print('gamma = %.7f' % self.opt.gamma)\n", "sub_path": "models/pix2pix_brain_model.py", "file_name": "pix2pix_brain_model.py", "file_ext": "py", "file_size_in_byte": 12034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "base_model.BaseModel", "line_number": 8, "usage_type": "name"}, {"api_name": "base_model.BaseModel.__init__", "line_number": 55, "usage_type": "call"}, {"api_name": "base_model.BaseModel", "line_number": 55, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 92, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 101, "usage_type": "call"}, {"api_name": "models.create_model", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn.L1Loss", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 118, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 202, "usage_type": "call"}]} +{"seq_id": "356055688", "text": "import argparse\nimport dataclasses\nimport functools\nimport importlib\nimport inspect\nimport logging\nfrom typing import Any, Tuple, Optional\n\n\nclass _ParseArgError(BaseException):\n pass\n\n\nclass _NoArgError(_ParseArgError):\n pass\n\n\nclass _ArgValueError(_ParseArgError):\n pass\n\n\nclass _WrappedNamespace(argparse.Namespace):\n def __getattribute__(self, key):\n try:\n return super().__getattribute__(key)\n except AttributeError:\n raise _NoArgError() from None\n\n\ndef _arg_parser(fn):\n\n @functools.wraps(fn)\n def _wrapper(args: argparse.Namespace):\n args = _WrappedNamespace(**vars(args))\n return fn(args)\n\n return _wrapper\n\n\ndef _parse_gen_params(gen, args: argparse.Namespace):\n if args.params is None:\n return {}\n # NOTE(rsetaluri): We need to do this check because of how inspect.signature\n # deals with 
metaclasses. Specifically, for Generator2 subclasses, it grabs\n # the metaclass's __call__ function which is generic (instead of the class's\n # __init__ function).\n if inspect.isclass(gen):\n gen_sig = inspect.signature(gen.__init__)\n else:\n gen_sig = inspect.signature(gen)\n parser = argparse.ArgumentParser(add_help=False, prog=gen.__name__)\n for gen_sig_param in gen_sig.parameters.values():\n if gen_sig_param.name == \"self\":\n continue\n kwargs = {}\n if gen_sig_param.annotation is not inspect.Parameter.empty:\n kwargs[\"type\"] = gen_sig_param.annotation\n if gen_sig_param.default is not inspect.Parameter.empty:\n kwargs[\"default\"] = gen_sig_param.default\n else:\n kwargs[\"required\"] = True\n parser.add_argument(f\"-{gen_sig_param.name}\", **kwargs)\n params = [\"-\" + p for p in args.params.split(\",\")]\n return vars(parser.parse_args(params))\n\n\ndef slice_args(args: argparse.Namespace, grp):\n keys = (action.dest for action in grp._group_actions)\n return argparse.Namespace(**{k: getattr(args, k) for k in keys})\n\n\ndef add_design_arguments(parser: argparse.ArgumentParser):\n design_grp = parser.add_argument_group(\"design\")\n design_grp.add_argument(\"--package\", type=str, required=True)\n name_grp = design_grp.add_mutually_exclusive_group(required=True)\n name_grp.add_argument(\"--module\", type=str)\n name_grp.add_argument(\"--generator\", type=str)\n design_grp.add_argument(\"--params\", type=str)\n return design_grp\n\n\n@_arg_parser\ndef parse_design_args(args: argparse.Namespace):\n if args.package is None:\n raise _ArgValueError()\n py_module = importlib.import_module(args.package)\n if args.module is not None:\n return getattr(py_module, args.module)\n if args.generator is not None:\n gen = getattr(py_module, args.generator)\n params = _parse_gen_params(gen, args)\n logging.info(f\"Generator params {params}\")\n return gen(**params)\n raise _ArgValueError()\n\n\ndef _try_get_default(field: dataclasses.Field) -> Tuple[bool, Any]:\n if field.default is not dataclasses.MISSING:\n return True, field.default\n if field.default_factory is not dataclasses.MISSING:\n return True, field.default_factory\n return False, None\n\n\ndef _add_bool_field(grp, field: dataclasses.Field) -> None:\n assert field.type is bool\n has_default, default_value = _try_get_default(field)\n if not has_default:\n sub_grp = grp.add_mutually_exclusive_group(required=True)\n sub_grp.add_argument(f\"--{field.name}\", action=\"store_true\")\n sub_grp.add_argument(f\"--no-{field.name}\", action=\"store_false\")\n return\n if not isinstance(default_value, bool):\n raise TypeError(f\"Expected bool default value, got {default_value}\")\n if default_value:\n action = \"store_false\"\n name = f\"--no-{field.name}\"\n else:\n action = \"store_true\"\n name = f\"--{field.name}\"\n grp.add_argument(\n name,\n action=action,\n dest=field.name,\n help=f\"(default {field.name}={default_value})\")\n\n\ndef add_opt_arguments(\n parser: argparse.ArgumentParser, cls: type, name: Optional[str] = None):\n if not dataclasses.is_dataclass(cls) or not isinstance(cls, type):\n raise TypeError(f\"Expected dataclass, got {cls} ({type(cls)})\")\n if name is None:\n name = cls.__name__\n grp = parser.add_argument_group(name)\n fields = dataclasses.fields(cls)\n for field in fields:\n if field.type is bool:\n _add_bool_field(grp, field)\n continue\n kwargs = {\"type\": field.type}\n has_default, default_value = _try_get_default(field)\n if not has_default:\n kwargs[\"required\"] = False\n else:\n kwargs[\"help\"] = 
f\"(default={default_value})\"\n grp.add_argument(f\"--{field.name}\", **kwargs)\n return grp\n\n\ndef parse_opt_args(args: argparse.Namespace, cls: type):\n if not dataclasses.is_dataclass(cls) or not isinstance(cls, type):\n raise TypeError(f\"Expected dataclass, got {cls} ({type(cls)})\")\n opts = {}\n fields = dataclasses.fields(cls)\n for field in fields:\n try:\n opt = getattr(args, field.name)\n except AttributeError:\n pass\n else:\n if opt is not None:\n opts[field.name] = opt\n continue\n has_default, _ = _try_get_default(field)\n if not has_default:\n raise RuntimeError(f\"Missing opt '{field.name}'\")\n return cls(**opts)\n", "sub_path": "pdq/common/main_utils.py", "file_name": "main_utils.py", "file_ext": "py", "file_size_in_byte": 5491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.Namespace", "line_number": 22, "usage_type": "attribute"}, {"api_name": "argparse.Namespace", "line_number": 33, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 32, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 40, "usage_type": "attribute"}, {"api_name": "inspect.isclass", "line_number": 47, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 48, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 50, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 51, "usage_type": "call"}, {"api_name": "inspect.Parameter", "line_number": 56, "usage_type": "attribute"}, {"api_name": "inspect.Parameter", "line_number": 58, "usage_type": "attribute"}, {"api_name": "argparse.Namespace", "line_number": 67, "usage_type": "attribute"}, {"api_name": "argparse.Namespace", "line_number": 69, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 72, "usage_type": "attribute"}, {"api_name": "argparse.Namespace", "line_number": 83, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 92, "usage_type": "call"}, {"api_name": "dataclasses.Field", "line_number": 97, "usage_type": "attribute"}, {"api_name": "dataclasses.MISSING", "line_number": 98, "usage_type": "attribute"}, {"api_name": "dataclasses.MISSING", "line_number": 100, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 97, "usage_type": "name"}, {"api_name": "dataclasses.Field", "line_number": 105, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 129, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 129, "usage_type": "name"}, {"api_name": "dataclasses.is_dataclass", "line_number": 130, "usage_type": "call"}, {"api_name": "dataclasses.fields", "line_number": 135, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 150, "usage_type": "attribute"}, {"api_name": "dataclasses.is_dataclass", "line_number": 151, "usage_type": "call"}, {"api_name": "dataclasses.fields", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "206862812", "text": "import os\n\nimport dj_database_url\n\n# pull in the default wazimap settings\nfrom wazimap.settings import * # noqa\n\n\n# insert our overrides before both census and wazimap\nINSTALLED_APPS = ['wazimap_ke'] + INSTALLED_APPS\n\n\nDATABASE_URL = os.environ.get('DATABASE_URL', 
'postgresql://wazimap:wazimap@localhost/wazimap')\nDATABASES['default'] = dj_database_url.parse(DATABASE_URL)\nDATABASES['default']['ATOMIC_REQUESTS'] = True\n\n\n# Localise this instance of Wazimap\nWAZIMAP['name'] = 'Wazimap Kenya'\nWAZIMAP['url'] = 'http://kenya.wazimap.org'\nWAZIMAP['country_code'] = 'KE'\nWAZIMAP['profile_builder'] = 'wazimap_ke.profiles.get_census_profile'\nWAZIMAP['levels'] = {\n 'country': {\n 'plural': 'countries',\n 'children': ['county'],\n },\n 'county': {\n 'plural': 'counties',\n }\n}\nWAZIMAP['comparative_levels'] = ['country']\nWAZIMAP['geometry_data'] = {\n 'country': 'geo/country.topojson',\n 'county': 'geo/county.topojson',\n}\n\nWAZIMAP['ga_tracking_id'] = 'UA-44795600-8'\nWAZIMAP['twitter'] = '@Code4Africa'\n\nWAZIMAP['map_centre'] = [0.3051933453207569, 37.908818734483155]\nWAZIMAP['map_zoom'] = 6\n", "sub_path": "wazimap_ke/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 1124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "dj_database_url.parse", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "492410975", "text": "#!/usr/bin/env python\n\n\"\"\"Tests various webpages of the ``jwql`` web application to make sure\nthat loading times are not too long\n\nAuthors\n-------\n\n - Matthew Bourque\n\nUse\n---\n\n These tests can be run via the command line (omit the -s to\n suppress verbose output to stdout):\n\n ::\n\n pytest -s test_loading_times.py\n\"\"\"\n\nimport os\nimport pytest\nimport time\nimport urllib.request\n\nfrom jwql.utils.utils import get_base_url\n\nTIME_CONSTRAINT = 30 # seconds\n\n# Determine if tests are being run on jenkins\nON_JENKINS = os.path.expanduser('~') == '/home/jenkins'\n\nurls = []\n\n# Generic URLs\nurls.append('')\nurls.append('about/')\nurls.append('edb/')\n\n# Specific URLs\ntest_mappings = [('fgs', '86700', 'jw86600007001_02101_00001_guider2'),\n ('miri', '98012', 'jw98012001001_02102_00001_mirimage'),\n ('nircam', '93025', 'jw93065002001_02101_00001_nrcb2'),\n ('niriss', '00308', 'jw00308001001_02101_00001_nis'),\n ('nirspec', '96213', 'jw96213001001_02101_00001_nrs1')]\nfor mapping in test_mappings:\n (instrument, proposal, rootname) = mapping\n urls.append('{}/'.format(instrument))\n urls.append('{}/archive/'.format(instrument))\n urls.append('{}/archive/{}/'.format(instrument, proposal))\n urls.append('{}/{}/'.format(instrument, rootname))\n\n\n@pytest.mark.skipif(ON_JENKINS, reason='Requires access to central storage.')\n@pytest.mark.parametrize('url', urls)\ndef test_loading_times(url):\n \"\"\"Test to see if the given ``url`` returns a webpage successfully\n within a reasonable time.\n\n Parameters\n ----------\n url : str\n The url to the webpage of interest (e.g.\n ``http://127.0.0.1:8000/fgs/archive/'``).\n \"\"\"\n\n # Build full URL\n base_url = get_base_url()\n url = '{}/{}'.format(base_url, url)\n print('Testing {}'.format(url))\n\n t1 = time.time()\n url = urllib.request.urlopen(url)\n t2 = time.time()\n\n assert (t2 - t1) <= TIME_CONSTRAINT\n", "sub_path": "jwql/tests/test_loading_times.py", "file_name": "test_loading_times.py", "file_ext": "py", "file_size_in_byte": 1977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.path.expanduser", "line_number": 32, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "jwql.utils.utils.get_base_url", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 74, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 74, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 74, "usage_type": "name"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 56, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "74307279", "text": "__author__ = 'du'\n\nimport os\nimport collections\nimport sys\nROOTDIR = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-3]) + os.sep\nsys.path.append(ROOTDIR)\nfrom morphablegraphs.animation_data.bvh import BVHReader\nfrom morphablegraphs.animation_data.skeleton import Skeleton\nfrom ..libtest import params, pytest_generate_tests\nROOT_DIR = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-3]) + os.sep\nTEST_DATA_PATH = ROOT_DIR + '../test_data/animation_data'\n\n\nclass TestSkeleton(object):\n\n def setup_class(self):\n test_file = TEST_DATA_PATH + os.sep + 'walk_001_1_rightStance_86_128.bvh'\n test_bvhreader = BVHReader(test_file)\n self.skeleton = Skeleton(test_bvhreader)\n\n param_get_parent_dict = [{'res': {'Head': 'Neck'}},\n {'res': {'Bip01_L_Finger3': 'LeftHand'}}]\n\n @params(param_get_parent_dict)\n def test_get_parent_dict(self, res):\n parent_dic = self.skeleton._get_parent_dict()\n for key, value in res.iteritems():\n assert key in parent_dic.keys() and parent_dic[key] == value\n\n param_gen_all_parents = [{'node_name': 'Bip01_L_Finger0',\n 'res': ['LeftHand', 'LeftForeArm', 'LeftArm', 'LeftShoulder', 'Neck', 'Spine_1', 'Spine',\n 'Hips']}]\n\n @params(param_gen_all_parents)\n def test_gen_all_parents(self, node_name, res):\n parents = []\n for joint in self.skeleton.gen_all_parents(node_name):\n parents.append(joint)\n assert parents == res\n\n param_set_joint_weights = [{'res': [1.0, 0.36787944117144233, 0.1353352832366127, 0.049787068367863944,\n 0.018315638888734179, 0.018315638888734179, 0.006737946999085467,\n 0.0024787521766663585, 0.00091188196555451624, 0.018315638888734179,\n 0.006737946999085467, 0.0024787521766663585, 0.00091188196555451624,\n 0.36787944117144233, 0.1353352832366127, 0.049787068367863944,\n 0.36787944117144233, 0.1353352832366127, 0.049787068367863944]}]\n\n @params(param_set_joint_weights)\n def test_set_joint_weights(self, res):\n for i in xrange(len(self.skeleton.joint_weights)):\n assert round(self.skeleton.joint_weights[i], 5) == round(res[i], 5)\n\n param_create_filtered_node_name_map = [{'res': collections.OrderedDict([('Hips', 0), ('Spine', 1), ('Spine_1', 2),\n ('Neck', 3), ('Head', 4), ('LeftShoulder', 5),\n ('LeftArm', 6), ('LeftForeArm', 7),\n ('LeftHand', 8), ('RightShoulder', 9),\n ('RightArm', 10), ('RightForeArm', 11),\n ('RightHand', 12), ('LeftUpLeg', 13),\n ('LeftLeg', 14), ('LeftFoot', 15),\n ('RightUpLeg', 16), ('RightLeg', 17),\n ('RightFoot', 18)])}]\n", "sub_path": "python_src/test/animation_data/test_skeleton.py", "file_name": "test_skeleton.py", "file_ext": "py", "file_size_in_byte": 3440, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.sep.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 18, "usage_type": "attribute"}, {"api_name": "morphablegraphs.animation_data.bvh.BVHReader", "line_number": 19, "usage_type": "call"}, {"api_name": "morphablegraphs.animation_data.skeleton.Skeleton", "line_number": 20, "usage_type": "call"}, {"api_name": "libtest.params", "line_number": 25, "usage_type": "call"}, {"api_name": "libtest.params", "line_number": 35, "usage_type": "call"}, {"api_name": "libtest.params", "line_number": 49, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "61172437", "text": "from django.db import models\nfrom django.contrib import admin\n\n\nclass User(models.Model):\n name = models.CharField(max_length=16, null=True)\n\n def __unicode__(self):\n return \"pk: \" + str(self.pk) + \" name: \" + str(self.name)\n\n\nclass Party(models.Model):\n host_id = models.ForeignKey(User)\n token = models.CharField(max_length=10, primary_key=True)\n PARTY_STATUS = (\n ('R', 'Running'),\n ('P', 'Paused'),\n )\n status = models.CharField(max_length=1, choices=PARTY_STATUS)\n\n def __unicode__(self):\n return str(self.token)\n\n\nclass Video(models.Model):\n party_id = models.ForeignKey(Party)\n votes = models.IntegerField(default=0)\n time_added = models.DateTimeField(auto_now_add=True)\n token = models.CharField(max_length=12)\n user_id = models.ForeignKey(User)\n VIDEO_STATUS = (\n ('F', 'Finished'),\n ('P', 'Playing'),\n ('Q', 'Queued'),\n )\n status = models.CharField(max_length=1, choices=VIDEO_STATUS, default='Q')\n\n def __unicode__(self):\n return str(self.token) + \" \" + str(self.party_id) + \" \" + self.status;\n\n\nclass UserParty(models.Model):\n user_id = models.ForeignKey(User)\n party_id = models.ForeignKey(Party)\n\n\nclass UserVote(models.Model):\n user = models.ForeignKey(User)\n video = models.ForeignKey(Video)\n delta = models.IntegerField()\n\n\nadmin.site.register(User)\nadmin.site.register(Party)\nadmin.site.register(Video)\nadmin.site.register(UserParty)\nadmin.site.register(UserVote)\n", "sub_path": "ytParty/queue/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": 
"name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 53, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 54, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 55, "usage_type": "name"}, {"api_name": 
"django.contrib.admin.site.register", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 56, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "417310626", "text": "\nimport sys\nfrom io import StringIO\n\n\ndef render(image: bytes) -> None:\n for y in range(32):\n sys.stdout.write(\"{:3d}\".format(y))\n for x in range(128):\n if ((x+1) % 10) == 1:\n sys.stdout.write(\" \")\n sys.stdout.write(chr(ord(\"0\") + image[y*128 + x])) if image[y*128 + x] else sys.stdout.write(\".\")\n sys.stdout.write(\"\\n\")\n\n\ndef image_to_str(image):\n s = StringIO()\n for y in range(32):\n for x in range(128):\n if image[y * 128 + x]:\n s.write(chr(ord(\"0\") + image[y * 128 + x]))\n else:\n s.write(\".\")\n s.write(\"\\n\")\n return s.getvalue()\n\n\nclass ImageDecoder(object):\n\n def __init__(self, buffer: bytes):\n self.buffer = buffer\n self.image = bytearray(128 * 32)\n self.x = 0\n self.y = 0\n self.last_draw = False\n self.repeat_column_shift = False\n\n def _draw(self, x, y, color=1):\n assert 0 <= x < 128\n assert 0 <= y < 32\n assert 0 <= color < 256\n if y < 32 and x < 128:\n self.image[y * 128 + x] = color\n\n def _execute(self, b: int):\n cmd = (b & 0xc0) >> 6\n cnt = b & 0x3f\n if cmd == 0: # Skip column\n cnt += 1\n print(\"Skip column {}\".format(cnt))\n if self.last_draw:\n self.x += 1\n self.x += cnt\n self.y = 0\n self.last_draw = False\n self.repeat_column_shift = False\n elif cmd == 1: # Repeat column\n cnt += 1\n print(\"Repeat column {}\".format(cnt))\n if not self.repeat_column_shift:\n self.x += 1\n for _ in range(cnt):\n i = self.x\n for _ in range(32):\n self.image[i] = self.image[i - 1]\n i += 128\n self.x += 1\n self.y = 0\n self.last_draw = False\n self.repeat_column_shift = True\n elif cmd == 2: # Draw / Skip\n draw = cnt & 0x01\n cnt >>= 1\n if draw:\n cnt += 1\n print(\"Draw {}\".format(cnt))\n for _ in range(cnt):\n self._draw(self.x, self.y)\n self.y += 1\n else:\n draw = cnt & 0x01\n cnt >>= 1\n cnt += 1\n if draw:\n print(\"Draw2 {}\".format(cnt))\n for _ in range(cnt):\n self._draw(self.x, self.y)\n self.y += 1\n else:\n print(\"Skip {}\".format(cnt))\n self.y = cnt # Consecutive commands seem to be ignored.\n self.last_draw = True # FIXME\n self.repeat_column_shift = False\n else: # Draw column / Skip\n draw = cnt & 0x01\n cnt >>= 1\n if draw:\n cnt += 1\n print(\"Draw column {}\".format(cnt))\n for _ in range(cnt):\n self._draw(self.x, self.y)\n self.y += 1\n if self.y >= 31:\n self.repeat_column_shift = True\n self.x += 1\n self.y = 0\n else:\n self.repeat_column_shift = False\n else:\n draw = cnt & 0x01\n cnt >>= 1\n cnt += 1\n cnt += 16\n if draw:\n print(\"Draw column2 {}\".format(cnt))\n for _ in range(cnt):\n self._draw(self.x, self.y)\n self.y += 1\n if self.y >= 31:\n self.x += 1\n self.y = 0\n else:\n print(\"Skip2 {}\".format(cnt))\n self.y = cnt # Consecutive commands seem to be ignored.\n self.repeat_column_shift = False\n self.last_draw = False\n\n def decode(self) -> bytes:\n for i, b in enumerate(self.buffer):\n sys.stdout.write(\"{}: \".format(i))\n self._execute(b)\n return self.image\n", "sub_path": "pycozmo/image_encoder.py", "file_name": "image_encoder.py", "file_ext": "py", 
"file_size_in_byte": 4256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.stdout.write", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 13, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 130, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "168541195", "text": "\"\"\"\ntrain on GAN-CS \nexample\nexport CUDA_VISIBLE_DEVICES=0\npython srez_main.py --dataset_input /home/enhaog/GANCS/srez/dataset_MRI/phantom \\\n --dataset_output /home/enhaog/GANCS/srez/dataset_MRI/phantom \\\n --run train \\\n --gene_mse_factor 1.0\n\npython srez_main.py --dataset_input /home/enhaog/GANCS/srez/dataset_MRI/phantom2 \\\n --batch_size 4 --run train --summary_period 125 \\\n --sample_size 256 \\\n --train_time 10 \\\n --sample_test 32 --sample_train 1000 \\\n --train_dir tmp_specify_train \\\n --R_factor 8 \\\n --R_alpha 3 \\\n % R_seed<0 means non-fixed random seed\n --R_seed -1\n\npython3 srez_main.py --run train \\\n --dataset_train /mnt/raid5/morteza/datasets/Abdominal-DCE-616cases/train\\\n --dataset_test /mnt/raid5/morteza/datasets/Abdominal-DCE-616cases/test\\\n --sample_size 256 \\\n --sample_size_y 128 \\\n --batch_size 8 \\\n --summary_period 1000 \\\n --sample_test 128 \\\n --sample_train -1 \\\n --subsample_test 8 \\\n --subsample_train 10000 \\\n --train_time 6000 \\\n --R_seed -1 \\\n --R_alpha 2 \\\n --R_factor 10 \\\n --train_dir /mnt/raid5/morteza/GANCS-MRI/train_save_all\n\n\n\"\"\"\n# import srez_demo\nimport os\n\nimport srez_input\nimport srez_model\nimport srez_train\n\nimport time\nimport os.path\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport shutil, os\n\nfrom scipy import io as sio\n\nFLAGS = tf.app.flags.FLAGS\n\n# Configuration (alphabetically)\ntf.app.flags.DEFINE_string('gpu_id', '0',\n \"index of used gpu device.\")\n\ntf.app.flags.DEFINE_integer('number_of_copies', 3,\n \"Number of repeatitions for the generator network.\")\n\ntf.app.flags.DEFINE_integer('batch_size', 4,\n \"Number of samples per batch.\")\n\ntf.app.flags.DEFINE_string('checkpoint_dir', 'checkpoint',\n \"Output folder where checkpoints are dumped.\")\n\ntf.app.flags.DEFINE_integer('checkpoint_period', 1000,\n \"Number of batches in between checkpoints\")\n\ntf.app.flags.DEFINE_string('dataset_train', 'D:/Dataset/MRI/Thrive/0803/thrive_train_norm3_intep_a4_n050-100.tfrecords',\n \"Path to the train dataset directory.\")\n\ntf.app.flags.DEFINE_string('dataset_test', 'D:/Dataset/MRI/Thrive/0803/thrive_test_norm3_intep_a4_n050-100.tfrecords',\n \"Path to the test dataset directory.\")\n\ntf.app.flags.DEFINE_float('epsilon', 1e-8,\n \"Fuzz term to avoid numerical instability\")\n\ntf.app.flags.DEFINE_string('run', 'demo',\n \"Which operation to run. 
[demo|train]\") # demo\n\ntf.app.flags.DEFINE_float('gene_l1l2_factor', 1,\n \"The ratio of l1 l2 factor, MSE=alpha*l1+(1-alpha)*l2\")\n\ntf.app.flags.DEFINE_float('gene_ssim_factor', 0.1,\n \"The ratio of ssim vs l1l2 factor, MSE=beta*ssim+(1-beta)*l1l2\")\n\ntf.app.flags.DEFINE_float('gene_log_factor', 0,\n \"Multiplier for generator fool loss term, weighting log-loss vs LS loss\")\n\ntf.app.flags.DEFINE_float('gene_dc_factor', 0.1,\n \"Multiplier for generator data-consistency L2 loss term for data consistency, \"\n \"weighting Data-Consistency with GD-loss for GAN-loss\")\n\ntf.app.flags.DEFINE_float('gene_mse_factor', 0,\n \"Multiplier for generator MSE loss for regression accuracy, weighting MSE VS GAN-loss\")\n\ntf.app.flags.DEFINE_float('learning_beta1', 0.5,\n \"Beta1 parameter used for AdamOptimizer\")\n\ntf.app.flags.DEFINE_float('learning_rate_start', 0.00001,\n \"Starting learning rate used for AdamOptimizer\") # 0.000001\n\ntf.app.flags.DEFINE_integer('learning_rate_half_life', 10000,\n \"Number of batches until learning rate is halved\")\n\ntf.app.flags.DEFINE_bool('log_device_placement', False,\n \"Log the device where variables are placed.\")\n\ntf.app.flags.DEFINE_integer('sample_size', 288, # 192, 288\n \"Image sample size in pixels. Range [64,128]\")\n\ntf.app.flags.DEFINE_integer('sample_size_y', 360, # 240 360\n \"Image sample size in pixels. by default the sample as sample_size\")\n\ntf.app.flags.DEFINE_integer('summary_period', 1000,\n \"Number of batches between summary data dumps\")\n\ntf.app.flags.DEFINE_integer('summary_train_period', 50,\n \"Number of batches between train data dumps\")\n\ntf.app.flags.DEFINE_integer('random_seed', 0,\n \"Seed used to initialize rng.\")\n\ntf.app.flags.DEFINE_integer('sample_test', -1,\n \"Number of features to use for testing.\")\n\ntf.app.flags.DEFINE_integer('sample_train', -1,\n \"Number of features to use for train. default value is -1 for use all samples except testing samples\")\n\ntf.app.flags.DEFINE_integer('subsample_test', 32,\n \"Number of test sample to uniform sample. default value is -1 for using all test samples\")\n\ntf.app.flags.DEFINE_integer('subsample_train', -1,\n \"Number of train sample to uniform sample. 
default value is -1 for using all train samples\")\n\ntf.app.flags.DEFINE_string('train_dir', './train_save_all',\n \"Output folder where training logs are dumped.\")\n\ntf.app.flags.DEFINE_integer('train_time', 2000,\n \"Time in minutes to train the model\")\n\ntf.app.flags.DEFINE_float('gpu_memory_fraction', 0.9,\n \"specified the max gpu fraction used per device\")\n\ntf.app.flags.DEFINE_integer('hybrid_disc', 0,\n \"whether/level to augment discriminator input to image+kspace hybrid space.\")\n\ntf.app.flags.DEFINE_string('architecture', 'resnet',\n \"model arch used for generator, ex: resnet, aec, pool\")\n\ntf.app.flags.DEFINE_bool('respectively_norm', False,\n \"whether to normalize label and noise image respectively.\")\n\ntf.app.flags.DEFINE_string('GAN', 'BEGAN',\n \"the type of GAN, LSGAN or BEGAN\")\n\n\ndef setup_tensorflow(gpu_memory_fraction=0.80):\n os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_id\n\n # Create session\n config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement)\n\n if gpu_memory_fraction < 0:\n config.gpu_options.allow_growth = True\n else:\n config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction\n\n sess = tf.Session(config=config)\n print('TF session setup for gpu usage cap of {0}'.format(config.gpu_options.per_process_gpu_memory_fraction))\n\n # Initialize rng with a deterministic seed\n with sess.graph.as_default():\n tf.set_random_seed(FLAGS.random_seed)\n\n random.seed(FLAGS.random_seed)\n np.random.seed(FLAGS.random_seed)\n\n # tf.summary.FileWriter.\n summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)\n\n return sess, summary_writer\n\n\ndef _demo_placeholder():\n features = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='labels_and_features')\n loop_count = tf.placeholder(tf.int32, [], name='loop_count')\n\n gene_output = srez_model.create_model_test(features, None, loop_count)\n\n sess = tf.Session()\n\n tf.train.write_graph(sess.graph_def, './', \"model.pbtxt\", as_text=True)\n\n saver = tf.train.Saver()\n model_file = os.path.join('D:/Dataset/MRI/Thrive/0803/tfrecords/thrive_train_norm3_intep_a4_n060/checkpoint', 'model.ckpt-26000')\n saver.restore(sess, model_file)\n\n # npy_file = 'D:/Dataset/MRI/Thrive/ThriveDenoise_test/0717/org-nopre.npy'\n # npy = np.load(npy_file)\n # mat_file = 'D:/Dataset/MRI/Thrive/0903/gaomingda/intep.mat'\n # npy = sio.loadmat(mat_file)['intep']\n mat_file = 'D:/Dataset/MRI/Thrive/0925/wangyu/AI input 2 1.5/zerofillofilter.mat'\n mat = sio.loadmat(mat_file)['zerofillofilter']\n # mat_file = 'D:/Dataset/MRI/Thrive/0925/wangyu/comp 1.8 1.8 dpf/Intep.mat'\n # mat = sio.loadmat(mat_file)['Intep']\n mat = np.abs(mat)\n mat = np.transpose(mat, (2, 0, 1))\n mat.astype(np.uint16).tofile('../data/test/input.raw')\n\n for loop in range(3):\n npy = mat[:, :, :, np.newaxis].copy()\n\n # pre-process, divide by max of each slice\n max_vals = []\n for slice in npy:\n max_val = np.max(slice)\n slice /= max_val\n max_vals.append(max_val)\n\n half_size = npy.shape[0] // 2\n\n start_time = time.time()\n gene_output_np1 = sess.run(gene_output, feed_dict={loop_count: loop, features: npy[:half_size]})\n elapsed_time = 1000. 
* (time.time() - start_time)\n print(gene_output_np1.shape, 'time: %3f ms' % elapsed_time)\n # gene_output_np1.tofile('data/test/org_1-out-it%d.raw' % (loop + 1))\n\n gene_output_np2 = sess.run(gene_output, feed_dict={loop_count: loop, features: npy[half_size:]})\n # gene_output_np2.tofile('data/test/org_2-out-it%d.raw' % (loop + 1))\n\n gene_output_np = np.concatenate((gene_output_np1, gene_output_np2), axis=0)\n\n # post-process, filter\n gene_output_np = np.clip(gene_output_np, 0.0, 1.0)\n for i in range(gene_output_np.shape[0]):\n gene_output_np[i] *= max_vals[i]\n\n gene_output_np = gene_output_np.astype(np.uint16)\n gene_output_np.tofile('../data/test/out-it%d.raw' % (loop + 1))\n\n sess.close()\n\n\ndef _demo_using_dataset():\n ds_filenames = tf.placeholder(tf.string, shape=[None])\n loop_count = tf.placeholder(tf.int32, [], name='loop_count')\n\n dataset = tf.data.TFRecordDataset(ds_filenames)\n dataset = dataset.map(srez_input.parse_func)\n dataset = dataset.batch(FLAGS.batch_size)\n\n iterator = dataset.make_initializable_iterator()\n features, labels, gmaps = iterator.get_next()\n\n gene_output = srez_model.create_model_test(features, None, loop_count)\n\n node_names = [node.name for node in tf.get_default_graph().as_graph_def().node]\n op = tf.get_default_graph().get_operation_by_name('gene/Reshape')\n tensor = tf.get_default_graph().get_tensor_by_name(\"gene/Reshape:0\")\n\n sess = tf.Session()\n\n tf.train.write_graph(sess.graph_def, './', \"model_bx.pbtxt\", as_text=True)\n\n saver = tf.train.Saver()\n model_file = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt-30000')\n # model_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n saver.restore(sess, model_file)\n\n sess.run(iterator.initializer, feed_dict={ds_filenames: [FLAGS.dataset_test]})\n\n i = 0\n while True:\n try:\n start_time = time.time()\n features_np, labels_np, gene_output_np = sess.run(\n [features, labels, gene_output], feed_dict={loop_count: 1})\n\n # gene_output_np = sess.run([gene_output])\n # filename = '%02d-out.raw' % i\n # gene_output_np.tofile(filename)\n\n elapsed_time = 1000. 
* (time.time() - start_time)\n print('time: %3f ms' % elapsed_time)\n\n srez_train._summarize_progress_test(features_np, labels_np, gene_output_np, i)\n i += 1\n except tf.errors.OutOfRangeError:\n break\n\n sess.close()\n\n\nclass TrainData(object):\n def __init__(self, dictionary):\n self.__dict__.update(dictionary)\n\n\ndef _train():\n # Setup global tensorflow state\n sess, summary_writer = setup_tensorflow(FLAGS.gpu_memory_fraction)\n\n loop_count = tf.placeholder_with_default(2, [], name='loop_count')\n\n # sample train and test\n iterator_train = srez_input.setup_inputs_using_dataset(FLAGS.dataset_train, FLAGS.batch_size, is_training=True)\n iterator_test = srez_input.setup_inputs_using_dataset(FLAGS.dataset_test, FLAGS.batch_size, is_training=False)\n\n # Setup async input queues\n train_features, train_labels, train_gmaps = iterator_train.get_next()\n test_features, test_labels, test_gmaps = iterator_test.get_next()\n\n print('train_features', train_features.get_shape())\n print('train_labels', train_labels.get_shape())\n print('train_gmaps', train_gmaps.get_shape())\n\n # Add some noise during training (think denoising autoencoders)\n noise_level = .00\n noisy_train_features = train_features + tf.random_normal(tf.shape(train_features), stddev=noise_level)\n\n # Create and initialize model\n [gene_moutput, gene_output, gene_var_list,\n disc_real_output, disc_fake_output, disc_moutput, disc_var_list] = srez_model.create_model(\n noisy_train_features, train_labels, test_features, test_labels, loop_count, architecture=FLAGS.architecture)\n\n # [gene_minput, gene_moutput, gene_mgmap, gene_output, gene_var_list,\n # disc_real_output, disc_fake_output, disc_moutput, disc_var_list] = srez_model.create_model_rnn(\n # sess, noisy_train_features, train_labels, train_gmaps, architecture=FLAGS.architecture)\n\n gene_loss, gene_ls_loss, list_gene_losses, gene_mse_factor = srez_model.create_generator_loss(\n disc_fake_output, gene_output, None, train_features, train_labels)\n\n disc_real_loss, disc_fake_loss = srez_model.create_discriminator_loss(\n disc_real_output, train_labels, disc_fake_output, gene_output)\n\n if FLAGS.GAN == 'LSGAN':\n disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')\n else:\n k_t = tf.placeholder(dtype=tf.float32, name='kt_factor')\n disc_loss = tf.subtract(disc_real_loss, k_t * disc_fake_loss, name='disc_loss')\n\n # add gradient on disc loss\n disc_gradients = tf.gradients(disc_loss, [disc_fake_output, disc_real_output, gene_output])\n print('disc loss gradients:', [x.shape for x in disc_gradients])\n\n global_step, learning_rate, gene_minimize, disc_minimize = \\\n srez_model.create_optimizers(gene_loss, gene_var_list, disc_loss, disc_var_list)\n\n tf.summary.scalar('gene_loss', gene_loss)\n\n # Train model\n train_data = TrainData(locals())\n srez_train.train_model(train_data)\n\n sess.close()\n\n\ndef main(argv=None):\n _train()\n # _demo_using_dataset()\n # _demo_placeholder()\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "sub_path": "denoise_py/srez_main.py", "file_name": "srez_main.py", "file_ext": "py", "file_size_in_byte": 14708, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "tensorflow.app", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", 
"line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_bool", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 125, "usage_type": "attribute"}, 
{"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 128, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 140, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 149, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 155, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_bool", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 158, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 176, "usage_type": "call"}, {"api_name": "tensorflow.set_random_seed", "line_number": 181, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 184, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 187, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 187, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 193, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 193, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 194, "usage_type": "attribute"}, {"api_name": "srez_model.create_model_test", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 198, "usage_type": "call"}, {"api_name": "tensorflow.train.write_graph", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 202, "usage_type": "call"}, 
{"api_name": "tensorflow.train", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "scipy.io.loadmat", "line_number": 211, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 211, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 219, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 224, "usage_type": "call"}, {"api_name": "time.time", "line_number": 230, "usage_type": "call"}, {"api_name": "time.time", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 246, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 253, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 253, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 254, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 254, "usage_type": "attribute"}, {"api_name": "tensorflow.data.TFRecordDataset", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 256, "usage_type": "attribute"}, {"api_name": "srez_input.parse_func", "line_number": 257, "usage_type": "attribute"}, {"api_name": "srez_model.create_model_test", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 265, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 267, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.train.write_graph", "line_number": 271, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 271, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path", "line_number": 274, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 283, "usage_type": "call"}, {"api_name": "time.time", "line_number": 291, "usage_type": "call"}, {"api_name": "srez_train._summarize_progress_test", "line_number": 294, "usage_type": "call"}, {"api_name": "tensorflow.errors", "line_number": 296, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder_with_default", "line_number": 311, "usage_type": "call"}, {"api_name": "srez_input.setup_inputs_using_dataset", "line_number": 314, "usage_type": "call"}, {"api_name": "srez_input.setup_inputs_using_dataset", "line_number": 315, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 327, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 327, "usage_type": "call"}, {"api_name": "srez_model.create_model", "line_number": 331, "usage_type": "call"}, {"api_name": "srez_model.create_generator_loss", "line_number": 338, "usage_type": "call"}, {"api_name": 
"srez_model.create_discriminator_loss", "line_number": 341, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 345, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 347, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 347, "usage_type": "attribute"}, {"api_name": "tensorflow.subtract", "line_number": 348, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 351, "usage_type": "call"}, {"api_name": "srez_model.create_optimizers", "line_number": 355, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 357, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 357, "usage_type": "attribute"}, {"api_name": "srez_train.train_model", "line_number": 361, "usage_type": "call"}, {"api_name": "tensorflow.app.run", "line_number": 373, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 373, "usage_type": "attribute"}]} +{"seq_id": "367769290", "text": "# -*- coding: utf-8 -*-\nimport utils, os, pyprind\nimport spold2_reader as spold2\n\nversion = '3.4'\nsystem_model = 'Undefined'\nfolder = utils.version_system_model_path(version, system_model)\ndataset_folder = os.path.join(folder, 'datasets')\n\nfilelist = utils.build_file_list(dataset_folder)\ndf = []\nfor filename in pyprind.prog_bar(filelist):\n f = spold2.Dataset(dataset_folder, filename)\n for exc in f.iterate_exchanges():\n for field in ['mathematicalRelation', 'productionVolumeMathematicalRelation']:\n m = exc.get(field, '')\n if 'lifetime' in m and 'apv_electricity' in m:\n to_add = f.baseline()\n to_add.update(exc.baseline([field]))\n df.append(to_add)\ndf = utils.list_to_df(df)\nfolder = r'C:\\Dropbox (ecoinvent)\\ei-guillaume\\short_script_outputs'\nfilename = 'lifetime_apv.xlsx'\ncolumns = ['activityName', 'geography', 'group', 'name', 'activityLinkId', 'mathematicalRelation', 'amount']\ndfs = [(df, 'Sheet1', columns)]\nutils.dataframe_to_excel(folder, filename, dfs, feedback = True)", "sub_path": "projects/short_scripts/apv_lifetime.py", "file_name": "apv_lifetime.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "utils.version_system_model_path", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "utils.build_file_list", "line_number": 10, "usage_type": "call"}, {"api_name": "pyprind.prog_bar", "line_number": 12, "usage_type": "call"}, {"api_name": "spold2_reader.Dataset", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.list_to_df", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.dataframe_to_excel", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "301521300", "text": "#!/usr/bin/env python\n\"\"\"\nAdd MR comments from the Scan Completed instrument on REDCap to the database.\n\nUsage:\n dm_redcap_scan_completed.py [options] <study>\n\nArguments:\n <study> Name of the study to process\n\nOptions:\n -q --quiet Less logging\n -v --verbose Verbose logging\n -d --debug Debug logging\n\"\"\"\n\nimport os\nimport sys\nimport requests\nimport logging\n\nfrom docopt import docopt\n\nimport datman.config\nimport datman.scanid\nimport datman.dashboard\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\ncfg = None\ndashboard = None\nredcap_url = None\nredcap_version = None\nredcap_project = 
None\ninstrument = None\n\n\ndef read_token(token_file):\n if not os.path.isfile(token_file):\n logger.error('REDCap token file: {} not found'.format(token_file))\n raise IOError\n\n with open(token_file, 'r') as token_file:\n token = token_file.readline().strip()\n\n return token\n\n\ndef get_records(api_url, token, instrument):\n payload = {'token': token,\n 'content': 'record',\n 'forms': instrument,\n 'format': 'json',\n 'type': 'flat',\n 'rawOrLabel': 'raw',\n 'fields': 'record_id'}\n response = requests.post(api_url, data=payload)\n return response\n\n\ndef get_version(api_url, token):\n payload = {'token': token,\n 'content': 'version'}\n response = requests.post(api_url, data=payload)\n version = response.content\n return version\n\n\ndef add_session_redcap(record):\n record_id = record['record_id']\n subject_id = record[cfg.get_key(['REDCAP_SUBJ'])].upper()\n if not datman.scanid.is_scanid(subject_id):\n try:\n subject_id = subject_id + '_01'\n datman.scanid.is_scanid(subject_id)\n except:\n logger.error('Invalid session: {}, skipping'.format(subject_id))\n return\n\n ident = datman.scanid.parse(subject_id)\n session_name = ident.get_full_subjectid_with_timepoint()\n session_date = record[cfg.get_key(['REDCAP_DATE'])]\n\n try:\n session = dashboard.get_add_session(session_name,\n date=session_date,\n create=True)\n except datman.dashboard.DashboardException as e:\n logger.error('Failed adding session {} to dashboard'.format(session_name))\n\n try:\n datman.dashboard.add_redcap(session,\n record_id,\n session_date,\n cfg.get_key(['REDCAP_EVENTID'])[record['redcap_event_name']],\n record[cfg.get_key(['REDCAP_COMMENTS'])],\n redcap_url,\n redcap_version,\n redcap_project,\n instrument)\n except:\n logger.error('Failed adding REDCap info for session {} to dashboard'.format(session_name))\n\n\ndef main():\n global cfg\n global dashboard\n global redcap_url\n global redcap_version\n global redcap_project\n global instrument\n\n arguments = docopt(__doc__)\n study = arguments['<study>']\n quiet = arguments['--quiet']\n verbose = arguments['--verbose']\n debug = arguments['--debug']\n\n # setup logging\n ch = logging.StreamHandler(sys.stdout)\n log_level = logging.WARN\n\n if quiet:\n log_level = logging.ERROR\n if verbose:\n log_level = logging.INFO\n if debug:\n log_level = logging.DEBUG\n\n logger.setLevel(log_level)\n ch.setLevel(log_level)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - {study} - '\n '%(levelname)s - %(message)s'.format(\n study=study))\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logging.getLogger('datman.utils').addHandler(ch)\n\n # setup the config object\n cfg = datman.config.config(study=study)\n\n # get paths\n dir_meta = cfg.get_path('meta')\n\n # set up the dashboard object\n try:\n dashboard = datman.dashboard.dashboard(study)\n except datman.dashboard.DashboardException as e:\n logger.error('Failed to initialise dashboard')\n raise e\n\n # configure redcap variables\n api_url = cfg.get_key(['REDCAP_URL'])\n redcap_url = api_url.replace('/api/', '/')\n\n token_path = os.path.join(dir_meta, cfg.get_key(['REDCAP_TOKEN']))\n token = read_token(token_path)\n\n redcap_project = cfg.get_key(['REDCAP_PROJECTID'])\n instrument = cfg.get_key(['REDCAP_INSTRUMENT'])\n\n redcap_version = get_version(api_url, token)\n\n response = get_records(api_url, token, instrument)\n\n project_records = []\n for item in response.json():\n # only grab records where instrument has been marked complete\n if not (item[cfg.get_key(['REDCAP_DATE'])] and\n
item[cfg.get_key(['REDCAP_STATUS'])] == '1'):\n continue\n project_records.append(item)\n\n for record in project_records:\n add_session_redcap(record)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "bin/dm_redcap_scan_completed.py", "file_name": "dm_redcap_scan_completed.py", "file_ext": "py", "file_size_in_byte": 5138, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 57, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 64, "usage_type": "call"}, {"api_name": "datman.config.scanid.is_scanid", "line_number": 72, "usage_type": "call"}, {"api_name": "datman.config.scanid", "line_number": 72, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 72, "usage_type": "name"}, {"api_name": "datman.config.scanid.is_scanid", "line_number": 75, "usage_type": "call"}, {"api_name": "datman.config.scanid", "line_number": 75, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 75, "usage_type": "name"}, {"api_name": "datman.config.scanid.parse", "line_number": 80, "usage_type": "call"}, {"api_name": "datman.config.scanid", "line_number": 80, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 80, "usage_type": "name"}, {"api_name": "datman.config.dashboard", "line_number": 88, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 88, "usage_type": "name"}, {"api_name": "datman.config.dashboard.add_redcap", "line_number": 92, "usage_type": "call"}, {"api_name": "datman.config.dashboard", "line_number": 92, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 92, "usage_type": "name"}, {"api_name": "docopt.docopt", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 120, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 120, "usage_type": "attribute"}, {"api_name": "logging.WARN", "line_number": 121, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 124, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 126, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 128, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 138, "usage_type": "call"}, {"api_name": "datman.config.config.config", "line_number": 141, "usage_type": "call"}, {"api_name": "datman.config.config", "line_number": 141, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 141, "usage_type": "name"}, {"api_name": "datman.config.dashboard.dashboard", "line_number": 148, "usage_type": "call"}, {"api_name": "datman.config.dashboard", "line_number": 148, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 148, "usage_type": "name"}, {"api_name": "datman.config.dashboard", "line_number": 149, "usage_type": "attribute"}, {"api_name": "datman.config", "line_number": 149, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 157, "usage_type": "attribute"}]} +{"seq_id": "44935953", "text": "\n\n#####################################################################################\n#### Create class for the posts data:\nclass Post:\n def __init__(self, link, account, date, media_type, media_link, \n description = None, location = None, loc_link = None, likes = None):\n self.link = link\n self.account = account\n self.date = date\n \n self.media_type = media_type\n self.set_media_type(media_type)\n \n self.media_link = media_link\n self.description = description\n self.location = location\n self.loc_link = loc_link\n self.likes = likes\n \n def set_media_type(self, media_type):\n if media_type in (\"Image\", \"Video\", \"Multi Image\"):\n self.media_type = media_type\n else:\n self.media_type = 'Error'\n print(\"Incorrect media type. Please enter one of the following values: Image, Video, Multi Image\")\n #raise ValueError(\"Incorrect media type. Please enter one of the following values: Image, Video, Multi Image\")\n \n def to_dict(self):\n return {'Link': self.link, \n 'Account': self.account, \n 'Date': self.date, \n 'Media Type': self.media_type, \n 'Media Link': self.media_link,\n 'Description': self.description, \n 'Location': self.location, \n 'Location Link': self.loc_link, \n 'Likes': self.likes} \n \n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport time \n\n#### Create function for scraping the posts:\n# Note it requires the initial article element and the dictionary to be given as inputs:\ndef scrape(article_elem_init, list_data): \n \n article_elem = driver.find_elements_by_xpath(\"//article\") # using this to count the number of posts we are going to scrape \n \n if len(article_elem) <= len(article_elem_init):\n n = 1\n n_end = 4\n else:\n n = 5\n n_end = 7\n \n while n <= n_end:\n \n \"\"\"Get the LINK to the post\"\"\" \n try:\n post_elem = driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div[2]/section[2]/div/a\") \n link = post_elem.get_attribute('href')[:-9]\n except:\n link = 'n/a'\n \n \"\"\"Get the account USERNAME\"\"\"\n user_elem = driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/header/div/div/div/h2/a\") \n account = user_elem.text\n \n \n \"\"\"Get the DATE of the post\"\"\" \n date_elem=driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div/div/a/time\").get_attribute('datetime')\n date = date_elem\n \n \n \"\"\"Get the TYPE of the post (e.g. 
Image vs Video)\"\"\" \n try:\n multi_elem = driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div[1]/div/div/div/div[2]/div/div[1]/div/ul/li[1]/div/div/div/div[1]/img\") \n media_type = 'Multi Image'\n media_link = multi_elem.get_attribute('src')\n \n except NoSuchElementException:\n try:\n vid_elem = driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div[1]/div/div/div/div[1]/div/video\") ## video link\n media_type = 'Video'\n media_link = vid_elem.get_attribute('src')\n except NoSuchElementException:\n try:\n img_elem = driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div[1]/div/div/div/img\") ## image link\n media_type = 'Image'\n media_link = img_elem.get_attribute('src')\n except NoSuchElementException:\n try:\n img_elem = driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div[1]/div/div/div/div/img\") ## image link\n media_type = 'Image'\n media_link = img_elem.get_attribute('src')\n except:\n media_type = None\n media_link = 'n/a'\n\n \n \"\"\"Get the post DESCRIPTION\"\"\" \n try:\n desc_elem=driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div/div/ul/li/div/div/div/span\")\n description = desc_elem.text\n except:\n description = None\n \n \n \"\"\"Get the number of LIKES\"\"\" \n try: \n like_elem=driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/div[2]/section[2]/div\")\n likes = ''.join(c for c in like_elem.text if c.isdigit()) # keep digits only; re is not imported until later in this script\n except: \n likes = 0\n \n \n \"\"\"Get the LOCATION\"\"\" \n try: \n loc_elem = driver.find_element_by_xpath(\"//article[\"+str(n)+\"]/header/div/div/a\")\n location = loc_elem.text\n loc_link = loc_elem.get_attribute('href')\n except:\n location = 'No location'\n loc_link = None\n\n list_data.append(Post(link, account, date, media_type, media_link, description, location, loc_link, likes))\n \n n+=1 \n \n driver.execute_script(\"arguments[0].scrollIntoView();\", article_elem[n_end-1])\n return(str(len(article_elem))+\" posts scraped.\")\n\n\n#####################################################################################\n#### Test the above:\n\n#### Open ChromeDriver and log in to Instagram:\nfrom Instagram.Admin.FileLocator import locate\nfrom Instagram.InstaLogin import login, credentials\ndriver = webdriver.Chrome(locate(\"chromedriver.exe\"))\n\n\n#### Log into Instagram:\nlogin_info = credentials('login.txt')\nlogin(driver, login_info[0], login_info[1])\ntime.sleep(4)\n\n\n#### Scrape data for each post \ndriver.execute_script(\"window.scrollTo(0, 1)\") # need to scroll a bit for the article element to appear\ntime.sleep(2)\narticle_elem_init = driver.find_elements_by_xpath(\"//article\") # using this to base the scrape count\nlist_data = []\nscrape(article_elem_init, list_data) # run this as many times as you want (maybe automate with a function)\n\n\n#### Create a dataframe and remove duplicates:\nimport pandas as pd\nimport datetime\ndf = pd.DataFrame([s.to_dict() for s in list_data]).drop_duplicates().reset_index()\ndf['Date of scrape'] = datetime.datetime.today().strftime('%Y-%m-%d %H:%M')\n\n \n \n#####################################################################################\n#### Clean up the location data:\n\n#### Create a class for location data: \nclass Location:\n def __init__(self, location, loc_link, lat, lon, city, country, street, website, phone, desc_posts):\n self.location = location\n self.loc_link = loc_link\n self.lat = lat\n self.lon = lon\n self.street = street\n self.city = city\n self.country = country\n self.website = website\n self.phone = phone\n self.desc_posts = desc_posts\n \n def 
to_dict(self):\n return {'Location': self.location, \n 'Location Link': self.loc_link, \n 'Lat': self.lat,\n 'Lon': self.lon,\n 'City': self.city,\n 'Country': self.country,\n 'Street': self.street,\n 'Website': self.website,\n 'Phone': self.phone,\n 'Posts Descriptions': self.desc_posts\n } \n \n#### Remove posts without location:\ndf_loc = df[df.Location!='No location'].reset_index(drop=True)\nprint(df_loc.head(5))\n\n\n#### Remove posts without specific locations:\nloc_distinct = df_loc[['Location','Location Link']].drop_duplicates().reset_index()\n\n\n#### Download info about each unique location and identify the ones that are just cities:\nfrom user_agent import generate_user_agent\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport json\n\nloc_data = []\nfor position,link in enumerate(loc_distinct['Location Link']): \n \n # Generate random user agents to not look suspicious:\n headers = {'User-Agent': generate_user_agent(device_type=\"desktop\", os=('mac', 'linux'))}\n \n url = link \n html = requests.get(url, timeout=5, headers=headers)\n soup = BeautifulSoup(html.text, 'lxml')\n script_tag = soup.find('script', text=re.compile('window\\._sharedData'))\n shared_data = script_tag.string.partition('=')[-1].strip(' ;')\n try:\n j = json.loads(shared_data)\n city = json.loads(j['entry_data']['LocationsPage'][0]['graphql']['location']['address_json'])['city_name']\n if city == loc_distinct['Location'][position]:\n lat = None\n lon = None\n street = 'Location is too general'\n country = json.loads(j['entry_data']['LocationsPage'][0]['graphql']['location']['address_json'])['country_code']\n website = j['entry_data']['LocationsPage'][0]['graphql']['location']['website']\n phone = j['entry_data']['LocationsPage'][0]['graphql']['location']['phone']\n posts = j['entry_data']['LocationsPage'][0]['graphql']['location']['edge_location_to_media']['edges']\n n = 0\n desc_posts = []\n while n < len(posts):\n try:\n desc_posts.append(j['entry_data']['LocationsPage'][0]['graphql']['location']['edge_location_to_media']['edges'][n]['node']['edge_media_to_caption']['edges'][0]['node']['text']) \n except:\n pass\n n+=1\n else:\n lat = j['entry_data']['LocationsPage'][0]['graphql']['location']['lat']\n lon = j['entry_data']['LocationsPage'][0]['graphql']['location']['lng']\n street = json.loads(j['entry_data']['LocationsPage'][0]['graphql']['location']['address_json'])['street_address']\n country = json.loads(j['entry_data']['LocationsPage'][0]['graphql']['location']['address_json'])['country_code']\n website = j['entry_data']['LocationsPage'][0]['graphql']['location']['website']\n phone = j['entry_data']['LocationsPage'][0]['graphql']['location']['phone']\n posts = j['entry_data']['LocationsPage'][0]['graphql']['location']['edge_location_to_media']['edges']\n n = 0\n desc_posts = []\n while n < len(posts):\n try:\n desc_posts.append(j['entry_data']['LocationsPage'][0]['graphql']['location']['edge_location_to_media']['edges'][n]['node']['edge_media_to_caption']['edges'][0]['node']['text']) \n except:\n pass\n n+=1\n loc_data.append(Location(loc_distinct['Location'][position], link, lat, lon, city, country, street, website, phone, desc_posts))\n \n except:\n print('Error with ',link)\n\n\n#### Create a dataframe:\ndf_loc_detail = pd.DataFrame([s.to_dict() for s in loc_data])\ndf_loc_detail = df_loc_detail[ df_loc_detail.Street != 'Location is too general' ]\ndf_loc_detail['Date of scrape'] = datetime.datetime.today().strftime('%Y-%m-%d %H:%M')\n\n\n#### Append to existing data and pickle 
again:\nimport pickle\nimport os\nexisting = [x for x in os.listdir('Instagram\\\\Data') if x.endswith(\".pkl\")] # dump a fresh pickle on the first run, append afterwards\nif 'insta_locations.pkl' in existing:\n df_loc_detail_old = pickle.load(open('Instagram\\\\Data\\\\insta_locations.pkl','rb')).drop(['Date of scrape'], axis=1) \n df_loc_detail_new = df_loc_detail_old.append(df_loc_detail)\n df_loc_detail_new = df_loc_detail_new.drop_duplicates(subset=df_loc_detail_new.columns.difference(['Date of scrape'])).reset_index() \n pickle.dump(df_loc_detail_new, open('Instagram\\\\Data\\\\insta_locations.pkl', 'wb'))\nelse:\n pickle.dump(df_loc_detail, open('Instagram\\\\Data\\\\insta_locations.pkl', 'wb'))\n\n\n#### Flag the locations that are not related to food:\n\n# Separate file for testing/building model \n \nimport re\nimport os\nimport pickle\n\n", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 11908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 83, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 88, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 93, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 142, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 142, "usage_type": "name"}, {"api_name": "Instagram.Admin.FileLocator.locate", "line_number": 142, "usage_type": "call"}, {"api_name": "Instagram.InstaLogin.credentials", "line_number": 146, "usage_type": "call"}, {"api_name": "Instagram.InstaLogin.login", "line_number": 147, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 148, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 153, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 163, "usage_type": "attribute"}, {"api_name": "user_agent.generate_user_agent", "line_number": 217, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 220, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 221, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 222, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 225, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 226, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 231, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 246, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 247, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 266, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 268, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 274, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 276, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 279, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 281, "usage_type": "call"}]} +{"seq_id": "320468805", "text": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, 
nested_scopes, print_function,\n unicode_literals, with_statement)\n\nfrom pants.base.exceptions import TargetDefinitionException\nfrom pants.base.payload import Payload\nfrom pants.base.payload_field import PrimitiveField\nfrom pants.build_graph.address import Address\nfrom pants.build_graph.target import Target\n\n\nclass RemoteSources(Target):\n \"\"\"A target that generates a synthetic target using deferred sources.\n\n This provides a mechanism for using the contents of a jar as sources for another target. The jar\n where the sources are specified from is given via the `sources_target` parameter, and the type for\n the target that should be created with those sources is given via the `dest` parameter. Any\n additional arguments for the new target go into the `args` parameter.\n \"\"\"\n\n def __init__(self, address=None, payload=None, sources_target=None, dest=None, args=None,\n **kwargs):\n \"\"\"\n :API: public\n\n :param string sources_target: The address of the (typically unpacked_jars) target to get sources\n from.\n :param dest: The target type of the synthetic target to generate (eg, java_library).\n :param dict args: Any additional arguments necessary to construct the synthetic destination\n target (sources and dependencies are supplied automatically).\n \"\"\"\n self.address = address\n if not sources_target:\n raise TargetDefinitionException(self, 'You must specify the address of a target to acquire '\n 'sources from via the \"sources_target\" parameter.')\n if not dest or not hasattr(dest, 'target_types'):\n raise TargetDefinitionException(self, 'You must specify a target type for the \"dest\" '\n 'parameter.')\n if len(dest.target_types) != 1:\n raise TargetDefinitionException(\n self,\n 'Target alias {} has multiple possible target types {}.'.format(dest, dest.target_types),\n )\n dest = dest.target_types[0]\n self._dest = dest\n self._dest_args = args\n payload = payload or Payload()\n payload.add_fields({\n 'sources_target_spec': PrimitiveField(self._sources_target_to_spec(address, sources_target)),\n 'dest': PrimitiveField(dest.__name__),\n })\n super(RemoteSources, self).__init__(address=address, payload=payload, **kwargs)\n\n @staticmethod\n def _sources_target_to_spec(address, sources_target):\n return Address.parse(sources_target, relative_to=address.spec_path).spec\n\n @classmethod\n def compute_dependency_specs(cls, kwargs=None, payload=None):\n for spec in super(RemoteSources, cls).compute_dependency_specs(kwargs, payload):\n yield spec\n\n if kwargs:\n address = kwargs.get('address')\n sources_target = kwargs.get('sources_target')\n if address and sources_target:\n yield cls._sources_target_to_spec(address, sources_target)\n elif payload:\n payload_dict = payload.as_dict()\n yield payload_dict['sources_target_spec']\n\n @property\n def sources_target(self):\n return self._build_graph.get_target_from_spec(self.payload.sources_target_spec)\n\n @property\n def destination_target_type(self):\n return self._dest\n\n @property\n def destination_target_args(self):\n return self._dest_args or {}\n", "sub_path": "src/python/pants/build_graph/remote_sources.py", "file_name": "remote_sources.py", "file_ext": "py", "file_size_in_byte": 3489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pants.build_graph.target.Target", "line_number": 15, "usage_type": "name"}, {"api_name": "pants.base.exceptions.TargetDefinitionException", "line_number": 37, "usage_type": "call"}, {"api_name": 
"pants.base.exceptions.TargetDefinitionException", "line_number": 40, "usage_type": "call"}, {"api_name": "pants.base.exceptions.TargetDefinitionException", "line_number": 43, "usage_type": "call"}, {"api_name": "pants.base.payload.Payload", "line_number": 50, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 52, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 53, "usage_type": "call"}, {"api_name": "pants.build_graph.address.Address.parse", "line_number": 59, "usage_type": "call"}, {"api_name": "pants.build_graph.address.Address", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "310401722", "text": "# QueueDialog.py\nimport wx\nimport logging\n\nfrom handlers.Data import DataHandler\nfrom wxClasses.queue.PendingPanel import PendingPanel\nfrom wxClasses.queue.HistoryPanel import HistoryPanel\n\n\nclass QueueDialog(wx.Dialog):\n\n def __init__(self, parent, data):\n\n wx.Dialog.__init__(self, parent, title='ADI Queue',\n pos=wx.DefaultPosition,\n size=(600, 400),\n style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX)\n\n self.parent: wx.Frame = parent\n self.data: DataHandler = data\n\n self._create_widgets()\n\n self.Center()\n self.Show()\n\n def _create_widgets(self):\n self.notebook = wx.Notebook(self)\n self.pending_panel = PendingPanel(self.notebook, self.data)\n self.history_panel = HistoryPanel(self.notebook, self.data)\n\n self.notebook.AddPage(self.pending_panel, 'Pending')\n self.notebook.AddPage(self.history_panel, 'History')\n", "sub_path": "src/wxClasses/queue/QueueDialog.py", "file_name": "QueueDialog.py", "file_ext": "py", "file_size_in_byte": 979, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "wx.Dialog", "line_number": 10, "usage_type": "attribute"}, {"api_name": "wx.Dialog.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "wx.Dialog", "line_number": 14, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.SYSTEM_MENU", "line_number": 17, "usage_type": "attribute"}, {"api_name": "wx.CAPTION", "line_number": 17, "usage_type": "attribute"}, {"api_name": "wx.CLOSE_BOX", "line_number": 17, "usage_type": "attribute"}, {"api_name": "wx.Frame", "line_number": 19, "usage_type": "attribute"}, {"api_name": "handlers.Data.DataHandler", "line_number": 20, "usage_type": "name"}, {"api_name": "wx.Notebook", "line_number": 28, "usage_type": "call"}, {"api_name": "wxClasses.queue.PendingPanel.PendingPanel", "line_number": 29, "usage_type": "call"}, {"api_name": "wxClasses.queue.HistoryPanel.HistoryPanel", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "260848289", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author: Fang Zhang \n# @Date: 2016-11-24 15:23:24\n\nimport sys,os,json,gzip,math,time,datetime,random,copy\nimport functools,itertools,requests,pickle,configparser\nimport argparse,logging,uuid,shutil,collections\nfrom urllib.parse import urlparse, parse_qs\nfrom collections import defaultdict as dd\nimport multiprocessing as mp\nimport numpy as np\nimport regex as re\nre.DEFAULT_VERSION = re.VERSION1\n\nimport gensim\nfrom gensim import corpora, models\n\nfrom history_analysis.utils.multiprocess import multiprocess_dir, multiprocess_file\nfrom history_analysis.utils.logger import simple_logger\nlogger = simple_logger(__name__, 'history_analysis/log/')\n\nfrom nltk.stem import 
WordNetLemmatizer\nlemmatize = WordNetLemmatizer().lemmatize\nfrom nltk.stem.porter import PorterStemmer\nstem = PorterStemmer().stem\nfrom stop_words import get_stop_words\nen_stop = get_stop_words('en')\n\nimport spacy\nnlp = spacy.load('en')\n\n# global variables are defined here\nconfig = configparser.ConfigParser()\nconfig_file = 'history_analysis/config/word2vec.cfg'\nconfig.read(config_file)\nsection = config['global']\n\nSIZE = eval(section['SIZE'])\nWINDOW = eval(section['WINDOW'])\nMIN_COUNT = eval(section['MIN_COUNT'])\nWORKERS = eval(section['WORKERS'])\nSAMPLE = eval(section['SAMPLE'])\n\nMIN_WORD_LENGTH = eval(section['MIN_WORD_LENGTH'])\nMIN_SENTENCE_LENGTH = eval(section['MIN_SENTENCE_LENGTH'])\nFORBIDDEN_POS = eval(section['FORBIDDEN_POS'])\nFORBIDDEN_TAGS = eval(section['FORBIDDEN_TAGS'])\nPOSSIBLE_FORBIDDEN_TAGS = eval(section['POSSIBLE_FORBIDDEN_TAGS'])\nMUST_TAGS = eval(section['MUST_TAGS'])\n\nPHRASE_LENGTHS = eval(section['PHRASE_LENGTHS'])\nMIN_PHRASE_LENGTHS, MAX_PHRASE_LENGTHS = min(PHRASE_LENGTHS), max(PHRASE_LENGTHS)\nUSE_FILTERED_WIKI_TITLES = eval(section['USE_FILTERED_WIKI_TITLES'])\nUSE_POS_FILTER = eval(section['USE_POS_FILTER'])\nif USE_FILTERED_WIKI_TITLES:\n _FILTERED = '_onFilteredWikiTitles'\nelif USE_POS_FILTER:\n _FILTERED = '_filtered'\nelse:\n _FILTERED = ''\n\nsection = config['LDA']\nMIN_DOC_WORDS = eval(section['MIN_DOC_WORDS'])\nNUM_TOPICS = eval(section['NUM_TOPICS'])\nPASSES = eval(section['PASSES'])\n\nsection = config['word_graph']\nNGRAM_WINDOW_SIZE = eval(section['NGRAM_WINDOW_SIZE'])\nDAMPING_FACTOR = eval(section['DAMPING_FACTOR'])\nEPSILON = eval(section['EPSILON'])\nTOP_K = eval(section['TOP_K'])\n\nlogger.info('configs have been read from {}'.format(config_file))\n\ndef text_rank(word_list, window_size, damping_factor, default_value_dict, epsilon):\n word_graph = {}\n word_graph_weights_ratio = {}\n for i in range(len(word_list) - window_size + 1):\n word_i = word_list[i]\n if word_i not in word_graph:\n word_graph[word_i] = {}\n for j in range(window_size - 1):\n word_j = word_list[i + 1 + j]\n if word_j not in word_graph:\n word_graph[word_j] = {}\n if word_j not in word_graph[word_i]:\n if word_i not in word_graph[word_j]:\n word_graph[word_j][word_i] = 1\n word_graph[word_i][word_j] = 1\n else:\n word_graph[word_j][word_i] += 1\n word_graph[word_i][word_j] = word_graph[word_j][word_i]\n else:\n word_graph[word_i][word_j] += 1\n word_graph[word_j][word_i] = word_graph[word_i][word_j]\n num_words = len(word_graph)\n if not default_value_dict:\n default_value_dict = {}\n for w in word_graph:\n default_value_dict[w] = 1 / num_words\n for w in word_graph:\n word_graph_weights_ratio[w] = {}\n tmp = 0\n for neighbor in word_graph[w]:\n tmp += word_graph[w][neighbor]\n for neighbor in word_graph[w]:\n word_graph_weights_ratio[w][neighbor] = word_graph[w][neighbor] / tmp\n for w in word_graph:\n if w not in default_value_dict:\n default_value_dict[w] = 0\n PR_values = {}\n for w in word_graph:\n PR_values[w] = 1 / num_words # initial value\n max_diff = 100\n while max_diff >= epsilon:\n max_diff = 0\n for w in PR_values:\n tmp = 0\n for neighbor in word_graph[w]:\n tmp += PR_values[neighbor] * word_graph_weights_ratio[neighbor][w]\n tmp = tmp * damping_factor + default_value_dict[w] * (1 - damping_factor)\n if abs(tmp - PR_values[w]) > max_diff:\n max_diff = abs(tmp - PR_values[w])\n PR_values[w] = tmp\n return PR_values\n\ndef parse_doc_from_Hulth(input_file):\n with open(input_file) as f:\n lines = f.readlines()\n current_doc = ''\n for 
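To make the weighted-PageRank iteration above concrete, here is a toy invocation of text_rank as defined in this record (the token list is invented; the best-connected word should score highest):

tokens = ['graph', 'rank', 'graph', 'word', 'rank', 'graph']
pr = text_rank(tokens, window_size=2, damping_factor=0.85,
               default_value_dict=None, epsilon=1e-6)
top_word = max(pr, key=pr.get)   # 'graph' co-occurs with every other word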
line in lines:\n current_doc = '{} {}'.format(current_doc, line.strip())\n tokens = nlp(current_doc, tag=True, parse=False, entity=False)\n stem_lower_tokens = []\n tokens_pos = []\n for t in tokens:\n # if not t.is_digit and not t.is_punct and not t.is_space and not t.tag_ in FORBIDDEN_TAGS:\n tokens_pos.append(t.pos_)\n if not t.is_digit and not t.is_punct and not t.is_space and not t.pos_ in FORBIDDEN_POS:\n if t.lower_ not in en_stop:\n stem_lower_tokens.append(stem(t.lower_))\n # tokens_pos.append(t.pos_)\n keyphrases_candidates = {}\n pos2str = ''\n for t in tokens_pos:\n if t == 'ADJ':\n pos2str += '1'\n elif t == 'NOUN':\n pos2str += '2'\n else:\n pos2str += '0'\n p = re.compile(r'1*2+')\n for m in p.finditer(pos2str):\n tmp_keyphrase = ''\n for i in range(m.start(), m.end()):\n if not tmp_keyphrase:\n tmp_keyphrase = stem(tokens[i].lower_)\n else:\n tmp_keyphrase = '{}_{}'.format(tmp_keyphrase, stem(tokens[i].lower_))\n keyphrases_candidates[tmp_keyphrase] = 1\n\n return stem_lower_tokens, keyphrases_candidates\n\ndef rank_keyphrases(keyphrases_candidates, PR_values):\n scores = []\n for k in keyphrases_candidates:\n tmp = k.split('_')\n tmp_sum = 0\n for t in tmp:\n if t in PR_values:\n tmp_sum += PR_values[t]\n scores.append((k, tmp_sum))\n scores = sorted(scores, key=lambda x: x[1], reverse=True)\n return scores\n\ndef extract_keyphrases_from_one_doc(input_file, top_k):\n stem_lower_tokens, keyphrases_candidates = parse_doc_from_Hulth(input_file)\n PR_values = text_rank(stem_lower_tokens, NGRAM_WINDOW_SIZE, DAMPING_FACTOR, None, EPSILON)\n scores = rank_keyphrases(keyphrases_candidates, PR_values)[:top_k]\n tmp = []\n for i in scores:\n tmp.append(i[0])\n return tmp\n\ndef extract_keyphrases_from_human_annotations(input_file):\n keyphrases = ''\n with open(input_file) as f:\n lines = f.readlines()\n for line in lines:\n tmp = line.strip()\n keyphrases = '{} {}'.format(keyphrases, tmp)\n keyphrases = keyphrases.split(';')\n stemmed = []\n for k in keyphrases:\n tmp = k.split()\n for i in range(len(tmp)):\n tmp[i] = stem(tmp[i])\n stemmed.append('_'.join(tmp))\n return stemmed\n\ndef test_pagerank(test_dir, top_k):\n abstr = []\n # contr = []\n # uncontr = []\n for i in os.listdir(test_dir):\n if i.endswith('.abstr'):\n full_i = os.path.join(test_dir, i)\n abstr.append(full_i)\n # elif i.endswith('.contr'):\n # contr.append(full_i)\n # elif i.endswith('.uncontr'):\n # uncontr.append(full_i)\n num_correct, num_extract, num_standard = 0, 0, 0\n for doc in abstr:\n answer_file = doc[:-5] + 'uncontr'\n machine_outfile = doc[:-5] + 'machine'\n machine_keyphrases = extract_keyphrases_from_one_doc(doc, top_k)\n human_keyphrases = extract_keyphrases_from_human_annotations(answer_file)\n num_extract += len(machine_keyphrases)\n num_standard += len(human_keyphrases)\n for i in machine_keyphrases:\n if i in human_keyphrases:\n num_correct += 1\n with open(machine_outfile, 'w') as f:\n for i in machine_keyphrases:\n f.write('{}\\n'.format(i))\n p = num_correct / num_extract\n r = num_correct / num_standard\n if not p or not r:\n f = 0\n else:\n f = 2 * p * r / (p + r)\n logger.info('Results: Precision: {}, Recall: {}, F-score: {}'.format(p, r, f))\n\ndef precision_recall_f_score(predict_values, real_values):\n postive_true = 0\n for i in predict_values:\n if i in real_values:\n postive_true += 1\n p = postive_true / len(predict_values)\n r = postive_true / len(real_values)\n if p > 0 and r > 0:\n f = 2 / (1 / p + 1 / r)\n else:\n f = 0\n return p, r, f\n\ndef 
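The candidate extraction above encodes part-of-speech tags as digits so that the ADJ*-NOUN+ keyphrase pattern becomes a plain regex; the same trick in isolation:

import regex as re

tags = ['ADJ', 'NOUN', 'VERB', 'NOUN', 'NOUN']
encoded = ''.join('1' if t == 'ADJ' else '2' if t == 'NOUN' else '0' for t in tags)
# encoded == '12022'; each match of r'1*2+' is one candidate phrase span
spans = [m.span() for m in re.finditer(r'1*2+', encoded)]   # [(0, 2), (3, 5)]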
extract_keyphrases_from_one_doc_composed_of_only_possible_keyphrases(input_file, output_file, top_k=5):\n with open(input_file) as f:\n lines = f.readlines()\n current_doc = []\n with open(output_file, 'w') as f:\n for line in lines:\n line = line.strip()\n if not line:\n PR_values = text_rank(current_doc, NGRAM_WINDOW_SIZE, DAMPING_FACTOR, None, EPSILON)\n if top_k == 0:\n tmp = sorted(PR_values.items(), key=lambda x: x[1], reverse=True)\n else:\n tmp = sorted(PR_values.items(), key=lambda x: x[1], reverse=True)[:top_k]\n for k, v in tmp:\n f.write('{}\\t{}\\n'.format(k, v))\n f.write('\\n')\n current_doc = []\n else:\n current_doc.extend(line.split())\n\ndef text_rank_for_task1(infile, outfile, top_k=5):\n with open(infile) as fin, open(outfile, 'w') as fout:\n for line in fin:\n current_doc = line.split()\n PR_values = text_rank(current_doc, NGRAM_WINDOW_SIZE, DAMPING_FACTOR, None, EPSILON)\n if top_k == 0:\n tmp = sorted(PR_values.items(), key=lambda x: x[1], reverse=True)\n else:\n tmp = sorted(PR_values.items(), key=lambda x: x[1], reverse=True)[:top_k]\n tmp2 = []\n for k, v in tmp:\n tmp2.append(k)\n tmp2.append(str(v))\n tmp2 = ' '.join(tmp2)\n fout.write('{}\\n'.format(tmp2))\n\ndef normalize_topic(topic):\n topic = topic.split('_')\n for i in range(len(topic)):\n topic[i] = lemmatize(topic[i]).lower()\n topic = '_'.join(topic)\n return topic\n\ndef sum_weight_for_each_candidate_using_textrank(cate_and_arti_file, weights_file, outfile, top_k):\n with open(cate_and_arti_file, 'rb') as f:\n cate_and_arti, expand_categories, all_articles = pickle.load(f)\n candidate_topics = {}\n for i in expand_categories:\n tmp = i.split(':', maxsplit=1)[-1]\n candidate_topics[normalize_topic(tmp)] = 0\n with open(weights_file) as f:\n for line in f:\n tmp = line.split()\n tmp_iter = iter(tmp)\n for k, v in zip(tmp_iter, tmp_iter):\n if k in candidate_topics:\n candidate_topics[k] += float(v)\n tmp = sorted(candidate_topics.items(), key=lambda x: x[1], reverse=True)[:top_k]\n with open(outfile, 'w') as f:\n for k, v in tmp:\n f.write('{}\\t{}\\n'.format(k, v))\n\ndef main():\n if 0:\n test_dir = '/home/thuzhf/work/KEG/history_analysis/data/keyphrases/Hulth2003/All'\n test_pagerank(test_dir, TOP_K)\n\n if 0:\n AI_titles_and_abstracts_sentences_dir = '/home/thuzhf/work/paper_crawler/data/AI_processed_sentences'\n AI_titles_and_abstracts_keyphrase_rank_dir = '/home/thuzhf/work/paper_crawler/data/AI_processed_keyphrase_rank'\n multiprocess_dir(AI_titles_and_abstracts_sentences_dir, AI_titles_and_abstracts_keyphrase_rank_dir, extract_keyphrases_from_one_doc_composed_of_only_possible_keyphrases)\n\n if 0:\n in_dir = '/home/thuzhf/work/paper_crawler/data/selected_papers_topics_sentences'\n out_dir = '/home/thuzhf/work/paper_crawler/data/selected_papers_topics_sentences_text_rank'\n multiprocess_dir(in_dir, out_dir, extract_keyphrases_from_one_doc_composed_of_only_possible_keyphrases, 0)\n\n if 0:\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--infile')\n parser.add_argument('--outfile')\n parser.add_argument('--top_k', type=int, default=0)\n args = parser.parse_args()\n text_rank_for_task1(args.infile, args.outfile, args.top_k)\n\n if 1:\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--cate_and_arti_file')\n parser.add_argument('--weights_file')\n parser.add_argument('--outfile')\n parser.add_argument('--top_k', type=int, default=15)\n args = parser.parse_args()\n 
sum_weight_for_each_candidate_using_textrank(args.cate_and_arti_file, args.weights_file, args.outfile, args.top_k)\n\nif __name__ == '__main__':\n start_t = time.time()\n main()\n end_t = time.time()\n t = end_t - start_t\n print('Time elapsed: {:.4f} minutes'.format(t / 60.))", "sub_path": "src/keyphrases_extraction_new.py", "file_name": "keyphrases_extraction_new.py", "file_ext": "py", "file_size_in_byte": 13164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "regex.DEFAULT_VERSION", "line_number": 14, "usage_type": "attribute"}, {"api_name": "regex.VERSION1", "line_number": 14, "usage_type": "attribute"}, {"api_name": "history_analysis.utils.logger.simple_logger", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 24, "usage_type": "call"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 26, "usage_type": "call"}, {"api_name": "stop_words.get_stop_words", "line_number": 28, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 31, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 34, "usage_type": "call"}, {"api_name": "regex.compile", "line_number": 153, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 294, "usage_type": "call"}, {"api_name": "history_analysis.utils.multiprocess.multiprocess_dir", "line_number": 319, "usage_type": "call"}, {"api_name": "history_analysis.utils.multiprocess.multiprocess_dir", "line_number": 324, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 327, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 327, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 335, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 335, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 344, "usage_type": "call"}, {"api_name": "time.time", "line_number": 346, "usage_type": "call"}]} +{"seq_id": "335572167", "text": "import base64\nimport Algorithmia\nimport os\n\nfrom flask import Flask, request\napp = Flask(__name__, static_url_path='')\n\n\napi_key = os.environ.get('API_KEY')\nif api_key is None:\n raise Exception(\"Environment variable API_KEY is not set.\")\n\nclient = Algorithmia.client(api_key) \nalgo = client.algo('deeplearning/ColorfulImageColorization/1.1.5')\n\ndef process_image(image_base64):\n req = {\n \"image\": image_base64\n }\n return algo.pipe(req).result\n\ndef get_image_base64(image_file):\n return \"data:image/jpeg;base64,\" + base64.b64encode(image_file.read())\n\n@app.route('/', methods=['GET'])\ndef root():\n return app.send_static_file('index.html')\n\n@app.route('/image', methods=['POST'])\ndef image():\n result = process_image(request.data);\n return get_image_base64(client.file(result['output']).getFile())\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0') \n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 9, 
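A hedged client-side sketch for the /image endpoint in the Flask record above; host, port, and file name are assumptions, and the handler is read as expecting a base64 data URI in the raw request body:

import base64
import requests

with open('photo.jpg', 'rb') as f:   # hypothetical local image
    payload = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode()

resp = requests.post('http://localhost:5000/image', data=payload)
print(resp.text[:60])                # truncated base64 data URI of the result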
"usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "Algorithmia.client", "line_number": 13, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "415119703", "text": "import numpy as np\nimport matplotlib.pyplot as plt ; plt.close('all')\n\nimport ctdtools\n\ndef plot_timeseries(fnames):\n \n fnames = ctdtools.sort_fnames(fnames)\n\n fig = plt.figure()\n ax = fig.gca()\n ax2 = ax.twinx()\n\n for fname in fnames:\n\n starttime = ctdtools.get_date(fname)\n casttimestr = starttime.strftime('UTC %Y-%m-%d %H:%M')\n\n data = ctdtools.get_data(fname) \n data = ctdtools.remove_invalid(data)\n data = ctdtools.remove_cast(data,'up')\n\n stationary = np.abs(data['pressure']) < data['pressure'].mean()+1e-2\n data = data[stationary]\n \n dt = ctdtools.get_dt(fname)\n time = starttime + (data['count']-1)*dt\n\n ls = ctdtools.lscycle.next()\n ax.plot(time,data['temp'],'b',ls=ls,label=casttimestr)\n ax2.plot(time,data['sal'],'r',ls=ls)\n\n fig.autofmt_xdate()\n\n ax.set_ylabel('Temperature (degC)',color='b')\n ax2.set_ylabel('Salinity (PSU)',color='r')\n ax.set_xlabel('Count')\n ax2.grid(False)\n\n ax.legend(loc=0)\n\n plt.show()\n\n return fig\n\n\n\nif __name__ == '__main__':\n\n import argparse\n parser = argparse.ArgumentParser(description=\"Plot Seabird CTD\")\n parser.add_argument('-f','--fnames',type=lambda s: s.split(','),help='file name')\n\n args = parser.parse_args()\n \n data = plot_timeseries(**vars(args))\n\n", "sub_path": "timeseries.py", "file_name": "timeseries.py", "file_ext": "py", "file_size_in_byte": 1352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 2, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 2, "usage_type": "name"}, {"api_name": "ctdtools.sort_fnames", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "ctdtools.get_date", "line_number": 16, "usage_type": "call"}, {"api_name": "ctdtools.get_data", "line_number": 19, "usage_type": "call"}, {"api_name": "ctdtools.remove_invalid", "line_number": 20, "usage_type": "call"}, {"api_name": "ctdtools.remove_cast", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 23, "usage_type": "call"}, {"api_name": "ctdtools.get_dt", "line_number": 26, "usage_type": "call"}, {"api_name": "ctdtools.lscycle.next", "line_number": 29, "usage_type": "call"}, {"api_name": "ctdtools.lscycle", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "167410528", "text": "\"\"\"triangular grid\n\nsee trimesh, networkx\n- load mesh\n- get triangle graph with neighbors\n- create the mesh as a graph\n- compute cells, render cell state onto mesh\n\n- communication\n-- sensorimotor\n-- OSC\n-- zmq\n\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport pickle, sys, time, threading, 
argparse, queue, signal\nimport itertools\nfrom functools import partial\n\nimport pygame\nfrom pygame.locals import *\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nimport numpy as np\nimport numpy.linalg as la\nfrom six.moves import range\nfrom pprint import pformat\n\nfrom scipy.spatial.transform import Rotation as R\nimport joblib\n# import matplotlib.pyplot as plt\n\n# use meshpy\nimport meshpy.triangle as triangle\nfrom meshpy.triangle import MeshInfo, build\n# use trimesh\nimport trimesh\n\nfrom oscpy.client import OSCClient\nfrom smp_meshgrid.oscsrv import OSCsrv\nimport liblo\n\n# # meshpy examples toolkit\n# import jw_meshtools as mt\n\ndef dl2ld(DL):\n v = [dict(zip(DL,t)) for t in zip(*DL.values())]\n return v\n\ndef ld2dl(LD):\n v = {k: [dic[k] for dic in LD] for k in LD[0]}\n return v\n \ndef make_vertex_facets_hexagon(params):\n \"\"\"make_vertex_facets_hexagon\n\n create a list of 2D vertices and line facets that make up a hexagon\n \"\"\"\n # vertices, points\n dim = params['dim']\n if dim == 2:\n points = [(0, 0)]\n for ang in [0, 60, 120, 180, 240, 300]:\n points.append(\n (np.cos(np.deg2rad(ang)) * params['c'],\n np.sin(np.deg2rad(ang)) * params['c']\n )\n )\n else:\n points = [(0, 0, 0)]\n # for ang in [0, 60, 120, 180, 240, 300]:\n for ang in [120, 180, 240, 300, 0, 60]:\n points.append(\n (np.cos(np.deg2rad(-ang)) * params['c'],\n np.sin(np.deg2rad(-ang)) * params['c'],\n 0.0)\n )\n \n # print('points = {0}'.format(pformat(points)))\n # facets := set of point pairs defining what again?\n facets = [\n [0,1], [0,2], [0,3],\n [0,4], [0,5], [0,6],\n [1,2], [2,3], [3,4],\n [4,5], [5,6], [6,1],\n # [2,3],\n # [3,4],\n # [4,5],\n # [5,6],\n # [6,4],\n ]\n # set of edges\n faces = [\n [0, 1, 2],\n [0, 2, 3],\n [0, 3, 4],\n [0, 4, 5],\n [0, 5, 6],\n [0, 1, 6],\n ]\n \n return (points, facets, faces)\n\ndef make_vertex_facets_line(params):\n \"\"\"make_vertex_facets_line\n\n create a list of 2d points and triangle facets that make up a line built\n \"\"\"\n dim = params['dim']\n if dim == 2:\n points = [(0, 0)]\n for trans in [0, 1, 2, 3, 4]:\n angs = [0, 60]\n # if trans > 0:\n # angs += [120]\n \n for ang in angs:\n points.append(\n (np.cos(np.deg2rad(ang)) * params['c'] + trans,\n np.sin(np.deg2rad(ang)) * params['c']\n )\n )\n else:\n points = [(0, 0, 0)]\n for trans in [0, 1, 2, 3, 4]:\n angs = [0, 60]\n # if trans > 0:\n # angs += [120]\n \n for ang in angs:\n points.append(\n (np.cos(np.deg2rad(ang)) * params['c'] + trans,\n np.sin(np.deg2rad(ang)) * params['c'],\n 0.0\n )\n )\n\n facets = [\n [0, 1], [0, 2], [1,2],\n [1, 2], [1, 4], [2,4],\n [1, 3], [1, 4], [3,4],\n [3, 4], [3, 6], [4,6],\n [3, 5], [3, 6], [5,6],\n [5, 6], [5, 8], [6,8],\n [5, 7], [5, 8], [7,8],\n ]\n\n faces = [\n [0, 1, 2],\n [1, 2, 4],\n [1, 3, 4],\n [3, 4, 6],\n [3, 5, 6],\n [5, 6, 8],\n [5, 7, 8]\n ]\n \n return (points, facets, faces)\n\n# def make_vertex_facets_rect(params, **kwargs):\n# \"\"\"make_vertex_facets_rect\n\n# create a list of 2d point and triangle facets that fill up an outer rectangle\n# \"\"\"\n# length = 0.15\n# # Simple mesh rectangle\n# p,v=mt.RectangleSegments([-2, -1.5],[2, 1.5], edge_length=length)\n \n# # p1,v1=mt.CircleSegments([1.,0],1,a_min=-np.pi/2,a_max=np.pi/2,num_points=20)\n# # p2,v2=mt.CircleSegments([1,0],3,a_min=np.pi/2.,a_max=3.*np.pi/2,num_points=20)\n# # p,v=mt.AddSegments(p1,p2,closed=True)\n# # p1,v1=mt.RectangleSegments([-2,-2],[2.5,3],edge_length=length)\n# # p2,v2=mt.CircleSegments([1,1],1,edge_length=length/5)\n# # 
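The dl2ld and ld2dl helpers at the top of the trigrid record convert between a dict-of-lists and a list-of-dicts; a quick round-trip check using those definitions:

DL = {'color': ['r', 'g'], 'state': [0.0, 1.0]}
LD = dl2ld(DL)           # [{'color': 'r', 'state': 0.0}, {'color': 'g', 'state': 1.0}]
assert ld2dl(LD) == DL   # lossless for rectangular data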
p,v=mt.AddCurves(p1,v1,p2,v2)\n# # mt.DoTriMesh(p,v,edge_length=length)\n \n\n# # p1,v1=mt.LineSegments([-2,-3],[2,-3],num_points=12)\n# # p2,v2=mt.LineSegments([2,3],[-2,3],num_points=12)\n# # p,v=mt.AddSegments(p1,p2,closed=True)\n# # p3,v3=mt.CircleSegments([-0.5,0.5],0.5,edge_length=length)\n# # p,v=mt.AddCurves(p,v,p3,v3)\n# # p4,v4=mt.CircleSegments([1,-1],0.5,edge_length=length)\n# # p,v=mt.AddCurves(p,v,p4,v4)\n# # mt.DoTriMesh(p,v,edge_length=length,holes=[(-0.4,0.4),(0.95,-0.8)])\n# return (p, v)\n\ndef make_vertex_facets_rect_trimesh(params):\n \"\"\"make_vertex_facets_rect\n\n create a list of 2d point and triangle facets that fill up an outer rectangle\n \"\"\"\n length = 0.15\n mesh = trimesh.primitives.Box(\n center=[0, 0, 0],\n extents=[3, 3, 3],\n transform=trimesh.transformations.random_rotation_matrix(),\n sections=100,\n )\n # perim = np.random.uniform(-1, 1, (7, 3))\n # mesh = trimesh.creation.Polygon(perim)\n # mesh = trimesh.primitives.Cylinder()\n # mesh = trimesh.primitives.Capsule()\n # mesh = trimesh.primitives.Sphere()\n return mesh.vertices, None, mesh.faces\n\ndef make_vertex_facets_load(params):\n p = None\n v = None\n return (p, v)\n \nclass smnode(threading.Thread):\n def __init__(self, *args, **kwargs):\n super(smnode, self).__init__()\n\n self.isrunning = True\n \n # assert mesh is not None, \"Need to supply mesh argument\" \n # assert tris is not None, \"Need to supply tris argument\" \n # self.mesh = mesh\n # self.tris = tris\n self.cnt = 0\n\n self.smid = 0\n self.density = np.random.uniform(0, 0.05)\n self.freq = 1/self.density\n self.color = np.random.uniform(0, 1, (3, ))\n self.neighbors = []\n \n self.inputs = {}\n self.state = np.zeros((1,1))\n self.outputs = {\n 'state_o': np.zeros_like(self.state)\n }\n self.cap_budget = np.zeros_like(self.state)\n \n # set from available keyword arguments\n for k in ['smid', 'density', 'freq', 'color', 'neighbors', 'inputs']:\n if k in kwargs:\n setattr(self, k, kwargs[k])\n\n self.coef_loss = 0.8 # 0.95\n self.coef_coupling = 1.0 # 0.5\n # self.update = self.update_spontaneous\n self.update = self.update_liquid\n \n def run(self):\n while self.isrunning:\n # print('smnode {0}'.format(self.smid))\n # todo:\n # - read inputs\n # - compute output\n \n # self.mesh_update_state(self.cnt, self.mesh, self.tris, self.density)\n self.update()\n self.cnt += 1\n time.sleep(1/20.)\n\n def update_spontaneous(self):\n \"\"\"smnode.update\n\n compute smnode state update\n \"\"\"\n # print('smnode-{0}.update {1}'.format(self.smid, self.inputs))\n\n x_ = self.state\n # print(tri_i, x_)\n y_ = np.zeros_like(x_)\n # periodic activation\n \n # y_ += 0.05 * np.sin((self.cnt/20.0) * tris[tri_i]['freq'] * 2 * np.pi)\n # x_ += 1.0 * np.sin((self.cnt/20) * 2 * np.pi * self.freq)\n \n # if tri_i == 0 and cnt % 100 == 0:\n # if tri_i == 0 and np.random.uniform() < event_density:\n if np.random.uniform() < self.density:\n # print('refreshing state')\n # tris[tri_i]['state'] = 1.0\n y_ += 2.0 + np.random.uniform(0, 2)\n # tris[tri_i]['state'] = x_\n\n \n # print(tri_i, neighbors)\n \n # print(valid_neighbors)\n for input_n in self.inputs:\n # neighbor coupling\n if input_n.startswith('n') and self.inputs[input_n] > 0.0:\n # x_ = 0.0 * tris[tri_i]['state'] + (0.9 * tris[v_n]['state'])\n # x_ = 0.5 * tris[tri_i]['state'] + (0.5 * tris[v_n]['state'])\n # coupling = 0.05\n coupling = 1.0\n transfer = coupling * self.inputs[input_n]\n y_ += transfer\n # tris[v_n]['state'] -= transfer # coupling * tris[v_n]['state']\n \n \n # 
tris[tri_i]['state'] *= 0.5\n # x_ = np.tanh(x_)\n # x_ = np.sqrt(x_)\n \n # x_ = 0.92 * x_\n \n # decay activation\n # tris[tri_i]['state'] *= 0.98\n self.state *= 0.8\n \n # add inputs\n self.state += y_\n \n # output transfer function\n # tris[tri_i]['state_o'] = np.log(tris[tri_i]['state'] + 1) * 2\n self.outputs['state_o'] = np.tanh(self.state * 1)\n \n def update_liquid(self):\n \"\"\"smnode.update\n\n compute smnode state update as a liquid\n \"\"\"\n # print('smnode-{0}.update {1}'.format(self.smid, self.inputs))\n\n x_ = self.state\n # print(tri_i, x_)\n y_ = np.zeros_like(x_)\n # periodic activation\n \n # y_ += 0.05 * np.sin((self.cnt/20.0) * tris[tri_i]['freq'] * 2 * np.pi)\n # x_ += 0.02 * np.sin((self.cnt/20) * 2 * np.pi * self.freq)\n \n # print(tri_i, neighbors)\n \n # print(valid_neighbors)\n for input_n in self.inputs:\n # neighbor coupling\n if input_n.startswith('n'): # and self.inputs[input_n] > 0.0:\n transfer = self.coef_coupling * self.inputs[input_n]\n y_ += transfer\n # tris[v_n]['state'] -= transfer # coupling * tris[v_n]['state']\n\n # external coupling cap sense\n if input_n.startswith('cap'):\n # print('smnode-{2}.update_liquid input {0} = {1}'.format(input_n, self.inputs[input_n], self.smid))\n # if tri_i == 0 and cnt % 100 == 0:\n # if tri_i == 0 and np.random.uniform() < event_density:\n if np.random.uniform() < self.density:\n # print('refreshing state')\n # tris[tri_i]['state'] = 1.0\n self.cap_budget = 1.5 * np.random.uniform(-1, 1)\n # tris[tri_i]['state'] = x_\n y_ += 0.2 * self.cap_budget\n self.cap_budget *= 0.8\n \n # activation decay\n self.state *= self.coef_loss\n \n # add inputs\n self.state += y_\n\n if self.state >= 0:\n self.color = np.array((1., 1.0, 0))\n else:\n self.color = np.array((0., 1.0, 1.0))\n \n # output transfer function\n # tris[tri_i]['state_o'] = np.log(tris[tri_i]['state'] + 1) * 2\n # self.outputs['state_o'] = np.tanh(self.state * 5) * 0.5 + 0.5\n self.outputs['state_o'] = np.abs(np.tanh(self.state * 5))\n \nclass meshTrimesh(threading.Thread):\n def __init__(self, *args, **kwargs):\n super(meshTrimesh, self).__init__()\n # create a mesh with mesh generation parameters\n self.mesh = self.make_mesh_triangle_trimesh(**kwargs)\n\n # compute the neighbors for each cell\n self.valid_neighbors_all = self.mesh_get_neighbors_trimesh(self.mesh)\n # print('valid_neighbors_all = {0}'.format(self.valid_neighbors_all))\n \n # extend the mesh with an attribute dictionary\n self.tris = self.mesh_extended_trimesh(self.mesh)\n\n self.osc = kwargs['osc']\n # self.osc_target = liblo.Address('localhost', 1234)\n # self.osc_target = '1234'\n self.osc_target = liblo.Address('localhost', 1234, liblo.UDP)\n self.osc_target_hexagon = liblo.Address('localhost', 1236, liblo.UDP)\n\n self.sensors = [0.0 for _ in range(6)]\n \n # self.coupling = 0.2\n self.coupling = 0.02\n # self.coupling = 0.005\n self.isrunning = True\n self.cnt = 0\n \n def run(self):\n while self.isrunning:\n self.update()\n self.cnt += 1\n time.sleep(1/20.)\n\n def set_sensors(self, sensors):\n self.sensors = np.array(sensors)\n self.sensors += np.random.uniform(-1e-3, 1e-3, self.sensors.shape)\n \n def update(self):\n \"\"\"meshTrimesh.update\n \"\"\"\n # todo\n # - loop over neighbors\n # for nbrs in [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 0]]:\n for nbrs in self.mesh.face_adjacency:\n # print('nbrs', nbrs)\n # - populate node inputs with external values\n # self.mesh.face_attributes['smnode'][nbrs[0]].inputs['n{0}'.format(nbrs[1])] = 
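The update_liquid method above is a leaky integrator with a bounded readout: the state decays by coef_loss, accumulates coupled inputs, and emits |tanh(5 * state)|. A self-contained numpy sketch of those dynamics with the same coefficients:

import numpy as np

state, loss, coupling = 0.0, 0.8, 1.0
for x_nbr in [0.5, 0.0, 0.2]:            # toy neighbor input per tick
    state = loss * state + coupling * x_nbr
    out = np.abs(np.tanh(5 * state))     # bounded activation shared with neighbors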
self.mesh.face_attributes['state_o'][nbrs[1]]\n # self.mesh.face_attributes['smnode'][nbrs[1]].inputs['n{0}'.format(nbrs[0])] = self.mesh.face_attributes['state_o'][nbrs[0]]\n\n # # explicit coupling\n # self.mesh.face_attributes['smnode'][nbrs[0]].inputs['n{0}'.format(nbrs[1])] = self.coupling * self.mesh.face_attributes['smnode'][nbrs[1]].state\n # self.mesh.face_attributes['smnode'][nbrs[1]].state -= self.coupling * self.mesh.face_attributes['smnode'][nbrs[1]].state\n \n # self.mesh.face_attributes['smnode'][nbrs[1]].inputs['n{0}'.format(nbrs[0])] = self.coupling * self.mesh.face_attributes['smnode'][nbrs[0]].state\n # self.mesh.face_attributes['smnode'][nbrs[0]].state -= self.coupling * self.mesh.face_attributes['smnode'][nbrs[0]].state\n\n # sensor coupling\n self.mesh.face_attributes['smnode'][nbrs[0]].inputs['n{0}'.format(nbrs[1])] = self.coupling * self.sensors[nbrs[1]]\n self.mesh.face_attributes['smnode'][nbrs[1]].state -= self.coupling * self.sensors[nbrs[1]]\n \n self.mesh.face_attributes['smnode'][nbrs[1]].inputs['n{0}'.format(nbrs[0])] = self.coupling * self.sensors[nbrs[0]]\n self.mesh.face_attributes['smnode'][nbrs[0]].state -= self.coupling * self.sensors[nbrs[0]]\n \n def make_mesh_triangle_trimesh(self, **params):\n \"\"\"make_mesh_triangle_trimesh\n \n create mesh using trimesh.Trimesh\n \"\"\"\n c = params['c']\n mesh_info = MeshInfo()\n\n # generate vertices and facets\n if params['obj'] == 'line':\n points, facets, faces = make_vertex_facets_line(params)\n elif params['obj'] == 'hexagon':\n points, facets, faces = make_vertex_facets_hexagon(params)\n elif params['obj'] == 'rect':\n points, facets, faces = make_vertex_facets_rect_trimesh(params)\n \n # print('points = {0}\\nfacets = {1}'.format(pformat(points), pformat(facets)))\n\n # mesh = trimesh.Trimesh(vertices=[[0, 0, 0], [0, 0, 1], [0, 1, 0]],\n # faces=[[0, 1, 2]])\n\n # face_attributes = {\n # 'color': len(faces) * [0],\n # 'state': [],\n # 'freq': [],\n # }\n # print('face_attributes = {0}'.format(face_attributes))\n \n mesh = trimesh.Trimesh(vertices=points, faces=faces)\n\n # print('mesh.edges = {0}'.format(mesh.edges))\n \n # writing objects\n # mesh.write_vtk(\"trigrid.vtk\")\n # f = open('trigrid.pkl', 'wb')\n # pickle.dump(mesh, f)\n # f.close()\n # joblib.dump(mesh, 'trigrid.pkl')\n # sys.exit()\n return mesh\n \n def mesh_get_neighbors_trimesh(self, mesh):\n # nbrs = mesh.neighbors\n nbrs = mesh.face_adjacency\n valid_neighbors_all = []\n for nbr in nbrs:\n valid_neighbors_all.append([_ for _ in nbr if _ > -1])\n return(valid_neighbors_all)\n\n def mesh_extended_trimesh(self, mesh):\n \"\"\"mesh_extended_trimesh\n\n create mesh extended with face attributes\n \"\"\"\n # print('mesh.vertices = {0}'.format(pformat(mesh.vertices)))\n # print('mesh.faces = {0}'.format(pformat(mesh.faces)))\n # print('mesh.face_adjacency = {0}'.format(pformat(mesh.face_adjacency)))\n \n # plt.triplot(mesh.vertices[:, 0], mesh.vertices[:, 1], mesh_tris)\n # plt.aspect(1)\n # plt.show()\n\n colors_ = [\n np.array([1.0, 0.0, 0.0]),\n np.array([0.5, 0.5, 0.0]),\n np.array([0.0, 1.0, 0.0]),\n np.array([0.0, 0.5, 0.5]),\n np.array([0.0, 0.0, 1.0]),\n np.array([0.5, 0.0, 0.5]),\n ]\n \n # create list of attribute dictionaries\n tris = []\n # loop over faces\n for i, tri_ in enumerate(mesh.faces):\n # print('tri_ = {0}'.format(tri_))\n \n tri_l = []\n for tri_vert in tri_:\n vert = mesh.vertices[tri_vert].tolist()\n # print('tri_vert = {0}'.format(vert))\n # print()\n tri_l.append(vert)\n\n tris.append({\n 'vertices': tri_l,\n 
'neighbors': [], # list(mesh.face_adjacency[i]),\n 'color': np.random.uniform(0, 1, (3,)),\n # 'color': np.array([0.7, 0.2, 0.1]),\n # 'color': colors_[i],\n 'freq': np.random.uniform(0.05, 0.2),\n 'state': 0., # np.random.uniform(0, 1)\n 'state_o': 0., # np.random.uniform(0, 1)\n # 'inputs': {}, # np.random.uniform(0, 1)\n })\n \n for nbr in mesh.face_adjacency:\n tris[nbr[0]]['neighbors'].append(nbr[1])\n # tris[nbr[0]]['inputs']['n{0}'.format(nbr[1])] = 0.\n tris[nbr[1]]['neighbors'].append(nbr[0])\n # tris[nbr[1]]['inputs']['n{0}'.format(nbr[0])] = 0.\n\n # update mesh face_attributes\n mesh.face_attributes.update(ld2dl(tris))\n \n # tris is list of attribute dictionaries\n # print('tris = {0}'.format(pformat(tris)))\n # want dictionary of attributes with list data\n # print('mesh.face_attributes = {0}'.format(pformat(mesh.face_attributes)))\n return tris\n\n def send_state(self, cnt, mesh, tris, valid_neighbors_all):\n mdir = 1.0\n mesh_points = np.array(mesh.vertices)\n mesh_tris = np.array(mesh.faces)\n \n for i, face in enumerate(mesh.faces):\n # v_color = mesh.face_attributes['color'][i]\n v_color = mesh.face_attributes['smnode'][i].color\n # hack\n # mesh.face_attributes['state_o'][i] = tris[i]['state_o']\n # v_state_o = mesh.face_attributes['state_o'][i]\n v_state_o = mesh.face_attributes['smnode'][i].outputs['state_o']\n # draw face vertices, taken directly from mesh.vertices\n verts = list(v_color)\n verts += v_state_o[0,:].tolist()\n for vert in mesh.vertices[face]:\n verts += vert.tolist()\n\n facecolor = (v_color * v_state_o)[0,:]\n facecolor_int = (v_color * v_state_o * 253 + 1)[0,:].astype(int)\n l_ = [i] + facecolor.tolist()\n # print('sending face color {0}'.format(l_))\n # self.osc.send_message(b'/vert', list(verts))\n # self.osc.send_message(b'/facecolor', l_)\n self.osc.server.send(self.osc_target, '/facecolor', *l_)\n # motors = [v_state_o[0,0] * 150] * 6\n motors = [i] + facecolor_int.tolist()\n # print('sending motors = {0}'.format(motors))\n self.osc.server.send(self.osc_target_hexagon, '/hexagon_motors', *motors)\n # self.face_attributes['color'] = v_color * v_state_o\n \ndef get_params(obj='line', c=1, dim=3):\n hc1 = np.sin(np.deg2rad(60)) * c\n r_i = hc1/3\n r_o = 2 * r_i\n params = {\n 'obj': obj,\n 'c': c,\n 'dim': dim,\n 'hc1': hc1,\n 'hc2': np.sqrt(3)/2 * c,\n 'r_i': r_i,\n 'r_o': r_o,\n 'c_2': ((c/2) * r_o) / hc1,\n }\n return params\n\n# def cb_hexagon_sensors(qu, path, args, types, target, unk):\n# # def cb_hexagon_sensors(*args, **kwargs):\n# # i, f = args\n# # print('args = {0}, kwargs = {1}'.format(args, kwargs))\n# # print('qu = {0}'.format(qu))\n# # print(\"cb_hexagon_sensors message {0} with arguments {1}\".format(path, args))\n# qu.put((path, args))\n# # print('received args {0}'.format(args))\n \ndef main(args):\n \"\"\"meshgrid.main\n\n create a mesh of computation nodes\n\n - create mesh (generate, load)\n - populate with attributes\n - populate with threaded nodes\n - render mesh based on fixed set of attributes\n \"\"\"\n # define interrupt handler\n def _interrupt_handler(signum, frame):\n \"\"\"Handle KeyboardInterrupt: quit application.\"\"\"\n print('Got QUIT event, terminating threads')\n # terminate osc server\n osc.isrunning = False\n osc.join()\n print(' osc stopped')\n # terminate mesh update thread\n mesh.isrunning = False\n mesh.join()\n print(' mesh stopped')\n # join smnode threads from mesh\n for i, face in enumerate(mesh.mesh.faces):\n mesh.mesh.face_attributes['smnode'][i].isrunning = False\n 
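get_params in this record derives hexagon/triangle geometry from the edge length c: hc1 = sin(60 deg) * c is the triangle height, r_i = hc1/3 the inradius, and r_o = 2 * r_i the circumradius. A quick numeric check of those identities:

import numpy as np

c = 1.0
hc1 = np.sin(np.deg2rad(60)) * c                  # equilateral triangle height
assert np.isclose(hc1 / 3, c / (2 * np.sqrt(3)))  # inradius r_i
assert np.isclose(2 * hc1 / 3, c / np.sqrt(3))    # circumradius r_o = 2 * r_i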
mesh.mesh.face_attributes['smnode'][i].join()\n print(' smnodes stopped')\n running = False\n pygame.quit()\n quit()\n \n # install interrupt handler\n signal.signal(signal.SIGINT, _interrupt_handler)\n\n # get mesh generation parameters\n if args.meshlib == 'trimesh':\n dim = 3\n meshClass = meshTrimesh\n elif args.meshlib == 'meshpy':\n dim = 2\n meshClass = meshMeshpy\n\n # osc = OSCClient('localhost', 1234)\n qu = queue.Queue(maxsize=10)\n osc = OSCsrv(port=1235, queue=qu)\n # osc.add_method(\n # path=\"/hexagon_sensors\",\n # types='iiiiiiiiiiii',\n # # use a partial here to bind the qu argument\n # callback=partial(cb_hexagon_sensors, qu)\n # )\n # osc_target = liblo.Address(1337)\n osc_target = '1234'\n # liblo.send(target, \"/reconnect\", 'bang')\n\n \n params = get_params(obj=args.mode, c=1, dim=dim)\n # print('params = {0}'.format(pformat(params)))\n params['osc'] = osc\n mesh = meshClass(**params)\n\n dim_state = 1\n \n # populate mesh with sensorimotor nodes\n mesh.mesh.face_attributes['smnode'] = []\n for i, face in enumerate(mesh.mesh.faces):\n smnode_inputs_ = {}\n face_nbrs = [_.tolist() for _ in mesh.mesh.face_adjacency if face in _]\n face_nbrs_flat = [_ for _ in list(itertools.chain(*face_nbrs)) if face != _]\n for face_nbr in face_nbrs_flat:\n smnode_inputs_['n{0}'.format(face_nbr)] = np.zeros((dim_state,1))\n smnode_inputs_['cap'] = np.zeros((2,1))\n \n mesh.mesh.face_attributes['smnode'].append(\n smnode(smid=i,\n density=args.density,\n color=mesh.mesh.face_attributes['color'][i],\n freq=mesh.mesh.face_attributes['freq'][i],\n inputs=smnode_inputs_,\n )\n )\n mesh.mesh.face_attributes['smnode'][-1].start()\n\n # # create state update thread\n # ru = runUpdate(mesh.mesh, mesh.tris, args.density)\n # # start state update thread\n # ru.start()\n\n # start mesh update thread\n mesh.start()\n\n meshfile = 'trigrid-mesh.json'\n mesh.mesh.export(meshfile)\n print('sending loadmesh')\n # osc.send_message(b'/load', [True])\n osc.server.send(osc_target, '/load', meshfile)\n \n # initialize pygame and OpenGL\n pygame.init()\n\n\n # import zmq\n\n # context = zmq.Context()\n\n # # Socket to talk to server\n # print(\"Connecting to hello world server…\")\n # socket = context.socket(zmq.REQ)\n # socket.connect(\"tcp://localhost:5555\")\n \n # # Do 10 requests, waiting each time for a response\n # for request in range(10):\n # print(\"Sending request %s …\" % request)\n # socket.send(b\"Hello\")\n\n # # Get the reply.\n # message = socket.recv()\n # print(\"Received reply %s [ %s ]\" % (request, message))\n\n # gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\n # gluPerspective(60, (display[0]/display[1]), 0.1, 50.0)\n\n # for i in range(10):\n # osc.send_message(b'/address', [i, 0.1])\n \n # hexagon\n if args.mode == 'hexagon':\n trans = [0.0, 0.0, -5.0]\n # line\n elif args.mode == 'line':\n trans = [-6.0, 0.0, -10]\n # rect\n elif args.mode == 'rect':\n trans = [0, 0, -10]\n\n # osc.send_message(b'/translate', trans)\n osc.server.send(osc_target, '/perspective', 0)\n \n # osc.send_message(b'/translate', trans)\n osc.server.send(osc_target, '/translate', *trans)\n \n # glScalef(2.0, 2.0, 2.0)\n # glScalef(3.0, 3.0, 3.0)\n\n # osc.send_message(b'/scale', [3.0, 3.0, 3.0])\n osc.server.send(osc_target, '/scale', *([4.0, 4.0, 4.0]))\n\n sensors = np.array([0. for _ in range(6)])\n sensors_mean = np.array([0. for _ in range(6)])\n sensors_var = np.array([0. for _ in range(6)])\n sensors_std = np.array([0. 
for _ in range(6)])\n \n # start main loop\n running = True\n cnt = 0\n while running:\n # event handling\n for event in pygame.event.get():\n # quit event\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n # if event.type == pygame.QUIT:\n _interrupt_handler()\n \n # # glRotatef(1, 3, 3, 3)\n # glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n \n # while qu.qsize() > 0:\n # qud = qu.get()\n # if qud is not None:\n # # loopfunc_hexagon(qud[1], cord)\n # print('sensors {0}'.format(qud[1][6]))\n sys.stdout.write('sensors\\n')\n for i in range(6):\n # sys.stdout.write('{0:3d}, {1:3d}, '.format(osc.hexagon_sensors[i][6], osc.hexagon_sensors[i][7]))\n # sys.stdout.write('{0:3d}, '.format(osc.hexagon_sensors[i][7]))\n sensors[i] = osc.hexagon_sensors[i][7]\n sensors_mean[i] = 0.9 * sensors_mean[i] + 0.1 * sensors[i]\n sensors_var[i] = 0.9 * sensors_var[i] + 0.1 * np.square(sensors[i] - sensors_mean[i])\n if np.sum(sensors_var > 0.) >= 6:\n sensors_std[i] = (sensors[i] - sensors_mean[i]) / np.clip(0.01, 100, sensors_var[i])\n sys.stdout.write('{0}\\n'.format(sensors))\n sys.stdout.write('{0}\\n'.format(sensors_mean))\n sys.stdout.write('{0}\\n'.format(sensors_var))\n sys.stdout.write('{0}\\n'.format(sensors_std))\n sys.stdout.write('\\n')\n\n mesh.set_sensors(sensors_std)\n \n # # render function on mesh\n mesh.send_state(cnt, mesh.mesh, mesh.tris, mesh.valid_neighbors_all)\n \n # bookkeeping\n cnt += 1\n \n # pygame.display.flip()\n # # pygame.time.wait(20)\n # try:\n pygame.time.wait(50)\n # time.sleep(0.04)\n # except Exception as e:\n # print('failed with {0}'.format(e))\n\n# from https://coldfix.de/2016/11/08/pyqt-boilerplate/#keyboardinterrupt-ctrl-c\n# Call this function in your main after creating the QApplication\ndef setup_interrupt_handling():\n \"\"\"Setup handling of KeyboardInterrupt (Ctrl-C) for PyQt.\"\"\"\n \n # Regularly run some (any) python code, so the signal handler gets a\n # chance to be executed:\n # safe_timer(50, lambda: None)\n\n\n# Define this as a global function to make sure it is not garbage\n# collected when going out of scope:\n \nif __name__ == '__main__':\n # command line arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-d', '--density', type=float, default=0.1, help='Density for random node activity [0.1]')\n parser.add_argument('-m', '--mode', type=str, default='hexagon', help='Mesh mode [hexagon] (hexagon, line, rect)')\n parser.add_argument('-l', '--meshlib', type=str, default='trimesh', help='Which meshlib to use [trimesh] (trimesh, meshpy)')\n\n args = parser.parse_args()\n\n main(args)\n", "sub_path": "scripts/trigrid.py", "file_name": "trigrid.py", "file_ext": "py", "file_size_in_byte": 28413, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.cos", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 124, "usage_type": 
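The event loop above standardizes each capacitive channel with exponential moving estimates of mean and variance before feeding the mesh; the same filter in isolation (note that np.clip takes the array first, then the lower and upper bounds):

import numpy as np

m, v = 0.0, 0.0
for x in [10.0, 12.0, 11.0, 30.0]:           # toy raw sensor readings
    m = 0.9 * m + 0.1 * x                    # running mean
    v = 0.9 * v + 0.1 * (x - m) ** 2         # running variance
    z = (x - m) / np.clip(v, 0.01, 100.0)    # standardized, variance floored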
"call"}, {"api_name": "numpy.sin", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 138, "usage_type": "call"}, {"api_name": "trimesh.primitives.Box", "line_number": 199, "usage_type": "call"}, {"api_name": "trimesh.primitives", "line_number": 199, "usage_type": "attribute"}, {"api_name": "trimesh.transformations.random_rotation_matrix", "line_number": 202, "usage_type": "call"}, {"api_name": "trimesh.transformations", "line_number": 202, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 232, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 240, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 281, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 284, "usage_type": "attribute"}, {"api_name": "numpy.tanh", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 350, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 353, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 372, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 374, "usage_type": "attribute"}, {"api_name": "liblo.Address", "line_number": 390, "usage_type": "call"}, {"api_name": "liblo.UDP", "line_number": 390, "usage_type": "attribute"}, {"api_name": "liblo.Address", "line_number": 391, "usage_type": "call"}, {"api_name": "liblo.UDP", "line_number": 391, "usage_type": "attribute"}, {"api_name": "six.moves.range", "line_number": 393, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 409, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 409, "usage_type": "attribute"}, {"api_name": "meshpy.triangle.MeshInfo", "line_number": 443, "usage_type": "call"}, {"api_name": "trimesh.Trimesh", "line_number": 465, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 500, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 505, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 524, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 527, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 551, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 588, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 632, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 636, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 636, "usage_type": "attribute"}, {"api_name": "queue.Queue", "line_number": 647, "usage_type": "call"}, {"api_name": "smp_meshgrid.oscsrv.OSCsrv", "line_number": 648, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 672, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 674, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 675, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 702, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 751, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 751, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 752, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 752, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 753, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 753, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 754, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 754, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 761, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 761, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 763, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 763, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 763, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 775, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 775, "usage_type": "attribute"}, {"api_name": "six.moves.range", "line_number": 776, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 781, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 783, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 784, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 784, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 785, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 785, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 786, "usage_type": "call"}, {"api_name": "sys.stdout", 
"line_number": 786, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 787, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 787, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 788, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 788, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 801, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 801, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 821, "usage_type": "call"}]} +{"seq_id": "306659114", "text": "# coding: utf-8\nimport datetime\n\nfrom django.views.generic import ListView\n\nfrom app.ticket.models import Ticket\n\nfrom .forms import ReportForm\n\n\nclass TotalReportView(ListView):\n template_name = \"site/sales/total_report.html\"\n model = Ticket\n\n def get_queryset(self):\n date_from = self.request.GET.get('date_from')\n date_to = self.request.GET.get('date_to')\n queryset = super(TotalReportView, self).get_queryset()\n queryset = queryset.filter(user=self.request.user)\n if date_from or date_to:\n try:\n if date_from:\n date_from = datetime.datetime.strptime(date_from, \"%m/%d/%Y\")\n queryset = queryset.filter(date_created__gte=date_from)\n if date_to:\n date_to = datetime.datetime.strptime(date_to, \"%m/%d/%Y\")\n queryset = queryset.filter(date_created__lte=date_to)\n except ValueError:\n queryset = []\n\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(TotalReportView, self).get_context_data(**kwargs)\n context['amount'] = 0\n for obj in context['object_list']:\n context['amount'] += obj.total_amount\n\n if 'get_report' in self.request.GET:\n context['show_results'] = True\n context['form'] = ReportForm(initial={\n 'date_from': self.request.GET.get('date_from'),\n 'date_to': self.request.GET.get('date_to'),\n })\n else:\n context['form'] = ReportForm()\n\n return context\n\n\nclass DetailReportView(TotalReportView):\n template_name = \"site/sales/detail_report.html\"\n", "sub_path": "app/sales/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.views.generic.ListView", "line_number": 11, "usage_type": "name"}, {"api_name": "app.ticket.models.Ticket", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "forms.ReportForm", "line_number": 41, "usage_type": "call"}, {"api_name": "forms.ReportForm", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "517296443", "text": "import json\nimport os\nimport sys\n\nsys.path.append(os.path.abspath('../'))\nfrom backend.models import Document, Dataset, Summary, SummaryGroup, SummariesPair, User\nfrom backend.app import create_app\nfrom flask_sqlalchemy import SQLAlchemy\n\ndataset_path = '../backend/BBC_pair'\ndef example_filter(sentence):\n toks = sentence.split(\" \")\n if toks.count(\"\") < 2:\n return True\n return False\n\ndef one_split(db, idx, sanity_data):\n # Insert dataset\n dataset = Dataset(name=\"ALG_FACT\"+str(idx))\n db.session.add(dataset)\n db.session.commit()\n\n summaries_path = 
os.path.join(dataset_path, 'summaries')\n documents_path = os.path.join(dataset_path, 'documents')\n for doc_id in sanity_data:\n file_name = doc_id + \".data\"\n file_path = os.path.join(documents_path, file_name)\n summ_path = os.path.join(summaries_path, file_name)\n with open(summ_path, 'r') as infile:\n summ_json = json.load(infile)\n with open(file_path, 'r') as infile:\n json_result = json.load(infile)\n did = json_result['doc_id']\n for i, item in enumerate(summ_json):\n if item['name'].find(\"|||\") == -1:\n continue\n if example_filter(item['text']):\n continue\n document = Document(\n dataset_id=dataset.id,\n doc_id=json_result['doc_id'],\n doc_json=json.dumps(json_result),\n summary=json.dumps(item),\n sanity_statement=sanity_data[did][\"sanity_statement\"],\n sanity_answer=sanity_data[did][\"sanity_answer\"]\n )\n db.session.add(document)\n db.session.commit()\n\n\ndef init_database_split(db, num_of_split):\n dataset_name = os.path.split(dataset_path)[1]\n sanity_path = os.path.join(dataset_path, 'sanity_id/sanity.txt')\n\n sanity_data = []\n for i in range(num_of_split):\n sanity_data.append({})\n\n for i, line in enumerate(open(sanity_path)):\n flist = line.strip().split(\"\\t\")\n split_id = i % num_of_split\n sanity_data[split_id][flist[0]] = {\"sanity_answer\": bool(int(flist[2])), \"sanity_statement\": flist[1]}\n\n # Insert documents\n for i in range(num_of_split):\n one_split(db, i, sanity_data[i])\n\ndef init_database(db):\n # user = User(email='admin@localhost', password='localhost')\n # db.session.add(user)\n # db.session.commit()\n dataset_path = '../backend/BBC_pair'\n dataset_name = os.path.split(dataset_path)[1]\n\n summaries_path = os.path.join(dataset_path, 'summaries')\n documents_path = os.path.join(dataset_path, 'documents')\n sanity_path = os.path.join(dataset_path, 'sanity_id/sanity.txt')\n\n # Existing dataset\n #dataset = db.session.query(Dataset).filter_by(name='BBC').first()\n # Insert dataset\n dataset = Dataset(name=\"BBC\")\n db.session.add(dataset)\n db.session.commit()\n\n sanity_data = {}\n for line in open(sanity_path):\n flist = line.strip().split(\"\\t\")\n sanity_data[flist[0]] = {\"sanity_answer\": bool(int(flist[2])), \"sanity_statement\": flist[1]}\n\n # Insert documents\n for file in os.listdir(documents_path):\n file_path = os.path.join(documents_path, file)\n summ_path = os.path.join(summaries_path, file)\n with open(summ_path, 'r') as infile:\n summ_json = json.load(infile)\n with open(file_path, 'r') as infile:\n json_result = json.load(infile)\n did = json_result['doc_id']\n for i, item in enumerate(summ_json):\n document = Document(\n dataset_id=dataset.id,\n doc_id=json_result['doc_id'],\n doc_json=json.dumps(json_result),\n summary=json.dumps(item),\n sanity_statement=sanity_data[did][\"sanity_statement\"],\n sanity_answer=sanity_data[did][\"sanity_answer\"]\n )\n db.session.add(document)\n db.session.commit()\n\nif __name__ == '__main__':\n app = create_app()\n db_app = SQLAlchemy(app)\n #init_database(db_app)\n init_database_split(db_app, 10)\n", "sub_path": "Human_eva/scripts/insert_dataset_ALG_Phrase.py", "file_name": "insert_dataset_ALG_Phrase.py", "file_ext": "py", "file_size_in_byte": 4238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, 
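init_database_split above deals sanity records to datasets round-robin by line index (split_id = i % num_of_split); the core of that assignment in isolation:

num_of_split = 10
splits = [dict() for _ in range(num_of_split)]
for i, doc_id in enumerate(['d0', 'd1', 'd2', 'd3']):
    splits[i % num_of_split][doc_id] = {'sanity_answer': True}   # toy payload
# record i always lands in split i % num_of_split, keeping splits balanced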
{"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "backend.models.Dataset", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "backend.models.Document", "line_number": 39, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "backend.models.Dataset", "line_number": 82, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 96, "usage_type": "call"}, {"api_name": "json.load", "line_number": 98, "usage_type": "call"}, {"api_name": "backend.models.Document", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 104, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 105, "usage_type": "call"}, {"api_name": "backend.app.create_app", "line_number": 113, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "561842087", "text": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto 
# type: ignore\n\nfrom google.cloud.bigtable_admin_v2.types import common\n\n\n__protobuf__ = proto.module(\n package=\"google.bigtable.admin.v2\", manifest={\"Instance\", \"Cluster\", \"AppProfile\",},\n)\n\n\nclass Instance(proto.Message):\n r\"\"\"A collection of Bigtable [Tables][google.bigtable.admin.v2.Table]\n and the resources that serve them. All tables in an instance are\n served from all [Clusters][google.bigtable.admin.v2.Cluster] in the\n instance.\n\n Attributes:\n name (str):\n The unique name of the instance. Values are of the form\n ``projects/{project}/instances/[a-z][a-z0-9\\\\-]+[a-z0-9]``.\n display_name (str):\n Required. The descriptive name for this\n instance as it appears in UIs. Can be changed at\n any time, but should be kept globally unique to\n avoid confusion.\n state (google.cloud.bigtable_admin_v2.types.Instance.State):\n (``OutputOnly``) The current state of the instance.\n type_ (google.cloud.bigtable_admin_v2.types.Instance.Type):\n The type of the instance. Defaults to ``PRODUCTION``.\n labels (Sequence[google.cloud.bigtable_admin_v2.types.Instance.LabelsEntry]):\n Labels are a flexible and lightweight mechanism for\n organizing cloud resources into groups that reflect a\n customer's organizational needs and deployment strategies.\n They can be used to filter resources and aggregate metrics.\n\n - Label keys must be between 1 and 63 characters long and\n must conform to the regular expression:\n ``[\\p{Ll}\\p{Lo}][\\p{Ll}\\p{Lo}\\p{N}_-]{0,62}``.\n - Label values must be between 0 and 63 characters long and\n must conform to the regular expression:\n ``[\\p{Ll}\\p{Lo}\\p{N}_-]{0,63}``.\n - No more than 64 labels can be associated with a given\n resource.\n - Keys and values must both be under 128 bytes.\n \"\"\"\n\n class State(proto.Enum):\n r\"\"\"Possible states of an instance.\"\"\"\n STATE_NOT_KNOWN = 0\n READY = 1\n CREATING = 2\n\n class Type(proto.Enum):\n r\"\"\"The type of the instance.\"\"\"\n TYPE_UNSPECIFIED = 0\n PRODUCTION = 1\n DEVELOPMENT = 2\n\n name = proto.Field(proto.STRING, number=1,)\n display_name = proto.Field(proto.STRING, number=2,)\n state = proto.Field(proto.ENUM, number=3, enum=State,)\n type_ = proto.Field(proto.ENUM, number=4, enum=Type,)\n labels = proto.MapField(proto.STRING, proto.STRING, number=5,)\n\n\nclass Cluster(proto.Message):\n r\"\"\"A resizable group of nodes in a particular cloud location, capable\n of serving all [Tables][google.bigtable.admin.v2.Table] in the\n parent [Instance][google.bigtable.admin.v2.Instance].\n\n Attributes:\n name (str):\n The unique name of the cluster. Values are of the form\n ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``.\n location (str):\n (``CreationOnly``) The location where this cluster's nodes\n and storage reside. For best performance, clients should be\n located as close as possible to this cluster. Currently only\n zones are supported, so values should be of the form\n ``projects/{project}/locations/{zone}``.\n state (google.cloud.bigtable_admin_v2.types.Cluster.State):\n The current state of the cluster.\n serve_nodes (int):\n Required. The number of nodes allocated to\n this cluster. 
More nodes enable higher\n throughput and more consistent performance.\n default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType):\n (``CreationOnly``) The type of storage used by this cluster\n to serve its parent instance's tables, unless explicitly\n overridden.\n encryption_config (google.cloud.bigtable_admin_v2.types.Cluster.EncryptionConfig):\n Immutable. The encryption configuration for\n CMEK-protected clusters.\n \"\"\"\n\n class State(proto.Enum):\n r\"\"\"Possible states of a cluster.\"\"\"\n STATE_NOT_KNOWN = 0\n READY = 1\n CREATING = 2\n RESIZING = 3\n DISABLED = 4\n\n class EncryptionConfig(proto.Message):\n r\"\"\"Cloud Key Management Service (Cloud KMS) settings for a CMEK-\n rotected cluster.\n\n Attributes:\n kms_key_name (str):\n Describes the Cloud KMS encryption key that will be used to\n protect the destination Bigtable cluster. The requirements\n for this key are:\n\n 1) The Cloud Bigtable service account associated with the\n project that contains this cluster must be granted the\n ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK\n key.\n 2) Only regional keys can be used and the region of the CMEK\n key must match the region of the cluster.\n 3) All clusters within an instance must use the same CMEK\n key.\n \"\"\"\n\n kms_key_name = proto.Field(proto.STRING, number=1,)\n\n name = proto.Field(proto.STRING, number=1,)\n location = proto.Field(proto.STRING, number=2,)\n state = proto.Field(proto.ENUM, number=3, enum=State,)\n serve_nodes = proto.Field(proto.INT32, number=4,)\n default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,)\n encryption_config = proto.Field(proto.MESSAGE, number=6, message=EncryptionConfig,)\n\n\nclass AppProfile(proto.Message):\n r\"\"\"A configuration object describing how Cloud Bigtable should\n treat traffic from a particular end user application.\n\n Attributes:\n name (str):\n (``OutputOnly``) The unique name of the app profile. Values\n are of the form\n ``projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.\n etag (str):\n Strongly validated etag for optimistic concurrency control.\n Preserve the value returned from ``GetAppProfile`` when\n calling ``UpdateAppProfile`` to fail the request if there\n has been a modification in the mean time. The\n ``update_mask`` of the request need not include ``etag`` for\n this protection to apply. See\n `Wikipedia `__ and\n `RFC\n 7232 `__\n for more details.\n description (str):\n Optional long form description of the use\n case for this AppProfile.\n multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny):\n Use a multi-cluster routing policy.\n single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting):\n Use a single-cluster routing policy.\n \"\"\"\n\n class MultiClusterRoutingUseAny(proto.Message):\n r\"\"\"Read/write requests are routed to the nearest cluster in the\n instance, and will fail over to the nearest cluster that is\n available in the event of transient errors or delays. Clusters\n in a region are considered equidistant. Choosing this option\n sacrifices read-your-writes consistency to improve availability.\n\n Attributes:\n cluster_ids (Sequence[str]):\n The set of clusters to route to. The order is\n ignored; clusters will be tried in order of\n distance. 
If left empty, all clusters are\n eligible.\n \"\"\"\n\n cluster_ids = proto.RepeatedField(proto.STRING, number=1,)\n\n class SingleClusterRouting(proto.Message):\n r\"\"\"Unconditionally routes all read/write requests to a specific\n cluster. This option preserves read-your-writes consistency but\n does not improve availability.\n\n Attributes:\n cluster_id (str):\n The cluster to which read/write requests\n should be routed.\n allow_transactional_writes (bool):\n Whether or not ``CheckAndMutateRow`` and\n ``ReadModifyWriteRow`` requests are allowed by this app\n profile. It is unsafe to send these requests to the same\n table/row/column in multiple clusters.\n \"\"\"\n\n cluster_id = proto.Field(proto.STRING, number=1,)\n allow_transactional_writes = proto.Field(proto.BOOL, number=2,)\n\n name = proto.Field(proto.STRING, number=1,)\n etag = proto.Field(proto.STRING, number=2,)\n description = proto.Field(proto.STRING, number=3,)\n multi_cluster_routing_use_any = proto.Field(\n proto.MESSAGE,\n number=5,\n oneof=\"routing_policy\",\n message=MultiClusterRoutingUseAny,\n )\n single_cluster_routing = proto.Field(\n proto.MESSAGE, number=6, oneof=\"routing_policy\", message=SingleClusterRouting,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n", "sub_path": "google/cloud/bigtable_admin_v2/types/instance.py", "file_name": "instance.py", "file_ext": "py", "file_size_in_byte": 9753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "proto.module", "line_number": 21, "usage_type": "call"}, {"api_name": "proto.Message", "line_number": 26, "usage_type": "attribute"}, {"api_name": "proto.Enum", "line_number": 62, "usage_type": "attribute"}, {"api_name": "proto.Enum", "line_number": 68, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 74, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 74, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 75, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 75, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 76, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 76, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 77, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 77, "usage_type": "attribute"}, {"api_name": "proto.MapField", "line_number": 78, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 78, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 81, "usage_type": "attribute"}, {"api_name": "proto.Enum", "line_number": 111, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 119, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 139, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 139, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 141, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 141, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 142, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 142, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 143, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 143, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 144, "usage_type": "call"}, {"api_name": "proto.INT32", "line_number": 144, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 
145, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 145, "usage_type": "attribute"}, {"api_name": "google.cloud.bigtable_admin_v2.types.common.StorageType", "line_number": 145, "usage_type": "attribute"}, {"api_name": "google.cloud.bigtable_admin_v2.types.common", "line_number": 145, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 146, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 146, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 149, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 178, "usage_type": "attribute"}, {"api_name": "proto.RepeatedField", "line_number": 193, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 193, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 195, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 211, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 211, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 212, "usage_type": "call"}, {"api_name": "proto.BOOL", "line_number": 212, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 214, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 214, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 215, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 215, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 216, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 216, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 217, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 218, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 223, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 224, "usage_type": "attribute"}]} +{"seq_id": "530880528", "text": "from threading import Barrier\r\nfrom Operai import Operai\r\nimport multiprocessing\r\nclass Operazione:\r\n def __init__(self, v1, v2):\r\n self.v1 = v1\r\n self.v2 = v2\r\n def sommaVettori(self):\r\n threadReali = multiprocessing.cpu_count()\r\n fetta = len(self.v1) // threadReali\r\n while fetta == 0:\r\n threadReali -= 1\r\n fetta = len(self.v1) // threadReali\r\n \r\n b = Barrier(threadReali + 1)\r\n operai = []\r\n for i in range(0, threadReali - 1):\r\n inizio = i * fetta\r\n fine = fetta - 1 + inizio\r\n operai.append(Operai(inizio, fine, self.v1, self.v2, b))\r\n operai[i].start()\r\n operai.append(Operai((threadReali - 1) * fetta, len(self.v1) - 1, self.v1, self.v2, b))\r\n operai[threadReali - 1].start()\r\n b.wait()\r\n \r\n for o in operai:\r\n print(f\"{o.inizio}, {o.fine}, {o.getVFinale()}\")", "sub_path": "AritmeticaVettoriale/Operazione.py", "file_name": "Operazione.py", "file_ext": "py", "file_size_in_byte": 943, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "multiprocessing.cpu_count", "line_number": 9, "usage_type": "call"}, {"api_name": "threading.Barrier", "line_number": 15, "usage_type": "call"}, {"api_name": "Operai.Operai", "line_number": 20, "usage_type": "call"}, {"api_name": "Operai.Operai", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "119182206", "text": "# Copyright 2021 msg systems ag\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# 
http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport os\nimport pickle\nimport traceback\nfrom sys import exc_info\nfrom packaging import version\nimport spacy\nimport pkg_resources\nfrom tensorflow import keras\nfrom spacy.language import Language\nfrom spacy.tokens import Doc, Token\nfrom thinc.api import Config\nfrom .annotation import Annotator\nfrom .errors import LanguageNotSupportedError, ModelNotSupportedError, \\\n VectorsModelNotInstalledError, VectorsModelHasWrongVersionError, \\\n MultiprocessingParsingNotSupportedError\n\nCOMMON_MODELS_PACKAGE_NAMEPART = 'coreferee_model_'\n\nFEATURE_TABLE_FILENAME = 'feature_table.bin'\n\nKERAS_MODEL_FILENAME = 'keras_ensemble.h5'\n\nclass CorefereeManager:\n\n @staticmethod\n def get_annotator(nlp: Language) -> Annotator:\n model_name = '_'.join((nlp.meta['lang'], nlp.meta['name']))\n relative_config_filename = os.sep.join(('lang', nlp.meta['lang'], 'config.cfg'))\n if not pkg_resources.resource_exists(__name__, relative_config_filename):\n raise LanguageNotSupportedError(nlp.meta['lang'])\n absolute_config_filename = pkg_resources.resource_filename(__name__,\n relative_config_filename)\n config = Config().from_disk(absolute_config_filename)\n for config_entry_name, config_entry in config.items():\n if nlp.meta['name'] == config_entry['model'] and \\\n version.parse(nlp.meta['version']) >= \\\n version.parse(config_entry['from_version']) and \\\n version.parse(nlp.meta['version']) <= \\\n version.parse(config_entry['to_version']):\n if 'vectors_model' in config_entry:\n try:\n vectors_nlp = spacy.load('_'.join((nlp.meta['lang'],\n config_entry['vectors_model'])))\n except OSError:\n raise VectorsModelNotInstalledError(''.join(('Model ', model_name,\n ' is only supported in conjunction with model ',\n nlp.meta['lang'], '_', config_entry['vectors_model'],\n \" which must be loaded using 'python -m spacy download \",\n nlp.meta['lang'], '_', config_entry['vectors_model'], \"'.\")))\n if version.parse(vectors_nlp.meta['version']) < \\\n version.parse(config_entry['vectors_from_version']) or \\\n version.parse(vectors_nlp.meta['version']) > \\\n version.parse(config_entry['vectors_to_version']):\n raise VectorsModelHasWrongVersionError(''.join(('Model ', model_name,\n ' is only supported in conjunction with model ',\n nlp.meta['lang'], '_', config_entry['vectors_model'],\n ' between versions ', config_entry['vectors_from_version'], ' and ',\n config_entry['vectors_to_version'], ' inclusive.')))\n else:\n vectors_nlp = nlp\n model_package_name = ''.join((COMMON_MODELS_PACKAGE_NAMEPART, nlp.meta['lang'],\n '.', config_entry_name))\n try:\n importlib.import_module(model_package_name)\n except ModuleNotFoundError:\n print(''.join((\"Model could not be loaded for config entry '\",\n config_entry_name, \"' If models exist for language '\", nlp.meta['lang'],\n \"', load them with the command 'python -m coreferee install \",\n nlp.meta['lang'], \"'.\")))\n raise ModelNotSupportedError(''.join((nlp.meta['lang'], '_', nlp.meta['name'],\n ' version ', nlp.meta['version'])))\n this_feature_table_filename = pkg_resources.resource_filename(model_package_name,\n FEATURE_TABLE_FILENAME)\n with 
open(this_feature_table_filename, \"rb\") as feature_table_file:\n feature_table = pickle.load(feature_table_file)\n absolute_keras_model_filename = pkg_resources.resource_filename(\n model_package_name, KERAS_MODEL_FILENAME)\n keras_ensemble = keras.models.load_model(absolute_keras_model_filename)\n return Annotator(nlp, vectors_nlp, feature_table, keras_ensemble)\n raise ModelNotSupportedError(''.join((nlp.meta['lang'], '_', nlp.meta['name'],\n ' version ', nlp.meta['version'])))\n\n@Language.factory(\"coreferee\")\nclass CorefereeBroker:\n def __init__(self, nlp:Language, name:str):\n self.nlp = nlp\n self.pid = os.getpid()\n self.annotator = CorefereeManager().get_annotator(nlp)\n\n def __call__(self, doc:Doc) -> Doc:\n if os.getpid() != self.pid:\n raise MultiprocessingParsingNotSupportedError(\n 'Unfortunately at present parsing cannot be shared between forked processes.')\n try:\n self.annotator.annotate(doc)\n except:\n print('Unexpected error annotating document, skipping ....')\n exception_info_parts = exc_info()\n print(exception_info_parts[0])\n print(exception_info_parts[1])\n traceback.print_tb(exception_info_parts[2])\n return doc\n\n def __getstate__(self):\n return self.nlp.meta\n\n def __setstate__(self, meta):\n nlp_name = '_'.join((meta['lang'], meta['name']))\n self.nlp = spacy.load(nlp_name)\n self.annotator = CorefereeManager().get_annotator(self.nlp)\n self.pid = os.getpid()\n CorefereeBroker.set_extensions()\n\n @staticmethod\n def set_extensions():\n if not Doc.has_extension('coref_chains'):\n Doc.set_extension('coref_chains', default=None)\n if not Token.has_extension('coref_chains'):\n Token.set_extension('coref_chains', default=None)\n", "sub_path": "coreferee/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 6534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "spacy.language.Language", "line_number": 41, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pkg_resources.resource_exists", "line_number": 44, "usage_type": "call"}, {"api_name": "errors.LanguageNotSupportedError", "line_number": 45, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 46, "usage_type": "call"}, {"api_name": "thinc.api.Config", "line_number": 48, "usage_type": "call"}, {"api_name": "packaging.version.parse", "line_number": 51, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 51, "usage_type": "name"}, {"api_name": "packaging.version.parse", "line_number": 52, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 52, "usage_type": "name"}, {"api_name": "packaging.version.parse", "line_number": 53, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 53, "usage_type": "name"}, {"api_name": "packaging.version.parse", "line_number": 54, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 54, "usage_type": "name"}, {"api_name": "spacy.load", "line_number": 57, "usage_type": "call"}, {"api_name": "errors.VectorsModelNotInstalledError", "line_number": 60, "usage_type": "call"}, {"api_name": "packaging.version.parse", "line_number": 65, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 65, "usage_type": "name"}, {"api_name": "packaging.version.parse", "line_number": 66, "usage_type": "call"}, {"api_name": "packaging.version", 
"line_number": 66, "usage_type": "name"}, {"api_name": "packaging.version.parse", "line_number": 67, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 67, "usage_type": "name"}, {"api_name": "packaging.version.parse", "line_number": 68, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 68, "usage_type": "name"}, {"api_name": "errors.VectorsModelHasWrongVersionError", "line_number": 69, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 79, "usage_type": "call"}, {"api_name": "errors.ModelNotSupportedError", "line_number": 85, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 87, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 90, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 93, "usage_type": "name"}, {"api_name": "annotation.Annotator", "line_number": 94, "usage_type": "call"}, {"api_name": "errors.ModelNotSupportedError", "line_number": 95, "usage_type": "call"}, {"api_name": "annotation.Annotator", "line_number": 41, "usage_type": "name"}, {"api_name": "spacy.language.Language", "line_number": 100, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 102, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 105, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 106, "usage_type": "call"}, {"api_name": "errors.MultiprocessingParsingNotSupportedError", "line_number": 107, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 113, "usage_type": "call"}, {"api_name": "traceback.print_tb", "line_number": 116, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 124, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 126, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc.has_extension", "line_number": 131, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 131, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc.set_extension", "line_number": 132, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 132, "usage_type": "name"}, {"api_name": "spacy.tokens.Token.has_extension", "line_number": 133, "usage_type": "call"}, {"api_name": "spacy.tokens.Token", "line_number": 133, "usage_type": "name"}, {"api_name": "spacy.tokens.Token.set_extension", "line_number": 134, "usage_type": "call"}, {"api_name": "spacy.tokens.Token", "line_number": 134, "usage_type": "name"}, {"api_name": "spacy.language.Language.factory", "line_number": 98, "usage_type": "call"}, {"api_name": "spacy.language.Language", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "178633007", "text": "import pyautogui\nimport pyperclip\nimport time\n\n# Rodando em ambiente fora do Jupyter\n# será necessário importar demais bibliotecas\n# pandas\n# numpy\n# openpyxl\n\npyautogui.PAUSE = 1\n\n# Passo 1: Entrar no sistema (no caso, entrar no link)\npyautogui.hotkey(\"ctrl\",\"t\")\npyperclip.copy(\"https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga?usp=sharing\")\npyautogui.hotkey(\"ctrl\", \"v\")\npyautogui.press(\"enter\")\n\ntime.sleep(5)\n# Passo 2: Navegar até o local do relatório (Entrar na pasta Exportar)\npyautogui.click(x=334, y=288, clicks=2)\n\ntime.sleep(2)\n# Passo 
3: Fazer download do arquivo\npyautogui.click(x=428, y=408)\ntime.sleep(1)\npyautogui.click(x=1157, y=195)\ntime.sleep(1)\npyautogui.click(x=1084, y=597)\ntime.sleep(5)\n\n\n# -----------------------------------------------\n\n### Agora vamos lê o arquivo baixado e guardar os indicadores\n## Faturamento\n## Quantidade de produto\n\n# Calcular os indicadores\nimport pandas as pd\n\ntabela = pd.read_excel(r\"C:\\Users\\Suporte\\Downloads\\Vendas - Dez.xlsx\") # Consultar caminho do diretório\ndisplay(tabela)\n\nfaturamento = tabela[\"Valor Final\"].sum()\nquantidade = tabela[\"Quantidade\"].sum()\n\n# -----------------------------------------------\n\n### Enviando e-mail via Gmail\n\n# Passo 5: Entrar no email\npyautogui.hotkey(\"ctrl\",\"t\")\npyperclip.copy(\"https://mail.google.com/mail/u/0/#inbox\")\npyautogui.hotkey(\"ctrl\",\"v\")\npyautogui.press(\"enter\")\ntime.sleep(5)\n\n# Passo 6: Enviar por e-mail o resultado\npyautogui.click(x=74, y=202)\ntime.sleep(1)\n\n#pyautogui.write(\"e-mail@gmail.com\")\npyautogui.write(\"ronaldcontact2019@gmail.com\")\npyautogui.press(\"tab\") # seleciona o e-mail\npyautogui.press(\"tab\") # pula para campo assunto\npyperclip.copy(\"Relatório automatizado por Python #Ronald#\")\npyautogui.hotkey(\"ctrl\",\"v\") # escreve o assunto\npyautogui.press(\"tab\") # pula para campo conteudo\n\ntime.sleep(1)\ntexto = f\"\"\"\nPrezados, bom dia\n\nO faturamento de ontem foi de: R$ {faturamento:,.2f}\nA quantidade de produto foi de: R$ {quantidade:,}\n\nAbs\nRonald SS\"\"\"\npyperclip.copy(texto)\npyautogui.hotkey(\"ctrl\",\"v\")\n\n# Apertar ctrl + enter para enviar e-mail\npyautogui.hotkey(\"ctrl\",\"enter\")\n\n\n# -----------------------------------------------\n\n### Use esse código para descobrir qual a posição de um item que queira clicar\n##Lembre-se: a posição na sua tela é diferente da posição na minha tela\n\n#time.sleep(4)\n#pyautogui.position()\n\n\n\n\n\n", "sub_path": "Aula-01/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2387, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pyautogui.PAUSE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pyautogui.hotkey", "line_number": 14, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 15, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 16, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 42, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 53, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 54, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 55, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 56, "usage_type": "call"}, {"api_name": 
"time.sleep", "line_number": 57, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 64, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 65, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 66, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 67, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 68, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 80, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 81, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "457260978", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 6 15:10:04 2018\r\n\r\n@author: Administrator\r\n\r\n实现了加载图片以及利用鼠标进行图像区域交互并进行剪裁并另存为新图片的操作\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np \r\nglobal img\r\nglobal point1\r\nglobal point2,i\r\ni=0\r\ndef use_mouse(event,x,y,flags,param): #参数必须要写好 不写编译不通过\r\n global img\r\n global point1\r\n global point2\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n point1=(x,y)\r\n print(point1)\r\n cv2.circle(img,point1,1,(255,255,255),1)\r\n elif event==(cv2.EVENT_FLAG_LBUTTON):\r\n point2=(x,y)\r\n cv2.rectangle(img,point1,point2,(255,255,255),1)\r\n elif event==cv2.EVENT_LBUTTONUP:\r\n point2=(x,y)\r\n print(point2)\r\n cv2.rectangle(img,point1,point2,(255,255,255),1)\r\n #img_width=abs(point1[0]-point2[0])\r\n #img_height=abs(point1[1]-point2[1])\r\n #print('value is %d' %(point1[1]+img_height))\r\n #img1=img[point1[1]+1:point1[1]+img_height,point1[0]+1:point1[0]+img_width] #python中建坐标是横x竖y 但是在切片中先写y再写x\r\n #cv2.imwrite('10'+str(i)+'.jpg',img1)\r\n\r\ndef main():\r\n global img,i\r\n img=cv2.imread(str(i)+'.jpg')\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n cv2.namedWindow('image')\r\n cv2.setMouseCallback('image',use_mouse)\r\n while True:\r\n cv2.imshow('image',img)\r\n if cv2.waitKey(1)==ord('1'):\r\n break\r\n cv2.destroyAllWindows()\r\n \r\nif __name__==\"__main__\":\r\n main()", "sub_path": "02_getpoints.py", "file_name": "02_getpoints.py", "file_ext": "py", "file_size_in_byte": 1553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.EVENT_FLAG_LBUTTON", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.EVENT_LBUTTONUP", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 45, "usage_type": "call"}, {"api_name": 
"cv2.destroyAllWindows", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "184131830", "text": "import json, html2text, logging\nfrom datetime import datetime\nfrom google.appengine.ext import ndb, blobstore\nfrom sendgrid import Mail, SendGridClient\nfrom smtpapi import *\nfrom dkc import *\nfrom models import *\n\nclass ApplicationOverview(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n self._serve_page()\n\n def _serve_page(self):\n config = ndb.Key(Settings, 'config').get()\n template_values = {\n 'user_id': self.user.get_id(),\n 'application_url': '/application/overview',\n 'config': config,\n }\n self.render_application('application-overview.html', template_values)\n\nclass ApplicationProfile(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n applicant = self.user\n application = applicant.application.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify profile by %s\", applicant.email)\n self._serve_page()\n return\n\n applicant.first_name = self.request.get('first-name')\n applicant.last_name = self.request.get('last-name')\n applicant.grade = self.request.get('grade')\n applicant.address = self.request.get('address')\n applicant.city = self.request.get('city')\n applicant.zip_code = self.request.get('zip-code')\n applicant.phone_number = self.request.get('phone-number')\n applicant.division = self.request.get('division')\n applicant.ltg = self.request.get('ltg')\n applicant.school = self.request.get('school')\n applicant.school_address = self.request.get('school-address')\n applicant.school_city = self.request.get('school-city')\n applicant.school_zip_code = self.request.get('school-zip-code')\n applicant.club_president = self.request.get('club-president')\n applicant.club_president_phone_number = self.request.get('club-president-phone-number')\n applicant.faculty_advisor = self.request.get('faculty-advisor')\n applicant.faculty_advisor_phone_number = self.request.get('faculty-advisor-phone-number')\n applicant.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/profile'\n }\n self.render_application('application-profile.html', template_values)\n\nclass ApplicationPersonalStatement(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify personal statement by %s\", applicant.email)\n self._serve_page()\n return\n\n application.personal_statement_choice = self.request.get(\"personal-statement-choice\")\n application.personal_statement = self.request.get('personal-statement')\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/personal-statement'\n }\n self.render_application('application-personal_statement.html', template_values)\n\nclass ApplicationProjects(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify projects by %s\", applicant.email)\n self._serve_page()\n return\n\n international_project_sections = self.request.get_all('international-projects-section')\n 
international_project_events = self.request.get_all('international-projects-event')\n international_project_descriptions = self.request.get_all('international-projects-description')\n application.international_projects = []\n for i in range(0, len(international_project_sections)):\n application.international_projects.append(InternationalProject(section=international_project_sections[i], event=international_project_events[i], description=international_project_descriptions[i]))\n\n district_project_events = self.request.get_all('district-projects-event')\n district_project_charities = self.request.get_all('district-projects-charity')\n district_project_descriptions = self.request.get_all('district-projects-description')\n application.district_projects = []\n for i in range(0, len(district_project_events)):\n application.district_projects.append(DistrictProject(event=district_project_events[i], charity=district_project_charities[i], description=district_project_descriptions[i]))\n\n divisional_dates = self.request.get_all('divisional-meeting-date')\n divisional_locations = self.request.get_all('divisional-meeting-location')\n application.divisionals = []\n for i in range(0, len(divisional_dates)):\n application.divisionals.append(Divisional(date=divisional_dates[i], location=divisional_locations[i]))\n\n division_project_events = self.request.get_all('division-projects-event')\n division_project_locations = self.request.get_all('division-projects-location')\n division_project_descriptions = self.request.get_all('division-projects-description')\n application.division_projects = []\n for i in range(0, len(division_project_events)):\n application.division_projects.append(GeneralProject(event=division_project_events[i], location=division_project_locations[i], description=division_project_descriptions[i]))\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/projects'\n }\n self.render_application('application-projects.html', template_values)\n\nclass ApplicationInvolvement(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify involvement by %s\", applicant.email)\n self._serve_page()\n return\n\n application.key_club_week_mon = self.request.get('key-club-week-monday')\n application.key_club_week_tue = self.request.get('key-club-week-tuesday')\n application.key_club_week_wed = self.request.get('key-club-week-wednesday')\n application.key_club_week_thu = self.request.get('key-club-week-thursday')\n application.key_club_week_fri = self.request.get('key-club-week-friday')\n\n application.attendance_dtc = self.request.get('attendance-dtc') == 'on'\n application.attendance_fall_rally = self.request.get('attendance-fall-rally') == 'on'\n application.attendance_kamp_kiwanis = self.request.get('attendance-kamp-kiwanis') == 'on'\n application.attendance_key_leader = self.request.get('attendance-key-leader') == 'on'\n application.attendance_ltc = self.request.get('attendance-ltc') == 'on'\n application.attendance_icon = self.request.get('attendance-icon') == 'on'\n\n application.positions = self.request.get('positions')\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n config = ndb.Key(Settings, 'config').get()\n template_values = {\n 'application_url': '/application/involvement',\n 'config': 
config\n }\n self.render_application('application-involvement.html', template_values)\n\nclass ApplicationActivities(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify profile by %s\", applicant.email)\n self._serve_page()\n return\n\n if len(self.request.get_all('kiwanis-one-day-event')) > 0:\n application.kiwanis_one_day = GeneralProject(event=self.request.get('kiwanis-one-day-event'), location=self.request.get('kiwanis-one-day-location'), description=self.request.get('kiwanis-one-day-description'))\n else:\n application.kiwanis_one_day = None\n\n k_family_projects_events = self.request.get_all('k-family-projects-event')\n k_family_projects_locations = self.request.get_all('k-family-projects-location')\n k_family_projects_descriptions = self.request.get_all('k-family-projects-description')\n application.k_family_projects = []\n for i in range(0, len(k_family_projects_events)):\n application.k_family_projects.append(GeneralProject(event=k_family_projects_events[i], location=k_family_projects_locations[i], description=k_family_projects_descriptions[i]))\n\n interclub_projects_events = self.request.get_all('interclub-projects-event')\n interclub_projects_locations = self.request.get_all('interclub-projects-location')\n interclub_projects_descriptions = self.request.get_all('interclub-projects-description')\n application.interclub_projects = []\n for i in range(0, len(interclub_projects_events)):\n application.interclub_projects.append(GeneralProject(event=interclub_projects_events[i], location=interclub_projects_locations[i], description=interclub_projects_descriptions[i]))\n\n application.advocacy_cause = self.request.get('advocacy-cause')\n application.advocacy_description = self.request.get('advocacy-description')\n\n application.committee = self.request.get('committee')\n application.committee_type = self.request.get('committee-type')\n application.committee_description = self.request.get('committee-description')\n\n application.divisional_newsletter = self.request.get('divisional-newsletter') == 'on'\n if application.divisional_newsletter:\n application.divisional_newsletter_info = self.request.get('divisional-newsletter-info')\n application.district_newsletter = self.request.get('district-newsletter') == 'on'\n if application.district_newsletter:\n application.district_newsletter_info = self.request.get('district-newsletter-info')\n application.district_website = self.request.get('district-website') == 'on'\n if application.district_website:\n application.district_website_info = self.request.get('district-website-info')\n\n other_projects_events = self.request.get_all('other-projects-event')\n other_projects_locations = self.request.get_all('other-projects-location')\n other_projects_descriptions = self.request.get_all('other-projects-description')\n application.other_projects = []\n for i in range(0, len(other_projects_events)):\n application.other_projects.append(GeneralProject(event=other_projects_events[i], location=other_projects_locations[i], description=other_projects_descriptions[i]))\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/activities',\n }\n self.render_application('application-activities.html', template_values)\n\nclass ApplicationOther(BaseHandler):\n\n 
@user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify scoring by %s\", applicant.email)\n self._serve_page()\n return\n\n if self.request.get('early-submission-checkbox'):\n application.early_submission_points = self.request.get('early-submission-points')\n else:\n application.early_submission_points = \"Any section\"\n\n if self.request.get('recommender-checkbox'):\n application.recommender_points = self.request.get('recommender-points')\n else:\n application.recommender_points = \"No Recommendation\"\n\n application.outstanding_awards = self.request.get('outstanding-awards')\n\n application.scoring_reason_two = self.request.get('scoring-reason-two')\n application.scoring_reason_three = self.request.get('scoring-reason-three')\n application.scoring_reason_four = self.request.get('scoring-reason-four')\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n config = ndb.Key(Settings, 'config').get()\n template_values = {\n 'application_url': '/application/other',\n 'config': config\n }\n self.render_application('application-other.html', template_values)\n\nclass ApplicationVerification(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n applicant = self.user\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if self._no_verify() or application.submit_time:\n logging.info(\"Attempt to modify verification by %s\", applicant.email)\n self._serve_page()\n return\n\n task = self.request.get('task')\n if task != 'applicant':\n user_id = self.user.get_id()\n token = self.user_model.create_signup_token(user_id)\n verification_url = self.uri_for('verification', type='v', user_id=user_id, signup_token=token, _full=True)\n logging.info(verification_url)\n\n config = ndb.Key(Settings, 'config').get()\n sg = SendGridClient(config.sendgrid_username, config.sendgrid_password, secure=True)\n\n verification_email = Mail(from_name=\"NYDKC Awards Committee\",\n from_email=\"recognition@nydkc.org\",\n subject=\"Distinguished Key Clubber Application Verification for %s %s\" % (applicant.first_name, applicant.last_name)\n )\n\n verifier = \"\"\n if task == 'ltg':\n application.verification_ltg_email = self.request.get('ltg-email')\n application.verification_ltg_token = token\n application.verification_ltg_sent = True\n verification_email.add_to(application.verification_ltg_email)\n verifier = \"Lieutenant Governor \" + applicant.ltg.title()\n elif task == 'club-president':\n application.verification_club_president_email = self.request.get('club-president-email')\n application.verification_club_president_token = token\n application.verification_club_president_sent = True\n verification_email.add_to(application.verification_club_president_email)\n verifier = \"Club President \" + applicant.club_president.title()\n elif task == 'faculty-advisor':\n application.verification_faculty_advisor_email = self.request.get('faculty-advisor-email')\n application.verification_faculty_advisor_token = token\n application.verification_faculty_advisor_sent = True\n verification_email.add_to(application.verification_faculty_advisor_email)\n verifier = \"Faculty Advisor \" + applicant.faculty_advisor.title()\n\n template_values = {\n 'applicant': applicant,\n 'verification_url': verification_url,\n 
'verifier': verifier\n }\n verification_email.set_html(JINJA_ENVIRONMENT.get_template('verification-email.html').render(template_values))\n htmlhandler = html2text.HTML2Text()\n verification_email.set_text(htmlhandler.handle(verification_email.html).encode(\"UTF+8\"))\n verification_email.add_unique_arg('user_id', str(user_id))\n\n code, response = sg.send(verification_email)\n response = json.loads(response)\n if response[\"message\"] == \"error\":\n logging.error((\"Problem with sending email to %s: \" % verification_email.to) + str(response[\"errors\"]))\n self._serve_page()\n return\n else:\n application.verification_applicant = True\n application.verification_applicant_date = datetime.now()\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/verification',\n 'no_verify': self._no_verify()\n }\n self.render_application('application-verification.html', template_values)\n\n def _no_verify(self):\n applicant = self.user\n no_verify = (applicant.first_name == '' or applicant.first_name == None)\\\n or (applicant.last_name == '' or applicant.last_name == None)\\\n or (applicant.school == '' or applicant.school == None)\\\n or (applicant.division == '' or applicant.division == None)\\\n or (applicant.ltg == '' or applicant.ltg == None)\\\n or (applicant.club_president == '' or applicant.club_president == None)\\\n or (applicant.club_president_phone_number == '' or applicant.club_president_phone_number == None)\\\n or (applicant.faculty_advisor == '' or applicant.faculty_advisor == None)\\\n or (applicant.faculty_advisor_phone_number == '' or applicant.faculty_advisor_phone_number == None)\n return no_verify\n\nclass ApplicationSubmit(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page(self._not_complete())\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n not_complete = self._not_complete()\n if True in not_complete.values(): # If there is an error\n self.response.set_status(204)\n self._serve_page(errors=self._not_complete())\n else:\n applicant = self.user\n application.submit_time = datetime.now()\n application.put()\n\n config = ndb.Key(Settings, 'config').get()\n sg = SendGridClient(config.sendgrid_username, config.sendgrid_password, secure=True)\n\n verification_email = Mail(from_name=\"NYDKC Awards Committee\",\n from_email=\"recognition@nydkc.org\",\n subject=\"DKC Application Confirmation for %s %s\" % (applicant.first_name, applicant.last_name),\n to=applicant.email\n )\n\n template_values = {\n 'applicant': applicant,\n 'application': application\n }\n verification_email.set_html(JINJA_ENVIRONMENT.get_template('confirmation-email.html').render(template_values))\n htmlhandler = html2text.HTML2Text()\n verification_email.set_text(htmlhandler.handle(verification_email.html).encode(\"UTF+8\"))\n\n code, response = sg.send(verification_email)\n response = json.loads(response)\n if response[\"message\"] == \"error\":\n logging.error((\"Problem with sending email to %s: \" % verification_email.to) + str(response[\"errors\"]))\n self._serve_page()\n return\n\n self.redirect('/application')\n\n def _serve_page(self, errors={'profile':False, 'personal_statement':False, 'projects':False, 'involvement':False, 'activities':False, 'other':False, 'verification':False}):\n template_values = {\n 'user_id': self.user.get_id(),\n 'application_url': '/application/submit',\n 'profile': errors['profile'],\n 
'personal_statement': errors['personal_statement'],\n 'projects': errors['projects'],\n 'involvement': errors['involvement'],\n 'activities': errors['activities'],\n 'other': errors['other'],\n 'verification': errors['verification']\n }\n self.render_application('application-submit.html', template_values)\n\n def _not_complete(self):\n applicant = self.user\n application = applicant.application.get()\n\n not_complete_profile = (applicant.first_name == None or applicant.first_name == '')\\\n or (applicant.last_name == None or applicant.last_name == '')\\\n or (applicant.school == None or applicant.school == '')\\\n or (applicant.division == None or applicant.division == '')\\\n or (applicant.ltg == None or applicant.ltg == '')\\\n or (applicant.club_president == None or applicant.club_president == '')\\\n or (applicant.club_president_phone_number == None or applicant.club_president_phone_number == '')\\\n or (applicant.faculty_advisor == None or applicant.faculty_advisor == '')\\\n or (applicant.faculty_advisor_phone_number == None or applicant.faculty_advisor_phone_number == '')\\\n\n not_complete_personal_statement = (application.personal_statement == None or application.personal_statement == '')\n\n not_complete_projects = (len(application.international_projects) == 0)\\\n and (len(application.district_projects) == 0)\\\n and (len(application.divisionals) == 0)\\\n and (len(application.division_projects) == 0)\\\n and (application.scoring_reason_two == None or application.scoring_reason_two == '')\n\n not_complete_involvement = (application.key_club_week_mon == None or application.key_club_week_mon == '')\\\n and (application.key_club_week_tue == None or application.key_club_week_tue == '')\\\n and (application.key_club_week_wed == None or application.key_club_week_wed == '')\\\n and (application.key_club_week_thu == None or application.key_club_week_thu == '')\\\n and (application.key_club_week_fri == None or application.key_club_week_fri == '')\\\n and (application.attendance_dtc == None)\\\n and (application.attendance_fall_rally == None)\\\n and (application.attendance_kamp_kiwanis == None)\\\n and (application.attendance_key_leader == None)\\\n and (application.attendance_ltc == None)\\\n and (application.attendance_icon == None)\\\n and (application.positions == None or application.positions == '')\\\n and (application.scoring_reason_three == None or application.scoring_reason_three == '')\n\n not_complete_activities = (application.kiwanis_one_day == None)\\\n and (len(application.k_family_projects) == 0)\\\n and (len(application.interclub_projects) == 0)\\\n and (application.advocacy_cause == None or application.advocacy_cause == '')\\\n and (application.committee == None or application.committee == '')\\\n and (application.divisional_newsletter == None)\\\n and (application.district_newsletter == None)\\\n and (application.district_website == None)\\\n and (len(application.other_projects) == 0)\\\n and (application.scoring_reason_four == None or application.scoring_reason_four == '')\n\n verification_count = 0\n if application.verification_ltg:\n verification_count += 1\n if application.verification_club_president:\n verification_count += 1\n if application.verification_faculty_advisor:\n verification_count += 1\n if application.verification_applicant:\n verification_count += 1\n not_complete_verification = verification_count < 3 # Need at least 3 of 4 verifications\n\n not_complete_other = (not_complete_projects\n or not_complete_personal_statement\\\n or 
not_complete_involvement\\\n or not_complete_activities\\\n or application.outstanding_awards == None or application.outstanding_awards == '')\n\n return {'profile': not_complete_profile,\n 'personal_statement': not_complete_personal_statement,\n 'projects': not_complete_projects,\n 'involvement': not_complete_involvement,\n 'activities': not_complete_activities,\n 'other': not_complete_other,\n 'verification': not_complete_verification}\n", "sub_path": "dkc/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 24939, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "google.appengine.ext.ndb.Key", "line_number": 20, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 20, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 78, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 78, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 82, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 105, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 105, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 109, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 157, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 157, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 161, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 184, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 184, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 199, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 199, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 203, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 267, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 267, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 271, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 295, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 295, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 311, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 311, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 315, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 324, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 326, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 326, "usage_type": "name"}, {"api_name": "sendgrid.SendGridClient", "line_number": 327, "usage_type": "call"}, {"api_name": "sendgrid.Mail", "line_number": 329, "usage_type": "call"}, {"api_name": "html2text.HTML2Text", "line_number": 360, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 365, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 367, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 372, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 372, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 
405, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 405, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 414, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 414, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 417, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 417, "usage_type": "name"}, {"api_name": "sendgrid.SendGridClient", "line_number": 418, "usage_type": "call"}, {"api_name": "sendgrid.Mail", "line_number": 420, "usage_type": "call"}, {"api_name": "html2text.HTML2Text", "line_number": 431, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 435, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 437, "usage_type": "call"}]} +{"seq_id": "462794525", "text": "import argparse\nimport datetime\nimport os\n\nfrom time import sleep\n\nimport settings\nfrom dal.ae2.ae2_transaction import retrieve_all_experiments\nfrom dal.biostudies.biostudies_transaction import get_ae_submissions\nfrom lsf.job import Job\n\n__author__ = 'Ahmed G. Ali'\n\nlog_file = 'RUNNER_' + datetime.datetime.utcnow().isoformat().split('.')[0].replace(':', '_')\nlog_file = os.path.join(settings.LOG, log_file + '.csv')\nf_logger = None\n\n\nclass Experiment:\n def __init__(self, acc):\n self.acc = acc\n\n\ndef get_exp_command(acc, skip_copy=False, sync=False):\n cmd = \"export PYTHONPATH=\\\"${PYTHONPATH}:/nfs/biostudies/pyBiostudies\\\"; export \" \\\n \"LD_LIBRARY_PATH=/nfs/biostudies/instantclient_12_2:$LD_LIBRARY_PATH;source \" \\\n \"/nfs/biostudies/pyBiostudies/virtual_env/bin/activate;python /nfs/biostudies/pyBiostudies/pagetab_loader.py -sr \" \\\n \" \" + acc\n if skip_copy:\n cmd += ' -sc'\n if not sync:\n cmd += ' -asyn'\n return cmd\n\n\ndef main(acc=None, limit=0, meta_data=False, max_jobs=100, force_load=False, log_name=None, sync=False, min_date=None):\n global log_file, f_logger\n if log_name:\n log_file = os.path.join(settings.LOG, log_name + '.csv')\n f_logger = open(log_file, 'a')\n exp = []\n in_run = []\n if acc:\n for a in acc:\n exp.append(a)\n else:\n exps, con = retrieve_all_experiments(min_date=min_date)\n # exps, _ = retrieve_public_experiments()\n exp = sorted(list(set([e['acc'] for e in exps])))\n\n jobs = []\n\n exp_to_load = exp\n if not force_load:\n added_experiments = sorted([i['accNo'] for i in get_ae_submissions()])\n exp_to_load = list(set(exp).difference(set(added_experiments)))\n if limit:\n print('Loading %d Experiments' % limit)\n else:\n limit = len(exp_to_load)\n print('Loading %d Experiments' % len(exp_to_load))\n # exit()\n counter = 0\n if exp_to_load:\n while True:\n while len(jobs) < max_jobs and exp_to_load and counter < limit:\n d = exp_to_load.pop()\n\n if d in in_run:\n continue\n in_run.append(d)\n job = Job(name=d, command=get_exp_command(acc=d, skip_copy=meta_data, sync=sync),\n queue=settings.LSF_QUEUE, user=settings.LSF_USER, memory=3072,\n start_time=datetime.datetime.utcnow().isoformat())\n job.submit()\n jobs.append(job)\n if limit:\n counter += 1\n if not jobs:\n break\n\n for j in jobs:\n try:\n live = j.is_alive()\n except Exception as e:\n live = False\n f_logger.writelines(['\\t'.join([j.name, j.start_time, datetime.datetime.utcnow().isoformat(),\n\n \"%s\" % (str(e) + j.error)]) + '\\n'])\n if not live:\n if j.error or (j.out is not None and 'ERROR' in j.out):\n try:\n msg = j.out.split('')[1].split('')[0]\n except:\n msg = 'Error retrieving message'\n f_logger.writelines([' 
\\t'.join([j.name, j.start_time, datetime.datetime.utcnow().isoformat(),\n '\"Error :%s\\n%s\"' % (j.error, msg)]) + '\\n'])\n else:\n f_logger.writelines(['\\t'.join([j.name, j.start_time, datetime.datetime.utcnow().isoformat(),\n \"Loaded\"]) + '\\n'])\n if j in jobs:\n jobs.remove(j)\n print(\"%d jobs running!\" % len(jobs))\n\n sleep(10)\n f_logger.close()\n\n\ndef parse_arguments():\n arg_parser = argparse.ArgumentParser(description='Migrates ArrayExpress experiments to Biostudies.')\n arg_parser.add_argument('-acc', '--accessions', metavar='E-MTAB-xxxx', nargs='+',\n help='''The accession number(s) for the experiment.''')\n arg_parser.add_argument('-l', '--limit', type=int, default=0,\n help='Number of studies to be migrated')\n arg_parser.add_argument('-lg', '--log', type=str, default=None,\n help='Log file name.')\n arg_parser.add_argument('-j', '--job_limit', type=int, default=100,\n help='Max number of lsf jobs executed simultaneously.')\n arg_parser.add_argument('-m', '--meta-data', action='store_true',\n help='Load metadata only and skip updating files.')\n arg_parser.add_argument('-f', '--force_load', action='store_true',\n help='Force overriding the experiment in Biostudies')\n arg_parser.add_argument('-sync',\n '--sync', action='store_true',\n help='Submit in sync mode')\n arg_parser.add_argument('-d', '--min_date', type=lambda d: datetime.datetime.strptime(d, '%Y-%m-%d'),\n help='Minimum modified date in AE database.')\n\n return arg_parser\n\n\nif __name__ == '__main__':\n parser = parse_arguments()\n args = parser.parse_args()\n acc = args.accessions\n limit = args.limit\n force = args.force_load\n log = args.log\n sync = args.sync\n min_date = args.min_date.date().isoformat() if args.min_date else None  # -d is optional; avoid AttributeError on None\n number = 0\n jobs = args.job_limit\n main(acc=acc, limit=limit, meta_data=args.meta_data, max_jobs=jobs, force_load=force, log_name=log, sync=sync, min_date=min_date)\n\n", "sub_path": "lsf/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 5686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "settings.LOG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "settings.LOG", "line_number": 39, "usage_type": "attribute"}, {"api_name": "dal.ae2.ae2_transaction.retrieve_all_experiments", "line_number": 47, "usage_type": "call"}, {"api_name": "dal.biostudies.biostudies_transaction.get_ae_submissions", "line_number": 55, "usage_type": "call"}, {"api_name": "lsf.job.Job", "line_number": 72, "usage_type": "call"}, {"api_name": "settings.LSF_QUEUE", "line_number": 73, "usage_type": "attribute"}, {"api_name": "settings.LSF_USER", "line_number": 73, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 96, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 96, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 99, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 105, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 126, "usage_type": "attribute"}]} +{"seq_id": "66001592", "text": "# -*- coding: utf-8 -*-\nfrom config import Config\nimport logging\nimport logging.handlers\n\ndef Logger(name):\n # 로거 인스턴스를 만든다\n logger = logging.getLogger(name)\n # 환경변수를 읽어서 로깅 레벨과 로그를 남길 파일의 경로를 변수에 저장한다\n if Config.LOG[\"level\"] == 'DEBUG':\n fomatter = logging.Formatter(\"%(asctime)s[%(levelname)s|%(name)s,%(lineno)s] %(message)s\")\n loggerLevel = logging.DEBUG\n else:\n fomatter = logging.Formatter(\"%(asctime)s[%(name)s] %(message)s\")\n if Config.LOG[\"level\"] == 'INFO':\n loggerLevel = logging.INFO\n else:\n loggerLevel = logging.ERROR\n\n logger.setLevel(loggerLevel)\n # 스트림과 파일로 로그를 출력하는 핸들러를 각각 만든다.\n fileHandler = logging.handlers.RotatingFileHandler(Config.LOG[\"file\"], maxBytes=1024 * 1024 * int(Config.LOG[\"maxmb\"]), backupCount=int(Config.LOG[\"backupcount\"]), encoding=\"utf-8\")\n streamHandler = logging.StreamHandler()\n # 각 핸들러에 포매터를 지정한다.\n fileHandler.setFormatter(fomatter)\n streamHandler.setFormatter(fomatter)\n # 로거 인스턴스에 스트림 핸들러와 파일핸들러를 붙인다.\n logger.addHandler(fileHandler)\n logger.addHandler(streamHandler)\n return logger\n", "sub_path": "logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "config.Config.LOG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 10, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 14, "usage_type": "call"}, {"api_name": "config.Config.LOG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 15, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 22, "usage_type": "attribute"}, {"api_name": "config.Config.LOG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.StreamHandler", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "91210363", "text": "#\n# Copyright (C) 2017 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"create jobresults table\n\nRevision ID: 429a312c5e85\nRevises: 1bb42ff54435\nCreate Date: 2017-03-30 07:36:44.830095\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '429a312c5e85'\ndown_revision = '1bb42ff54435'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\nimport dci.common.utils as utils\n\n\ndef upgrade():\n op.create_table(\n 'tests_results',\n sa.Column('id', postgresql.UUID(as_uuid=True),\n primary_key=True, default=utils.gen_uuid),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=False),\n sa.Column('name', sa.String(length=255), nullable=False),\n sa.Column('total', sa.Integer()),\n sa.Column('success', sa.Integer()),\n sa.Column('skips', sa.Integer()),\n sa.Column('failures', sa.Integer()),\n sa.Column('errors', sa.Integer()),\n sa.Column('time', sa.Integer()),\n sa.Column('job_id', postgresql.UUID(as_uuid=True),\n sa.ForeignKey('jobs.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('tests_results_job_id_idx', 'job_id'),\n sa.Column('file_id', postgresql.UUID(as_uuid=True),\n sa.ForeignKey('files.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('tests_results_file_id_idx', 'file_id')\n )\n\n\ndef downgrade():\n op.drop_table('tests_results')\n", "sub_path": "dci/alembic/versions/429a312c5e85_create_jobresults_table.py", "file_name": "429a312c5e85_create_jobresults_table.py", "file_ext": "py", "file_size_in_byte": 2084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "alembic.op.create_table", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 40, "usage_type": "name"}, {"api_name": "dci.common.utils.gen_uuid", "line_number": 41, "usage_type": "attribute"}, {"api_name": "dci.common.utils", "line_number": 41, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 47, "usage_type": 
"call"}, {"api_name": "sqlalchemy.Column", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 51, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 51, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 51, "usage_type": "name"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.Index", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 55, "usage_type": "name"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.Index", "line_number": 58, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 63, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "78688242", "text": "\"\"\"NewBWPP URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login, logout\n\nfrom main import views as main_views\n\nurlpatterns = [\n url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS\n url(r'^captcha/', include('captcha.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^$', main_views.index, name='index'),\n url(r'^mail/(?P[0-9]+)$', main_views.mailpage, name='mail_page'),\n url(r'^new/$', main_views.Mail, name='new_mail'),\n url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),\n url(r'^register/$', main_views.register, name='register'),\n url(r'^logout/$', logout, {'next_page': '/login'}, name='logout'),\n url(r'^user/$', main_views.user, name='user'),\n url(r'^take/(?P[0-9]+)$', main_views.take, name='take'),\n url(r'^get/(?P[0-9]+)$', main_views.get, name='get'),\n url(r'^quxiao/(?P[0-9]+)$', main_views.quxiao, name='quxiao'),\n url(r'^check/', main_views.check, name='check'),\n url(r'^doing/$', main_views.doing, name='doing'),\n url(r'^xieyi/$',main_views.xieyi,name='xieyi')\n]\n", "sub_path": "NewBWPP/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1794, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "main.views.index", "line_number": 26, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "main.views.mailpage", "line_number": 27, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "main.views.Mail", "line_number": 28, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.login", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "main.views.register", "line_number": 30, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.logout", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "main.views.user", "line_number": 32, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "main.views.take", 
"line_number": 33, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "main.views.get", "line_number": 34, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "main.views.quxiao", "line_number": 35, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "main.views.check", "line_number": 36, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "main.views.doing", "line_number": 37, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 37, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "main.views.xieyi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "236403542", "text": "#!/usr/bin/python3.5\n\nimport socket,time,os,sys,asyncio,queue\n\nasync def work(host,loop):\n\tglobal opencount,closecount,ptime\n\twhile not wq.empty():\n\t\taddr=wq.get()\n\t\tcon=''\n\t\tst=time.time()\n\t\ts=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ts.setblocking(0)\n\t\t#print('[work]host:',i,s)\n\t\ttry:\n\t\t\tcon=await loop.sock_connect(s,addr)\n\t\texcept OSError as err:\n\t\t\tclosecount+=1\n\t\t\terr=str(err)\n\t\t\t#print(addr,err[12:31])\n\t\tif con == None:\n\t\t\topencount+=1\n\t\t\tprint(addr,'open')\n\t\ts.close()\n\t\tptime+=time.time()-st\n\t\t#print('[work]ptime=',ptime)\n\ndef workers_y(a,host,loop):\n\tfor i in range(a):\n\t\tyield work(host,loop)\n\ndef prepare(workers,host,loop):\n\tcount=0\n\tcorus=[]\n\tworkers_g=workers_y(workers,host,loop)\n\twhile True:\n\t\ttry:\n\t\t\tx = next(workers_g)\n\t\texcept:\n\t\t\tbreak\n\t\tcorus.append(x)\n\t\tcount+=1\n\treturn corus\n\nif __name__=='__main__':\n\tst=time.time()\n\tptime=0\n\topencount=0\n\tclosecount=0\n\t\n\twq=queue.Queue()\n\thost=[]\n\tfor i in range(1,65536):\n\t\tx='10.186.64.3'\n\t\ty=i\n\t\tz=(x,y)\n\t\twq.put(z)\n\t\n\tworkers=900\n\tselloop=asyncio.SelectorEventLoop()\n\tasyncio.set_event_loop(selloop)\n\tloop = asyncio.get_event_loop()\n\tcorus = prepare(workers,host,loop)\n\tfs=asyncio.gather(*corus)\n\tloop.run_until_complete(fs)\n\tloop.close()\n\tprint('real time: %.4f'%ptime,'open_counts:',opencount,'close_count:',closecount,'all counts',opencount+closecount)\n\tprint(\"use time: %.4f\"%(time.time()-st))", "sub_path": "py-test/asyncio_socket.py", "file_name": "asyncio_socket.py", "file_ext": "py", "file_size_in_byte": 1376, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 11, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 50, 
"usage_type": "call"}, {"api_name": "asyncio.SelectorEventLoop", "line_number": 59, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop", "line_number": 60, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 61, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "587003901", "text": "from rest_framework import viewsets, status\nfrom rest_framework.response import Response\n\nfrom utils import change_key\n\n\nclass CustomViewSet(viewsets.ModelViewSet):\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n res = serializer.data\n if \"status\" in res.keys():\n res[\"status\"] = str(res[\"status\"])\n return Response({\n \"code\": 200,\n \"data\": res\n })\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response({'code': 200}, status=status.HTTP_201_CREATED, headers=headers)\n\n def put(self, request, *args, **kwargs):\n change_key(request)\n update_fields = [one for one in request.data.keys() if one != self.serializer_class.Meta.model._meta.pk.name]\n self.serializer_class.Meta.model(**request.data).save(update_fields=update_fields)\n return Response({'code': 200, 'msg': '修改成功'})\n\n # def destroy(self, request, *args, **kwargs):\n # instance = self.get_object()\n # self.perform_destroy(instance)\n # return Response({'code': 200}, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n ids = kwargs[\"pk\"].split(\",\")\n self.serializer_class.Meta.model.objects.filter(pk__in=ids).delete()\n return Response({\n \"code\": 200\n })\n", "sub_path": "{{cookiecutter.项目名称}}/{{cookiecutter.初始化app名称}}/custom_viewset.py", "file_name": "custom_viewset.py", "file_ext": "py", "file_size_in_byte": 1619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 7, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.change_key", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "347531973", "text": "import copy, discord, io, pymongo\nfrom discord.ext import commands\nfrom PIL import Image, ImageDraw, ImageFont\n\ndef dev():\n async def pred(ctx):\n return ctx.author.id in ctx.bot.allowedusers\n\n return commands.check(pred)\n\ndef admin():\n # async def pred(ctx): return any(elem in [v for k, v in ctx.bot.modroles.items() if k == \"Admin\"] for elem in [i.id for i in ctx.bot.blurpleguild.fetch_member(ctx.author.id).roles])\n async def pred(ctx): return ctx.bot.modroles['Admin'] in [i.id for i in (await 
ctx.bot.blurpleguild.fetch_member(ctx.author.id)).roles]\n return commands.check(pred)\n\nclass ColoursCog(commands.Cog, name=\"Colours\"):\n\n def __init__(self, bot):\n self.bot = bot\n\n a = pymongo.MongoClient(\"mongodb://localhost:27017/?retryWrites=true&w=majority\")\n self.colourscoll = a.colours\n\n self.bot.colours, self.bot.coloursrgb, self.bot.coloursdict = self.defaultcolours()\n self.bot.empty = \"<:empty:541914164235337728>\"\n self.bot.partners = self.partnercolours()\n\n class image():\n def colouremoji(self, colour, size = 885):\n if len(colour) == 3: colour += [255]\n\n percent = 0.1\n img = self.image.round_rectangle(\n self, (size, size),\n int(round(size * percent, 0)),\n tuple(colour), allcorners = True\n )\n\n image_file_object = io.BytesIO()\n img.save(image_file_object, format='png')\n image_file_object.seek(0)\n\n return image_file_object\n\n\n def round_corner(self, radius, fill):\n \"\"\"Draw a round corner\"\"\"\n corner = Image.new('RGBA', (radius, radius), (0, 0, 0, 0))\n draw = ImageDraw.Draw(corner)\n draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)\n return corner\n\n def round_rectangle(self, size, radius, fill, topleft=False, topright=False, bottomleft=False, bottomright=False, allcorners=False):\n \"\"\"Draw a rounded rectangle\"\"\"\n if allcorners: topleft = topright = bottomleft = bottomright = True\n\n width, height = size\n rectangle = Image.new('RGBA', size, fill)\n corner = self.image.round_corner(self, radius, fill)\n if topleft: rectangle.paste(corner, (0, 0))\n if bottomleft: rectangle.paste(corner.rotate(90), (0, height - radius)) # Rotate the corner and paste it\n if bottomright: rectangle.paste(corner.rotate(180), (width - radius, height - radius))\n if topright: rectangle.paste(corner.rotate(270), (width - radius, 0))\n return rectangle\n\n def defaultcolours(self):\n mydict = self.colourscoll.default\n\n c = {}\n crgb = {}\n cdict = {}\n for i in mydict.find():\n c[i['code']] = i['emoji']\n if i['code'] != \"edit\":\n crgb[i['code']] = tuple(i['rgb'])\n cdict[i['name']] = {\n \"name\": i['name'],\n \"tag\": i['code'],\n \"emoji\": i['emoji'],\n \"guild\": None,\n \"rgb\": tuple(i['rgb'])\n }\n\n return c, crgb, cdict\n\n def partnercolours(self):\n mydict = self.colourscoll.partner\n\n c = {}\n for i in mydict.find():\n c[str(i['name'])] = {\n \"name\": i['name'],\n \"tag\": str(i['tag']),\n \"emoji\": i['emoji'],\n \"guild\": str(i['guild']),\n \"rgb\": tuple(i['rgb']),\n }\n\n return c\n\n @commands.command(name=\"addpcolour\")\n @admin()\n async def addpcolour(self, ctx, guildid, tag, emoji, rgb, *, name):\n \"\"\"Adds a new partner colour to the list\"\"\"\n try:\n rgbformatted = tuple([int(i) for i in rgb.split(',')] + [255])\n if len(rgbformatted) != 4: raise Exception('no')\n except Exception as e:\n return await ctx.send('Not correct rgb formatting - type `R,G,B` (not `(R, G, B)`)')\n\n if emoji.lower() in ['generate', 'gen']:\n img = await self.bot.loop.run_in_executor(None, self.image.colouremoji, self, rgbformatted)\n emoji = await self.uploademoji(copy.copy(img), f\"pl_{tag}\")\n emoji = str(emoji)\n\n mydict = {\n \"name\": name,\n \"tag\": tag,\n \"emoji\": emoji[2:][:-1],\n \"guild\": str(guildid),\n \"rgb\": rgbformatted\n }\n\n await ctx.send('Is this what you want to add? 
(yes/no)\\n' + str(mydict))\n\n def check(message):\n return ctx.author == message.author and message.content.lower() in ['yes', 'no', 'y', 'n'] and message.channel == ctx.message.channel\n\n msg = await self.bot.wait_for('message', check=check)\n \n if msg.content in ['no', 'n']:\n return await ctx.send(\"Ok, cancelled\")\n\n self.colourscoll.partner.insert_many([mydict])\n self.bot.partners = self.partnercolours()\n await ctx.send(f\"{emoji} Ok, done.\")\n\n @commands.command(aliases=['genemoji'])\n @admin()\n async def generateemoji(self, ctx, code, *rgb):\n rgb = [int(i) for i in rgb]\n if len(code) != 4: return await ctx.send(f\"The code must be 4 letters long\")\n\n img = await self.bot.loop.run_in_executor(None, self.image.colouremoji, self, rgb)\n emoji = await self.uploademoji(copy.copy(img), f\"pl_{code}\")\n\n image = discord.File(fp = copy.copy(img), filename = f\"pl_{code}.png\")\n\n await ctx.send(str(emoji), file=image)\n\n async def uploademoji(self, img, name):\n # guild = self.bot.get_guild(559341262302347314)\n guild = self.bot.get_guild(972091742088232970)\n\n toupload = img.read()\n\n emoji = await guild.create_custom_emoji(name=name, image=toupload)\n\n return emoji\n\n @commands.command(hidden = True)\n @dev()\n async def server(self, ctx):\n for s in await self.bot.fetch_guilds(limit=None).flatten():\n print(f\"{str(s)} ({s.id})\")\n\n @commands.command(hidden = True)\n @dev()\n async def servertest(self, ctx):\n channel = ctx.guild.get_channel(573546839085940751)\n\n i = 0\n async for m in channel.history(limit=None):\n i += 1\n if m.author.id == 420675394224521240:\n e = m.embeds[0]\n f = e.fields[0]\n if int(f.value) > 4000:\n print(f\"{e.author.name} - {f.value}\")\n\n # @commands.command()\n # @dev()\n # async def role2019(self, ctx):\n # lrole = ctx.guild.get_role(573011450231259157)\n # drole = ctx.guild.get_role(573011441683005440)\n # nrole = ctx.guild.get_role(705294465631256607)\n\n # i = 0\n # for u in ctx.guild.members:\n # if (lrole in u.roles or drole in u.roles) and nrole not in u.roles:\n # await u.add_roles(nrole)\n # i += 1\n # print(f\"Added to {str(u)}\")\n\n # await ctx.send(f'Done ({i} users!)')\n\n\n # self.bot.colours = {\n # \"brll\": \"pl_brll:541841828844929025\", # Brilliance Red\n # \"hpsq\": \"pl_hpsq:541841829969133571\", # Hypesquad Yellow\n # \"bhnt\": \"pl_bhnt:541841828454858801\", # Bug Hunter Green\n # \"blnc\": \"pl_blnc:541841828652122133\", # Balance Cyan\n # \"ptnr\": \"pl_ptnr:541841829679857664\", # Partner Blue\n # \"blpl\": \"pl_blpl:540761785884737537\", # Blurple\n # \"brvy\": \"pl_brvy:541841829256101899\", # Bravery Purple\n # \"whte\": \"pl_whte:546829770055352340\", # Full White\n # \"ntgr\": \"pl_ntgr:541841829520211968\", # Nitro Grey\n # \"grpl\": \"pl_grpl:541841829453103142\", # Greyple\n # \"ntbl\": \"pl_ntbl:541841829318885396\", # Nitro Blue\n # \"nqbl\": \"pl_nqbl:546829770030317569\", # Not Quite Black\n # \"blank\": \"pl_blank:540761786484391957\", # Blank tile\n # \"edit\": \"pl_edit:540761787662991370\" # Edit tile\n # }\n # self.bot.coloursrgb = {\n # \"brll\": (244, 123, 103, 255),\n # \"hpsq\": (248, 165, 50, 255),\n # \"bhnt\": (72, 183, 132, 255),\n # \"blnc\": (69, 221, 192, 255),\n # \"ptnr\": (65, 135, 237, 255),\n # \"blpl\": (114, 137, 218, 255),\n # \"brvy\": (156, 132, 239, 255),\n # \"whte\": (255, 255, 255, 255),\n # \"ntgr\": (183, 194, 206, 255),\n # \"grpl\": (153, 170, 181, 255),\n # \"ntbl\": (79, 93, 127, 255),\n # \"nqbl\": (44, 47, 51, 255),\n # \"blank\": (114, 137, 218, 
127),\n # }\n # self.bot.empty = \"<:empty:541914164235337728>\"\n \n # self.bot.partners = {\n # 281648235557421056: { # r/Marvel Discord\n # \"name\": \"Marvel Red\",\n # \"tag\": \"mrvl\",\n # \"emoji\": \"pl_mrvl:572564652559564810\",\n # \"guild\": 281648235557421056,\n # \"rgb\": (234, 35, 40, 255),\n # },\n # 272885620769161216: { # Blob Emoji\n # \"name\": \"Blob Yellow\",\n # \"tag\": \"blob\",\n # \"emoji\": \"pl_blob:573101758130421770\",\n # \"guild\": 272885620769161216,\n # \"rgb\": (252, 194, 27, 255),\n # },\n # 316720611453829121: { # N.I.T.R.O.\n # \"name\": \"N.I.T.R.O. Orange\",\n # \"tag\": \"ntro\",\n # \"emoji\": \"pl_ntro:575279584820330498\",\n # \"guild\": 316720611453829121,\n # \"rgb\": (252, 150, 75, 255)\n # },\n # 152517096104919042: { # Rocket League\n # \"name\": \"Rocketeer Blue\",\n # \"tag\": \"rckt\",\n # \"emoji\": \"pl_rckt:574086064671555624\",\n # \"guild\": 152517096104919042,\n # \"rgb\": (0, 156, 222, 255),\n # },\n # 290572012437372931: { # Ping and Salar\n # \"name\": \"Ping and Salar's Red\",\n # \"tag\": \"pgsl\",\n # \"emoji\": \"pl_pgsl:574086064827007027\",\n # \"guild\": 290572012437372931,\n # \"rgb\": (255, 64, 0, 255),\n # },\n # 349243932447604736: { # r/Jailbreak\n # \"name\": \"Cydia Brown\",\n # \"tag\": \"jlbr\",\n # \"emoji\": \"pl_jlbr:574086064923475988\",\n # \"guild\": 349243932447604736,\n # \"rgb\": (165, 107, 77, 255),\n # },\n # 173184118492889089: { # Tatsumaki\n # \"name\": \"Tatsu Emerald\",\n # \"tag\": \"ttsu\",\n # \"emoji\": \"pl_ttsu:574907457995014154\",\n # \"guild\": 173184118492889089,\n # \"rgb\": (23, 161, 103, 255),\n # },\n # 262077211526299648: { # Auttaja\n # \"name\": \"Auttaja Blue\",\n # \"tag\": \"attj\",\n # \"emoji\": \"pl_attj:574907457915060224\",\n # \"guild\": 262077211526299648,\n # \"rgb\": (0, 112, 250, 255),\n # },\n # 228406572756369408: { # r/StarWars\n # \"name\": \"Opening Crawl Yellow\",\n # \"tag\": \"stwr\",\n # \"emoji\": \"pl_stwr:575279585130840102\",\n # \"guild\": 228406572756369408,\n # \"rgb\": (254, 210, 24, 255),\n # },\n # 145166056812576768: { # Ayana\n # \"name\": \"Ayana Pink\",\n # \"tag\": \"ayna\",\n # \"emoji\": \"pl_ayna:575452605292216340\",\n # \"guild\": 145166056812576768,\n # \"rgb\": (198, 59, 104, 255),\n # },\n # 284447205358829570: { # The Furry Nexus\n # \"name\": \"Paw Print Pink\",\n # \"tag\": \"frry\",\n # \"emoji\": \"pl_frry:575567782683607040\",\n # \"guild\": 284447205358829570,\n # \"rgb\": (198, 118, 255, 255)\n # },\n # 304383757975289857: { # Blob Hub\n # \"name\": \"Butterfly Pink\",\n # \"tag\": \"nbhb\",\n # \"emoji\": \"pl_nbhb:575636279383949312\",\n # \"guild\": 304383757975289857,\n # \"rgb\": (233, 160, 214, 255),\n # },\n # 416749164731301888: { # PUBG\n # \"name\": \"Winner Winner Golden Dinner\",\n # \"tag\": \"pubg\",\n # \"emoji\": \"pl_pubg:575808438114844682\",\n # \"guild\": 416749164731301888,\n # \"rgb\": (222, 141, 0, 255)\n # },\n # 478114566509821953: { # r/Google\n # \"name\": \"Google Yellow\",\n # \"tag\": \"goog\",\n # \"emoji\": \"pl_goog:576591834529267732\",\n # \"guild\": 478114566509821953,\n # \"rgb\": (251, 188, 5, 255),\n # },\n # # 360462032811851777: { # Something For Everybody\n # # \"name\": \"\",\n # # \"tag\": \"sfeb\",\n # # \"emoji\": \"pl_sfeb:\",\n # # \"guild\": 360462032811851777,\n # # \"rgb\": ()\n # # },\n # 493351982887862283: { # Pepe Emoji\n # \"name\": \"Pepe Green\",\n # \"tag\": \"pepe\",\n # \"emoji\": \"pl_pepe:577471090973212683\",\n # \"guild\": 493351982887862283,\n # \"rgb\": (91, 144, 
66, 255),\n # },\n # 446658603873730611: { # Raft\n # \"name\": \"Raft Blue\",\n # \"tag\": \"raft\",\n # \"emoji\": \"pl_raft:577597652573749248\",\n # \"guild\": 446658603873730611,\n # \"rgb\": (68, 174, 210, 255),\n # }\n # }\n\nasync def setup(bot):\n await bot.add_cog(ColoursCog(bot))", "sub_path": "cogs/colours.py", "file_name": "colours.py", "file_ext": "py", "file_size_in_byte": 13736, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "discord.ext.commands.check", "line_number": 9, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 9, "usage_type": "name"}, {"api_name": "discord.ext.commands.check", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog", "line_number": 16, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 16, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 21, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 49, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 58, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 113, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 101, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 101, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 145, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 147, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 147, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 138, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 138, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 161, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 161, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 167, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 167, "usage_type": "name"}]} +{"seq_id": "256889129", "text": "from django.shortcuts import render, HttpResponse\r\nfrom django.views import View\r\nfrom django.views.generic import ListView\r\nfrom . 
import models\r\nfrom .models import Lpage\r\n\r\n\r\n# Create your views here.\r\n\r\ndef add_data(request):\r\n for num in range(100):\r\n models.Lpage.objects.create(title='a{}'.format(num))\r\n return HttpResponse('ok')\r\n\r\n\r\nclass Lpage(ListView):\r\n model = Lpage\r\n template_name = 'lpage/lpage1.html'\r\n paginate_by = 10\r\n context_object_name = 'lpage'\r\n # ordering = 'create_time'\r\n page_kwarg = 'page'\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(Lpage, self).get_context_data(**kwargs)\r\n # print(context)\r\n # Fetch the pagination data\r\n paginate_data = self.get_paginate_data(context.get('paginator'), context.get('page_obj'))\r\n # Add it to the context dict so it is passed into the template\r\n context.update(paginate_data)\r\n return context\r\n\r\n def get_paginate_data(self, paginator, page_obj, arround_count=2):\r\n current_page = page_obj.number\r\n\r\n left_has_more = False\r\n right_has_more = False\r\n\r\n if current_page <= arround_count + 2:\r\n left_pages = range(1, current_page)\r\n else:\r\n left_has_more = True\r\n left_pages = range(current_page - arround_count, current_page)\r\n\r\n if current_page >= paginator.num_pages - arround_count - 1:\r\n right_pages = range(current_page + 1, paginator.num_pages + 1)\r\n else:\r\n right_has_more = True\r\n right_pages = range(current_page + 1, current_page + arround_count + 1)\r\n\r\n return {\r\n 'left_pages': left_pages,\r\n 'right_pages': right_pages,\r\n 'current_page': current_page,\r\n 'left_has_more': left_has_more,\r\n 'right_has_more': right_has_more,\r\n }\r\n", "sub_path": "myClass/djangoDemo/apps/lpage/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "models.Lpage.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Lpage", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 13, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "541403856", "text": "import django_tables2 as tables\nfrom .models import Timestep, Symbol\nfrom django.utils.html import format_html\nfrom django.conf import settings\nfrom .utils import get_weather\n\nclass ImageColumn(tables.Column):\n def render(self, value):\n wt = get_weather(value) # revisit this weird reverse lookup\n \n return format_html(\n '<img src=\"{url}\" alt=\"{wt}\">',\n url=(settings.MEDIA_URL + value),\n wt=(wt)\n )\n#'',\n\nclass TimestepTable(tables.Table):\n #def __init__(self,*args,**kwargs):\n # super().__init__(*args,**kwargs)\n # self.base_columns['wind_direction'].verbose_name = 'Rose'\n \n date_header = 'date header'\n step_time = tables.DateTimeColumn(format ='gA')\n wind_direction = tables.Column(verbose_name = 'Rose')\n #symbol = tables.Column(accessor='get_symbol',\n # verbose_name = 'Symbol')\n symbol = ImageColumn(accessor='get_symbol',\n verbose_name = 'Weather')\n \n class Meta:\n model = Timestep\n template_name = 'django_tables2/bootstrap4.html'\n fields = ('step_time',\n #'weather',\n 'symbol', \n 'temperature', \n 'feels_like_temperature',\n 'precipitation',\n 'wind_gust',\n 'wind_direction',\n 'uv',)", "sub_path": "weather/tables.py", "file_name": "tables.py", "file_ext": "py", "file_size_in_byte": 1584, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django_tables2.Column", "line_number": 7, 
"usage_type": "attribute"}, {"api_name": "utils.get_weather", "line_number": 9, "usage_type": "call"}, {"api_name": "django.utils.html.format_html", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "django_tables2.Table", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django_tables2.DateTimeColumn", "line_number": 24, "usage_type": "call"}, {"api_name": "django_tables2.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Timestep", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "462721570", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/9/23 10:23\n# @Author : Tianchiyue\n# @File : model.py\n# @Software: PyCharm Community Edition\n\nimport numpy as np\nfrom keras.layers import Dense, Dropout\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import optimizers, regularizers, callbacks\nfrom sklearn.metrics import accuracy_score\nimport logging\n\n\nclass BaseModel:\n def __init__(self, config):\n self.config = config\n self.model = None\n self.sentence_input = None\n\n def build(self, embedding_matrix):\n pass\n\n def compile(self, embedding_matrix):\n # 文本表示\n rep = self.build(embedding_matrix)\n if self.config['use_mlp']:\n rep = Dropout(self.config['dropout_rate'])(rep)\n rep = Dense(self.config['hidden_dims'], activation=self.config['activation'])(rep)\n rep = Dropout(self.config['dropout_rate'])(rep)\n if self.config['use_l2']:\n predictions = Dense(self.config['num_classes'],\n kernel_regularizer=regularizers.l2(self.config['l2']),\n activation='softmax')(rep)\n else:\n predictions = Dense(self.config['num_classes'],\n activation='softmax')(rep)\n self.model = Model(inputs=[self.sentence_input], outputs=predictions)\n opt = optimizers.get(self.config['optimizer'])\n K.set_value(opt.lr, self.config['learning_rate'])\n self.model.compile(optimizer=opt,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n def predict(self, test_x):\n return self.model.predict(test_x)\n\n # 根据任务改变\n def evaluate(self, valid_x, valid_y):\n v_pred = [i.argmax() for i in self.predict(valid_x)]\n v_true = [i.argmax() for i in valid_y]\n valid_score = BaseModel.score(v_true, v_pred)\n evaluate_list = self.model.evaluate(valid_x, valid_y, verbose=0)\n return evaluate_list[0], evaluate_list[1], valid_score\n\n # @staticmethod\n # def batch_iter(data, labels, batch_size, shuffle=True):\n # num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n def data_generator(self, data, labels, batch_size, num_batches_per_epoch, shuffle=True):\n data_size = len(data)\n while True:\n # Shuffle the data at each epoch\n\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n shuffled_labels = labels[shuffle_indices]\n else:\n shuffled_data = data\n shuffled_labels = labels\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n X = shuffled_data[start_index: end_index]\n y = shuffled_labels[start_index: end_index]\n yield X, y\n\n # return num_batches_per_epoch, data_generator()\n\n def fit(self, train_x, train_y, valid_x, valid_y, predicted=False, filename='trained_models/best.model'):\n lr_decay = callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=self.config['lr_decay_epoch'],\n min_lr=0.01 * 
self.config['learning_rate'])\n csv_log = callbacks.CSVLogger(filename.replace('.model', '.csv'))\n es = callbacks.EarlyStopping(monitor='val_acc', patience=self.config['n_stop'])\n mc = callbacks.ModelCheckpoint(filename, monitor='val_acc', save_best_only=True, save_weights_only=True)\n\n train_steps = int((len(train_y) - 1) / self.config['batch_size']) + 1\n valid_steps = int((len(valid_y) - 1) / self.config['batch_size']) + 1\n train_batches = self.data_generator(train_x, train_y, self.config['batch_size'], train_steps)\n valid_batches = self.data_generator(valid_x, valid_y, self.config['batch_size'], valid_steps)\n hist = self.model.fit_generator(train_batches, train_steps,\n epochs=self.config['epochs'],\n callbacks=[lr_decay, csv_log, es, mc],\n validation_data=valid_batches,\n validation_steps=valid_steps)\n\n # hist = self.model.fit(train_x, train_label, batch_size=self.config['batch_size'], epochs=self.config['epochs'],\n # validation_data=(valid_x, valid_y), callbacks=[lr_decay, csv_log, es, mc])\n best_acc = max(hist.history['val_acc'])\n if predicted:\n self.model.load_weights(filename)\n return self.predict(valid_x), best_acc\n else:\n return best_acc\n\n @staticmethod\n def score(y_true, y_pred):\n return accuracy_score(y_true, y_pred)\n", "sub_path": "models/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 4975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "keras.layers.Dropout", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 34, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.optimizers.get", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 40, "usage_type": "name"}, {"api_name": "keras.backend.set_value", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.random.permutation", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 83, "usage_type": "name"}, {"api_name": "keras.callbacks.CSVLogger", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 85, "usage_type": "name"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 86, "usage_type": "name"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 87, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "323709595", "text": "# Copyright (c) 2020 University of Chicago\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use 
this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_config import cfg\n\nfrom blazar.utils.openstack import base\nfrom oslo_log import log as logging\nfrom manilaclient import client as manila_client\n\n\nmanila_opts = [\n cfg.StrOpt(\n 'manila_api_version',\n default='2',\n help='Manila API version'),\n cfg.StrOpt(\n 'manila_api_microversion',\n default='2.69',\n help='Manila API microversion')\n]\n\nCONF = cfg.CONF\nCONF.register_opts(manila_opts, group='manila')\n\nLOG = logging.getLogger(__name__)\n\n\nclass BlazarManilaClient(object):\n \"\"\"Client class for Manila service.\"\"\"\n\n def __init__(self, **kwargs):\n client_kwargs = base.client_kwargs(**kwargs)\n client_kwargs.setdefault('os_manila_api_version',\n CONF.manila.manila_api_microversion)\n self.manila = manila_client.Client(\n CONF.manila.manila_api_version, **client_kwargs)\n\n def __getattr__(self, attr):\n return getattr(self.manila, attr)\n", "sub_path": "blazar/utils/openstack/manila.py", "file_name": "manila.py", "file_ext": "py", "file_size_in_byte": 1559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "oslo_config.cfg.StrOpt", "line_number": 24, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 24, "usage_type": "name"}, {"api_name": "oslo_config.cfg.StrOpt", "line_number": 28, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 28, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 34, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 34, "usage_type": "name"}, {"api_name": "oslo_log.log.getLogger", "line_number": 37, "usage_type": "call"}, {"api_name": "oslo_log.log", "line_number": 37, "usage_type": "name"}, {"api_name": "blazar.utils.openstack.base.client_kwargs", "line_number": 44, "usage_type": "call"}, {"api_name": "blazar.utils.openstack.base", "line_number": 44, "usage_type": "name"}, {"api_name": "manilaclient.client.Client", "line_number": 47, "usage_type": "call"}, {"api_name": "manilaclient.client", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "250283480", "text": "#!/usr/bin/env python3\n\nimport subprocess\nfrom pathlib import Path\nfrom pprint import pformat\nimport json\nimport re\nfrom . 
import manifest\nfrom .common import WitUserError\nfrom .witlogger import getLogger\nfrom typing import Set # noqa: F401\nfrom functools import lru_cache\nfrom .env import git_reference_workspace\n\nlog = getLogger()\n\n\nclass GitError(Exception):\n pass\n\n\nclass GitCommitNotFound(Exception):\n pass\n\n\nclass BadSource(WitUserError):\n def __init__(self, name, source):\n self.name = name\n self.source = source\n\n def __str__(self):\n return \"Bad remote for '{}':\\n {}\".format(self.name, self.source)\n\n\nverbose_prefix = re.compile(r\"^refs/(?:heads/)?\")\n\n\n# TODO Could speed up validation\n# - use git ls-remote to validate remote exists\n# - use git ls-remote to validate revision for tags and branches\n# - if github repo, check if page exists (or if you get 404)\n\nclass GitRepo:\n \"\"\"\n In memory data structure representing a Git repo package\n It may not be in sync with data structures on the file system\n Note there can be multiple GitRepo objects for the same package\n \"\"\"\n PKG_DEPENDENCY_FILE = \"wit-manifest.json\"\n\n def __init__(self, name, wsroot: Path):\n self.name = name\n self.path = wsroot / name\n # Cache known hashes for quick lookup\n self._known_hashes = set() # type: Set[str]\n\n def _known_hash(self, commit) -> bool:\n \"\"\"Checks if a hash exists in the current repo\"\"\"\n return commit in self._known_hashes\n\n def _add_known_hash(self, commit):\n self._known_hashes.add(commit)\n\n def is_bad_source(self, source):\n tmp = self.path\n self.path = self.path.parent\n proc = self._git_command('ls-remote', source)\n self.path = tmp\n return proc.returncode != 0\n\n # name is needed for generating error messages\n def download(self, source, name):\n if not GitRepo.is_git_repo(self.path):\n self.clone(source, name)\n self.fetch(source, name)\n\n # name is needed for generating error messages\n def clone(self, source, name):\n assert not GitRepo.is_git_repo(self.path), \\\n \"Trying to clone and checkout into existing git repo!\"\n\n cmd = [\"clone\", *self._git_reference_options(), \"--no-checkout\", source, str(self.path)]\n proc = self._git_command(*cmd, working_dir=str(self.path.parent))\n try:\n self._git_check(proc)\n except GitError:\n if self.is_bad_source(source):\n raise BadSource(name, source)\n else:\n raise\n log.info('Cloned {}'.format(self.name))\n\n def _git_reference_options(self):\n \"\"\"\n Use git clone's '--reference' to point at a local repository cache to copy objects/commits\n to save network traffic. 
Any missing objects/commits are downloaded from the true remote.\n Only newer git versions can use '--reference-if-able', so we emulate the 'if-able' bit.\n \"\"\"\n if not git_reference_workspace:\n return []\n paths = [Path(git_reference_workspace) / self.name,\n Path(git_reference_workspace) / (self.name+'.git')]\n for path in paths:\n if path.is_dir():\n return [\"--reference\", str(path), \"--dissociate\"]\n return []\n\n # name is needed for generating error messages\n def fetch(self, source, name):\n # in case source is a remote and we want a commit\n proc = self._git_command('fetch', source)\n # in case source is a file path and we want, for example, origin/master\n self._git_command('fetch', '--all')\n try:\n self._git_check(proc)\n except GitError:\n if self.is_bad_source(source):\n raise BadSource(name, source)\n else:\n raise\n return proc.returncode == 0\n\n def get_head_commit(self) -> str:\n return self.get_commit('HEAD')\n\n @lru_cache(maxsize=None)\n def _get_commit_cached(self, commit):\n return self._get_commit_impl(commit)\n\n def _get_commit_impl(self, commit):\n proc = self._git_command('rev-parse', commit)\n try:\n self._git_check(proc)\n except GitError:\n proc = self._git_command('rev-parse', 'origin/{}'.format(commit))\n try:\n self._git_check(proc)\n except GitError:\n if 'unknown revision or path not in the working tree' in proc.stderr:\n raise GitCommitNotFound\n else:\n raise\n return proc.stdout.rstrip()\n\n def get_commit(self, commit) -> str:\n if self._known_hash(commit):\n result = self._get_commit_cached(commit)\n else:\n result = self._get_commit_impl(commit)\n self._add_known_hash(result)\n return result\n\n @lru_cache(maxsize=None)\n def _get_shortened_rev_cached(self, commit):\n return self._get_shortened_rev_impl(commit)\n\n def _get_shortened_rev_impl(self, commit):\n proc = self._git_command('rev-parse', '--short', commit)\n self._git_check(proc)\n return proc.stdout.rstrip()\n\n def get_shortened_rev(self, commit):\n if self._known_hash(commit):\n return self._get_shortened_rev_cached(commit)\n else:\n return self._get_shortened_rev_impl(commit)\n\n def is_hash(self, ref):\n return self.get_commit(ref) == ref\n\n def is_tag(self, ref):\n proc = self._git_command('tag', '--list', ref)\n self._git_check(proc)\n return ref in proc.stdout.split('\\n')\n\n def has_commit(self, commit) -> bool:\n # rev-parse does not always fail when a commit is missing\n proc = self._git_command('cat-file', '-t', commit)\n return proc.returncode == 0\n\n def have_common_ancestor(self, commits):\n proc = self._git_command('merge-base', '--octopus', *commits)\n return proc.returncode == 0\n\n def get_remote(self) -> str:\n # TODO Do we need to worry about other remotes?\n proc = self._git_command('remote', 'get-url', 'origin')\n self._git_check(proc)\n return proc.stdout.rstrip()\n\n def set_origin(self, source):\n proc = self._git_command('remote', 'set-url', 'origin', source)\n self._git_check(proc)\n\n def clean(self):\n proc = self._git_command('status', '--porcelain')\n self._git_check(proc)\n return proc.stdout == \"\"\n\n def modified(self):\n proc = self._git_command('status', '--porcelain')\n self._git_check(proc)\n for line in proc.stdout.split(\"\\n\"):\n if line.lstrip().startswith(\"M\"):\n return True\n return False\n\n def untracked(self):\n proc = self._git_command('status', '--porcelain')\n self._git_check(proc)\n for line in proc.stdout.split(\"\\n\"):\n if line.lstrip().startswith(\"??\"):\n return True\n return False\n\n def 
modified_manifest(self):\n proc = self._git_command('status', '--porcelain')\n self._git_check(proc)\n for line in proc.stdout.split(\"\\n\"):\n if ((line.lstrip().startswith(\"M\") or line.lstrip().startswith(\"D\"))\n and line.endswith(\"wit-manifest.json\")):\n return True\n return False\n\n @lru_cache(maxsize=None)\n def _commit_to_time_cached(self, hash):\n return self._commit_to_time_impl(hash)\n\n def _commit_to_time_impl(self, hash):\n proc = self._git_command('log', '-n1', '--format=%ct', hash)\n self._git_check(proc)\n return proc.stdout.rstrip()\n\n def commit_to_time(self, hash):\n if self._known_hash(hash):\n return self._commit_to_time_cached(hash)\n else:\n return self._commit_to_time_impl(hash)\n\n def is_ancestor(self, ancestor, current=None):\n proc = self._git_command(\"merge-base\", \"--is-ancestor\", ancestor,\n current or self.get_head_commit())\n return proc.returncode == 0\n\n def read_manifest(self) -> manifest.Manifest:\n mpath = self.manifest_path()\n return manifest.Manifest.read_manifest(mpath, safe=True)\n\n def write_manifest(self, manifest) -> None:\n mpath = self.manifest_path()\n manifest.write(mpath)\n\n def read_manifest_from_commit(self, revision) -> manifest.Manifest:\n proc = self._git_command(\"show\", \"{}:{}\".format(revision, GitRepo.PKG_DEPENDENCY_FILE))\n if proc.returncode:\n log.debug(\"No dependency file found in repo [{}:{}]\".format(revision,\n self.path))\n json_content = [] if proc.returncode else json.loads(proc.stdout)\n return manifest.Manifest.process_manifest(json_content, self.name)\n\n def checkout(self, revision):\n wanted_hash = self.get_commit(revision)\n if self.get_commit('HEAD') != wanted_hash:\n proc_ref = self._git_command(\"show-ref\")\n self._git_check(proc_ref)\n rev_names = proc_ref.stdout.rstrip().split('\\n')\n rev_names = [r.split(' ') for r in rev_names]\n rev_names = [r[1] for r in rev_names if r[0] == wanted_hash]\n rev_names = [r for r in rev_names if not r.startswith('refs/remotes')]\n rev_names = [verbose_prefix.sub('', r) for r in rev_names]\n\n suggestions = ''\n if len(rev_names) > 1:\n suggestions = ' ({})'.format(', '.join(rev_names))\n\n if len(rev_names) != 1:\n rev = revision\n log.info(\"Checking out '{}' at '{}'{}\".format(self.name, rev, suggestions))\n else:\n rev = rev_names[0]\n log.info(\"Checking out '{}' at '{}' ({})\".format(self.name, rev, revision))\n\n proc = self._git_command(\"checkout\", rev)\n self._git_check(proc)\n else:\n proc = self._git_command(\"checkout\")\n self._git_check(proc)\n\n # If our revision was a branch or tag, get the actual commit\n self.revision = self.get_head_commit()\n\n def manifest_path(self):\n return self.path / self.PKG_DEPENDENCY_FILE\n\n def manifest(self, source, revision):\n return {\n 'name': self.name,\n 'source': source,\n 'commit': revision,\n }\n\n def _git_command(self, *args, working_dir=None):\n cwd = str(self.path) if working_dir is None else str(working_dir)\n log.debug(\"Executing [{}] in [{}]\".format(' '.join(['git', *args]), cwd))\n proc = subprocess.run(['git', *args],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n cwd=cwd)\n log.spam(\" stderr: [{}]\".format(proc.stderr.rstrip()))\n log.spam(\" stdout: [{}]\".format(proc.stdout.rstrip()))\n return proc\n\n def _git_check(self, proc):\n if proc.returncode:\n msg = \"Command [{}] in [{}] exited with non-zero exit status [{}]\\n\".format(\n ' '.join(proc.args), str(self.path), proc.returncode)\n msg += \"stdout: [{}]\\n\".format(proc.stdout.rstrip())\n msg 
+= \"stderr: [{}]\\n\".format(proc.stderr.rstrip())\n raise GitError(msg)\n\n return proc.returncode\n\n @staticmethod\n def path_to_name(path):\n \"\"\"\n >>> GitRepo.path_to_name(\"a.git\")\n 'a'\n >>> GitRepo.path_to_name(\"/a/b/c/def.git\")\n 'def'\n >>> GitRepo.path_to_name(\"ghi\")\n 'ghi'\n \"\"\"\n return Path(path).name.replace('.git', '')\n\n @staticmethod\n def is_git_repo(path):\n cmd = ['git', 'ls-remote', '--exit-code', str(path)]\n proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n ret = proc.returncode\n return ret == 0\n\n # Enable prettyish-printing of the class\n def __repr__(self):\n return pformat(vars(self), indent=4, width=1)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n", "sub_path": "lib/wit/gitrepo.py", "file_name": "gitrepo.py", "file_ext": "py", "file_size_in_byte": 12130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "witlogger.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "common.WitUserError", "line_number": 26, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 35, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "name"}, {"api_name": "env.git_reference_workspace", "line_number": 99, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 101, "usage_type": "call"}, {"api_name": "env.git_reference_workspace", "line_number": 101, "usage_type": "argument"}, {"api_name": "pathlib.Path", "line_number": 102, "usage_type": "call"}, {"api_name": "env.git_reference_workspace", "line_number": 102, "usage_type": "argument"}, {"api_name": "functools.lru_cache", "line_number": 126, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 153, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 225, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 258, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 305, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 306, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 307, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 334, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 339, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 339, "usage_type": "attribute"}, {"api_name": "pprint.pformat", "line_number": 345, "usage_type": "call"}, {"api_name": "doctest.testmod", "line_number": 350, "usage_type": "call"}]} +{"seq_id": "38365315", "text": "#!/usr/bin/env python3\n\n\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plot\nimport scipy\n\nimport wells.publisher as publisher\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--interactive\",\n help=\"Interactive mode\",\n action=\"store_true\")\nparser.add_argument(\"-e\", \"--ext\",\n help=\"Output image extension\",\n type=str,\n default=\"png\")\nparser.add_argument(\"-v\", \"--vertical\",\n help=\"Plot vertical colorbar\",\n action=\"store_true\")\nparser.add_argument(\"-s\", \"--figsize\",\n help=\"Figure size\",\n type=str,\n default=(\"2.8, 0.1\"))\nparser.add_argument(\"-l\", \"--label\",\n help=\"Colorbar label\",\n type=str,\n default=\"dB\")\nparser.add_argument(\"-n\",\n help=\"Number of ticks to plot\",\n type=int,\n default=5)\nparser.add_argument(\"--min\",\n help=\"Minimum decibels level to display\",\n type=float,\n 
default=-60)\nparser.add_argument(\"--max\",\n help=\"Minimum decibels level to display\",\n type=float,\n default=0)\nargs = parser.parse_args()\n\n\n\nif not args.interactive:\n figsize = [float(x) for x in args.figsize.split(\",\")]\n publisher.init({\"figure.figsize\": figsize})\n\n\ndef texify(ticks, digits=0):\n labels = []\n template = \"$%d$\"\n if digits:\n template = \"$%%.%df$\" % digits\n for t in ticks:\n labels.append(template % t)\n return labels\n\n\nticks = scipy.linspace(args.min, args.max, args.n)\nlabels = texify(ticks, digits=0)\n\n\nfig = plot.figure()\naxes = fig.add_axes([0.30, 0.40, 0.50, 0.10])\ncmap = matplotlib.cm.magma\nnorm = matplotlib.colors.Normalize(vmin=args.min, vmax=args.max)\ncb = matplotlib.colorbar.ColorbarBase(\n axes,\n cmap=cmap,\n norm=norm,\n orientation=\"vertical\" if args.vertical else \"horizontal\")\ncb.set_ticks(ticks)\ncb.set_ticklabels(labels)\ncb.set_label(args.label, labelpad=-28)\n\nif args.interactive:\n plot.show()\nelse:\n publisher.publish(\"colorbar\", args.ext, tight=False)\n", "sub_path": "colorbar.py", "file_name": "colorbar.py", "file_ext": "py", "file_size_in_byte": 2267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "wells.publisher.init", "line_number": 49, "usage_type": "call"}, {"api_name": "wells.publisher", "line_number": 49, "usage_type": "name"}, {"api_name": "scipy.linspace", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.cm", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 69, "usage_type": "attribute"}, {"api_name": "matplotlib.colorbar.ColorbarBase", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.colorbar", "line_number": 70, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "wells.publisher.publish", "line_number": 82, "usage_type": "call"}, {"api_name": "wells.publisher", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "236304662", "text": "from rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.views import View\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom users.models import CustomUser\nfrom accounting.models import *\nimport simplejson as json\nimport decimal\nimport requests\nfrom utils.constants import *\nimport random\nimport hashlib \nimport logging\nimport datetime\nfrom datetime import timedelta\nfrom django.utils import timezone\nimport uuid\nfrom games.models import *\nimport json\n\nfrom rest_framework.authtoken.models import Token\nfrom Crypto.Cipher import DES3\nimport xmltodict\nimport base64\nimport pytz\nimport urllib\n\nfrom utils.aws_helper import getThirdPartyKeys\n\nlogger = logging.getLogger('django')\n\ndef pad(m):\n return 
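# --- Annotation (not part of the dataset record above) ---
# A condensed sketch of the standalone-colorbar technique in the colorbar.py
# record above, with two portability notes: scipy.linspace, which the original
# calls, was removed from modern SciPy (numpy.linspace is the drop-in
# replacement), and fig.colorbar with a ScalarMappable is the current API for
# drawing a colorbar without attached data. The -60..0 dB range mirrors the
# original argparse defaults; everything else is generic matplotlib.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors

vmin, vmax, nticks = -60.0, 0.0, 5
ticks = np.linspace(vmin, vmax, nticks)

fig = plt.figure(figsize=(2.8, 1.0))
axes = fig.add_axes([0.30, 0.40, 0.50, 0.10])  # [left, bottom, width, height]
norm = colors.Normalize(vmin=vmin, vmax=vmax)
sm = cm.ScalarMappable(norm=norm, cmap="magma")
cb = fig.colorbar(sm, cax=axes, orientation="horizontal")
cb.set_ticks(ticks)
cb.set_label("dB")
plt.show()
# --- End annotation ---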
m+chr(16-len(m)%16)*(16-len(m)%16)\n\n\ndef des3Encryption(plain_text):\n try:\n key = hashlib.md5(IMES_KEY.encode()).digest()\n cipher = DES3.new(key, DES3.MODE_ECB)\n cipher_text = cipher.encrypt(pad(plain_text))\n\n return str(base64.b64encode(cipher_text), \"utf-8\")\n except Exception as e:\n logger.error(\"IMES Encrypt Error: {}\".format(repr(e)))\n return \"\"\n\n\ndef des3Decryption(cipher_text):\n try:\n key = hashlib.md5(IMES_KEY.encode()).digest()\n cipher_text = base64.b64decode(cipher_text)\n cipher = DES3.new(key, DES3.MODE_ECB)\n plain_text = cipher.decrypt(cipher_text)\n return plain_text.decode()\n except Exception as e:\n logger.error(\"IMES Decrypt Error: {}\".format(repr(e)))\n return \"\"\n\n\nclass InplayLoginAPI(View):\n def post(self, request, *arg, **kwargs):\n try:\n data = json.loads(request.body)\n\n username = data['username']\n user = CustomUser.objects.get(username=username)\n\n post_data = {}\n sessionToken = Token.objects.get(user_id=user)\n post_data['Token'] = str(sessionToken)\n # post_data['Token'] = \"e789cd6b4cc84f9ff8de0bee5a0bf8f5485c6d9f\"\n\n # time_stamp = (datetime.datetime.utcnow() - timedelta(hours=4)).strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n time_stamp = datetime.datetime.utcnow().strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n time_stamp = des3Encryption(time_stamp)\n post_data['TimeStamp'] = str(time_stamp)\n \n url = IMES_URL + \"api/login\"\n headers = {'Content-type': 'application/json'}\n res = requests.post(url, data=json.dumps(post_data), headers=headers)\n\n res = res.json()\n\n return HttpResponse(json.dumps(res), content_type='application/json')\n except ObjectDoesNotExist:\n logger.info(\"User: {} does not exist\".format(username))\n\n res = {}\n res[\"statusCode\"] = 101 # Invalid User\n res[\"statusDesc\"] = \"Invalid User\"\n\n return HttpResponse(json.dumps(res), content_type=\"application/json\", status=200)\n except Exception as e:\n logger.error(\"FATAL__ERROR: IMES Login Error -- {}\".format(repr(e)))\n return HttpResponse(\"Login Error\", content_type='text/plain', status=400)\n\n\nclass ValidateTokenAPI(View):\n def get(self, request, *arg, **kwargs):\n try:\n token = request.GET.get(\"token\")\n res = {}\n\n user = Token.objects.get(key=token).user\n\n res[\"memberCode\"] = user.username\n if user.currency == CURRENCY_CNY:\n res[\"CurrencyCode\"] = \"RMB\"\n if user.currency == CURRENCY_USD:\n res[\"CurrencyCode\"] = \"USD\"\n if user.currency == CURRENCY_THB:\n res[\"CurrencyCode\"] = \"THB\"\n if user.currency == CURRENCY_IDR:\n res[\"CurrencyCode\"] = \"IDR\"\n if user.currency == CURRENCY_HKD:\n res[\"CurrencyCode\"] = \"HKD\"\n if user.currency == CURRENCY_AUD:\n res[\"CurrencyCode\"] = \"AUD\"\n if user.currency == CURRENCY_MYR:\n res[\"CurrencyCode\"] = \"MYR\"\n if user.currency == CURRENCY_MMK:\n res[\"CurrencyCode\"] = \"MMK\"\n if user.currency == CURRENCY_EUR:\n res[\"CurrencyCode\"] = \"EUR\"\n if user.currency == CURRENCY_GBP:\n res[\"CurrencyCode\"] = \"GBP\"\n if user.currency == CURRENCY_NOK:\n res[\"CurrencyCode\"] = \"NOK\"\n\n res[\"statusCode\"] = 100\n res[\"statusDesc\"] = \"Success\"\n\n return HttpResponse(json.dumps(res), content_type=\"application/json\", status=200)\n except ObjectDoesNotExist as e:\n logger.info(str(token) + \" : {}\".format(repr(e)))\n\n res = {}\n res[\"statusCode\"] = 101 # Invalid User\n res[\"statusDesc\"] = \"Invalid User\"\n\n return HttpResponse(json.dumps(res), content_type=\"application/json\", status=200)\n except Exception as e:\n logger.error(\"IMES Validation Error: 
{}\".format(repr(e)))\n\n res = {}\n res[\"statusCode\"] = 301 # Internal Error\n res[\"statusDesc\"] = \"Internal Error\"\n \n return HttpResponse(json.dumps(res), content_type=\"application/json\", status=400)\n\n\nclass InplayGetBalanceAPI(View):\n def get(self, request, *arg, **kwargs):\n balance_package = request.GET.get(\"balancePackage\")\n date_sent = request.GET.get(\"dateSent\")\n try:\n balance_package = balance_package.replace(' ', '+')\n data = des3Decryption(balance_package)\n data = \"\".join([data.rsplit(\"}\" , 1)[0] , \"}\"])\n data = json.loads(data)\n\n response = {}\n if data[\"EventTypeId\"] == '1000':\n member_code = data[\"MemberCode\"]\n member_code = member_code.strip('\\\"')\n user = CustomUser.objects.get(username=member_code)\n\n response[\"StatusCode\"] = 100\n response[\"StatusMessage\"] = \"Success\"\n response[\"PackageId\"] = str(uuid.uuid1())\n response[\"Balance\"] = float(user.main_wallet)\n\n response = json.dumps(response)\n\n ciphertext = des3Encryption(response)\n return HttpResponse(ciphertext, content_type='text/plain', status=200)\n else:\n response[\"StatusCode\"] = -100\n response[\"StatusMessage\"] = \"Wrong Event Type\"\n\n response = json.dumps(response)\n cipher_text = des3Encryption(response)\n except Exception as e:\n logger.error(\"IMES GET Balance Error: {}\".format(repr(e)))\n return HttpResponse(status=400)\n\n\nclass InplayGetApprovalAPI(View):\n def get(self, request, *arg, **kwargs):\n balance_package = request.GET.get('balancePackage')\n package_id = request.GET.get('packageid')\n date_sent = request.GET.get('dateSent')\n try:\n # balance_package = \"ZwgZhGFWmUv5vDi5q2ruVAUij5STfGZ6ctAdoxbVdOUeW+RbwyYE91w8OXAeAgw5G8cVCxZC5Lt6MFBoaBxSfdVG6C55NSVcRYyB4Fk76mo=\"\n balance_package = balance_package.replace(' ', '+')\n data = des3Decryption(balance_package)\n data = \"\".join([data.rsplit(\"}\" , 1)[0], \"}\"])\n data = json.loads(data)\n response = {}\n if data[\"EventTypeId\"] == '1001':\n member_code = data[\"MemberCode\"]\n amount = float(data[\"TransactionAmt\"])\n user = CustomUser.objects.get(username=member_code)\n if user.main_wallet >= amount:\n # response[\"DateReceived\"] = timezone.now()\n # response[\"DateSent\"] = timezone.now()\n response[\"StatusCode\"] = 100\n response[\"StatusMessage\"] = \"Balance is sufficient, go ahead\"\n response[\"PackageId\"] = package_id\n response[\"Balance\"] = float(user.main_wallet)\n else:\n response[\"StatusCode\"] = -100\n \n response = json.dumps(response)\n cipher_text = des3Encryption(response)\n return HttpResponse(cipher_text, content_type='text/plain', status=200)\n else:\n response[\"StatusCode\"] = -100\n \n response = json.dumps(response)\n cipher_text = des3Encryption(response)\n return HttpResponse(cipher_text, content_type='text/plain', status=200)\n except Exception as e:\n logger.error(\"FATAL__ERROR: IMES Get Approval Error -- {}\".format(repr(e)))\n \n response = {}\n response[\"StatusCode\"] = -100\n response[\"StatusMessage\"] = \"Internal Error\"\n\n response = json.dumps(response)\n cipher_text = des3Encryption(response)\n\n return HttpResponse(cipher_text, content_type='text/plain', status=200)\n\n\nclass InplayDeductBalanceAPI(View):\n def get(self, request, *arg, **kwargs):\n balance_package = request.GET.get('balancePackage')\n package_id = request.GET.get('packageid')\n date_sent = request.GET.get('dateSent')\n\n try:\n balance_package = balance_package.replace(' ', '+')\n data = des3Decryption(balance_package)\n data = \"\".join([data.rsplit(\"}\" , 1)[0] , 
\"}\"]) \n data = json.loads(data)\n response = {}\n if data[\"EventTypeId\"] == '1003':\n user = data[\"MemberCode\"]\n amount = float(data[\"TransactionAmt\"])\n user = CustomUser.objects.get(username=user)\n if user.main_wallet > amount:\n trans_id = user.username + \"-\" + timezone.datetime.today().isoformat() + \"-\" + str(random.randint(0, 10000000))\n\n provider = GameProvider.objects.get(provider_name=IMES_PROVIDER)\n category = Category.objects.get(name='Sports')\n\n with transaction.atomic():\n \n GameBet.objects.create(\n provider=provider,\n category=category,\n user=user,\n user_name=user.username,\n amount_wagered=decimal.Decimal(amount),\n transaction_id=trans_id,\n market=ibetCN,\n ref_no=package_id\n )\n\n user.main_wallet = user.main_wallet - decimal.Decimal(amount)\n user.save()\n \n # res[\"DateReceived\"] = str(timezone.now())\n # res[\"DateSent\"] = str(timezone.now())\n response[\"StatusCode\"] = 100\n response[\"StatusMessage\"] = \"Success\"\n response[\"PackageId\"] = package_id\n response[\"Balance\"] = float(user.main_wallet)\n else:\n response[\"StatusCode\"] = -100\n response[\"StatusMessage\"] = \"Not enough balance\"\n\n response = json.dumps(response)\n cipher_text = des3Encryption(response)\n\n return HttpResponse(cipher_text, content_type='text/plain', status=200)\n else:\n return HttpResponse(\"Wrong Event type\")\n except Exception as e:\n logger.error(\"IMES Deduct Balance Error: {}\".format(repr(e)))\n\n response[\"StatusCode\"] = -100\n response[\"StatusMessage\"] = \"Internal Error\"\n\n response = json.dumps(response)\n cipher_text = des3Encryption(response)\n\n return HttpResponse(cipher_text, content_type='text/plain', status=200)\n\n\nclass InplayUpdateBalanceAPI(View):\n def get(self, request, *arg, **kwargs):\n balance_package = request.GET.get('balancePackage')\n package_id = request.GET.get('packageid')\n date_sent = request.GET.get('dateSent')\n try:\n balance_package = balance_package.replace(' ', '+')\n data = des3Decryption(balance_package)\n data = \"\".join([data.rsplit(\"}\" , 1)[0] , \"}\"])\n data = json.loads(data)\n if data[\"EventTypeId\"] == '4002':\n provider = GameProvider.objects.get(provider_name=IMES_PROVIDER)\n category = Category.objects.get(name='Sports')\n\n match_no = data[\"MatchNo\"]\n bet_detail_list = data[\"BetDetailList\"]\n bet_detail_list = json.loads(bet_detail_list)\n for bet in bet_detail_list:\n member_code = bet[\"MemberCode\"]\n bet_no = bet[\"BetNo\"]\n amount = bet[\"TransactionAmt\"]\n\n user = CustomUser.objects.get(username=member_code)\n\n trans_id = user.username + \"-\" + timezone.datetime.today().isoformat() + \"-\" + str(random.randint(0, 10000000))\n\n with transaction.atomic():\n\n GameBet.objects.get_or_create(\n provider = provider,\n category = category,\n # game = models.ForeignKey(Game, on_delete=models.CASCADE, blank=True, null=True) # small game\n # game_name = models.CharField(max_length=200, blank=True, null=True) # subset of category, (e.g within basketball, there's NBA, FIBA, euroleague, within soccer there's euroleague, premier league, etc.) \n user = user,\n user_name = user.username,\n # amount_wagered = decimal.Decimal(amount),\n amount_won = decimal.Decimal(amount),\n # # outcome = models.BooleanField() # true = win, false = lost\n # outcome = models.SmallIntegerField(choices=OUTCOME_CHOICES, null=True, blank=True)\n # odds = models.DecimalField(null=True, blank=True,max_digits=12, decimal_places=2,) # payout odds (in american odds), e.g. 
+500, -110, etc.\n # bet_type = models.CharField(max_length=6, choices=BET_TYPES_CHOICES, null=True, blank=True)\n # line = models.CharField(max_length=50, null=True, blank=True) # examples: if bet_type=spread: <+/-> | bet_type=moneyline: name of team | bet_type=total: 200\n transaction_id = trans_id,\n # currency = models.SmallIntegerField(choices=CURRENCY_CHOICES, default=0, verbose_name=_(\"Currency\"))\n market = ibetCN,\n ref_no = bet_no,\n resolved_time = timezone.now(),\n other_data = json.dumps({\"bet_no\" : bet_no})\n )\n\n user.main_wallet = user.main_wallet + decimal.Decimal(amount)\n user.save()\n\n res = {}\n\n res[\"StatusCode\"] = 100\n res[\"StatusMessage\"] = \"Acknowledged\"\n\n return HttpResponse(json.dumps(res), content_type='application/json', status=200)\n except Exception as e:\n logger.error(\"IMES Update Balance Error: {}\".format(repr(e)))\n return HttpResponse(repr(e), status=400)\n\n\nclass InplayPostBetDetailsAPI(View):\n def post(self, request, *arg, **kwargs):\n bet_package = request.POST.get('postPackage')\n \n try:\n bet_package = bet_package.replace(' ', '+')\n data = des3Decryption(data)\n data = \"\".join([data.rsplit(\">\" , 1)[0] , \">\"])\n data = xmltodict.parse(data)\n\n member_bet_details = data[\"BetDetails\"][\"MemberBetDetails\"]\n\n member_code = member_bet_details[\"memberCode\"]\n bet_id = member_bet_details[\"betId\"]\n bet_time = member_bet_details[\"betTime\"]\n sports_name = member_bet_details[\"sportsName\"]\n bet_amt = member_bet_details[\"betAmt\"]\n odds = member_bet_details[\"odds\"]\n\n user = CustomUser.objects.get(username=member_code)\n\n provider = GameProvider.objects.get(provider_name=IMES_PROVIDER)\n category = Category.objects.filter(name='SPORTS')\n\n trans_id = user.username + \"-\" + timezone.datetime.today().isoformat() + \"-\" + str(random.randint(0, 10000000))\n\n GameBet.objects.get_or_create(\n provider = provider,\n category = category[0],\n # game = models.ForeignKey(Game, on_delete=models.CASCADE, blank=True, null=True) # small game\n # game_name = models.CharField(max_length=200, blank=True, null=True) # subset of category, (e.g within basketball, there's NBA, FIBA, euroleague, within soccer there's euroleague, premier league, etc.) \n user = user,\n user_name = user.username,\n amount_wagered = decimal.Decimal(bet_amt),\n # amount_won = models.DecimalField(max_digits=12, decimal_places=2, null=True) # if amount_won = 0, outcome is also 0 (false)\n # # outcome = models.BooleanField() # true = win, false = lost\n # outcome = models.SmallIntegerField(choices=OUTCOME_CHOICES, null=True, blank=True)\n # odds = Decimal(odds) # payout odds (in american odds), e.g. 
+500, -110, etc.\n # bet_type = models.CharField(max_length=6, choices=BET_TYPES_CHOICES, null=True, blank=True)\n # line = models.CharField(max_length=50, null=True, blank=True) # examples: if bet_type=spread: <+/-> | bet_type=moneyline: name of team | bet_type=total: 200\n transaction_id = trans_id,\n currency = user.currency,\n market = ibetCN,\n ref_no = bet_id,\n # resolved_time = models.DateTimeField(null=True, blank=True)\n other_data = json.loads({\"data\": data})\n )\n \n return HttpResponse(status=200)\n except Exception as e:\n logger.error(\"Inplay Post Bet Error {}\".format(repr(e)))\n # return HttpResponse(repr(e), status=400) No return here\n\n\nclass TestDecryption(View):\n def get(self, request, *arg, **kwargs):\n try:\n api_no = request.GET.get('api')\n event_type_id = request.GET.get('EventTypeId') # \"EventTypeId\": 1001,\n member_code = request.GET.get('MemberCode') # \"MemberCode\": \"bae02\",\n transaction_amt = request.GET.get('TransactionAmt') # \"TransactionAmt\": 100.0\n match_no = request.GET.get('MatchNo') # \"MatchNo\": 1688512\n bet_detail_list = request.GET.get('BetDetailList')\n\n plain_json = {}\n plain_json[\"EventTypeId\"] = event_type_id\n plain_json[\"MemberCode\"] = member_code\n\n if api_no == '1':\n pass\n # plain_json = json.dumps(plain_json)\n elif api_no == '2':\n plain_json[\"TransactionAmt\"] = transaction_amt\n elif api_no == '6':\n plain_json[\"MatchNo\"] = match_no\n plain_json[\"BetDetailList\"] = bet_detail_list\n\n plain_json = json.dumps(plain_json)\n \n # key = hashlib.md5(b'9d25ee5d1ffa0e01').digest()\n\n cipher_json = des3Encryption(plain_json)\n\n plain_json = des3Decryption(cipher_json)\n\n plain_json = \"\".join([plain_json.rsplit(\"}\" , 1)[0] , \"}\"]) \n\n plain_json = json.loads(plain_json)\n\n return HttpResponse(cipher_json)\n except Exception as e:\n print(repr(e))", "sub_path": "ibet_apps/games/views/inplaygameviews.py", "file_name": "inplaygameviews.py", "file_ext": "py", "file_size_in_byte": 20029, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 43, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES3.new", "line_number": 44, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES3", "line_number": 44, "usage_type": "name"}, {"api_name": "Crypto.Cipher.DES3.MODE_ECB", "line_number": 44, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 47, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 55, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 56, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES3.new", "line_number": 57, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES3", "line_number": 57, "usage_type": "name"}, {"api_name": "Crypto.Cipher.DES3.MODE_ECB", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.views.View", "line_number": 65, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 71, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 71, "usage_type": "name"}, {"api_name": "rest_framework.authtoken.models.Token.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": 
"rest_framework.authtoken.models.Token.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 74, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 85, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 85, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 90, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 97, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 97, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 100, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 103, "usage_type": "name"}, {"api_name": "rest_framework.authtoken.models.Token.objects.get", "line_number": 109, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 109, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 138, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 138, "usage_type": "call"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 139, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 146, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 154, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 157, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 165, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 171, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 171, "usage_type": "name"}, {"api_name": "uuid.uuid1", "line_number": 175, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 178, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 181, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 186, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 190, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 193, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 203, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 208, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 208, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 208, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 219, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 221, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 225, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 227, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 235, "usage_type": "call"}, {"api_name": 
"django.http.HttpResponse", "line_number": 238, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 241, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 251, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 256, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 256, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 256, "usage_type": "name"}, {"api_name": "django.utils.timezone.datetime.today", "line_number": 258, "usage_type": "call"}, {"api_name": "django.utils.timezone.datetime", "line_number": 258, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 258, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 258, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 263, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 263, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 270, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 276, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 289, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 292, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 294, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 301, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 304, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 307, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 316, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 323, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 329, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 329, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 329, "usage_type": "name"}, {"api_name": "django.utils.timezone.datetime.today", "line_number": 331, "usage_type": "call"}, {"api_name": "django.utils.timezone.datetime", "line_number": 331, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 331, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 331, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 333, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 333, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 343, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 353, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 353, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 354, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 357, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 365, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 365, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 368, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 371, "usage_type": "name"}, {"api_name": "xmltodict.parse", "line_number": 379, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 390, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 390, "usage_type": "attribute"}, {"api_name": 
"users.models.CustomUser", "line_number": 390, "usage_type": "name"}, {"api_name": "django.utils.timezone.datetime.today", "line_number": 395, "usage_type": "call"}, {"api_name": "django.utils.timezone.datetime", "line_number": 395, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 395, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 395, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 404, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 416, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 419, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 425, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 448, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 458, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 460, "usage_type": "call"}]} +{"seq_id": "203812380", "text": "import numpy as np\nimport pylab as pl\nfrom scipy import interpolate\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport csv\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.decomposition import pca\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.signal import butter, filtfilt\nimport time\nfrom sklearn.svm import SVC\nfrom scipy.signal import find_peaks_cwt\n\n\ndef butter_lowpass(cutoff, fs, order=6):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n#\ndef butter_lowpass_filter(data, cutoff, fs, order=6):\n b, a = butter_lowpass(cutoff, fs, order=order)\n y = filtfilt(b, a, data)\n return y\n\n\n\nEMGDATA = []\nEMGLABEL = []\n# set up sampling frequency and cutoff frequency\nfs = 200\ncutoff = 25\n\ntime_readstart = time.clock()\nprint(\"read start\")\nrootpath = '/Users/ziyu/Desktop/Capstone/testdata'\nfor parent, subdir, filenames in os.walk(rootpath):\n count = 0\n print(\"reading fold path:\",parent)\n for filename in filenames:\n if filename.endswith('.csv') :\n signal_paths = [os.path.join(rootpath,parent, filename)]\n #count for each csv file\n count += 1\n if signal_paths[0]:\n print(signal_paths[0])\n emg1 = []\n emg2 = []\n emg3 = []\n emg4 = []\n emg5 = []\n emg6 = []\n emg7 = []\n emg8 = []\n\n with open(signal_paths[0]) as f:\n reader = csv.reader(f)\n # 读取一行,下面的reader中已经没有该行了\n head_row = next(reader)\n for row in reader:\n # 行号从2开始\n # print(reader.line_num, row)\n emg1.append(row[1])\n emg2.append(row[2])\n emg3.append(row[3])\n emg4.append(row[4])\n emg5.append(row[5])\n emg6.append(row[6])\n emg7.append(row[7])\n emg8.append(row[8])\n\n emg1 = [float(i) for i in emg1]\n emg1_abs = list(map(abs, emg1))\n emg2 = [float(i) for i in emg2]\n emg2_abs = list(map(abs, emg2))\n emg3 = [float(i) for i in emg3]\n emg3_abs = list(map(abs, emg3))\n emg4 = [float(i) for i in emg4]\n emg4_abs = list(map(abs, emg4))\n emg5 = [float(i) for i in emg5]\n emg5_abs = list(map(abs, emg5))\n emg6 = [float(i) for i in emg6]\n emg6_abs = list(map(abs, emg6))\n emg7 = [float(i) for i in emg7]\n emg7_abs = list(map(abs, emg7))\n emg8 = [float(i) for i in emg8]\n emg8_abs = list(map(abs, emg8))\n #emg and emg_abs's different\n\n\n\n # use emg4 to cut signal\n emg4_smooth = butter_lowpass_filter(emg4_abs, cutoff, fs)\n\n sigment_add = []\n cutpoint = []\n\n #cut off for each gesture\n for index in range(0, 
len(emg4_smooth), 200):\n # print(index)\n # every 400 points is a segment, overlap is 200 points\n sigment = emg4_smooth[index:(index + 400)]\n sigment_mean = sum(sigment) / len(sigment)\n # print(sigment)\n\n sigment_add.append(sigment_mean)\n cb = np.array(sigment_add)\n peaks = find_peaks_cwt(-cb, [3])\n #print(peaks)\n for item in peaks:\n cut = item * 200 + 200\n cutpoint.append(cut)\n\n print('分段点:',cutpoint)\n\n if len(cutpoint) != 11:\n if len(cutpoint) == 10:\n if cutpoint[0] > 1000:\n cutpoint.insert(0, 100)\n #insert point 100 as the first segment\n print('开始分段点插入后:', cutpoint)\n else:\n cutpoint.insert(10, len(emg4_smooth) - 100)\n print('前后分段点插入后:', cutpoint)\n\n\n elif len(cutpoint) == 9:\n #both front and back missing\n cutpoint.insert(0, 100)\n cutpoint.insert(10, len(emg4_smooth)-100)\n print('前后分段点插入后:', cutpoint)\n else:\n print(\"wrong\")\n\n #cutpoint is the result of segment, make 10 segments\n\n # i is from 0 - 9\n for i in range(0, len(cutpoint) - 1):\n #print(i)\n gesture_emg1 = np.array(emg1[cutpoint[i]:cutpoint[i + 1]])\n # plt.plot(gesture)\n #print('length of gesture:%d' % len(gesture_emg1))\n x = np.linspace(0, len(gesture_emg1), len(gesture_emg1))\n #print('length of x = %d'%len(x))\n x_new = np.linspace(0, len(gesture_emg1), 5000)\n # print('length of x_new = %d'%len(x_new))\n tck_emg1 = interpolate.splrep(x, gesture_emg1)\n gesture_emg1_bspline = interpolate.splev(x_new, tck_emg1)\n gesture_emg1_bspline_abs = list(map(abs, gesture_emg1_bspline))\n # print(gesture_bspline)\n # plt.plot(x, gesture, \"o\", label=u\"original data\")\n #print('length of gesture after interpolate:%d' % len(gesture_emg1_bspline))\n # plt.plot(gesture_bspline, label=u\"B-spline interpolate\")\n # pl.legend()\n # pl.show()\n #the gesture_bspline list is the result of one gesture of one emg\n\n gesture_emg2 = np.array(emg2[cutpoint[i]:cutpoint[i + 1]])\n tck_emg2 = interpolate.splrep(x, gesture_emg2)\n gesture_emg2_bspline = interpolate.splev(x_new, tck_emg2)\n gesture_emg2_bspline_abs = list(map(abs, gesture_emg2_bspline))\n\n\n gesture_emg3 = np.array(emg3[cutpoint[i]:cutpoint[i + 1]])\n tck_emg3 = interpolate.splrep(x, gesture_emg3)\n gesture_emg3_bspline = interpolate.splev(x_new, tck_emg3)\n gesture_emg3_bspline_abs = list(map(abs, gesture_emg3_bspline))\n\n\n gesture_emg4 = np.array(emg4[cutpoint[i]:cutpoint[i + 1]])\n tck_emg4 = interpolate.splrep(x, gesture_emg4)\n gesture_emg4_bspline = interpolate.splev(x_new, tck_emg4)\n gesture_emg4_bspline_abs = list(map(abs, gesture_emg4_bspline))\n\n\n gesture_emg5 = np.array(emg5[cutpoint[i]:cutpoint[i + 1]])\n tck_emg5 = interpolate.splrep(x, gesture_emg5)\n gesture_emg5_bspline = interpolate.splev(x_new, tck_emg5)\n gesture_emg5_bspline_abs = list(map(abs, gesture_emg5_bspline))\n\n\n gesture_emg6 = np.array(emg6[cutpoint[i]:cutpoint[i + 1]])\n tck_emg6 = interpolate.splrep(x, gesture_emg6)\n gesture_emg6_bspline = interpolate.splev(x_new, tck_emg6)\n gesture_emg6_bspline_abs = list(map(abs, gesture_emg6_bspline))\n\n\n gesture_emg7 = np.array(emg7[cutpoint[i]:cutpoint[i + 1]])\n tck_emg7 = interpolate.splrep(x, gesture_emg7)\n gesture_emg7_bspline = interpolate.splev(x_new, tck_emg7)\n gesture_emg7_bspline_abs = list(map(abs, gesture_emg7_bspline))\n\n\n gesture_emg8 = np.array(emg8[cutpoint[i]:cutpoint[i + 1]])\n tck_emg8 = interpolate.splrep(x, gesture_emg8)\n gesture_emg8_bspline = interpolate.splev(x_new, tck_emg8)\n gesture_emg8_bspline_abs = list(map(abs, gesture_emg8_bspline))\n\n\n gesture = 
np.append(gesture_emg1_bspline_abs,gesture_emg2_bspline_abs)\n gesture = np.append(gesture, gesture_emg3_bspline_abs)\n gesture = np.append(gesture, gesture_emg4_bspline_abs)\n gesture = np.append(gesture, gesture_emg5_bspline_abs)\n gesture = np.append(gesture, gesture_emg6_bspline_abs)\n gesture = np.append(gesture, gesture_emg7_bspline_abs)\n gesture = np.append(gesture, gesture_emg8_bspline_abs)\n\n #print('gesture shape:',len(gesture))\n\n EMGDATA.append(gesture)\n EMGLABEL.append(i+1)\n\n\n\nfinishREAD = (time.clock() - time_readstart)\nprint(\"READING Time used:\",finishREAD)\nprint(\"totally read csv file number:\",count)\n# for temp in EMGLABEL:\n# plt.plot(EMGDATA[temp])\n# plt.title('EMGDATA[%d],gesture:%d'%(temp,EMGLABEL[temp]))\n# plt.show()\n\nprint('length of EMGDATA:', len(EMGDATA))\n\n# plt.plot(EMGLABEL)\n# plt.show()\n\n#EMGDATA AND EMGLABEL is the training data\n\n\n# # PCA transfer\n# print('start to PCA')\n# startPCA = time.clock()\n# # feature wanted\n# K=50\n# # building model,n_components is the number of feature wanted\n# model = pca.PCA(n_components=K).fit(EMGDATA)\n# # transform to run PCA\n# face_X = model.transform(EMGDATA)\n#\n# finishPCA = (time.clock() - startPCA)\n# print(\"PCA Time used:\",finishPCA)\n\n\nX_train, X_test, y_train, y_test = train_test_split(EMGDATA, EMGLABEL, test_size=0.3)\nprint(\"length of X_train:\", len(X_train))\nprint(\"feature used\", len(X_train[0]))\n\nplt.plot(X_train[0])\nplt.title('gesture:%d'%(y_train[0]))\nplt.show()\n\nplt.plot(X_train[1])\nplt.title('gesture:%d'%(y_train[1]))\nplt.show()\n\nplt.plot(X_train[2])\nplt.title('gesture:%d'%(y_train[2]))\nplt.show()\n\nplt.plot(X_train[3])\nplt.title('gesture:%d'%(y_train[3]))\nplt.show()\n\nprint('start to train KNN')\nstartKNN = time.clock()\n\nknn = KNeighborsClassifier()\nknn.fit(X_train, y_train)\nX_result = knn.predict(X_test)\n#print(y_test)\n\nfinishKNN = (time.clock() - startKNN)\nprint(\"KNN Time used:\",finishKNN)\nprint(y_test)\nprint(X_result)\n\n#calculate Similiarity\nsame = 0\nfor num in range(0,len(y_test)-1):\n if y_test[num] == X_result[num]:\n same = same + 1\nsimiliarity = same/len(y_test)\nprint(\"similiarity:\", same/len(y_test))\n\n\n", "sub_path": "SomeTestCode/KNN.py", "file_name": "KNN.py", "file_ext": "py", "file_size_in_byte": 9578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "scipy.signal.butter", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.signal.filtfilt", "line_number": 27, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 38, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "scipy.signal.find_peaks_cwt", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 149, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 151, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 151, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 152, "usage_type": "call"}, 
{"api_name": "scipy.interpolate", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 163, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 163, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 164, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 164, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 169, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 169, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 170, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 170, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 175, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 175, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 176, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 176, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 181, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 181, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 182, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 182, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 187, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 187, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 188, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 188, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 193, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 193, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 194, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 194, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 199, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 199, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 200, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 200, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 210, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 253, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "time.clock", "line_number": 270, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 272, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 277, "usage_type": "call"}]} +{"seq_id": "480194678", "text": "from flask import Flask, g, abort, current_app, request, url_for\nfrom werkzeug.exceptions import HTTPException, InternalServerError\nfrom flask_restful import Resource, Api\nfrom datetime import datetime\nfrom functools import wraps\nimport threading\nimport time\nimport uuid\n\ntasks = {}\n\napp = Flask(__name__)\napi = Api(app)\n\n\n@app.before_first_request\ndef before_first_request():\n \"\"\"Start a background thread that cleans up old tasks.\"\"\"\n def clean_old_tasks():\n \"\"\"\n This function cleans up old tasks from our in-memory data structure.\n \"\"\"\n global tasks\n while True:\n # Only keep tasks that are running or that finished less than 5\n # minutes ago.\n five_min_ago = datetime.timestamp(datetime.utcnow()) - 5 * 60\n tasks = {task_id: task for task_id, task in tasks.items()\n if 'completion_timestamp' not in task or task['completion_timestamp'] > five_min_ago}\n time.sleep(60)\n\n if not current_app.config['TESTING']:\n thread = threading.Thread(target=clean_old_tasks)\n thread.start()\n\n\ndef async_api(wrapped_function):\n @wraps(wrapped_function)\n def new_function(*args, **kwargs):\n def task_call(flask_app, environ):\n # Create a request context similar to that of the original request\n # so that the task can have access to flask.g, flask.request, etc.\n with flask_app.request_context(environ):\n try:\n tasks[task_id]['return_value'] = wrapped_function(*args, **kwargs)\n 
except HTTPException as e:\n tasks[task_id]['return_value'] = current_app.handle_http_exception(e)\n except Exception as e:\n # The function raised an exception, so we set a 500 error\n tasks[task_id]['return_value'] = InternalServerError()\n if current_app.debug:\n # We want to find out if something happened so reraise\n raise\n finally:\n # We record the time of the response, to help in garbage\n # collecting old tasks\n tasks[task_id]['completion_timestamp'] = datetime.timestamp(datetime.utcnow())\n\n # close the database session (if any)\n\n # Assign an id to the asynchronous task\n task_id = uuid.uuid4().hex\n\n # Record the task, and then launch it\n tasks[task_id] = {'task_thread': threading.Thread(\n target=task_call, args=(current_app._get_current_object(),\n request.environ))}\n tasks[task_id]['task_thread'].start()\n\n # Return a 202 response, with a link that the client can use to\n # obtain task status\n print(url_for('gettaskstatus', task_id=task_id))\n return 'accepted', 202, {'Location': url_for('gettaskstatus', task_id=task_id)}\n return new_function\n\n\nclass GetTaskStatus(Resource):\n def get(self, task_id):\n \"\"\"\n Return status about an asynchronous task. If this request returns a 202\n status code, it means that task hasn't finished yet. Else, the response\n from the task is returned.\n \"\"\"\n task = tasks.get(task_id)\n if task is None:\n abort(404)\n if 'return_value' not in task:\n return '', 202, {'Location': url_for('gettaskstatus', task_id=task_id)}\n return task['return_value']\n\n\nclass CatchAll(Resource):\n @async_api\n def get(self, path=''):\n # perform some intensive processing\n print(\"starting processing task, path: '%s'\" % path)\n time.sleep(10)\n print(\"completed processing task, path: '%s'\" % path)\n return f'The answer is: {path}'\n\n\napi.add_resource(CatchAll, '/', '/')\napi.add_resource(GetTaskStatus, '/status/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)", "sub_path": "dev/api_example.py", "file_name": "api_example.py", "file_ext": "py", "file_size_in_byte": 3962, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.timestamp", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 32, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 33, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.HTTPException", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.current_app.handle_http_exception", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 47, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.InternalServerError", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.current_app.debug", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.datetime.timestamp", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": 
"name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 57, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 62, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.current_app._get_current_object", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.environ", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 73, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 38, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 88, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 92, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "115623108", "text": "import yaml\nimport io\nimport os.path\nimport shutil\n\nwith open(r'./../param.yaml') as stream:\n paramLoaded = yaml.safe_load(stream)\n\n# Transferring the files containing the mean and std of the recorded (processed) data \nmeanStdDataPath = paramLoaded[\"savePath\"]\nmeanStdDataPath = os.path.join(meanStdDataPath, 'sparseFormat')\n\nmeanFileName = \"trainDataMean\"\nstdFileName = \"trainDataStd\"\nmeanFileName = os.path.join(meanStdDataPath, meanFileName + \".txt\")\nstdFileName = os.path.join(meanStdDataPath, stdFileName + \".txt\")\n\nnewMeanPath = shutil.copy(meanFileName, os.getcwd())\nnewStdPath = shutil.copy(stdFileName, os.getcwd())\n", "sub_path": "learning/ANN_coupled/scripts/transport.py", "file_name": "transport.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "yaml.safe_load", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "shutil.copy", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.getcwd", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "shutil.copy", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.getcwd", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "477093134", "text": "import re\nimport cfscrape\nimport subprocess\nfrom pyquery import PyQuery as pq\nfrom .utils import exceptions, casper\n\nscraper = cfscrape.create_scraper()\n\nclass abysstream_IE:\n def __init__(self):\n self.regexes = 
[r\"^(?:https?://)?(?:www\\.)?(abysstream\\.[a-z]+)\\/(?:v\\/|video\\/)?([A-Za-z0-9]{10,})\"]\n self.aggregate = False\n \n def rewrite(self, url, find):\n for r in self.regexes:\n obj = re.match(r, url)\n if obj:\n return \"%s/v/%s\" % (find.best(obj.group(1)), obj.group(2))\n \n raise exceptions.RewriteError()\n \n \n def supports(self, url):\n for r in self.regexes:\n if re.match(r, url):\n return True\n \n return False\n \n \n def get(self, url, headers, bestOnly=True):\n return casper().fetch(\"hosts/abysstream.js\", url)", "sub_path": "extractors/abysstream_strm.py", "file_name": "abysstream_strm.py", "file_ext": "py", "file_size_in_byte": 865, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cfscrape.create_scraper", "line_number": 7, "usage_type": "call"}, {"api_name": "re.match", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.exceptions.RewriteError", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.exceptions", "line_number": 20, "usage_type": "name"}, {"api_name": "re.match", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.casper", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "380952245", "text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n\nimport pymysql\nfrom scwzproject.items import ScwzprojectItem,DetailItem\nclass ScwzprojectPipeline:\n def open_spider(self,spider):\n print(\"开启爬虫时执行...\")\n #创建连接对象\n self.conn = pymysql.Connect(host='127.0.0.1', port=3306, database='wz_db', user='root', password='1234',charset='utf8')\n # 通过连接对象创建游标\n self.cursor = self.conn.cursor()\n\n def process_item(self, item, spider):\n if isinstance(item,ScwzprojectItem):\n try:\n #插入操作\n sql_str = 'insert into tb_wz(title,url,date) values(\"%s\",\"%s\",\"%s\")' % (item['title'], item['url'], item['date'])\n self.cursor.execute(sql_str)\n self.conn.commit() #提交事务\n except Exception as e:\n print(e)\n self.conn.rollback() #回滚事务\n return item\n if isinstance(item,DetailItem):\n try:\n #更新操作\n sql_str = 'update tb_wz set content=\"%s\" where url=\"%s\"' % (item['content'], item['url'])\n self.cursor.execute(sql_str)\n self.conn.commit() # 提交事务\n except Exception as e:\n print(e)\n self.conn.rollback() # 回滚事务\n return item\n return item\n\n # def close_spider(self):\n # print(\"关闭爬虫时执行...\")\n # self.cursor.close()\n # self.conn.close()", "sub_path": "scwzproject/scwzproject/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1760, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pymysql.Connect", "line_number": 16, "usage_type": "call"}, {"api_name": "scwzproject.items.ScwzprojectItem", "line_number": 21, "usage_type": "argument"}, {"api_name": "scwzproject.items.DetailItem", "line_number": 31, "usage_type": "argument"}]} +{"seq_id": "91383413", "text": "#!/usr/bin/env python\n\n\"\"\" nav_test.py - Version 1.1 2013-12-20\n\n Command a robot to move autonomously among a number of goal locations defined in the map frame.\n On each round, select a new random sequence of locations, then attempt to move to each location\n in succession. 
Keep track of success rate, time elapsed, and total distance traveled.\n\n Created for the Pi Robot Project: http://www.pirobot.org\n Copyright (c) 2012 Patrick Goebel. All rights reserved.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.5\n \n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details at:\n \n http://www.gnu.org/licenses/gpl.html\n \n\"\"\"\n\nimport rospy\nimport actionlib\nimport tf\nimport math\nfrom actionlib_msgs.msg import *\nfrom geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom random import sample\nfrom math import pow, sqrt\n\nclass TestInitalpose():\n def __init__(self):\n rospy.init_node('test_initalpose', anonymous=False)\n rospy.loginfo(\"start test inital pose...\")\n \n self.setpose_pub = rospy.Publisher(\"initialpose\",PoseWithCovarianceStamped,latch=True, queue_size=1)\n \n #self.setpose_pub = rospy.Publisher(\"initialpose\", PoseWithCovarianceStamped,queue_size=10)\n \n self.set_pose = {'x':-0.170512974262,'y':-0.0195373892784,'a':0.0}\n self.test_set_pose_flag = True\n self.test_set_pose_cnt = 3\n \n \n while self.test_set_pose_flag == True:\n \n self.set_inital_pose()\n self.test_set_pose_cnt -= 1\n if self.test_set_pose_cnt == 0:\n self.test_set_pose_flag = False\n rospy.sleep(1)\n\n def set_inital_pose(self):\n # Define a set inital pose publisher.\n rospy.loginfo(\"start set pose...\")\n p = PoseWithCovarianceStamped()\n p.header.stamp = rospy.Time.now()\n p.header.frame_id = \"map\"\n p.pose.pose.position.x = self.set_pose['x']\n p.pose.pose.position.y = self.set_pose['y']\n p.pose.pose.position.z = self.set_pose['a']\n (p.pose.pose.orientation.x,\n p.pose.pose.orientation.y,\n p.pose.pose.orientation.z,\n p.pose.pose.orientation.w) = tf.transformations.quaternion_from_euler(0, 0, self.set_pose['a'])\n p.pose.covariance[6 * 0 + 0] = 0.5 * 0.5\n p.pose.covariance[6 * 1 + 1] = 0.5 * 0.5\n p.pose.covariance[6 * 3 + 3] = math.pi / 12.0 * math.pi / 12.0\n \n self.setpose_pub.publish(p)\nif __name__ == '__main__':\n try:\n TestInitalpose()\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"AMCL navigation test finished.\")", "sub_path": "rbx1_nav/nodes/test_initalpose.py", "file_name": "test_initalpose.py", "file_ext": "py", "file_size_in_byte": 3117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "rospy.init_node", "line_number": 38, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 39, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 41, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseWithCovarianceStamped", "line_number": 41, "usage_type": "argument"}, {"api_name": "rospy.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 60, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseWithCovarianceStamped", "line_number": 61, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 62, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 62, "usage_type": "attribute"}, 
{"api_name": "tf.transformations.quaternion_from_euler", "line_number": 70, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 70, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 73, "usage_type": "attribute"}, {"api_name": "rospy.spin", "line_number": 79, "usage_type": "call"}, {"api_name": "rospy.ROSInterruptException", "line_number": 80, "usage_type": "attribute"}, {"api_name": "rospy.loginfo", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "67671374", "text": "from Testing import assertions\nimport datetime\nimport calendar\nimport operator\n\n# https://stackoverflow.com/questions/33673116/eval-with-a-variable-operator\n# since we need to do inverse operations, add this (confusing) operator map.\noperator_map = {\n \"-\": operator.add,\n \"+\": operator.sub\n}\n\n\ndef time_delta(t1, t2):\n return abs(get_timestamp(t1) - get_timestamp(t2))\n\n\ndef get_timestamp(datetime_string):\n date = datetime.datetime.strptime(datetime_string, '%a %d %b %Y %H:%M:%S %z')\n return int(date.timestamp())\n\n\ndef time_delta_get_timestamp_without_format(t1, t2):\n return abs(get_timestamp_without_format(t1) - get_timestamp_without_format(t2))\n\n\n# Some string formatting / time diff exercise.\ndef get_timestamp_without_format(datetime_string):\n\n day_name, day_number, month, year, time, time_diff = datetime_string.split()\n\n month_names = list(calendar.month_name)\n month_number = month_names.index(month)\n hours, minutes, seconds = map(int, time.split(\":\"))\n\n date = datetime.datetime(int(year), int(month_number), int(day_number), hours, minutes, seconds, 0, tzinfo=datetime.timezone.utc)\n\n time_diff_operator = time_diff[:1]\n time_diff_hours = int(time_diff[1:3])\n time_diff_minutes = int(time_diff[3:5])\n\n hours_diff = datetime.timedelta(hours=time_diff_hours)\n minutes_diff = datetime.timedelta(minutes=time_diff_minutes)\n\n date = operator_map.get(time_diff_operator)(date, hours_diff)\n date = operator_map.get(time_diff_operator)(date, minutes_diff)\n\n return int(date.timestamp())\n\n\nassertions.assert_equals(25200, time_delta(\"Sun 10 May 2015 13:54:36 -0700\", \"Sun 10 May 2015 13:54:36 -0000\"))\nassertions.assert_equals(88200, time_delta(\"Sat 02 May 2015 19:54:36 +0530\", \"Fri 01 May 2015 13:54:36 -0000\"))\n\nassertions.assert_equals(25200, time_delta_get_timestamp_without_format(\"Sun 10 May 2015 13:54:36 -0700\", \"Sun 10 May 2015 13:54:36 -0000\"))\nassertions.assert_equals(88200, time_delta_get_timestamp_without_format(\"Sat 02 May 2015 19:54:36 +0530\", \"Fri 01 May 2015 13:54:36 -0000\"))", "sub_path": "Python/Date and Time/python-time-delta.py", "file_name": "python-time-delta.py", "file_ext": "py", "file_size_in_byte": 2060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "operator.add", "line_number": 9, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "calendar.month_name", "line_number": 32, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.timezone", "line_number": 36, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 43, "usage_type": 
"call"}, {"api_name": "Testing.assertions.assert_equals", "line_number": 51, "usage_type": "call"}, {"api_name": "Testing.assertions", "line_number": 51, "usage_type": "name"}, {"api_name": "Testing.assertions.assert_equals", "line_number": 52, "usage_type": "call"}, {"api_name": "Testing.assertions", "line_number": 52, "usage_type": "name"}, {"api_name": "Testing.assertions.assert_equals", "line_number": 54, "usage_type": "call"}, {"api_name": "Testing.assertions", "line_number": 54, "usage_type": "name"}, {"api_name": "Testing.assertions.assert_equals", "line_number": 55, "usage_type": "call"}, {"api_name": "Testing.assertions", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "407880031", "text": "import pygame, sys, time, random\nfrom pygame.locals import *\n#Importing modules from other files\nfrom trex import trex\nfrom cactus import cactus\nfrom cloud import cloud\n\n#Always call before utilizing pygame functions\npygame.init()\n#Sets FPS and starts game clock/\nFPS = 40\nfpsClock = pygame.time.Clock()\nframe_counter = 0\npygame.mixer.music.load('resources/gerudo.mp3')\npygame.mixer.music.play(-1, 0.0)\n\nDISPLAYSURF = pygame.display.set_mode((400,300), 0, 32)\n#Sets title of GUI frame\npygame.display.set_caption(\"Dino Jump\")\nBASICFONT = pygame.font.Font('freesansbold.ttf', 16)\n\n#Sets background color\nWHITE = (250, 250, 250)\nrex = trex(150)\ncacti = pygame.sprite.Group()\nclouds = pygame.sprite.Group()\n\n#Adds a new cactus sprite to the list of obstacles\ndef add_cacti():\n plant = cactus(120)\n cacti.add(plant)\n\ndef add_cloud():\n x = cloud()\n clouds.add(x)\n\n#Updates each cactus sprite's location\n#Removes the cactus from sprite group if it's off screen\n#Scores removed cacti\n#Redraws cactus image\ndef update_cacti():\n for plant in cacti:\n plant.update()\n\ndef update_clouds():\n for y in clouds:\n y.update()\n\n#Updates trex sprite's location and redraws trex image\ndef update_rex(jumping):\n if jumping:\n rex.move(150)\n if frame_counter % 3 == 0:\n rex.image = DINO[0]\n elif frame_counter % 3 == 1:\n rex.image = DINO[1]\n elif frame_counter % 3 == 2:\n rex.image = DINO[2]\n\n#Starts game over actions\n#Displays an end of game message in a text box\n#Kills trex sprite\n#Creates new game loop to display end game state\ndef game_over2(game_over):\n if game_over:\n DISPLAYSURF.fill((255, 255, 255))\n rex.kill()\n for c in cacti:\n c.kill()\n for c in clouds:\n c.kill()\n Surf = BASICFONT.render(\"GAME OVER\", 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (10, 10)\n DISPLAYSURF.blit(Surf, Rect)\n Surf = BASICFONT.render(\"PRESS ENTER TO RESTART\", 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (150, 150)\n DISPLAYSURF.blit(Surf, Rect)\n Surf = BASICFONT.render(\"SCORE: \" + str(frame_counter), 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (10, 250)\n DISPLAYSURF.blit(Surf, Rect)\n\n#Creates a text box with the text provided in location x, y on screen\ndef display_message(text, x, y):\n print\n\n#Displays current score in a text box\ndef display_score():\n Surf = BASICFONT.render(str(frame_counter), 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (10, 10)\n DISPLAYSURF.blit(Surf, Rect)\n\n#Displays current time in a text box\ndef display_time():\n print\n\n#Determines whether the trex sprite collides with a cacti sprite\n#If there is a collision, the game is over.\ndef is_collision():\n if pygame.sprite.spritecollideany(rex, cacti):\n return True\n else:\n return False\n\n#Increases the FPS by 5 every 100 seconds\n#This is a 
placeholder for a challenge exercise.\ndef increase_FPS():\n if frame_counter % 500 == 0:\n return FPS + 5\n else:\n return FPS\n\n#Main game loop\njumping = False\ngame_over = False\nDINO = [pygame.image.load('resources/dino1.png'), pygame.image.load('resources/dino2.png'), pygame.image.load('resources/dino3.png')]\nwhile True:\n\n #Fill in background\n DISPLAYSURF.fill(WHITE)\n pygame.draw.line(DISPLAYSURF, (0, 0, 0), (0, 190), (400, 190), 2)\n if not game_over:\n frame_counter += 1\n restart = False\n\n #Event loop\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_SPACE:\n jumping = True\n if event.key == K_RETURN:\n restart = True\n\n #some functionality\n game_over2(game_over)\n if not game_over:\n update_cacti()\n update_clouds()\n update_rex(jumping)\n if rex.rect.y == 150:\n jumping = False\n FPS = increase_FPS()\n game_over = is_collision()\n if frame_counter % 75 == 0:\n add_cacti()\n if frame_counter % 500 == 5:\n add_cloud()\n\n #Update display\n pygame.draw.rect(DISPLAYSURF, (151, 236, 246), (0, 0, 400, 190))\n pygame.draw.rect(DISPLAYSURF, (245, 207, 151), (0, 192, 400, 200))\n for plant in cacti:\n DISPLAYSURF.blit(plant.image, plant.rect)\n for x in clouds:\n DISPLAYSURF.blit(x.image, x.rect)\n display_score()\n DISPLAYSURF.blit(rex.image, rex.rect)\n\n if game_over and restart:\n rex = trex(150)\n FPS = 40\n frame_counter = 0\n game_over = False\n\n pygame.display.update()\n fpsClock.tick(FPS)\n", "sub_path": "trex_jump.py", "file_name": "trex_jump.py", "file_ext": "py", "file_size_in_byte": 4756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pygame.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 20, "usage_type": "attribute"}, {"api_name": "trex.trex", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cactus.cactus", "line_number": 30, "usage_type": "call"}, {"api_name": "cloud.cloud", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 
119, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 124, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 130, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 132, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 156, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 157, "usage_type": "attribute"}, {"api_name": "trex.trex", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 171, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 171, "usage_type": "attribute"}]} +{"seq_id": "264087915", "text": "from mylib.dataloader import DataLoader\n\nimport torch\nfrom argparse import ArgumentParser\n\n\n# Arguments\nparser = ArgumentParser(description='Testing Model')\nparser.add_argument('--model', type=str, default='models/model.pkl', help='Path of Previous Trained Model')\nparser.add_argument('--imgs', type=str, default='data/images', help='Path of Testing Images')\nparser.add_argument('--labels', type=str, default='data/labels.txt', help='Path of Labels File')\nparser.add_argument('--bs', default=32, type=int, help='Batch Size')\nargs = parser.parse_args()\n\n\n# Start frome here!\nif __name__ == '__main__':\n # Testing Data\n data = DataLoader(imgs_dir=args.imgs, labels_path=args.labels, batch_sz=args.bs)\n test_loader = data.test_loader()\n n_classes = data.n_classes\n classes = [\"Surprise\", \"Fear\", \"Disgust\", \"Happiness\", \"Sadness\" ,\"Anger\", \"Neutral\"]\n\n # Load Model\n model = torch.load(args.model)\n\n # Evaluation\n class_correct = [ 0. for i in range(n_classes) ]\n class_total = [ 0. for i in range(n_classes) ]\n with torch.no_grad():\n for batch_X, batch_y in test_loader:\n outputs = model(batch_X)\n _, predicts = torch.max(outputs, 1)\n correct = (predicts == batch_y).squeeze().tolist()\n for label, c in zip(batch_y, correct):\n class_correct[label] += c\n class_total[label] += 1\n \n print('-' * 10)\n for i in range(n_classes):\n print(f\"Test Accuracy of {classes[i]}: {100*(class_correct[i]/class_total[i]):.2f}%\")\n print('-' * 10)\n print(f'Overall Accuracy: {100*(sum(class_correct)/sum(class_total)):.2f}%')", "sub_path": "evaluation.py", "file_name": "evaluation.py", "file_ext": "py", "file_size_in_byte": 1640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "mylib.dataloader.DataLoader", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "337343869", "text": "import unittest\r\nimport sys\r\nfrom io import StringIO\r\ntry:\r\n from L09 import *\r\nexcept:\r\n print('Could not open file due to the following reasons:')\r\n print('1. File not named \"L09\" (not \"L09.py\").')\r\n print('2. Syntax or runtime error. 
Try running your file.')\r\n print('Contact a TA if you continue having problems.')\r\n input(\"\")\r\n sys.exit()\r\n\r\n######################### GLOBAL VARIABLES ###########################\r\n\r\nproblemList = [\"rainfall\", \"tempconv\", \"L09-03\", \"L09-04\", \"psalm112Upper\",\r\n \"numLinesWordsChars\", \"concordance\", \"concordanceSort\"]\r\npointsList = [5.0, 7.0, 2.0, 2.0, 7.0, 7.0, 30.0, 10.0]\r\nmanualList = [False, False, True, True, False, False, False, False]\r\ncorrectList = manualList[:]\r\n\r\n############################## TESTS #################################\r\n\r\nclass Unittests(unittest.TestCase):\r\n def setUp(self):\r\n rainfall = open(\"rainfall.txt\", \"w\")\r\n rainfall.write(\"Akron 25.81\\n\")\r\n rainfall.write(\"Albia 37.65 \\n\")\r\n rainfall.write(\"Algona 30.69 \\n\")\r\n rainfall.write(\"Allison 33.64 \\n\")\r\n rainfall.write(\"Alton 27.43 \\n\")\r\n rainfall.write(\"AmesW 34.07 \\n\")\r\n rainfall.write(\"AmesSE 33.95 \\n\")\r\n rainfall.write(\"Anamosa 35.33 \\n\")\r\n rainfall.write(\"Ankeny 33.38 \\n\")\r\n rainfall.write(\"Atlantic 34.77 \\n\")\r\n rainfall.write(\"Audubon 33.41 \\n\")\r\n rainfall.write(\"Beaconsfield 35.27 \\n\")\r\n rainfall.write(\"Bedford 36.35 \\n\")\r\n rainfall.write(\"BellePlaine 35.81\\n\")\r\n rainfall.write(\"Bellevue 34.35 \\n\")\r\n rainfall.write(\"Blockton 36.28 \\n\")\r\n rainfall.write(\"Bloomfield 38.02 \\n\")\r\n rainfall.write(\"Boone 36.30 \\n\")\r\n rainfall.write(\"Brighton 33.59 \\n\")\r\n rainfall.write(\"Britt 31.54 \\n\")\r\n rainfall.write(\"Buckeye 33.66 \\n\")\r\n rainfall.write(\"BurlingtonKBUR 37.94 \\n\")\r\n rainfall.write(\"Burlington 36.94 \\n\")\r\n rainfall.write(\"Carroll 33.33 \\n\")\r\n rainfall.write(\"Cascade 33.48\")\r\n rainfall.close()\r\n\r\n psalm112 = open(\"psalm112.txt\", \"w\")\r\n psalm112.write(\" 1 Praise the LORD.[b] \\n\")\r\n psalm112.write(\" Blessed are those who fear the LORD, \\n\")\r\n psalm112.write(\" who find great delight in his commands. \\n\")\r\n psalm112.write(\" 2 Their children will be mighty in the land; \\n\")\r\n psalm112.write(\" the generation of the upright will be blessed. \\n\")\r\n psalm112.write(\" 3 Wealth and riches are in their houses, \\n\")\r\n psalm112.write(\" and their righteousness endures forever. \\n\")\r\n psalm112.write(\" 4 Even in darkness light dawns for the upright, \\n\")\r\n psalm112.write(\" for those who are gracious and compassionate and righteous. \\n\")\r\n psalm112.write(\" 5 Good will come to those who are generous and lend freely, \\n\")\r\n psalm112.write(\" who conduct their affairs with justice. \\n\")\r\n psalm112.write(\" 6 Surely the righteous will never be shaken; \\n\")\r\n psalm112.write(\" they will be remembered forever. \\n\")\r\n psalm112.write(\" 7 They will have no fear of bad news; \\n\")\r\n psalm112.write(\" their hearts are steadfast, trusting in the LORD. \\n\")\r\n psalm112.write(\" 8 Their hearts are secure, they will have no fear; \\n\")\r\n psalm112.write(\" in the end they will look in triumph on their foes. \\n\")\r\n psalm112.write(\" 9 They have freely scattered their gifts to the poor, \\n\")\r\n psalm112.write(\" their righteousness endures forever; \\n\")\r\n psalm112.write(\" their horn[c] will be lifted high in honor. 
\\n\")\r\n psalm112.write(\"10 The wicked will see and be vexed, \\n\")\r\n psalm112.write(\" they will gnash their teeth and waste away; \\n\")\r\n psalm112.write(\" the longings of the wicked will come to nothing.\")\r\n psalm112.close()\r\n\r\n concord = open(\"restaurant.txt\", \"w\")\r\n concord.write(\"I went to the restaurant yesterday. Hello, I said, to the man who \\n\")\r\n concord.write(\"greeted me at the door. Where is your restroom? On my way to the\\n\")\r\n concord.write(\"restroom, I bumped into the waiter boy. Excuse me, sir, I said.\\n\")\r\n concord.write(\"When I returned to the table, the meal was served. These are the\\n\")\r\n concord.write(\"best clams I have ever eaten, I said. My compliments to the chef.\\n\")\r\n concord.write(\"Unfortunately, I was arrested by the officer for not paying my bill.\")\r\n concord.close()\r\n \r\n self.saved_stdout = sys.stdout\r\n self.out = StringIO()\r\n sys.stdout = self.out\r\n\r\n def tearDown(self):\r\n sys.stdout = self.saved_stdout\r\n\r\n def forcePrint(self, obj):\r\n sys.stdout = self.saved_stdout\r\n print(obj)\r\n\r\n # Builds the concordance dictionary for the two problems that require it.\r\n def concordanceHelper(self):\r\n concord = {}\r\n inFile = open(\"restaurant.txt\", \"r\")\r\n lines = inFile.readlines()\r\n for lineNum in range(1, len(lines)+1):\r\n words = lines[lineNum-1].split()\r\n for word in words:\r\n word = word.lower()\r\n if word[-1] not in \"abcdefghijklmnopqrstuvwxyz\":\r\n word = word[:len(word)-1]\r\n if word in concord and lineNum not in concord[word]:\r\n concord[word].append(lineNum)\r\n elif word not in concord:\r\n concord[word] = [lineNum]\r\n inFile.close()\r\n\r\n return concord\r\n#------------------------------------------#\r\n def test_rainfall(self):\r\n # Build a string with all the formatted lines, without a space before\r\n # the newline character.\r\n inFile = open(\"rainfall.txt\", \"r\")\r\n build_str = \"\"\r\n lines = inFile.readlines()\r\n for line in lines:\r\n words = line.split()\r\n build_str += \"%+25s %5.1f\\n\"%(words[0], float(words[1]))\r\n\r\n \r\n inFile = open(\"rainfall.txt\", \"r\")\r\n build_str2 = \"\"\r\n lines = inFile.readlines()\r\n for line in lines:\r\n words = line.split()\r\n build_str2 += \"%+25s %5.1f \\n\"%(words[0], float(words[1]))\r\n inFile.close()\r\n\r\n # Call the function\r\n rainfall()\r\n\r\n # Make a list of the lines in build_str and build_str2, removing any\r\n # elements with an empty string.\r\n inFile = open(\"rainfallfmt.txt\", \"r\")\r\n build_str_lines = build_str.split(\"\\n\")\r\n build_str_lines2 = build_str2.split(\"\\n\")\r\n while \"\" in build_str_lines:\r\n build_str_lines.remove(\"\")\r\n while \"\" in build_str_lines2:\r\n build_str_lines2.remove(\"\")\r\n\r\n # For each line in the formatted text file (\"rainfallfmt.txt\"), check\r\n # if it matches the acceptable formatted lines (with or without a space\r\n # before the newline character).\r\n lines = inFile.readlines()\r\n for lineNum in range(len(build_str_lines)):\r\n assert build_str_lines[lineNum] == lines[lineNum].strip(\"\\n\") or\\\r\n build_str_lines2[lineNum] == lines[lineNum].strip(\"\\n\")\r\n inFile.close()\r\n\r\n global correctList\r\n correctList[0] = True\r\n#------------------------------------------#\r\n def test_tempconv(self):\r\n # Build a string with all the formatted lines, without a space before\r\n # the newline character.\r\n build_str = \"\"\r\n build_str += \"Fahrenheit Celsius\\n\"\r\n for f in range(-300, 213):\r\n c = (f - 32) * (5 / 
9)\r\n build_str += \"%10.3f %10.3f\\n\"%(f, c)\r\n\r\n # Build a string with all the formatted lines, with a space before the\r\n # newline character.\r\n build_str2 = \"\"\r\n build_str2 += \"Fahrenheit Celsius \\n\"\r\n for f in range(-300, 213):\r\n c = (f - 32) * (5 / 9)\r\n build_str2 += \"%10.3f %10.3f \\n\"%(f, c)\r\n\r\n # Call the function\r\n tempconv()\r\n\r\n # Make a list of the lines in build_str and build_str2, removing any\r\n # elements with an empty string.\r\n inFile = open(\"tempconv.txt\", \"r\")\r\n build_str_lines = build_str.split(\"\\n\")\r\n build_str_lines2 = build_str2.split(\"\\n\")\r\n while \"\" in build_str_lines:\r\n build_str_lines.remove(\"\")\r\n while \"\" in build_str_lines2:\r\n build_str_lines2.remove(\"\")\r\n\r\n # For each line in the formatted text file (\"tempconv.txt\"), check\r\n # if it matches the acceptable formatted lines (with or without a space\r\n # before the newline character).\r\n lines = inFile.readlines()\r\n for lineNum in range(len(build_str_lines)):\r\n assert build_str_lines[lineNum] == lines[lineNum].strip(\"\\n\") or\\\r\n build_str_lines2[lineNum] == lines[lineNum].strip(\"\\n\")\r\n inFile.close()\r\n \r\n global correctList\r\n correctList[1] = True\r\n#------------------------------------------#\r\n def test_psalm112Upper(self):\r\n # Build a string of the file in upper case.\r\n inFile = open(\"psalm112.txt\", \"r\")\r\n build_str = \"\"\r\n lines = inFile.readlines()\r\n for line in lines:\r\n build_str += line.upper()\r\n inFile.close()\r\n\r\n # Call the function.\r\n psalm112Upper()\r\n\r\n # Make a list of the lines in build_str, removing any elements with an\r\n # empty string.\r\n inFile = open(\"psalm112Upper.txt\", \"r\")\r\n build_str_lines = build_str.split(\"\\n\")\r\n while \"\" in build_str_lines:\r\n build_str_lines.remove(\"\")\r\n\r\n # For each line in the formatted text file (\"tempconv.txt\"), check\r\n # if it matches the corresponding line in build_str.\r\n lines = inFile.readlines()\r\n for lineNum in range(len(build_str_lines)):\r\n assert build_str_lines[lineNum] == lines[lineNum].strip(\"\\n\")\r\n inFile.close()\r\n \r\n global correctList\r\n correctList[4] = True\r\n#------------------------------------------#\r\n def test_numLinesWordsChars(self):\r\n numLinesWordsChars(\"psalm112.txt\")\r\n output = self.out.getvalue().strip().lower()\r\n s = \"23 lines\\n182 words\\n1062 characters\"\r\n assert output == s\r\n\r\n self.out = StringIO()\r\n sys.stdout = self.out\r\n numLinesWordsChars(\"rainfall.txt\")\r\n output = self.out.getvalue().strip().lower()\r\n s = \"25 lines\\n50 words\\n358 characters\"\r\n assert output == s\r\n\r\n self.out = StringIO()\r\n sys.stdout = self.out\r\n numLinesWordsChars(\"restaurant.txt\")\r\n output = self.out.getvalue().strip().lower()\r\n s = \"6 lines\\n77 words\\n396 characters\"\r\n assert output == s\r\n \r\n global correctList\r\n correctList[5] = True\r\n#------------------------------------------#\r\n def test_concordance(self):\r\n concord = self.concordanceHelper()\r\n\r\n concordance(\"restaurant.txt\")\r\n inFile = open(\"concord.txt\", \"r\")\r\n lines = inFile.readlines()\r\n\r\n # For each word in the concordance, check if it matches the acceptable\r\n # formatted lines (with or without a space before the newline\r\n # character).\r\n for word in concord:\r\n line = \"%+25s %-25s\\n\" % (word, str(concord[word]))\r\n line2 = \"%+25s %-25s \\n\" % (word, str(concord[word]))\r\n assert line in lines or line2 in lines\r\n inFile.close()\r\n\r\n 
global correctList\r\n correctList[6] = True\r\n#------------------------------------------#\r\n def test_concordanceSort(self):\r\n concord = self.concordanceHelper()\r\n keys = list(concord.keys())\r\n keys.sort()\r\n\r\n concordanceSort(\"restaurant.txt\")\r\n inFile = open(\"concord.txt\", \"r\")\r\n lines = inFile.readlines()\r\n\r\n # For each line in the \"concord.txt\" file, check if it matches the\r\n # corresponding line in the keys list, sorted in alphabetical order.\r\n # Also, check if it matches the acceptable formatted lines (with or\r\n # without a space before the newline character).\r\n for lineNum in range(len(lines)):\r\n key = keys[lineNum]\r\n line = \"%+25s %-25s\\n\" % (key, concord[key])\r\n line2 = \"%+25s %-25s \\n\" % (key, concord[key])\r\n assert lines[lineNum] == line or lines[lineNum] == line2\r\n inFile.close()\r\n\r\n global correctList\r\n correctList[7] = True\r\n#------------------------------------------#\r\nif __name__ == '__main__':\r\n unittest.main(exit=False, verbosity = 0)\r\n\r\n######################### HELPER FUNCTIONS ###########################\r\n\r\ndef toGrade(percent):\r\n if percent>=92.5:\r\n return \"A\"\r\n elif percent>=89.5:\r\n return \"A-\"\r\n elif percent>=86.5:\r\n return \"B+\"\r\n elif percent>=82.5:\r\n return \"B\"\r\n elif percent>=79.5:\r\n return \"B-\"\r\n elif percent>=76.5:\r\n return \"C+\"\r\n elif percent>=72.5:\r\n return \"C\"\r\n elif percent>=69.5:\r\n return \"C-\"\r\n elif percent>=66.5:\r\n return \"D+\"\r\n elif percent>=62.5:\r\n return \"D\"\r\n elif percent>=59.5:\r\n return \"D-\"\r\n else:\r\n return \"F\"\r\n\r\n######################### PRINT TEST REPORT ##########################\r\n\r\nprint()\r\nprint(\"=\"*66)\r\nprint(\"%+28s %+8s %+8s %+18s\"%(\"FUNCTION\",\"EARNED\",\"OUT OF\",\"STATUS\"))\r\nprint(\"=\"*66)\r\n\r\nfor problem in range(len(correctList)):\r\n if manualList[problem]:\r\n print(\"%+28s %+36s\"%(problemList[problem],\"MANUAL CHECK OFF\"))\r\n elif correctList[problem]:\r\n print(\"%+28s %8.1f %8.1f %+18s\"%(problemList[problem],pointsList[problem]/1.0,pointsList[problem]/1.0,\"VALID\"))\r\n else:\r\n print(\"%+28s %8.1f %8.1f %+18s\"%(problemList[problem],0.0,pointsList[problem]/1.0,\"INVALID\"))\r\nprint(\"=\"*66)\r\n\r\nif True not in manualList:\r\n correct = 0\r\n for problem in range(len(correctList)):\r\n if correctList[problem]:\r\n correct += pointsList[problem]\r\n\r\n if sum(pointsList) > 0:\r\n percent = round((correct*100)/sum(pointsList), 2)\r\n else:\r\n percent = 0.0\r\n print(\"%37.1f %8.1f %23s\"%(correct, sum(pointsList)/1.0, str(percent)+\" % (\"+toGrade(percent)+\")\"))\r\n\r\nprint()\r\nif correctList.count(False) > 0:\r\n input(\"Press ENTER to show hints \")\r\n\r\n############################### HINTS ################################\r\n print() \r\n print(\"=\"*62)\r\n print(\" \"*28, \"HINTS\")\r\n print(\"=\"*62)\r\n print('''Look at the top of your window. Does it say \"L09\" (not \"L09\r\nStarter Code\")? If not, close out of your window, rename it to\r\n\"L09\", and run the tests again.\\n''')\r\n print('''Do the function names in your code match those in the table\r\nabove?\\n''')\r\n print(\"-\"*62)\r\n if correctList[0] == False:\r\n print('''rainfall - Check the following in your spacing:\r\n a) The city is 25 characters wide, right-aligned.\r\n b) The rainfall is formatted with one digit to the right\r\n of the decimal.\r\n c) There is a space separating the columns. 
This means\r\n there will be TWO spaces between the city and the\r\n rainfall, one for the column separation and one due to\r\n the right alignment of the rainfall.''')\r\n print(\"-\"*62)\r\n\r\n if correctList[1] == False:\r\n print('''tempconv - Check the following in your spacing:\r\n a) Each column in 10 characters wide.\r\n b) The rainfall is formatted with three digits to the right\r\n of the decimal.\r\n c) There is a space separating the columns. This means\r\n there will be THREE spaces between the numbers, one for\r\n the column separation and two due to the right alignment\r\n of the rainfall.\r\n d) Positive numbers are NOT formatted with a leading '+'\r\n sign. Because numbers are right-aligned by default, you\r\n do NOT need the '+' in the formatted string (i.e.\r\n %5.1f, not %+5.1f).''')\r\n print(\"-\"*62)\r\n\r\n if correctList[5] == False:\r\n print('''numLinesWordsChars - If your problem appears to be correct\r\nand it is not getting marked as correct, it is most likely\r\nan error with calculating the number of characters. Check\r\nthe following in your character count calculation:\r\n a) Spaces are being counted as well as letters, numbers,\r\n and symbols. Since spaces are also characters, it is\r\n not sufficient to simply count the characters in each\r\n word.\r\n b) Newline characters are not being counted.''')\r\n print(\"-\"*62)\r\n\r\n if correctList[6] == False:\r\n print('''concordance - Check the following in your spacing:\r\n a) The word is 25 characters wide, right-aligned.\r\n b) There is a space separating the columns. Since the\r\n list is to be left-aligned, this means that there\r\n should only be ONE space between the word and the\r\n list.''')\r\n print(\"-\"*62)\r\n\r\n print() \r\n print(\"+\" + \"-\"*21 + \" IMPORTANT MESSAGE \" + \"-\"*20 + \"+\")\r\n print('''| Normally labs will not be as picky about spacing, but this |\r\n| lab will be because the problems require formatting. If a |\r\n| problem is not getting marked as correct and you think |\r\n| that it should, check the following: |\r\n| a) The column widths are EXACTLY what the problem asks |\r\n| for, as well as number of digits after the decimal |\r\n| point. |\r\n| b) There is a space separating the columns, no more, no |\r\n| less. |\r\n| c) There are no unnecessary newlines. None of the |\r\n| files or output should be double-spaced. |\r\n| d) There is no unnecessary whitespace. This means that |\r\n| the newline character for each line MUST come |\r\n| immediately after the last column, without any spaces |\r\n| in between. |\r\n| If you continue having problems, ask a TA in lab or |\r\n| email me at john_luscombe@taylor.edu. 
|''')\r\n print(\"+\" + \"-\"*60 + \"+\")\r\n print()\r\n\r\nelse:\r\n print('''\"Success is not final, failure is not fatal; it is the courage to\r\ncontinue that counts.\" ~Winston Churchill''')\r\n\r\ninput(\"\")\r\n", "sub_path": "L09/L09Test.py", "file_name": "L09Test.py", "file_ext": "py", "file_size_in_byte": 17047, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.exit", "line_number": 12, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 89, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 97, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 240, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 241, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 247, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 248, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 300, "usage_type": "call"}]} +{"seq_id": "236812118", "text": "\"\"\"\n -----------------------------------------------------------------------------------------------------------\n Package: AequilibraE\n\n Name: Main interface for comparing assignment scenarios\n Purpose: Load GUI and user interface for the scenario comparison procedure\n\n Original Author: Pedro Camargo (c@margo.co)\n Contributors:\n Last edited by: Pedro Camargo\n\n Website: www.AequilibraE.com\n Repository: https://github.com/AequilibraE/AequilibraE\n\n Created: 2016-12-01\n Updated:\n Copyright: (c) AequilibraE authors\n Licence: See LICENSE.TXT\n -----------------------------------------------------------------------------------------------------------\n \"\"\"\n\nimport qgis\nfrom functools import partial\nfrom qgis.core import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4 import uic\nfrom qgis.gui import QgsMapLayerProxyModel\nimport sys\nimport os\n\nfrom ..common_tools.global_parameters import *\nfrom ..common_tools.auxiliary_functions import *\n\nfrom random import randint\n\nsys.modules['qgsfieldcombobox'] = qgis.gui\nsys.modules['qgsmaplayercombobox'] = qgis.gui\nFORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_compare_scenarios.ui'))\n\nclass CompareScenariosDialog(QDialog, FORM_CLASS):\n def __init__(self, iface):\n QDialog.__init__(self)\n self.iface = iface\n self.setupUi(self)\n\n self.positive_color.setColor(QColor(0, 174, 116, 255))\n self.negative_color.setColor(QColor(218, 0, 3, 255))\n self.common_flow_color.setColor(QColor(0, 0, 0, 255))\n self.radio_diff.toggled.connect(self.show_color_composite)\n self.radio_compo.toggled.connect(self.show_color_composite)\n \n self.band_size = 10.0\n self.space_size = 0.0\n self.layer = None\n self.expert_mode = False\n self.drive_side = get_parameter_chain(['system', 'driving side'])\n\n # layers and fields # For adding skims\n self.mMapLayerComboBox.setFilters(QgsMapLayerProxyModel.LineLayer)\n self.mMapLayerComboBox.layerChanged.connect(self.add_fields_to_cboxes)\n\n self.ab_FieldComboBoxBase.currentIndexChanged.connect(partial(self.choose_a_field, 'base_AB'))\n self.ba_FieldComboBoxBase.currentIndexChanged.connect(partial(self.choose_a_field, 
'base_BA'))\n\n self.ab_FieldComboBoxAlt.currentIndexChanged.connect(partial(self.choose_a_field, 'alt_AB'))\n self.ba_FieldComboBoxAlt.currentIndexChanged.connect(partial(self.choose_a_field, 'alt_BA'))\n\n # space slider\n self.slider_spacer.setMinimum(0)\n self.slider_spacer.setMaximum(30)\n self.slider_spacer.setValue(0)\n self.slider_spacer.setTickPosition(QSlider.TicksBelow)\n self.slider_spacer.setTickInterval(10)\n self.slider_spacer.valueChanged.connect(self.spacevaluechange)\n\n # band slider\n self.slider_band_size.setMinimum(5)\n self.slider_band_size.setMaximum(150)\n self.slider_band_size.setValue(50)\n self.slider_band_size.setTickPosition(QSlider.TicksBelow)\n self.slider_band_size.setTickInterval(5)\n self.slider_band_size.valueChanged.connect(self.sizevaluechange)\n\n self.but_run.clicked.connect(self.execute_comparison)\n self.add_fields_to_cboxes()\n self.sizevaluechange()\n self.spacevaluechange()\n self.set_initial_value_if_available()\n self.show_color_composite()\n \n def show_color_composite(self):\n self.common_label.setVisible(self.radio_compo.isChecked())\n self.common_flow_color.setVisible(self.radio_compo.isChecked())\n \n def choose_a_field(self, modified):\n if modified[0:3] == 'bas':\n self.choose_field_indeed(modified, self.ab_FieldComboBoxBase, self.ba_FieldComboBoxBase)\n else:\n self.choose_field_indeed(modified, self.ab_FieldComboBoxAlt, self.ba_FieldComboBoxAlt)\n\n def choose_field_indeed(self, modified, ab, ba):\n i, j = 'AB', 'BA'\n text = ab.currentText()\n if i in text:\n text = text.replace(i, j)\n index = ba.findText(text, Qt.MatchFixedString)\n if index >= 0:\n ba.setCurrentIndex(index)\n if modified == j:\n text = ba.currentText()\n if j in text:\n text = text.replace(j, i)\n index = ab.findText(text, Qt.MatchFixedString)\n if index >= 0:\n ab.setCurrentIndex(index)\n\n def set_initial_value_if_available(self):\n all_items = [self.ab_FieldComboBoxBase.itemText(i) for i in range(self.ab_FieldComboBoxBase.count())]\n\n for i in all_items:\n if 'AB' in i:\n index = self.ab_FieldComboBoxBase.findText(i, Qt.MatchFixedString)\n if index >= 0:\n self.ab_FieldComboBoxBase.setCurrentIndex(index)\n self.ab_FieldComboBoxAlt.setCurrentIndex(index)\n break\n\n def spacevaluechange(self):\n self.space_size = self.slider_spacer.value() / 100.0\n self.lbl_space.setText(\"{:3,.2f}\".format(self.space_size))\n\n def sizevaluechange(self):\n self.band_size = self.slider_band_size.value() / 5.0\n self.lbl_width.setText(\"{:3,.2f}\".format(self.band_size))\n\n def add_fields_to_cboxes(self):\n self.layer = get_vector_layer_by_name(self.mMapLayerComboBox.currentText())\n self.ab_FieldComboBoxBase.setLayer(self.layer)\n self.ba_FieldComboBoxBase.setLayer(self.layer)\n self.ab_FieldComboBoxAlt.setLayer(self.layer)\n self.ba_FieldComboBoxAlt.setLayer(self.layer)\n\n\n def execute_comparison(self):\n if self.check_inputs():\n self.expert_mode = self.chk_expert_mode.isChecked()\n self.but_run.setEnabled(False)\n self.band_size = str(self.band_size)\n self.space_size = str(self.space_size)\n\n if self.expert_mode:\n QgsExpressionContextUtils.setProjectVariable('aeq_band_spacer', float(self.space_size))\n QgsExpressionContextUtils.setProjectVariable('aeq_band_width', float(self.band_size))\n self.space_size = '@aeq_band_spacer'\n self.band_size = '@aeq_band_width'\n \n # define the side of the plotting based on the side of the road the system has defined\n ab = -1\n if self.drive_side == 'right':\n ab = 1\n ba = - ab\n\n # fields\n ab_base = 
self.ab_FieldComboBoxBase.currentText()\n ba_base = self.ba_FieldComboBoxBase.currentText()\n ab_alt = self.ab_FieldComboBoxAlt.currentText()\n ba_alt = self.ba_FieldComboBoxAlt.currentText()\n idx_ab = self.layer.fieldNameIndex(ab_base)\n idx_ba = self.layer.fieldNameIndex(ba_base)\n idx2_ab = self.layer.fieldNameIndex(ab_alt)\n idx2_ba = self.layer.fieldNameIndex(ba_alt)\n\n # Create the bandwidths for the comon flow, if requested\n if self.radio_compo.isChecked():\n values = []\n values.append(self.layer.maximumValue(idx_ab))\n values.append(self.layer.maximumValue(idx_ba))\n values.append(self.layer.maximumValue(idx2_ab))\n values.append(self.layer.maximumValue(idx2_ba))\n max_value = max(values)\n\n if self.expert_mode:\n QgsExpressionContextUtils.setProjectVariable('aeq_band_max_value', float(max_value))\n max_value = '@aeq_band_max_value'\n\n # We create the styles for AB and BA directions and add to the fields\n for abb, aba, di, t in ([ab_base, ab_alt, ab, 'ab'],[ba_base, ba_alt, ba, 'ba']):\n width = '(coalesce(scale_linear(min(\"' + abb + '\",\"' + aba + '\") , 0,' + str(max_value) + ', 0, ' + self.band_size + '), 0))'\n offset = str(di) + '*(' + width + '/2 + ' + self.space_size + ')'\n line_pattern = 'if (max((\"' + abb + '\"+\"' + aba + '\"),0) = 0,' + \"'no', 'solid')\"\n symbol_layer = self.create_style(width, offset, self.text_color(self.common_flow_color), line_pattern)\n self.layer.rendererV2().symbol().appendSymbolLayer(symbol_layer)\n if t == 'ab':\n ab_offset = str(di) + '*(' + width + ' + ' + self.space_size + ')'\n else:\n ba_offset = str(di) + '*(' + width + ' + ' + self.space_size + ')'\n\n\n # If we want a plot of the differences only\n if self.radio_diff.isChecked():\n # we compute the size of the differences\n diffs = []\n for feat in self.layer.getFeatures():\n diffs.append(abs(feat.attributes()[idx_ab] - feat.attributes()[idx2_ab]))\n diffs.append(abs(feat.attributes()[idx_ba] - feat.attributes()[idx2_ba]))\n max_value = max(diffs)\n ab_offset = '0'\n ba_offset = '0'\n\n if self.expert_mode:\n QgsExpressionContextUtils.setProjectVariable('aeq_band_max_value', float(max_value))\n max_value = '@aeq_band_max_value'\n \n # We now create the positive and negative bandwidths for each side of the link\n styles = []\n styles.append((ab_base, ab_alt, ab, ab_offset))\n styles.append((ba_base, ba_alt, ba, ba_offset))\n \n for i in styles:\n width = '(coalesce(scale_linear(abs(\"' + i[0] + '\"-\"' + i[1] + '\") , 0,' + \\\n str(max_value) + ', 0, ' + self.band_size + '), 0))'\n offset = i[3] + '+' + str(i[2]) + '*(' + width + '/2 + ' + self.space_size + ')'\n line_pattern = 'if ((\"' + i[0] + '\"-\"' + i[1] + '\") = 0,' + \"'no', 'solid')\"\n color = 'if (max((\"' + i[0] + '\"-\"' + i[1] + '\"),0) = 0,' + self.text_color(self.negative_color) + \\\n ', ' + self.text_color(self.positive_color) + ')'\n symbol_layer = self.create_style(width, offset, color, line_pattern)\n self.layer.rendererV2().symbol().appendSymbolLayer(symbol_layer)\n\n self.layer.triggerRepaint()\n self.exit_procedure()\n\n def check_inputs(self):\n if self.layer is None:\n return False\n if min(self.ab_FieldComboBoxBase.currentIndex(), self.ba_FieldComboBoxBase.currentIndex(),\n self.ab_FieldComboBoxAlt.currentIndex(), self.ba_FieldComboBoxAlt.currentIndex()) < 0:\n return False\n return True\n\n def create_style(self, width, offset, color, line_pattern):\n symbol_layer = QgsSimpleLineSymbolLayerV2.create({})\n props = symbol_layer.properties()\n props['width_dd_expression'] = width\n 
props['offset_dd_expression'] = offset\n props['line_style_expression'] = line_pattern\n props['color_dd_expression'] = color\n symbol_layer = QgsSimpleLineSymbolLayerV2.create(props)\n return symbol_layer\n\n def exit_procedure(self):\n self.close()\n\n def text_color(self, some_color_btn):\n str_color = str(some_color_btn.color().getRgb())\n str_color = str_color.replace(\"(\", \"\")\n return \"'\" + str_color.replace(\")\", \"\") + \"'\"\n \nif __name__ == '__main__':\n main()", "sub_path": "gis/compare_scenarios_dialog.py", "file_name": "compare_scenarios_dialog.py", "file_ext": "py", "file_size_in_byte": 11463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.modules", "line_number": 37, "usage_type": "attribute"}, {"api_name": "qgis.gui", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 38, "usage_type": "attribute"}, {"api_name": "qgis.gui", "line_number": 38, "usage_type": "attribute"}, {"api_name": "PyQt4.uic.loadUiType", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt4.uic", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 39, "usage_type": "call"}, {"api_name": "qgis.gui.QgsMapLayerProxyModel.LineLayer", "line_number": 60, "usage_type": "attribute"}, {"api_name": "qgis.gui.QgsMapLayerProxyModel", "line_number": 60, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 63, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 64, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 66, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "82848824", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nLogging configuration\n\"\"\"\n\nimport logging.config\nimport os\n\nfrom conf import settings\n\nBASE_DIR = settings.BASE_DIR\n\nlogfile_path = os.path.join(BASE_DIR, 'log')\nif not os.path.exists(logfile_path):\n    os.makedirs(logfile_path)\n\n# Full paths of the log files\nLOGFILE_ALL = os.path.join(logfile_path, 'all.log')\nLOGFILE_ERR = os.path.join(logfile_path, 'err.log')\n\nlevel_console = settings.LEVEL_CONSOLE if settings.LEVEL_CONSOLE else 'DEBUG'\nlevel_files = settings.LEVEL_FILES if settings.LEVEL_FILES else 'INFO'\n\n\n# Define the log formats\nstandard_format = '[%(asctime)s]-[%(threadName)s:%(thread)d]-[task_id:%(name)s]-[%(filename)s:%(lineno)d]' \\\n                  '-[%(levelname)s]: %(message)s'\n# simple_format = '[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d]: %(message)s'\nsimple_format = '[%(asctime)s-%(filename)s:%(lineno)d] %(message)s'\n\n# Logging configuration dict\nlogging_dic = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'standard': {\n            'format': standard_format\n        },\n        'simple': {\n            'format': simple_format\n        },\n    },\n    'filters': {},\n    'handlers': {\n        'console': {\n            'level': level_console,\n            'class': 'logging.StreamHandler', # print to the screen\n            'formatter': 'simple'\n        },\n        'default': {\n            'level': level_files,\n            'class': 'logging.handlers.RotatingFileHandler', # save to a file\n            'filename': LOGFILE_ALL, # log file\n            'maxBytes': 1024*1024*5, # log size: 5 MB\n            'backupCount': 50,\n            'formatter': 'standard',\n            'encoding': 'utf-8',\n\n        },\n        'error': {\n            'level': 'ERROR',\n            'class': 'logging.handlers.RotatingFileHandler', # save to a file\n            'filename': LOGFILE_ERR, # log file\n            'maxBytes': 1024*1024*5, # log size: 5 MB\n            'backupCount': 50,\n            'formatter': 'standard',\n            'encoding': 'utf-8',\n        },\n    },\n    # Attach the handlers defined above, so log records are both written to file and printed to the screen\n    'loggers': {\n        '': {\n            'handlers': ['default', 'console', 'error'],\n            'level': 'DEBUG',\n            'propagate': True,\n        },\n    },\n}\nlogging.config.dictConfig(logging_dic) # Load the config defined above\n\nif __name__ == '__main__':\n    logger = logging.getLogger(__name__)\n    logger.info('It works!') # Record that this module runs\n", "sub_path": "modules/custom_logging.py", "file_name": "custom_logging.py", "file_ext": "py", "file_size_in_byte": 2526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "conf.settings.BASE_DIR", "line_number": 13, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "conf.settings.LEVEL_CONSOLE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "conf.settings.LEVEL_FILES", "line_number": 24, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "logging.config.config.dictConfig", "line_number": 81, "usage_type": "call"}, {"api_name": "logging.config.config", "line_number": 81, "usage_type": "attribute"}, {"api_name": "logging.config", "line_number": 81, "usage_type": "name"}, {"api_name": "logging.config.getLogger", "line_number": 84, 
"usage_type": "call"}, {"api_name": "logging.config", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "332759627", "text": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto # type: ignore\n\nfrom google.ads.googleads.v12.enums.types import interaction_event_type\nfrom google.ads.googleads.v12.enums.types import quality_score_bucket\n\n\n__protobuf__ = proto.module(\n package=\"google.ads.googleads.v12.common\",\n marshal=\"google.ads.googleads.v12\",\n manifest={\"Metrics\",},\n)\n\n\nclass Metrics(proto.Message):\n r\"\"\"Metrics data.\n\n Attributes:\n absolute_top_impression_percentage (float):\n The percent of your ad impressions that are\n shown as the very first ad above the organic\n search results.\n\n This field is a member of `oneof`_ ``_absolute_top_impression_percentage``.\n active_view_cpm (float):\n Average cost of viewable impressions\n (``active_view_impressions``).\n\n This field is a member of `oneof`_ ``_active_view_cpm``.\n active_view_ctr (float):\n Active view measurable clicks divided by\n active view viewable impressions. This metric is\n reported only for display network.\n\n This field is a member of `oneof`_ ``_active_view_ctr``.\n active_view_impressions (int):\n A measurement of how often your ad has become\n viewable on a Display Network site.\n\n This field is a member of `oneof`_ ``_active_view_impressions``.\n active_view_measurability (float):\n The ratio of impressions that could be\n measured by Active View over the number of\n served impressions.\n\n This field is a member of `oneof`_ ``_active_view_measurability``.\n active_view_measurable_cost_micros (int):\n The cost of the impressions you received that\n were measurable by Active View.\n\n This field is a member of `oneof`_ ``_active_view_measurable_cost_micros``.\n active_view_measurable_impressions (int):\n The number of times your ads are appearing on\n placements in positions where they can be seen.\n\n This field is a member of `oneof`_ ``_active_view_measurable_impressions``.\n active_view_viewability (float):\n The percentage of time when your ad appeared\n on an Active View enabled site (measurable\n impressions) and was viewable (viewable\n impressions).\n\n This field is a member of `oneof`_ ``_active_view_viewability``.\n all_conversions_from_interactions_rate (float):\n All conversions from interactions (as oppose\n to view through conversions) divided by the\n number of ad interactions.\n\n This field is a member of `oneof`_ ``_all_conversions_from_interactions_rate``.\n all_conversions_value (float):\n The value of all conversions.\n\n This field is a member of `oneof`_ ``_all_conversions_value``.\n all_conversions_value_by_conversion_date (float):\n The value of all conversions. When this column is selected\n with date, the values in date column means the conversion\n date. 
Details for the by_conversion_date columns are\n available at\n https://support.google.com/google-ads/answer/9549009.\n all_conversions (float):\n The total number of conversions. This includes all\n conversions regardless of the value of\n include_in_conversions_metric.\n\n This field is a member of `oneof`_ ``_all_conversions``.\n all_conversions_by_conversion_date (float):\n The total number of conversions. This includes all\n conversions regardless of the value of\n include_in_conversions_metric. When this column is selected\n with date, the values in date column means the conversion\n date. Details for the by_conversion_date columns are\n available at\n https://support.google.com/google-ads/answer/9549009.\n all_conversions_value_per_cost (float):\n The value of all conversions divided by the\n total cost of ad interactions (such as clicks\n for text ads or views for video ads).\n\n This field is a member of `oneof`_ ``_all_conversions_value_per_cost``.\n all_conversions_from_click_to_call (float):\n The number of times people clicked the \"Call\"\n button to call a store during or after clicking\n an ad. This number doesn't include whether or\n not calls were connected, or the duration of any\n calls. This metric applies to feed items only.\n\n This field is a member of `oneof`_ ``_all_conversions_from_click_to_call``.\n all_conversions_from_directions (float):\n The number of times people clicked a \"Get\n directions\" button to navigate to a store after\n clicking an ad. This metric applies to feed\n items only.\n\n This field is a member of `oneof`_ ``_all_conversions_from_directions``.\n all_conversions_from_interactions_value_per_interaction (float):\n The value of all conversions from\n interactions divided by the total number of\n interactions.\n\n This field is a member of `oneof`_ ``_all_conversions_from_interactions_value_per_interaction``.\n all_conversions_from_menu (float):\n The number of times people clicked a link to\n view a store's menu after clicking an ad.\n This metric applies to feed items only.\n\n This field is a member of `oneof`_ ``_all_conversions_from_menu``.\n all_conversions_from_order (float):\n The number of times people placed an order at\n a store after clicking an ad. This metric\n applies to feed items only.\n\n This field is a member of `oneof`_ ``_all_conversions_from_order``.\n all_conversions_from_other_engagement (float):\n The number of other conversions (for example,\n posting a review or saving a location for a\n store) that occurred after people clicked an ad.\n This metric applies to feed items only.\n\n This field is a member of `oneof`_ ``_all_conversions_from_other_engagement``.\n all_conversions_from_store_visit (float):\n Estimated number of times people visited a\n store after clicking an ad. 
This metric applies\n to feed items only.\n\n This field is a member of `oneof`_ ``_all_conversions_from_store_visit``.\n all_conversions_from_store_website (float):\n The number of times that people were taken to\n a store's URL after clicking an ad.\n This metric applies to feed items only.\n\n This field is a member of `oneof`_ ``_all_conversions_from_store_website``.\n auction_insight_search_absolute_top_impression_percentage (float):\n This metric is part of the Auction Insights\n report, and tells how often the ads of another\n participant showed as the very first ad above\n the organic search results.\n This percentage is computed only over the\n auctions that you appeared in the page.\n This metric is not publicly available.\n\n This field is a member of `oneof`_ ``_auction_insight_search_absolute_top_impression_percentage``.\n auction_insight_search_impression_share (float):\n This metric is part of the Auction Insights\n report, and tells the percentage of impressions\n that another participant obtained, over the\n total number of impressions that your ads were\n eligible for. Any value below 0.1 is reported as\n 0.0999.\n This metric is not publicly available.\n\n This field is a member of `oneof`_ ``_auction_insight_search_impression_share``.\n auction_insight_search_outranking_share (float):\n This metric is part of the Auction Insights\n report, and tells the percentage of impressions\n that your ads outranked (showed above) another\n participant in the auction, compared to the\n total number of impressions that your ads were\n eligible for.\n Any value below 0.1 is reported as 0.0999.\n This metric is not publicly available.\n\n This field is a member of `oneof`_ ``_auction_insight_search_outranking_share``.\n auction_insight_search_overlap_rate (float):\n This metric is part of the Auction Insights\n report, and tells how often another\n participant's ad received an impression when\n your ad also received an impression.\n This metric is not publicly available.\n\n This field is a member of `oneof`_ ``_auction_insight_search_overlap_rate``.\n auction_insight_search_position_above_rate (float):\n This metric is part of the Auction Insights\n report, and tells how often another\n participant's ad was shown in a higher position\n than yours, when both of your ads were shown at\n the same page. This metric is not publicly\n available.\n\n This field is a member of `oneof`_ ``_auction_insight_search_position_above_rate``.\n auction_insight_search_top_impression_percentage (float):\n This metric is part of the Auction Insights\n report, and tells how often the ads of another\n participant showed above the organic search\n results. This percentage is computed only over\n the auctions that you appeared in the page.\n This metric is not publicly available.\n\n This field is a member of `oneof`_ ``_auction_insight_search_top_impression_percentage``.\n average_cost (float):\n The average amount you pay per interaction.\n This amount is the total cost of your ads\n divided by the total number of interactions.\n\n This field is a member of `oneof`_ ``_average_cost``.\n average_cpc (float):\n The total cost of all clicks divided by the\n total number of clicks received.\n\n This field is a member of `oneof`_ ``_average_cpc``.\n average_cpe (float):\n The average amount that you've been charged\n for an ad engagement. 
This amount is the total\n cost of all ad engagements divided by the total\n number of ad engagements.\n\n This field is a member of `oneof`_ ``_average_cpe``.\n average_cpm (float):\n Average cost-per-thousand impressions (CPM).\n\n This field is a member of `oneof`_ ``_average_cpm``.\n average_cpv (float):\n The average amount you pay each time someone\n views your ad. The average CPV is defined by the\n total cost of all ad views divided by the number\n of views.\n\n This field is a member of `oneof`_ ``_average_cpv``.\n average_page_views (float):\n Average number of pages viewed per session.\n\n This field is a member of `oneof`_ ``_average_page_views``.\n average_time_on_site (float):\n Total duration of all sessions (in seconds) /\n number of sessions. Imported from Google\n Analytics.\n\n This field is a member of `oneof`_ ``_average_time_on_site``.\n benchmark_average_max_cpc (float):\n An indication of how other advertisers are\n bidding on similar products.\n\n This field is a member of `oneof`_ ``_benchmark_average_max_cpc``.\n biddable_app_install_conversions (float):\n Number of app installs.\n\n This field is a member of `oneof`_ ``_biddable_app_install_conversions``.\n biddable_app_post_install_conversions (float):\n Number of in-app actions.\n\n This field is a member of `oneof`_ ``_biddable_app_post_install_conversions``.\n benchmark_ctr (float):\n An indication on how other advertisers'\n Shopping ads for similar products are performing\n based on how often people who see their ad click\n on it.\n\n This field is a member of `oneof`_ ``_benchmark_ctr``.\n bounce_rate (float):\n Percentage of clicks where the user only\n visited a single page on your site. Imported\n from Google Analytics.\n\n This field is a member of `oneof`_ ``_bounce_rate``.\n clicks (int):\n The number of clicks.\n\n This field is a member of `oneof`_ ``_clicks``.\n combined_clicks (int):\n The number of times your ad or your site's\n listing in the unpaid results was clicked. See\n the help page at\n https://support.google.com/google-ads/answer/3097241\n for details.\n\n This field is a member of `oneof`_ ``_combined_clicks``.\n combined_clicks_per_query (float):\n The number of times your ad or your site's listing in the\n unpaid results was clicked (combined_clicks) divided by\n combined_queries. See the help page at\n https://support.google.com/google-ads/answer/3097241 for\n details.\n\n This field is a member of `oneof`_ ``_combined_clicks_per_query``.\n combined_queries (int):\n The number of searches that returned pages\n from your site in the unpaid results or showed\n one of your text ads. See the help page at\n https://support.google.com/google-ads/answer/3097241\n for details.\n\n This field is a member of `oneof`_ ``_combined_queries``.\n content_budget_lost_impression_share (float):\n The estimated percent of times that your ad\n was eligible to show on the Display Network but\n didn't because your budget was too low. Note:\n Content budget lost impression share is reported\n in the range of 0 to 0.9. Any value above 0.9 is\n reported as 0.9001.\n\n This field is a member of `oneof`_ ``_content_budget_lost_impression_share``.\n content_impression_share (float):\n The impressions you've received on the\n Display Network divided by the estimated number\n of impressions you were eligible to receive.\n Note: Content impression share is reported in\n the range of 0.1 to 1. 
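# --- Editor's illustrative sketch (not part of the generated API file) ---
# combined_clicks_per_query, documented above, is combined_clicks divided by
# combined_queries. Both operands are int64 metrics, so a defensive
# recomputation guards the zero-query case:
def recompute_combined_clicks_per_query(
    combined_clicks: int, combined_queries: int
) -> float:
    return combined_clicks / combined_queries if combined_queries else 0.0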
Any value below 0.1 is\n reported as 0.0999.\n\n This field is a member of `oneof`_ ``_content_impression_share``.\n conversion_last_received_request_date_time (str):\n The last date/time a conversion tag for this\n conversion action successfully fired and was\n seen by Google Ads. This firing event may not\n have been the result of an attributable\n conversion (for example, because the tag was\n fired from a browser that did not previously\n click an ad from an appropriate advertiser). The\n date/time is in the customer's time zone.\n\n This field is a member of `oneof`_ ``_conversion_last_received_request_date_time``.\n conversion_last_conversion_date (str):\n The date of the most recent conversion for\n this conversion action. The date is in the\n customer's time zone.\n\n This field is a member of `oneof`_ ``_conversion_last_conversion_date``.\n content_rank_lost_impression_share (float):\n The estimated percentage of impressions on\n the Display Network that your ads didn't receive\n due to poor Ad Rank. Note: Content rank lost\n impression share is reported in the range of 0\n to 0.9. Any value above 0.9 is reported as\n 0.9001.\n\n This field is a member of `oneof`_ ``_content_rank_lost_impression_share``.\n conversions_from_interactions_rate (float):\n Conversions from interactions divided by the number of ad\n interactions (such as clicks for text ads or views for video\n ads). This only includes conversion actions which\n include_in_conversions_metric attribute is set to true. If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions.\n\n This field is a member of `oneof`_ ``_conversions_from_interactions_rate``.\n conversions_value (float):\n The value of conversions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. If you use conversion-based bidding, your bid\n strategies will optimize for these conversions.\n\n This field is a member of `oneof`_ ``_conversions_value``.\n conversions_value_by_conversion_date (float):\n The value of conversions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. If you use conversion-based bidding, your bid\n strategies will optimize for these conversions. When this\n column is selected with date, the values in date column\n means the conversion date. Details for the\n by_conversion_date columns are available at\n https://support.google.com/google-ads/answer/9549009.\n conversions_value_per_cost (float):\n The value of conversions divided by the cost of ad\n interactions. This only includes conversion actions which\n include_in_conversions_metric attribute is set to true. If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions.\n\n This field is a member of `oneof`_ ``_conversions_value_per_cost``.\n conversions_from_interactions_value_per_interaction (float):\n The value of conversions from interactions divided by the\n number of ad interactions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. If you use conversion-based bidding, your bid\n strategies will optimize for these conversions.\n\n This field is a member of `oneof`_ ``_conversions_from_interactions_value_per_interaction``.\n conversions (float):\n The number of conversions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. 
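# --- Editor's illustrative sketch (not part of the generated API file) ---
# The *_by_conversion_date columns above change the meaning of segments.date
# to the conversion date when both are selected. A hypothetical GAQL query
# demonstrating that pairing (the metric and segment names are taken from
# this file; the query itself is an illustration):
BY_CONVERSION_DATE_QUERY = """
    SELECT
      segments.date,
      metrics.conversions_by_conversion_date,
      metrics.conversions_value_by_conversion_date
    FROM campaign
    WHERE segments.date DURING LAST_30_DAYS
"""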
If you use conversion-based bidding, your bid\n strategies will optimize for these conversions.\n\n This field is a member of `oneof`_ ``_conversions``.\n conversions_by_conversion_date (float):\n The number of conversions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. If you use conversion-based bidding, your bid\n strategies will optimize for these conversions. When this\n column is selected with date, the values in date column\n means the conversion date. Details for the\n by_conversion_date columns are available at\n https://support.google.com/google-ads/answer/9549009.\n cost_micros (int):\n The sum of your cost-per-click (CPC) and\n cost-per-thousand impressions (CPM) costs during\n this period.\n\n This field is a member of `oneof`_ ``_cost_micros``.\n cost_per_all_conversions (float):\n The cost of ad interactions divided by all\n conversions.\n\n This field is a member of `oneof`_ ``_cost_per_all_conversions``.\n cost_per_conversion (float):\n The cost of ad interactions divided by conversions. This\n only includes conversion actions which\n include_in_conversions_metric attribute is set to true. If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions.\n\n This field is a member of `oneof`_ ``_cost_per_conversion``.\n cost_per_current_model_attributed_conversion (float):\n The cost of ad interactions divided by current model\n attributed conversions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. If you use conversion-based bidding, your bid\n strategies will optimize for these conversions.\n\n This field is a member of `oneof`_ ``_cost_per_current_model_attributed_conversion``.\n cross_device_conversions (float):\n Conversions from when a customer clicks on a Google Ads ad\n on one device, then converts on a different device or\n browser. Cross-device conversions are already included in\n all_conversions.\n\n This field is a member of `oneof`_ ``_cross_device_conversions``.\n ctr (float):\n The number of clicks your ad receives\n (Clicks) divided by the number of times your ad\n is shown (Impressions).\n\n This field is a member of `oneof`_ ``_ctr``.\n current_model_attributed_conversions (float):\n Shows how your historic conversions data would look under\n the attribution model you've currently selected. This only\n includes conversion actions which\n include_in_conversions_metric attribute is set to true. If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions.\n\n This field is a member of `oneof`_ ``_current_model_attributed_conversions``.\n current_model_attributed_conversions_from_interactions_rate (float):\n Current model attributed conversions from interactions\n divided by the number of ad interactions (such as clicks for\n text ads or views for video ads). This only includes\n conversion actions which include_in_conversions_metric\n attribute is set to true. If you use conversion-based\n bidding, your bid strategies will optimize for these\n conversions.\n\n This field is a member of `oneof`_ ``_current_model_attributed_conversions_from_interactions_rate``.\n current_model_attributed_conversions_from_interactions_value_per_interaction (float):\n The value of current model attributed conversions from\n interactions divided by the number of ad interactions. This\n only includes conversion actions which\n include_in_conversions_metric attribute is set to true. 
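# --- Editor's illustrative sketch (not part of the generated API file) ---
# cost_micros, documented above, reports spend in micros: one millionth of
# the account's currency unit. Converting for display is a single division:
def micros_to_currency(micros: int) -> float:
    return micros / 1_000_000

assert micros_to_currency(2_750_000) == 2.75  # 2,750,000 micros == 2.75 units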
If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions.\n\n This field is a member of `oneof`_ ``_current_model_attributed_conversions_from_interactions_value_per_interaction``.\n current_model_attributed_conversions_value (float):\n The value of current model attributed conversions. This only\n includes conversion actions which\n include_in_conversions_metric attribute is set to true. If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions.\n\n This field is a member of `oneof`_ ``_current_model_attributed_conversions_value``.\n current_model_attributed_conversions_value_per_cost (float):\n The value of current model attributed conversions divided by\n the cost of ad interactions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. If you use conversion-based bidding, your bid\n strategies will optimize for these conversions.\n\n This field is a member of `oneof`_ ``_current_model_attributed_conversions_value_per_cost``.\n engagement_rate (float):\n How often people engage with your ad after\n it's shown to them. This is the number of ad\n expansions divided by the number of times your\n ad is shown.\n\n This field is a member of `oneof`_ ``_engagement_rate``.\n engagements (int):\n The number of engagements.\n An engagement occurs when a viewer expands your\n Lightbox ad. Also, in the future, other ad types\n may support engagement metrics.\n\n This field is a member of `oneof`_ ``_engagements``.\n hotel_average_lead_value_micros (float):\n Average lead value based on clicks.\n\n This field is a member of `oneof`_ ``_hotel_average_lead_value_micros``.\n hotel_commission_rate_micros (int):\n Commission bid rate in micros. A 20%\n commission is represented as 200,000.\n\n This field is a member of `oneof`_ ``_hotel_commission_rate_micros``.\n hotel_expected_commission_cost (float):\n Expected commission cost. 
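# --- Editor's illustrative sketch (not part of the generated API file) ---
# hotel_commission_rate_micros, documented above, encodes a rate in micros:
# 200,000 / 1,000,000 = 0.2, i.e. the 20% commission from the docstring.
def commission_rate_from_micros(rate_micros: int) -> float:
    return rate_micros / 1_000_000

assert commission_rate_from_micros(200_000) == 0.2  # 20%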
The result of multiplying the\n commission value times the hotel_commission_rate in\n advertiser currency.\n\n This field is a member of `oneof`_ ``_hotel_expected_commission_cost``.\n hotel_price_difference_percentage (float):\n The average price difference between the\n price offered by reporting hotel advertiser and\n the cheapest price offered by the competing\n advertiser.\n\n This field is a member of `oneof`_ ``_hotel_price_difference_percentage``.\n hotel_eligible_impressions (int):\n The number of impressions that hotel partners\n could have had given their feed performance.\n\n This field is a member of `oneof`_ ``_hotel_eligible_impressions``.\n historical_creative_quality_score (google.ads.googleads.v12.enums.types.QualityScoreBucketEnum.QualityScoreBucket):\n The creative historical quality score.\n historical_landing_page_quality_score (google.ads.googleads.v12.enums.types.QualityScoreBucketEnum.QualityScoreBucket):\n The quality of historical landing page\n experience.\n historical_quality_score (int):\n The historical quality score.\n\n This field is a member of `oneof`_ ``_historical_quality_score``.\n historical_search_predicted_ctr (google.ads.googleads.v12.enums.types.QualityScoreBucketEnum.QualityScoreBucket):\n The historical search predicted click through\n rate (CTR).\n gmail_forwards (int):\n The number of times the ad was forwarded to\n someone else as a message.\n\n This field is a member of `oneof`_ ``_gmail_forwards``.\n gmail_saves (int):\n The number of times someone has saved your\n Gmail ad to their inbox as a message.\n\n This field is a member of `oneof`_ ``_gmail_saves``.\n gmail_secondary_clicks (int):\n The number of clicks to the landing page on\n the expanded state of Gmail ads.\n\n This field is a member of `oneof`_ ``_gmail_secondary_clicks``.\n impressions_from_store_reach (int):\n The number of times a store's location-based\n ad was shown. This metric applies to feed items\n only.\n\n This field is a member of `oneof`_ ``_impressions_from_store_reach``.\n impressions (int):\n Count of how often your ad has appeared on a\n search results page or website on the Google\n Network.\n\n This field is a member of `oneof`_ ``_impressions``.\n interaction_rate (float):\n How often people interact with your ad after\n it is shown to them. 
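# --- Editor's illustrative sketch (not part of the generated API file) ---
# The historical_*_quality_score fields above are QualityScoreBucket enums
# rather than numbers. A hedged sketch of testing one against a bucket,
# using the same enum module this generated file references:
from google.ads.googleads.v12.enums.types import quality_score_bucket

QualityBucket = quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket

def is_below_average(bucket_value) -> bool:
    return bucket_value == QualityBucket.BELOW_AVERAGE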
This is the number of\n interactions divided by the number of times your\n ad is shown.\n\n This field is a member of `oneof`_ ``_interaction_rate``.\n interactions (int):\n The number of interactions.\n An interaction is the main user action\n associated with an ad format-clicks for text and\n shopping ads, views for video ads, and so on.\n\n This field is a member of `oneof`_ ``_interactions``.\n interaction_event_types (Sequence[google.ads.googleads.v12.enums.types.InteractionEventTypeEnum.InteractionEventType]):\n The types of payable and free interactions.\n invalid_click_rate (float):\n The percentage of clicks filtered out of your\n total number of clicks (filtered + non-filtered\n clicks) during the reporting period.\n\n This field is a member of `oneof`_ ``_invalid_click_rate``.\n invalid_clicks (int):\n Number of clicks Google considers\n illegitimate and doesn't charge you for.\n\n This field is a member of `oneof`_ ``_invalid_clicks``.\n message_chats (int):\n Number of message chats initiated for Click\n To Message impressions that were message\n tracking eligible.\n\n This field is a member of `oneof`_ ``_message_chats``.\n message_impressions (int):\n Number of Click To Message impressions that\n were message tracking eligible.\n\n This field is a member of `oneof`_ ``_message_impressions``.\n message_chat_rate (float):\n Number of message chats initiated (message_chats) divided by\n the number of message impressions (message_impressions).\n Rate at which a user initiates a message chat from an ad\n impression with a messaging option and message tracking\n enabled. Note that this rate can be more than 1.0 for a\n given message impression.\n\n This field is a member of `oneof`_ ``_message_chat_rate``.\n mobile_friendly_clicks_percentage (float):\n The percentage of mobile clicks that go to a\n mobile-friendly page.\n\n This field is a member of `oneof`_ ``_mobile_friendly_clicks_percentage``.\n optimization_score_uplift (float):\n Total optimization score uplift of all\n recommendations.\n\n This field is a member of `oneof`_ ``_optimization_score_uplift``.\n optimization_score_url (str):\n URL for the optimization score page in the Google Ads web\n interface. This metric can be selected from ``customer`` or\n ``campaign``, and can be segmented by\n ``segments.recommendation_type``. For example,\n ``SELECT metrics.optimization_score_url, segments.recommendation_type FROM customer``\n will return a URL for each unique (customer,\n recommendation_type) combination.\n\n This field is a member of `oneof`_ ``_optimization_score_url``.\n organic_clicks (int):\n The number of times someone clicked your\n site's listing in the unpaid results for a\n particular query. See the help page at\n https://support.google.com/google-ads/answer/3097241\n for details.\n\n This field is a member of `oneof`_ ``_organic_clicks``.\n organic_clicks_per_query (float):\n The number of times someone clicked your site's listing in\n the unpaid results (organic_clicks) divided by the total\n number of searches that returned pages from your site\n (organic_queries). See the help page at\n https://support.google.com/google-ads/answer/3097241 for\n details.\n\n This field is a member of `oneof`_ ``_organic_clicks_per_query``.\n organic_impressions (int):\n The number of listings for your site in the\n unpaid search results. 
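# --- Editor's illustrative sketch (not part of the generated API file) ---
# Running the example query from the optimization_score_url docstring above
# with the google-ads client. customer_id is a placeholder, and
# load_from_storage assumes a configured google-ads.yaml:
from google.ads.googleads.client import GoogleAdsClient

def print_optimization_score_urls(customer_id: str) -> None:
    client = GoogleAdsClient.load_from_storage(version="v12")
    ga_service = client.get_service("GoogleAdsService")
    query = (
        "SELECT metrics.optimization_score_url, "
        "segments.recommendation_type FROM customer"
    )
    for row in ga_service.search(customer_id=customer_id, query=query):
        print(row.segments.recommendation_type,
              row.metrics.optimization_score_url)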
See the help page at\n https://support.google.com/google-ads/answer/3097241\n for details.\n\n This field is a member of `oneof`_ ``_organic_impressions``.\n organic_impressions_per_query (float):\n The number of times a page from your site was listed in the\n unpaid search results (organic_impressions) divided by the\n number of searches returning your site's listing in the\n unpaid results (organic_queries). See the help page at\n https://support.google.com/google-ads/answer/3097241 for\n details.\n\n This field is a member of `oneof`_ ``_organic_impressions_per_query``.\n organic_queries (int):\n The total number of searches that returned\n your site's listing in the unpaid results. See\n the help page at\n https://support.google.com/google-ads/answer/3097241\n for details.\n\n This field is a member of `oneof`_ ``_organic_queries``.\n percent_new_visitors (float):\n Percentage of first-time sessions (from\n people who had never visited your site before).\n Imported from Google Analytics.\n\n This field is a member of `oneof`_ ``_percent_new_visitors``.\n phone_calls (int):\n Number of offline phone calls.\n\n This field is a member of `oneof`_ ``_phone_calls``.\n phone_impressions (int):\n Number of offline phone impressions.\n\n This field is a member of `oneof`_ ``_phone_impressions``.\n phone_through_rate (float):\n Number of phone calls received (phone_calls) divided by the\n number of times your phone number is shown\n (phone_impressions).\n\n This field is a member of `oneof`_ ``_phone_through_rate``.\n relative_ctr (float):\n Your clickthrough rate (Ctr) divided by the\n average clickthrough rate of all advertisers on\n the websites that show your ads. Measures how\n your ads perform on Display Network sites\n compared to other ads on the same sites.\n\n This field is a member of `oneof`_ ``_relative_ctr``.\n search_absolute_top_impression_share (float):\n The percentage of the customer's Shopping or\n Search ad impressions that are shown in the most\n prominent Shopping position. See\n https://support.google.com/google-ads/answer/7501826\n for details. Any value below 0.1 is reported as\n 0.0999.\n\n This field is a member of `oneof`_ ``_search_absolute_top_impression_share``.\n search_budget_lost_absolute_top_impression_share (float):\n The number estimating how often your ad\n wasn't the very first ad above the organic\n search results due to a low budget. Note: Search\n budget lost absolute top impression share is\n reported in the range of 0 to 0.9. Any value\n above 0.9 is reported as 0.9001.\n\n This field is a member of `oneof`_ ``_search_budget_lost_absolute_top_impression_share``.\n search_budget_lost_impression_share (float):\n The estimated percent of times that your ad\n was eligible to show on the Search Network but\n didn't because your budget was too low. Note:\n Search budget lost impression share is reported\n in the range of 0 to 0.9. Any value above 0.9 is\n reported as 0.9001.\n\n This field is a member of `oneof`_ ``_search_budget_lost_impression_share``.\n search_budget_lost_top_impression_share (float):\n The number estimating how often your ad\n didn't show anywhere above the organic search\n results due to a low budget. Note: Search budget\n lost top impression share is reported in the\n range of 0 to 0.9. 
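# --- Editor's illustrative sketch (not part of the generated API file) ---
# phone_through_rate, documented above, is phone_calls / phone_impressions.
# A direct recomputation with a zero-impression guard:
def recompute_phone_through_rate(phone_calls: int, phone_impressions: int) -> float:
    return phone_calls / phone_impressions if phone_impressions else 0.0

assert recompute_phone_through_rate(25, 100) == 0.25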
Any value above 0.9 is\n reported as 0.9001.\n\n This field is a member of `oneof`_ ``_search_budget_lost_top_impression_share``.\n search_click_share (float):\n The number of clicks you've received on the\n Search Network divided by the estimated number\n of clicks you were eligible to receive. Note:\n Search click share is reported in the range of\n 0.1 to 1. Any value below 0.1 is reported as\n 0.0999.\n\n This field is a member of `oneof`_ ``_search_click_share``.\n search_exact_match_impression_share (float):\n The impressions you've received divided by\n the estimated number of impressions you were\n eligible to receive on the Search Network for\n search terms that matched your keywords exactly\n (or were close variants of your keyword),\n regardless of your keyword match types. Note:\n Search exact match impression share is reported\n in the range of 0.1 to 1. Any value below 0.1 is\n reported as 0.0999.\n\n This field is a member of `oneof`_ ``_search_exact_match_impression_share``.\n search_impression_share (float):\n The impressions you've received on the Search\n Network divided by the estimated number of\n impressions you were eligible to receive. Note:\n Search impression share is reported in the range\n of 0.1 to 1. Any value below 0.1 is reported as\n 0.0999.\n\n This field is a member of `oneof`_ ``_search_impression_share``.\n search_rank_lost_absolute_top_impression_share (float):\n The number estimating how often your ad\n wasn't the very first ad above the organic\n search results due to poor Ad Rank. Note: Search\n rank lost absolute top impression share is\n reported in the range of 0 to 0.9. Any value\n above 0.9 is reported as 0.9001.\n\n This field is a member of `oneof`_ ``_search_rank_lost_absolute_top_impression_share``.\n search_rank_lost_impression_share (float):\n The estimated percentage of impressions on\n the Search Network that your ads didn't receive\n due to poor Ad Rank. Note: Search rank lost\n impression share is reported in the range of 0\n to 0.9. Any value above 0.9 is reported as\n 0.9001.\n\n This field is a member of `oneof`_ ``_search_rank_lost_impression_share``.\n search_rank_lost_top_impression_share (float):\n The number estimating how often your ad\n didn't show anywhere above the organic search\n results due to poor Ad Rank. Note: Search rank\n lost top impression share is reported in the\n range of 0 to 0.9. Any value above 0.9 is\n reported as 0.9001.\n\n This field is a member of `oneof`_ ``_search_rank_lost_top_impression_share``.\n search_top_impression_share (float):\n The impressions you've received in the top\n location (anywhere above the organic search\n results) compared to the estimated number of\n impressions you were eligible to receive in the\n top location. Note: Search top impression share\n is reported in the range of 0.1 to 1. Any value\n below 0.1 is reported as 0.0999.\n\n This field is a member of `oneof`_ ``_search_top_impression_share``.\n speed_score (int):\n A measure of how quickly your page loads\n after clicks on your mobile ads. 
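# --- Editor's illustrative sketch (not part of the generated API file) ---
# The impression-share metrics above are clamped at both ends: share values
# below 0.1 are returned as the sentinel 0.0999, and lost-share values above
# 0.9 are returned as 0.9001. A small formatter that surfaces the sentinels:
def describe_share(value: float) -> str:
    if value == 0.0999:
        return "< 10%"
    if value == 0.9001:
        return "> 90%"
    return f"{value:.1%}"

assert describe_share(0.0999) == "< 10%"
assert describe_share(0.42) == "42.0%"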
The score is a\n range from 1 to 10, 10 being the fastest.\n\n This field is a member of `oneof`_ ``_speed_score``.\n top_impression_percentage (float):\n The percent of your ad impressions that are\n shown anywhere above the organic search results.\n\n This field is a member of `oneof`_ ``_top_impression_percentage``.\n valid_accelerated_mobile_pages_clicks_percentage (float):\n The percentage of ad clicks to Accelerated\n Mobile Pages (AMP) landing pages that reach a\n valid AMP page.\n\n This field is a member of `oneof`_ ``_valid_accelerated_mobile_pages_clicks_percentage``.\n value_per_all_conversions (float):\n The value of all conversions divided by the\n number of all conversions.\n\n This field is a member of `oneof`_ ``_value_per_all_conversions``.\n value_per_all_conversions_by_conversion_date (float):\n The value of all conversions divided by the number of all\n conversions. When this column is selected with date, the\n values in date column means the conversion date. Details for\n the by_conversion_date columns are available at\n https://support.google.com/google-ads/answer/9549009.\n\n This field is a member of `oneof`_ ``_value_per_all_conversions_by_conversion_date``.\n value_per_conversion (float):\n The value of conversions divided by the number of\n conversions. This only includes conversion actions which\n include_in_conversions_metric attribute is set to true. If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions.\n\n This field is a member of `oneof`_ ``_value_per_conversion``.\n value_per_conversions_by_conversion_date (float):\n The value of conversions divided by the number of\n conversions. This only includes conversion actions which\n include_in_conversions_metric attribute is set to true. If\n you use conversion-based bidding, your bid strategies will\n optimize for these conversions. When this column is selected\n with date, the values in date column means the conversion\n date. Details for the by_conversion_date columns are\n available at\n https://support.google.com/google-ads/answer/9549009.\n\n This field is a member of `oneof`_ ``_value_per_conversions_by_conversion_date``.\n value_per_current_model_attributed_conversion (float):\n The value of current model attributed conversions divided by\n the number of the conversions. This only includes conversion\n actions which include_in_conversions_metric attribute is set\n to true. 
If you use conversion-based bidding, your bid\n strategies will optimize for these conversions.\n\n This field is a member of `oneof`_ ``_value_per_current_model_attributed_conversion``.\n video_quartile_p100_rate (float):\n Percentage of impressions where the viewer\n watched all of your video.\n\n This field is a member of `oneof`_ ``_video_quartile_p100_rate``.\n video_quartile_p25_rate (float):\n Percentage of impressions where the viewer\n watched 25% of your video.\n\n This field is a member of `oneof`_ ``_video_quartile_p25_rate``.\n video_quartile_p50_rate (float):\n Percentage of impressions where the viewer\n watched 50% of your video.\n\n This field is a member of `oneof`_ ``_video_quartile_p50_rate``.\n video_quartile_p75_rate (float):\n Percentage of impressions where the viewer\n watched 75% of your video.\n\n This field is a member of `oneof`_ ``_video_quartile_p75_rate``.\n video_view_rate (float):\n The number of views your TrueView video ad\n receives divided by its number of impressions,\n including thumbnail impressions for TrueView\n in-display ads.\n\n This field is a member of `oneof`_ ``_video_view_rate``.\n video_views (int):\n The number of times your video ads were\n viewed.\n\n This field is a member of `oneof`_ ``_video_views``.\n view_through_conversions (int):\n The total number of view-through conversions.\n These happen when a customer sees an image or\n rich media ad, then later completes a conversion\n on your site without interacting with (for\n example, clicking on) another ad.\n\n This field is a member of `oneof`_ ``_view_through_conversions``.\n sk_ad_network_conversions (int):\n The number of iOS Store Kit Ad Network\n conversions.\n publisher_purchased_clicks (int):\n Clicks from properties not owned by the\n publisher for which the traffic the publisher\n has paid for or acquired through incentivized\n activity\n publisher_organic_clicks (int):\n Clicks from properties for which the traffic\n the publisher has not paid for or acquired\n through incentivized activity\n publisher_unknown_clicks (int):\n Clicks from traffic which is not identified\n as \"Publisher Purchased\" or \"Publisher Organic\".\n all_conversions_from_location_asset_click_to_call (float):\n Number of call button clicks on any location\n surface after a chargeable ad event (click or\n impression). This measure is coming from Asset\n based location.\n\n This field is a member of `oneof`_ ``_all_conversions_from_location_asset_click_to_call``.\n all_conversions_from_location_asset_directions (float):\n Number of driving directions clicks on any\n location surface after a chargeable ad event\n (click or impression). This measure is coming\n from Asset based location.\n\n This field is a member of `oneof`_ ``_all_conversions_from_location_asset_directions``.\n all_conversions_from_location_asset_menu (float):\n Number of menu link clicks on any location\n surface after a chargeable ad event (click or\n impression). This measure is coming from Asset\n based location.\n\n This field is a member of `oneof`_ ``_all_conversions_from_location_asset_menu``.\n all_conversions_from_location_asset_order (float):\n Number of order clicks on any location\n surface after a chargeable ad event (click or\n impression). 
This measure is coming from Asset\n based location.\n\n This field is a member of `oneof`_ ``_all_conversions_from_location_asset_order``.\n all_conversions_from_location_asset_other_engagement (float):\n Number of other types of local action clicks\n on any location surface after a chargeable ad\n event (click or impression). This measure is\n coming from Asset based location.\n\n This field is a member of `oneof`_ ``_all_conversions_from_location_asset_other_engagement``.\n all_conversions_from_location_asset_store_visits (float):\n Estimated number of visits to the store after\n a chargeable ad event (click or impression).\n This measure is coming from Asset based\n location.\n\n This field is a member of `oneof`_ ``_all_conversions_from_location_asset_store_visits``.\n all_conversions_from_location_asset_website (float):\n Number of website URL clicks on any location\n surface after a chargeable ad event (click or\n impression). This measure is coming from Asset\n based location.\n\n This field is a member of `oneof`_ ``_all_conversions_from_location_asset_website``.\n eligible_impressions_from_location_asset_store_reach (int):\n Number of impressions in which the store\n location was shown or the location was used for\n targeting. This measure is coming from Asset\n based location.\n\n This field is a member of `oneof`_ ``_eligible_impressions_from_location_asset_store_reach``.\n view_through_conversions_from_location_asset_click_to_call (float):\n Number of call button clicks on any location\n surface after an impression. This measure is\n coming from Asset based location.\n\n This field is a member of `oneof`_ ``_view_through_conversions_from_location_asset_click_to_call``.\n view_through_conversions_from_location_asset_directions (float):\n Number of driving directions clicks on any\n location surface after an impression. This\n measure is coming from Asset based location.\n\n This field is a member of `oneof`_ ``_view_through_conversions_from_location_asset_directions``.\n view_through_conversions_from_location_asset_menu (float):\n Number of menu link clicks on any location\n surface after an impression. This measure is\n coming from Asset based location.\n\n This field is a member of `oneof`_ ``_view_through_conversions_from_location_asset_menu``.\n view_through_conversions_from_location_asset_order (float):\n Number of order clicks on any location\n surface after an impression. This measure is\n coming from Asset based location.\n\n This field is a member of `oneof`_ ``_view_through_conversions_from_location_asset_order``.\n view_through_conversions_from_location_asset_other_engagement (float):\n Number of other types of local action clicks\n on any location surface after an impression.\n This measure is coming from Asset based\n location.\n\n This field is a member of `oneof`_ ``_view_through_conversions_from_location_asset_other_engagement``.\n view_through_conversions_from_location_asset_store_visits (float):\n Estimated number of visits to the store after\n an impression. This measure is coming from Asset\n based location.\n\n This field is a member of `oneof`_ ``_view_through_conversions_from_location_asset_store_visits``.\n view_through_conversions_from_location_asset_website (float):\n Number of website URL clicks on any location\n surface after an impression. 
This measure is\n coming from Asset based location.\n\n This field is a member of `oneof`_ ``_view_through_conversions_from_location_asset_website``.\n \"\"\"\n\n absolute_top_impression_percentage = proto.Field(\n proto.DOUBLE, number=183, optional=True,\n )\n active_view_cpm = proto.Field(proto.DOUBLE, number=184, optional=True,)\n active_view_ctr = proto.Field(proto.DOUBLE, number=185, optional=True,)\n active_view_impressions = proto.Field(\n proto.INT64, number=186, optional=True,\n )\n active_view_measurability = proto.Field(\n proto.DOUBLE, number=187, optional=True,\n )\n active_view_measurable_cost_micros = proto.Field(\n proto.INT64, number=188, optional=True,\n )\n active_view_measurable_impressions = proto.Field(\n proto.INT64, number=189, optional=True,\n )\n active_view_viewability = proto.Field(\n proto.DOUBLE, number=190, optional=True,\n )\n all_conversions_from_interactions_rate = proto.Field(\n proto.DOUBLE, number=191, optional=True,\n )\n all_conversions_value = proto.Field(\n proto.DOUBLE, number=192, optional=True,\n )\n all_conversions_value_by_conversion_date = proto.Field(\n proto.DOUBLE, number=240,\n )\n all_conversions = proto.Field(proto.DOUBLE, number=193, optional=True,)\n all_conversions_by_conversion_date = proto.Field(proto.DOUBLE, number=241,)\n all_conversions_value_per_cost = proto.Field(\n proto.DOUBLE, number=194, optional=True,\n )\n all_conversions_from_click_to_call = proto.Field(\n proto.DOUBLE, number=195, optional=True,\n )\n all_conversions_from_directions = proto.Field(\n proto.DOUBLE, number=196, optional=True,\n )\n all_conversions_from_interactions_value_per_interaction = proto.Field(\n proto.DOUBLE, number=197, optional=True,\n )\n all_conversions_from_menu = proto.Field(\n proto.DOUBLE, number=198, optional=True,\n )\n all_conversions_from_order = proto.Field(\n proto.DOUBLE, number=199, optional=True,\n )\n all_conversions_from_other_engagement = proto.Field(\n proto.DOUBLE, number=200, optional=True,\n )\n all_conversions_from_store_visit = proto.Field(\n proto.DOUBLE, number=201, optional=True,\n )\n all_conversions_from_store_website = proto.Field(\n proto.DOUBLE, number=202, optional=True,\n )\n auction_insight_search_absolute_top_impression_percentage = proto.Field(\n proto.DOUBLE, number=258, optional=True,\n )\n auction_insight_search_impression_share = proto.Field(\n proto.DOUBLE, number=259, optional=True,\n )\n auction_insight_search_outranking_share = proto.Field(\n proto.DOUBLE, number=260, optional=True,\n )\n auction_insight_search_overlap_rate = proto.Field(\n proto.DOUBLE, number=261, optional=True,\n )\n auction_insight_search_position_above_rate = proto.Field(\n proto.DOUBLE, number=262, optional=True,\n )\n auction_insight_search_top_impression_percentage = proto.Field(\n proto.DOUBLE, number=263, optional=True,\n )\n average_cost = proto.Field(proto.DOUBLE, number=203, optional=True,)\n average_cpc = proto.Field(proto.DOUBLE, number=204, optional=True,)\n average_cpe = proto.Field(proto.DOUBLE, number=205, optional=True,)\n average_cpm = proto.Field(proto.DOUBLE, number=206, optional=True,)\n average_cpv = proto.Field(proto.DOUBLE, number=207, optional=True,)\n average_page_views = proto.Field(proto.DOUBLE, number=208, optional=True,)\n average_time_on_site = proto.Field(proto.DOUBLE, number=209, optional=True,)\n benchmark_average_max_cpc = proto.Field(\n proto.DOUBLE, number=210, optional=True,\n )\n biddable_app_install_conversions = proto.Field(\n proto.DOUBLE, number=254, optional=True,\n )\n 
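# --- Editor's illustrative sketch (not part of the generated proto code) ---
# Fields declared with optional=True below are proto3 optional fields, which
# is why each docstring calls the field "a member of `oneof`_" (a synthetic
# one-field oneof named _<field>). Presence can be tested before reading;
# going through the wrapped protobuf message is one way that works for any
# field with presence (proto-plus also supports `"all_conversions" in msg`):
def has_all_conversions(metrics_message) -> bool:
    return metrics_message._pb.HasField("all_conversions")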
biddable_app_post_install_conversions = proto.Field(\n proto.DOUBLE, number=255, optional=True,\n )\n benchmark_ctr = proto.Field(proto.DOUBLE, number=211, optional=True,)\n bounce_rate = proto.Field(proto.DOUBLE, number=212, optional=True,)\n clicks = proto.Field(proto.INT64, number=131, optional=True,)\n combined_clicks = proto.Field(proto.INT64, number=156, optional=True,)\n combined_clicks_per_query = proto.Field(\n proto.DOUBLE, number=157, optional=True,\n )\n combined_queries = proto.Field(proto.INT64, number=158, optional=True,)\n content_budget_lost_impression_share = proto.Field(\n proto.DOUBLE, number=159, optional=True,\n )\n content_impression_share = proto.Field(\n proto.DOUBLE, number=160, optional=True,\n )\n conversion_last_received_request_date_time = proto.Field(\n proto.STRING, number=161, optional=True,\n )\n conversion_last_conversion_date = proto.Field(\n proto.STRING, number=162, optional=True,\n )\n content_rank_lost_impression_share = proto.Field(\n proto.DOUBLE, number=163, optional=True,\n )\n conversions_from_interactions_rate = proto.Field(\n proto.DOUBLE, number=164, optional=True,\n )\n conversions_value = proto.Field(proto.DOUBLE, number=165, optional=True,)\n conversions_value_by_conversion_date = proto.Field(\n proto.DOUBLE, number=242,\n )\n conversions_value_per_cost = proto.Field(\n proto.DOUBLE, number=166, optional=True,\n )\n conversions_from_interactions_value_per_interaction = proto.Field(\n proto.DOUBLE, number=167, optional=True,\n )\n conversions = proto.Field(proto.DOUBLE, number=168, optional=True,)\n conversions_by_conversion_date = proto.Field(proto.DOUBLE, number=243,)\n cost_micros = proto.Field(proto.INT64, number=169, optional=True,)\n cost_per_all_conversions = proto.Field(\n proto.DOUBLE, number=170, optional=True,\n )\n cost_per_conversion = proto.Field(proto.DOUBLE, number=171, optional=True,)\n cost_per_current_model_attributed_conversion = proto.Field(\n proto.DOUBLE, number=172, optional=True,\n )\n cross_device_conversions = proto.Field(\n proto.DOUBLE, number=173, optional=True,\n )\n ctr = proto.Field(proto.DOUBLE, number=174, optional=True,)\n current_model_attributed_conversions = proto.Field(\n proto.DOUBLE, number=175, optional=True,\n )\n current_model_attributed_conversions_from_interactions_rate = proto.Field(\n proto.DOUBLE, number=176, optional=True,\n )\n current_model_attributed_conversions_from_interactions_value_per_interaction = proto.Field(\n proto.DOUBLE, number=177, optional=True,\n )\n current_model_attributed_conversions_value = proto.Field(\n proto.DOUBLE, number=178, optional=True,\n )\n current_model_attributed_conversions_value_per_cost = proto.Field(\n proto.DOUBLE, number=179, optional=True,\n )\n engagement_rate = proto.Field(proto.DOUBLE, number=180, optional=True,)\n engagements = proto.Field(proto.INT64, number=181, optional=True,)\n hotel_average_lead_value_micros = proto.Field(\n proto.DOUBLE, number=213, optional=True,\n )\n hotel_commission_rate_micros = proto.Field(\n proto.INT64, number=256, optional=True,\n )\n hotel_expected_commission_cost = proto.Field(\n proto.DOUBLE, number=257, optional=True,\n )\n hotel_price_difference_percentage = proto.Field(\n proto.DOUBLE, number=214, optional=True,\n )\n hotel_eligible_impressions = proto.Field(\n proto.INT64, number=215, optional=True,\n )\n historical_creative_quality_score = proto.Field(\n proto.ENUM,\n number=80,\n enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,\n )\n historical_landing_page_quality_score = 
proto.Field(\n proto.ENUM,\n number=81,\n enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,\n )\n historical_quality_score = proto.Field(\n proto.INT64, number=216, optional=True,\n )\n historical_search_predicted_ctr = proto.Field(\n proto.ENUM,\n number=83,\n enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,\n )\n gmail_forwards = proto.Field(proto.INT64, number=217, optional=True,)\n gmail_saves = proto.Field(proto.INT64, number=218, optional=True,)\n gmail_secondary_clicks = proto.Field(\n proto.INT64, number=219, optional=True,\n )\n impressions_from_store_reach = proto.Field(\n proto.INT64, number=220, optional=True,\n )\n impressions = proto.Field(proto.INT64, number=221, optional=True,)\n interaction_rate = proto.Field(proto.DOUBLE, number=222, optional=True,)\n interactions = proto.Field(proto.INT64, number=223, optional=True,)\n interaction_event_types = proto.RepeatedField(\n proto.ENUM,\n number=100,\n enum=interaction_event_type.InteractionEventTypeEnum.InteractionEventType,\n )\n invalid_click_rate = proto.Field(proto.DOUBLE, number=224, optional=True,)\n invalid_clicks = proto.Field(proto.INT64, number=225, optional=True,)\n message_chats = proto.Field(proto.INT64, number=226, optional=True,)\n message_impressions = proto.Field(proto.INT64, number=227, optional=True,)\n message_chat_rate = proto.Field(proto.DOUBLE, number=228, optional=True,)\n mobile_friendly_clicks_percentage = proto.Field(\n proto.DOUBLE, number=229, optional=True,\n )\n optimization_score_uplift = proto.Field(\n proto.DOUBLE, number=247, optional=True,\n )\n optimization_score_url = proto.Field(\n proto.STRING, number=248, optional=True,\n )\n organic_clicks = proto.Field(proto.INT64, number=230, optional=True,)\n organic_clicks_per_query = proto.Field(\n proto.DOUBLE, number=231, optional=True,\n )\n organic_impressions = proto.Field(proto.INT64, number=232, optional=True,)\n organic_impressions_per_query = proto.Field(\n proto.DOUBLE, number=233, optional=True,\n )\n organic_queries = proto.Field(proto.INT64, number=234, optional=True,)\n percent_new_visitors = proto.Field(proto.DOUBLE, number=235, optional=True,)\n phone_calls = proto.Field(proto.INT64, number=236, optional=True,)\n phone_impressions = proto.Field(proto.INT64, number=237, optional=True,)\n phone_through_rate = proto.Field(proto.DOUBLE, number=238, optional=True,)\n relative_ctr = proto.Field(proto.DOUBLE, number=239, optional=True,)\n search_absolute_top_impression_share = proto.Field(\n proto.DOUBLE, number=136, optional=True,\n )\n search_budget_lost_absolute_top_impression_share = proto.Field(\n proto.DOUBLE, number=137, optional=True,\n )\n search_budget_lost_impression_share = proto.Field(\n proto.DOUBLE, number=138, optional=True,\n )\n search_budget_lost_top_impression_share = proto.Field(\n proto.DOUBLE, number=139, optional=True,\n )\n search_click_share = proto.Field(proto.DOUBLE, number=140, optional=True,)\n search_exact_match_impression_share = proto.Field(\n proto.DOUBLE, number=141, optional=True,\n )\n search_impression_share = proto.Field(\n proto.DOUBLE, number=142, optional=True,\n )\n search_rank_lost_absolute_top_impression_share = proto.Field(\n proto.DOUBLE, number=143, optional=True,\n )\n search_rank_lost_impression_share = proto.Field(\n proto.DOUBLE, number=144, optional=True,\n )\n search_rank_lost_top_impression_share = proto.Field(\n proto.DOUBLE, number=145, optional=True,\n )\n search_top_impression_share = proto.Field(\n proto.DOUBLE, number=146, optional=True,\n 
)\n speed_score = proto.Field(proto.INT64, number=147, optional=True,)\n top_impression_percentage = proto.Field(\n proto.DOUBLE, number=148, optional=True,\n )\n valid_accelerated_mobile_pages_clicks_percentage = proto.Field(\n proto.DOUBLE, number=149, optional=True,\n )\n value_per_all_conversions = proto.Field(\n proto.DOUBLE, number=150, optional=True,\n )\n value_per_all_conversions_by_conversion_date = proto.Field(\n proto.DOUBLE, number=244, optional=True,\n )\n value_per_conversion = proto.Field(proto.DOUBLE, number=151, optional=True,)\n value_per_conversions_by_conversion_date = proto.Field(\n proto.DOUBLE, number=245, optional=True,\n )\n value_per_current_model_attributed_conversion = proto.Field(\n proto.DOUBLE, number=152, optional=True,\n )\n video_quartile_p100_rate = proto.Field(\n proto.DOUBLE, number=132, optional=True,\n )\n video_quartile_p25_rate = proto.Field(\n proto.DOUBLE, number=133, optional=True,\n )\n video_quartile_p50_rate = proto.Field(\n proto.DOUBLE, number=134, optional=True,\n )\n video_quartile_p75_rate = proto.Field(\n proto.DOUBLE, number=135, optional=True,\n )\n video_view_rate = proto.Field(proto.DOUBLE, number=153, optional=True,)\n video_views = proto.Field(proto.INT64, number=154, optional=True,)\n view_through_conversions = proto.Field(\n proto.INT64, number=155, optional=True,\n )\n sk_ad_network_conversions = proto.Field(proto.INT64, number=246,)\n publisher_purchased_clicks = proto.Field(proto.INT64, number=264,)\n publisher_organic_clicks = proto.Field(proto.INT64, number=265,)\n publisher_unknown_clicks = proto.Field(proto.INT64, number=266,)\n all_conversions_from_location_asset_click_to_call = proto.Field(\n proto.DOUBLE, number=267, optional=True,\n )\n all_conversions_from_location_asset_directions = proto.Field(\n proto.DOUBLE, number=268, optional=True,\n )\n all_conversions_from_location_asset_menu = proto.Field(\n proto.DOUBLE, number=269, optional=True,\n )\n all_conversions_from_location_asset_order = proto.Field(\n proto.DOUBLE, number=270, optional=True,\n )\n all_conversions_from_location_asset_other_engagement = proto.Field(\n proto.DOUBLE, number=271, optional=True,\n )\n all_conversions_from_location_asset_store_visits = proto.Field(\n proto.DOUBLE, number=272, optional=True,\n )\n all_conversions_from_location_asset_website = proto.Field(\n proto.DOUBLE, number=273, optional=True,\n )\n eligible_impressions_from_location_asset_store_reach = proto.Field(\n proto.INT64, number=274, optional=True,\n )\n view_through_conversions_from_location_asset_click_to_call = proto.Field(\n proto.DOUBLE, number=275, optional=True,\n )\n view_through_conversions_from_location_asset_directions = proto.Field(\n proto.DOUBLE, number=276, optional=True,\n )\n view_through_conversions_from_location_asset_menu = proto.Field(\n proto.DOUBLE, number=277, optional=True,\n )\n view_through_conversions_from_location_asset_order = proto.Field(\n proto.DOUBLE, number=278, optional=True,\n )\n view_through_conversions_from_location_asset_other_engagement = proto.Field(\n proto.DOUBLE, number=279, optional=True,\n )\n view_through_conversions_from_location_asset_store_visits = proto.Field(\n proto.DOUBLE, number=280, optional=True,\n )\n view_through_conversions_from_location_asset_website = proto.Field(\n proto.DOUBLE, number=281, optional=True,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n", "sub_path": "google/ads/googleads/v12/common/types/metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 66012, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "proto.module", "line_number": 22, "usage_type": "call"}, {"api_name": "proto.Message", "line_number": 29, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1034, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1035, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1037, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1037, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1038, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1038, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1039, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1042, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1043, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1045, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1046, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1048, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1049, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1051, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1052, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1054, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1055, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1057, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1058, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1060, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1061, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1063, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1063, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1064, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1064, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1065, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1066, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1068, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1069, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1071, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1072, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1074, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1075, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1077, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1078, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1080, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1081, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1083, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1084, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1086, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1087, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1089, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1090, "usage_type": "attribute"}, 
{"api_name": "proto.Field", "line_number": 1092, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1093, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1095, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1096, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1098, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1099, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1101, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1102, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1104, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1105, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1107, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1108, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1110, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1110, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1111, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1111, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1112, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1112, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1113, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1113, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1114, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1114, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1115, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1115, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1116, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1116, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1117, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1118, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1120, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1121, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1123, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1124, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1126, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1126, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1127, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1127, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1128, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1128, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1129, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1129, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1130, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1131, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1133, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1133, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1134, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1137, "usage_type": "call"}, {"api_name": "proto.DOUBLE", 
"line_number": 1138, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1140, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 1141, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1143, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 1144, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1146, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1147, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1149, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1150, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1152, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1152, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1153, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1154, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1156, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1157, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1159, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1160, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1162, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1162, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1163, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1163, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1164, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1164, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1165, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1166, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1168, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1168, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1169, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1170, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1172, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1173, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1175, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1175, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1176, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1179, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1180, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1182, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1183, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1185, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1186, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1188, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1189, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1191, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1191, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1192, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1192, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1193, 
"usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1194, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1196, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1197, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1199, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1200, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1202, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1203, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1205, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1206, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1208, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 1209, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.quality_score_bucket.QualityScoreBucketEnum", "line_number": 1211, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.quality_score_bucket", "line_number": 1211, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 1213, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 1214, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.quality_score_bucket.QualityScoreBucketEnum", "line_number": 1216, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.quality_score_bucket", "line_number": 1216, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 1218, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1219, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1221, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 1222, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.quality_score_bucket.QualityScoreBucketEnum", "line_number": 1224, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.quality_score_bucket", "line_number": 1224, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 1226, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1226, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1227, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1227, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1228, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1229, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1231, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1232, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1234, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1234, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1235, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1235, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1236, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1236, "usage_type": "attribute"}, {"api_name": "proto.RepeatedField", "line_number": 1237, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 1238, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.interaction_event_type.InteractionEventTypeEnum", "line_number": 1240, "usage_type": "attribute"}, {"api_name": "google.ads.googleads.v12.enums.types.interaction_event_type", "line_number": 1240, "usage_type": "name"}, 
{"api_name": "proto.Field", "line_number": 1242, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1242, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1243, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1243, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1244, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1244, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1245, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1245, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1246, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1246, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1247, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1248, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1250, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1251, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1253, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 1254, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1256, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1256, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1257, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1258, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1260, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1260, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1261, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1262, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1264, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1264, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1265, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1265, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1266, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1266, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1267, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1267, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1268, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1268, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1269, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1269, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1270, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1271, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1273, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1274, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1276, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1277, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1279, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1280, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1282, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1282, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1283, "usage_type": "call"}, {"api_name": "proto.DOUBLE", 
"line_number": 1284, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1286, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1287, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1289, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1290, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1292, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1293, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1295, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1296, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1298, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1299, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1301, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1301, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1302, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1303, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1305, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1306, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1308, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1309, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1311, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1312, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1314, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1314, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1315, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1316, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1318, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1319, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1321, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1322, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1324, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1325, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1327, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1328, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1330, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1331, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1333, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1333, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1334, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1334, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1335, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1336, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1338, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1338, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1339, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1339, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1340, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1340, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1341, 
"usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1341, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1342, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1343, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1345, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1346, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1348, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1349, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1351, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1352, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1354, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1355, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1357, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1358, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1360, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1361, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1363, "usage_type": "call"}, {"api_name": "proto.INT64", "line_number": 1364, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1366, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1367, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1369, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1370, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1372, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1373, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1375, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1376, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1378, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1379, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1381, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1382, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 1384, "usage_type": "call"}, {"api_name": "proto.DOUBLE", "line_number": 1385, "usage_type": "attribute"}]} +{"seq_id": "83911242", "text": "\nimport os\nfrom book import Book\nimport json\nimport fileio\nimport datetime\n\nDATA_DIR = 'data'\nBOOKS_FILE_NAME = os.path.join(DATA_DIR, 'wishlist.txt')\nCOUNTER_FILE_NAME = os.path.join(DATA_DIR, 'counter.txt')\n\nseparator = '^^^' # a string probably not in any valid data relating to a book\n\nbook_list = []\ncounter = 0\n\ndef setup():\n ''' Read book info from file, if file exists. 
'''\n\n global counter\n\n # if there is no file we get None returned\n\n book_data = fileio.read_from_file(BOOKS_FILE_NAME)\n\n if book_data is not None:\n\n make_book_list(book_data)\n\n count_data = fileio.read_from_file(COUNTER_FILE_NAME)\n\n # if we cannot cast book data to int then set it to 0\n if count_data is not None:\n try:\n counter = int(count_data)\n\n except ValueError:\n\n counter = 0\n\n else:\n counter = len(book_list)\n\n\ndef shutdown():\n '''Save all data to a file - one for books, one for the current counter value, for persistent storage'''\n\n output_data = make_output_data()\n\n # write data\n fileio.write_to_file(DATA_DIR, BOOKS_FILE_NAME, output_data)\n\n fileio.write_to_file(DATA_DIR, COUNTER_FILE_NAME, str(counter))\n\n\ndef get_books(**kwargs):\n ''' Return books from data store. With no arguments, returns everything. '''\n\n global book_list\n\n if len(kwargs) == 0:\n return book_list\n\n if 'read' in kwargs:\n read_books = [ book for book in book_list if book.read == kwargs['read'] ]\n return read_books\n\n\n\ndef add_book(book):\n ''' Add to db, set id value, return Book'''\n\n global book_list\n\n book.id = generate_id()\n book_list.append(book)\n\ndef delete_book(book_id): #(JEN)\n '''Remove unread book from db'''\n global book_list\n\n for book in book_list:\n\n if book.id == book_id:\n book_list.remove(book)\n\n\ndef generate_id():\n global counter\n counter += 1\n return counter\n\n\ndef set_read(book_id, read):\n '''Update book with given book_id to read. Return True if book is found in DB and update is made, False otherwise.'''\n\n global book_list\n\n for book in book_list:\n\n if book.id == book_id:\n\n book.read = True\n\n # https://docs.python.org/3/library/datetime.html#date-objects\n # date format 'YYYY-MM-DD'\n book.read_date = datetime.date.today().__str__()\n\n return True\n\n return False # return False if book id is not found\n\ndef set_delete(book_id, read):#(JEN)\n '''Delete book with given book_id. Return True if book is found in DB and update is made, False otherwise'''\n global book_list\n\n for book in book_list:\n\n if book.id == book_id:\n delete_book(book_id)\n return True\n\n return False # returns False if book id is not found\n\ndef edit_book(book_id):#(JEN)\n '''Update the book's author/title with the given book_id. 
Return true if book is found in DB and update is made, False otherwise.'''\n\n global book_list\n\n for book in book_list:\n\n if book.id == book_id:\n return True\n\n return False # return False if book id is not found\n\n\ndef make_changes(book, book_id):\n\n global book_list\n\n book.id = book_id\n delete_book(book_id) #delete the old record\n book_list.insert((book_id - 1),book) #update the record with .insert(puts it at this position, this is what you're inserting)\n\n\ndef make_book_list(json_string_from_file):\n ''' turn the json string into a list of Book objects'''\n\n global book_list\n\n books_str = json.loads(json_string_from_file)\n\n for data in books_str:\n book = Book(data[\"title\"], data[\"author\"], data[\"read\"] == 'True', data['read_date'], int(data[\"id\"]))\n book_list.append(book)\n\n\ndef make_output_data():\n ''' create a json containing all data on books, for writing to output file'''\n\n global book_list\n\n output_data = []\n\n # example json data [{\"title\": book.title, \"author\": book.author, \"read\": book.read, 'read_date': book.read_date, \"id\": book.id}, ...]\n for book in book_list:\n output = {\"title\": book.title, \"author\": book.author, \"read\": str(book.read), \"read_date\": book.read_date, \"id\": str(book.id)}\n output_data.append(output)\n\n all_books_string = json.dumps(output_data)\n\n return all_books_string\n", "sub_path": "datastore.py", "file_name": "datastore.py", "file_ext": "py", "file_size_in_byte": 4291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "fileio.read_from_file", "line_number": 24, "usage_type": "call"}, {"api_name": "fileio.read_from_file", "line_number": 30, "usage_type": "call"}, {"api_name": "fileio.write_to_file", "line_number": 51, "usage_type": "call"}, {"api_name": "fileio.write_to_file", "line_number": 53, "usage_type": "call"}, {"api_name": "book.read", "line_number": 65, "usage_type": "attribute"}, {"api_name": "book.id", "line_number": 75, "usage_type": "attribute"}, {"api_name": "book.id", "line_number": 84, "usage_type": "attribute"}, {"api_name": "book.id", "line_number": 101, "usage_type": "attribute"}, {"api_name": "book.read", "line_number": 103, "usage_type": "attribute"}, {"api_name": "book.read_date", "line_number": 107, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 107, "usage_type": "attribute"}, {"api_name": "book.id", "line_number": 119, "usage_type": "attribute"}, {"api_name": "book.id", "line_number": 132, "usage_type": "attribute"}, {"api_name": "book.id", "line_number": 142, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 152, "usage_type": "call"}, {"api_name": "book.Book", "line_number": 155, "usage_type": "call"}, {"api_name": "book.title", "line_number": 168, "usage_type": "attribute"}, {"api_name": "book.author", "line_number": 168, "usage_type": "attribute"}, {"api_name": "book.read", "line_number": 168, "usage_type": "attribute"}, {"api_name": "book.read_date", "line_number": 168, "usage_type": "attribute"}, {"api_name": "book.id", "line_number": 168, "usage_type": "attribute"}, {"api_name": "json.dumps", 
"line_number": 171, "usage_type": "call"}]} +{"seq_id": "260096356", "text": "# coding=utf-8\nfrom django import forms\nfrom .models import Article\n\n\nclass ArticleForm(forms.ModelForm):\n\n class Meta:\n model = Article\n fields = (\"article_title\", \"article_text\", \"category\")\n widgets = {\"article_title\": forms.TextInput(attrs={\"size\": 101}),\n \"article_text\": forms.Textarea(attrs={\"rows\": 25})}", "sub_path": "my_blog/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.forms.ModelForm", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Article", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "275708509", "text": "import json\nclass openJson:\n def __init__(self,iroot,iallSite):\n self.root = iroot\n self.allSite = iallSite\n def openJson(self,name):\n folder_name = ''.join(name.split('_')[0])\n if folder_name == 'Home':\n jsonpath = self.root + '/JSON/{0}/{1}.json'\n jsonpath1 =jsonpath.format(folder_name,name)\n else:\n jsonpath = self.root + '/JSON/{0}/{1}/{2}.json'\n idx = self.allSite[folder_name]\n #0 遊戲 1手機 2日劇 \n if idx == 0:\n classfied = 'Game'\n elif idx == 1:\n classfied = 'Phone'\n elif idx == 2:\n classfied ='JapaneseDrama'\n jsonpath1 =jsonpath.format(classfied,folder_name,name)\n \n \n with open(jsonpath1) as data_file:\n data = json.load(data_file)\n return data\n \n", "sub_path": "mysite/trips/json.py", "file_name": "json.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "json.load", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "273330957", "text": "#automatically rotate the wheel for one rotation and test the encoder\nimport RPi.GPIO as gpio\nimport time\nimport numpy as np\nimport serial\n\n\ndef init():\n\n gpio.setup(31,gpio.OUT) #IN1\n gpio.setup(33,gpio.OUT) #IN2\n gpio.setup(35,gpio.OUT) #IN3\n gpio.setup(37,gpio.OUT) #IN4\n\ndef gameover():\n gpio.output(31,False)\n gpio.output(33,False)\n gpio.output(35,False)\n gpio.output(37,False)\n\ngpio.setmode(gpio.BOARD)\n \ninit()\n\n#MAIN CODE\n#right back wheel encoder\ngpio.setup(12,gpio.IN,pull_up_down = gpio.PUD_UP)\n#left front wheel encoder\ngpio.setup(7,gpio.IN,pull_up_down = gpio.PUD_UP)\n\n\n\n\n#initialize pwm signal to control motor\nangle = 90 #USER DEFINED ANGLE\ntime_left_turn = ((angle*1.3)/90)\n\nlist_of_gpio = []\nlist_of_gpio_2 = []\nlist_of_x = []\ncurr_x = 0\ndef forward(time_to_run,ser):\n global curr_x\n pin = 31\n pin2 = 37\n val = 36\n pwm1 = gpio.PWM(pin,50)\n pwm1.start(val)\n pwm4 = gpio.PWM(pin2,50)\n pwm4.start(val)\n t = time.time()\n counter = np.uint64(0)\n counter2 = np.uint64(0) \n button = int(0)\n button2 = int(0)\n while time.time()-t=0:\n pwm1.ChangeDutyCycle(val - (err*kp))\n #time.sleep(0.1)\n \n list_of_gpio.append(counter)\n list_of_gpio_2.append(counter2)\n print('list_of_gpio : ', list_of_gpio, 'list_of_gpio_2' , list_of_gpio_2)\n \ndef left(val):\n pin = 33\n pin2 = 37\n pwm1 = 
gpio.PWM(pin,50)\n pwm1.start(val)\n pwm4 = gpio.PWM(pin2,50)\n pwm4.start(val)\n t = time.time()\n counter = np.uint64(0)\n counter2 = np.uint64(0) \n button = int(0)\n button2 = int(0)\n time.sleep(0.1)\n if int (gpio.input(12)) != int(button):\n button = int(gpio.input(12))\n counter+= 1\n if int (gpio.input(7)) != int(button2):\n button2 = int(gpio.input(7))\n counter2+=1 \nser = serial.Serial('/dev/ttyUSB0',9600)\n\ncount = 0\nnew_x_angle = 0\nwhile True:\n if(ser.in_waiting > 0):\n count+=1\n \n line = ser.readline()\n #print(line)\n \n \n if(count>10):\n \n\n\n time_front = 7\n time_front_2 = 5\n delay_between = 0.9\n val = 60\n\n forward(time_front,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n \n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n print('angle diff between: ',curr_x ,'and',new_x_angle, '=',abs(curr_x-new_x_angle))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('FIRST SIDE DONE')\n \n forward(time_front_2,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n diff = curr_x-new_x_angle\n if diff<55:\n val = 80\n else:\n val = 60\n print('angle diff between: ',curr_x ,'and',new_x_angle, '=',abs(diff))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('SECOND SIDE DONE')\n \n forward(time_front,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n print('angle diff between: ',curr_x ,'and',new_x_angle, '=',abs(curr_x-new_x_angle))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('THIRD SIDE DONE')\n \n forward(time_front_2,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n print('angle diff between: ',curr_x ,'and',new_x_angle, '=',abs(curr_x-new_x_angle))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('FOURTH SIDE DONE')\n break\nprint('PROCESS DONE!')\nprint(list_of_gpio)\nprint(list_of_gpio_2)\n\nfile = open('gpio_values_05_ALL_fOUR_1.txt','w')\nfor i in list_of_gpio:\n file.write(str(i))\n file.write('\\n')\nfile.close()\n \nfile = open('gpio_values_05_ALL_fOUR_2.txt','w')\nfor i in list_of_gpio_2:\n file.write(str(i))\n file.write('\\n')\nfile.close()\n \nfile = open('imu_x.txt','w')\nfor i in list_of_x:\n file.write(str(i))\n file.write('\\n')\nfile.close()\n\ngameover()\ngpio.cleanup()\n\n\n\n\n", "sub_path": "Assignment 8/encoder_imu.py", "file_name": "encoder_imu.py", "file_ext": "py", "file_size_in_byte": 6834, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "RPi.GPIO.setup", "line_number": 10, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 10, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 10, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 11, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 11, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 11, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 12, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 12, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 13, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 13, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 13, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 16, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 16, "usage_type": "name"}, {"api_name": "RPi.GPIO.output", "line_number": 17, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 17, "usage_type": "name"}, {"api_name": "RPi.GPIO.output", "line_number": 18, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 18, "usage_type": "name"}, {"api_name": "RPi.GPIO.output", "line_number": 19, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 19, "usage_type": "name"}, {"api_name": "RPi.GPIO.setmode", "line_number": 21, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 21, "usage_type": "name"}, {"api_name": "RPi.GPIO.BOARD", "line_number": 21, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 27, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 27, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 27, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PUD_UP", "line_number": 27, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 29, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 29, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 29, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PUD_UP", "line_number": 29, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PWM", "line_number": 47, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 47, "usage_type": "name"}, {"api_name": "RPi.GPIO.PWM", "line_number": 49, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 49, "usage_type": "name"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.uint64", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.uint64", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "RPi.GPIO.input", "line_number": 67, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 67, "usage_type": "name"}, {"api_name": "RPi.GPIO.input", "line_number": 68, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 68, "usage_type": "name"}, {"api_name": "RPi.GPIO.input", "line_number": 71, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 71, "usage_type": "name"}, {"api_name": "RPi.GPIO.input", "line_number": 72, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 72, "usage_type": "name"}, {"api_name": "RPi.GPIO.PWM", "line_number": 94, "usage_type": "call"}, {"api_name": 
"RPi.GPIO", "line_number": 94, "usage_type": "name"}, {"api_name": "RPi.GPIO.PWM", "line_number": 96, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 96, "usage_type": "name"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.uint64", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.uint64", "line_number": 100, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "RPi.GPIO.input", "line_number": 104, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 104, "usage_type": "name"}, {"api_name": "RPi.GPIO.input", "line_number": 105, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 105, "usage_type": "name"}, {"api_name": "RPi.GPIO.input", "line_number": 107, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 107, "usage_type": "name"}, {"api_name": "RPi.GPIO.input", "line_number": 108, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 108, "usage_type": "name"}, {"api_name": "serial.Serial", "line_number": 110, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 132, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 145, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 151, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 168, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 174, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 186, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 192, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 204, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 232, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 232, "usage_type": "name"}]} +{"seq_id": "285167767", "text": "from database.models import LawFirm\nfrom datetime import date\nfrom .folder_subprocess import FolderJobs\nfrom .browser_subprocess import BrowserJobs\nfrom .robot_subprocess import RobotJobs\nfrom .analysis_subprocess import AnalysisJobs\n\nclass MainJobs():\n\n @staticmethod\n def run():\n results = LawFirm.objects.all()\n for item in results:\n for issue in item.issues:\n mostRecentStatusFilePath = FolderJobs.getMostRecentStatusFilePath(item.name, issue.sud, issue.upisnik, issue.predmet, issue.godina)\n todayFolderPath = FolderJobs.createFolderOnPath(item.name, issue.sud, issue.upisnik, issue.predmet, issue.godina, date.today())\n url = FolderJobs.getIssueBasePathUrl(item.name, issue.sud, issue.upisnik, issue.predmet, issue.godina)\n BrowserJobs.open_browser()\n BrowserJobs.enter_link(url)\n BrowserJobs.override_captcha()\n BrowserJobs.copyContent()\n BrowserJobs.close_browser()\n todayStatusFilePath = FolderJobs.createStatusFile(todayFolderPath)\n AnalysisJobs.checkStatusFiles(mostRecentStatusFilePath, todayStatusFilePath)", "sub_path": "walkthrough/main_subprocess.py", "file_name": "main_subprocess.py", "file_ext": "py", "file_size_in_byte": 1200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "database.models.LawFirm.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "database.models.LawFirm.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "database.models.LawFirm", "line_number": 12, "usage_type": "name"}, {"api_name": "folder_subprocess.FolderJobs.getMostRecentStatusFilePath", "line_number": 15, "usage_type": "call"}, {"api_name": 
"folder_subprocess.FolderJobs", "line_number": 15, "usage_type": "name"}, {"api_name": "folder_subprocess.FolderJobs.createFolderOnPath", "line_number": 16, "usage_type": "call"}, {"api_name": "folder_subprocess.FolderJobs", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 16, "usage_type": "name"}, {"api_name": "folder_subprocess.FolderJobs.getIssueBasePathUrl", "line_number": 17, "usage_type": "call"}, {"api_name": "folder_subprocess.FolderJobs", "line_number": 17, "usage_type": "name"}, {"api_name": "browser_subprocess.BrowserJobs.open_browser", "line_number": 18, "usage_type": "call"}, {"api_name": "browser_subprocess.BrowserJobs", "line_number": 18, "usage_type": "name"}, {"api_name": "browser_subprocess.BrowserJobs.enter_link", "line_number": 19, "usage_type": "call"}, {"api_name": "browser_subprocess.BrowserJobs", "line_number": 19, "usage_type": "name"}, {"api_name": "browser_subprocess.BrowserJobs.override_captcha", "line_number": 20, "usage_type": "call"}, {"api_name": "browser_subprocess.BrowserJobs", "line_number": 20, "usage_type": "name"}, {"api_name": "browser_subprocess.BrowserJobs.copyContent", "line_number": 21, "usage_type": "call"}, {"api_name": "browser_subprocess.BrowserJobs", "line_number": 21, "usage_type": "name"}, {"api_name": "browser_subprocess.BrowserJobs.close_browser", "line_number": 22, "usage_type": "call"}, {"api_name": "browser_subprocess.BrowserJobs", "line_number": 22, "usage_type": "name"}, {"api_name": "folder_subprocess.FolderJobs.createStatusFile", "line_number": 23, "usage_type": "call"}, {"api_name": "folder_subprocess.FolderJobs", "line_number": 23, "usage_type": "name"}, {"api_name": "analysis_subprocess.AnalysisJobs.checkStatusFiles", "line_number": 24, "usage_type": "call"}, {"api_name": "analysis_subprocess.AnalysisJobs", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "203381840", "text": "import requests as request\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nclass Cinemaximum():\n \n def __init__(self):\n \n r = request.get('https://www.cinemaximum.com.tr/vizyondakiler')\n soup = BeautifulSoup(r.content, 'html.parser') \n \n link = str(soup.find_all('div',{'class':'buttons'}))\n \n link = link.replace('[','')\n link = link.replace(']','')\n link = link.replace('','')\n link = link.replace(' ','')\n \n self.linkler = link.split(',')\n \n filmler = str(soup.find_all('h4'))\n filmler = filmler.replace('','')\n filmler = filmler.replace('
','')\n        filmler = filmler.replace('[','')\n        filmler = filmler.replace(']','')\n        \n        filmler = filmler.split(',')\n        filmler.pop(0)\n        puanlar = str(soup.find_all('div',{'class':'movie-rating-2 hidden-md hidden-lg'}))\n        \n        puanlar = puanlar.replace('','')\n        puanlar = puanlar.replace('','')\n        puanlar = puanlar.replace('[','')\n        puanlar = puanlar.replace(']','')\n        \n        puanlar = puanlar.split(',')\n        \n        self.FilmBilgileri = list(zip(puanlar,filmler))\n    \n    def listele(self):\n        index = 0\n        for i in self.FilmBilgileri:\n            print('{} paun :{} - {}'.format(index,i[0],i[1]))\n            index += 1\n        print('\\n********** cikmak icin \"-1\" giriniz **********')\n    \n    def ara(self,num):\n        \n        filmLinki = 'https://www.cinemaximum.com.tr'+self.linkler[num]\n        r = request.get(filmLinki)\n        soup = BeautifulSoup(r.content, 'html.parser') \n        \n        ozet = str(soup.find_all('section',{'class':'movie-details-text'}))\n        ozet = ozet.replace('[','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        \n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        \n        ozet = ozet.replace('','')\n        ozet = ozet.replace('','')\n        ozet = ozet.replace('
','')\n ozet = ozet.replace(']','')\n \n vizyonTarihi = ozet[ozet.find('Vizyon Tarihi'):ozet.find('Süre')]\n \n sure = ozet[ozet.find('Süre'):ozet.find('Tür')]\n \n tur = ozet[ozet.find('Tür'):ozet.find('Özet')]\n \n konu = ozet[ozet.find('Özet'):]\n \n print(vizyonTarihi,sure,tur,sep = '\\n')\n print('\\n',konu,sep='')\n \n return vizyonTarihi + sure + tur + konu\n \n \n \n \n \n \n \n\n", "sub_path": "getUpToDateMoviesFromCinemaximum/cinemaximum.py", "file_name": "cinemaximum.py", "file_ext": "py", "file_size_in_byte": 3159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "38312375", "text": "import nltk\nimport pickle\nfrom nltk.classify import MaxentClassifier\n\n# Set up our training material in a nice dictionary.\ntraining = {\n 'ingredients': [\n 'Pastry for 9-inch tart pan',\n 'Apple cider vinegar',\n '3 eggs',\n '1/4 cup sugar',\n ],\n 'steps': [\n 'Sift the powdered sugar and cocoa powder together.',\n 'Coarsely crush the peppercorns using a mortar and pestle.',\n 'While the vegetables are cooking, scrub the pig ears clean and cut away any knobby bits of cartilage so they will lie flat.',\n 'Heat the oven to 375 degrees.',\n ]\n}\n\n# Set up a list that will contain all of our tagged examples,\n# which we will pass into the classifier at the end.\ntraining_set = []\nfor key, val in training.items():\n for i in val:\n # Set up a list we can use for all of our features,\n # which are just individual words in this case.\n feats = []\n # Before we can tokenize words, we need to break the\n # text out into sentences.\n sentences = nltk.sent_tokenize(i)\n for sentence in sentences:\n feats = feats + nltk.word_tokenize(sentence)\n\n # For this example, it's a good idea to normalize for case.\n # You may or may not need to do this.\n feats = [i.lower() for i in feats]\n # Each feature needs a value. 
A typical use for a case like this\n # is to use True or 1, though you can use almost any value for\n # a more complicated application or analysis.\n feats = dict([(i, True) for i in feats])\n # NLTK expects you to feed a classifier a list of tuples\n # where each tuple is (features, tag).\n training_set.append((feats, key))\n\n# Train up our classifier\nclassifier = MaxentClassifier.train(training_set)\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "nltk.sent_tokenize", "line_number": 31, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 33, "usage_type": "call"}, {"api_name": "nltk.classify.MaxentClassifier.train", "line_number": 47, "usage_type": "call"}, {"api_name": "nltk.classify.MaxentClassifier", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "580896445", "text": "import unittest\nimport nose\n\nfrom ..math.extended_gcd import extended_gcd\nfrom ..math.std_normal_pdf import pdf\nfrom ..math.approx_cdf import cdf\n\nclass TestExtendedGCD(unittest.TestCase):\n\n def test_extended_gcd(self):\n # Find extended_gcd of 35 and 77\n (a, b) = extended_gcd(35, 77)\n self.assertIs(35 * a + 77 * b, 7)\n\n # Find extended_gcd of 15 and 19\n (a, b) = extended_gcd(15, 19)\n self.assertIs(15 * a + 19 * b, 1)\n\n # Find extended_gcd of 18 and 9\n (a, b) = extended_gcd(18, 9)\n self.assertIs(18 * a + 9 * b, 9)\n\n # Find extended_gcd of 99 and 81\n (a, b) = extended_gcd(99, 81)\n self.assertIs(99 * a + 81 * b, 9)\n\n # Find extended_gcd of 50 and 15\n (a, b) = extended_gcd(50, 15)\n self.assertIs(50 * a + 15 * b, 5)\n\n\nclass TestStdNormPDF(unittest.TestCase):\n\n def test_pdf(self):\n # Calculate standard normal pdf for x=1\n a = pdf(1)\n nose.tools.assert_almost_equal(a, 0.24197072451914337)\n\n # Calculate standard normal pdf for x=(-1)\n a = pdf(-1)\n nose.tools.assert_almost_equal(a, 0.24197072451914337)\n\n # Calculate standard normal pdf for x=13, mean=10, std_dev=1\n a = pdf(x=13, mean=10, std_dev=1)\n nose.tools.assert_almost_equal(a, 0.004431848411938008)\n\n\nclass TestApproxCdf(unittest.TestCase):\n\n def test_cdf(self):\n # Calculate cumulative distribution function for x=1\n a = cdf(1)\n nose.tools.assert_almost_equal(a, 0.841344746068543)\n\n # Calculate cumulative distribution function x=0\n a = cdf(0)\n nose.tools.assert_almost_equal(a, 0.5)\n\n # Calculate cumulative distribution function for x=(-1)\n a = cdf(-1)\n nose.tools.assert_almost_equal(a, 0.15865525393145702)\n\n", "sub_path": "algorithms/tests/test_math.py", "file_name": "test_math.py", "file_ext": "py", "file_size_in_byte": 1819, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "math.extended_gcd.extended_gcd", "line_number": 12, "usage_type": "call"}, {"api_name": "math.extended_gcd.extended_gcd", "line_number": 16, "usage_type": "call"}, {"api_name": "math.extended_gcd.extended_gcd", "line_number": 20, "usage_type": "call"}, {"api_name": "math.extended_gcd.extended_gcd", "line_number": 24, "usage_type": "call"}, {"api_name": "math.extended_gcd.extended_gcd", "line_number": 28, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 32, "usage_type": "attribute"}, {"api_name": "math.std_normal_pdf.pdf", "line_number": 36, "usage_type": "call"}, 
{"api_name": "nose.tools.assert_almost_equal", "line_number": 37, "usage_type": "call"}, {"api_name": "nose.tools", "line_number": 37, "usage_type": "attribute"}, {"api_name": "math.std_normal_pdf.pdf", "line_number": 40, "usage_type": "call"}, {"api_name": "nose.tools.assert_almost_equal", "line_number": 41, "usage_type": "call"}, {"api_name": "nose.tools", "line_number": 41, "usage_type": "attribute"}, {"api_name": "math.std_normal_pdf.pdf", "line_number": 44, "usage_type": "call"}, {"api_name": "nose.tools.assert_almost_equal", "line_number": 45, "usage_type": "call"}, {"api_name": "nose.tools", "line_number": 45, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 48, "usage_type": "attribute"}, {"api_name": "math.approx_cdf.cdf", "line_number": 52, "usage_type": "call"}, {"api_name": "nose.tools.assert_almost_equal", "line_number": 53, "usage_type": "call"}, {"api_name": "nose.tools", "line_number": 53, "usage_type": "attribute"}, {"api_name": "math.approx_cdf.cdf", "line_number": 56, "usage_type": "call"}, {"api_name": "nose.tools.assert_almost_equal", "line_number": 57, "usage_type": "call"}, {"api_name": "nose.tools", "line_number": 57, "usage_type": "attribute"}, {"api_name": "math.approx_cdf.cdf", "line_number": 60, "usage_type": "call"}, {"api_name": "nose.tools.assert_almost_equal", "line_number": 61, "usage_type": "call"}, {"api_name": "nose.tools", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "134720424", "text": "# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass PickingType(models.Model):\n _inherit = \"stock.picking.type\"\n\n is_auto_packing = fields.Boolean(string=\"Automated Packaging\")\n\n\nclass StockPicking(models.Model):\n _inherit = \"stock.picking\"\n\n @api.multi\n def action_assign(self):\n res = super(StockPicking, self).action_assign()\n # If the type is auto_packing and the delivery method is set\n auto_picks = self.filtered(lambda p: p.picking_type_id.is_auto_packing and p.sale_id.carrier_id)\n for pick in auto_picks:\n for move in pick.move_ids_without_package:\n # Reverse sorted list of the package types(biggest package qty first)\n pack_list = move.product_id.packaging_type_ids.filtered(\n lambda l: l.package_carrier_type == pick.carrier_id.delivery_type\n ).sorted(key=lambda r: r.qty, reverse=True)\n if pack_list:\n move.pack_move(move.reserved_availability, pack_list)\n return res\n\n\nclass StockMove(models.Model):\n _inherit = \"stock.move\"\n\n # Logic to find the best fit package type, returns index\n @api.multi\n def find_fit_index(self, cur_res_qty, pack_list):\n for i in range(0, len(pack_list)):\n cur_pack_qty = pack_list[i].qty\n try:\n next_pack_qty = pack_list[i+1].qty\n except IndexError:\n next_pack_qty = -1.0\n if cur_pack_qty <= cur_res_qty or cur_res_qty > next_pack_qty:\n return i\n return -1\n\n # Logic to create packages and auto fill the package details\n @api.multi\n def do_auto_pack(self, pack_list, fit_index, qty_to_pack):\n pack_no_type = self.picking_id.put_in_pack()\n quant_pack = self.env['stock.quant.package']\n cur_package = quant_pack.browse(pack_no_type['context']['default_stock_quant_package_id'])\n if cur_package:\n cur_package.packaging_id = pack_list[fit_index]\n cur_package.shipping_weight = qty_to_pack * self.product_id.weight\n else:\n _logger.error('There was no package for the 
current stock.move, %s', self._name)\n\n # Logic to pack the current move, ensure everything is correctly sorted\n @api.multi\n def pack_move(self, cur_res_qty, pack_list):\n self.ensure_one()\n if not cur_res_qty:\n return {\n 'warning': {\n 'title': _('Warning'),\n 'message': _(\"There must be a reserved quantity on the picking, %s, to auto package.\") % (\n self._name),\n },\n }\n while cur_res_qty:\n move_line_ids = self.move_line_ids.filtered(lambda o: not o.result_package_id)\n if not move_line_ids:\n return False\n fit_index = self.find_fit_index(cur_res_qty, pack_list)\n # If there is no suitable box\n if fit_index < 0:\n return False\n else:\n qty_to_pack = pack_list[fit_index].qty\n\n if qty_to_pack < 1:\n raise UserError(_('Package Types for %s must have max quantity greater than 0 for auto pack.' % pack_list[fit_index].name_get()))\n # ex: cur_res_qty = 3 and qty_to_pack = 3\n if cur_res_qty == qty_to_pack:\n move_line_ids.qty_done = qty_to_pack\n self.do_auto_pack(pack_list, fit_index, qty_to_pack)\n cur_res_qty = 0\n # ex: cur_res_qty = 4 and qty_to_pack = 3\n elif cur_res_qty > qty_to_pack:\n move_line_ids.qty_done = qty_to_pack\n self.do_auto_pack(pack_list, fit_index, qty_to_pack)\n cur_res_qty = cur_res_qty - qty_to_pack\n # ex: cur_res_qty = 2 and qty_to_pack = 3\n else:\n move_line_ids.qty_done = cur_res_qty\n self.do_auto_pack(pack_list, fit_index, qty_to_pack)\n cur_res_qty = 0\n", "sub_path": "easywater_stock/models/stock.py", "file_name": "stock.py", "file_ext": "py", "file_size_in_byte": 4273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 12, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 21, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 21, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 37, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 37, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 41, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 41, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 54, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 54, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 72, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 73, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 89, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 89, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 66, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "512906537", "text": "import nibabel as nb\nimport numpy as np\nimport nilearn\nimport os\nimport pickle\nfrom nilearn.plotting import plot_anat\n\n\"\"\"\nreturns the bounding box of a mask (.nii file)\n\"\"\"\ndef get_bounding_box(mask):\n\n roi = mask.dataobj\n\n r = np.any(roi, axis=(1, 2))\n c = np.any(roi, axis=(0, 2))\n z = np.any(roi, 
axis=(0, 1))\n\n rmin, rmax = np.where(r)[0][[0, -1]]\n cmin, cmax = np.where(c)[0][[0, -1]]\n zmin, zmax = np.where(z)[0][[0, -1]]\n\n return ((rmin, rmax), (cmin, cmax), (zmin, zmax))\n\n\"\"\"\nGet the part of sequence masked and related to its bounding box\n\"\"\"\ndef mask_and_crop(sequence, mask, full_brain=False):\n \n assert sequence.shape == mask.shape\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(mask)\n \n if full_brain:\n rmin, rmax = 0, mask.shape[0]\n cmin, cmax = 0, mask.shape[1]\n\n delta_r = rmax-rmin\n delta_c = cmax-cmin\n delta_z = zmax-zmin\n \n data = sequence.get_fdata()[rmin:(rmin+delta_r),cmin:(cmin+delta_c),zmin:(zmin+delta_z)]\n\n return nb.Nifti1Image(\n data, \n affine=sequence.affine)\n\n\"\"\"\nExtract mask from a sequence and resize to a cube a a given side\n\"\"\"\ndef mask_crop_resize(sequence, mask, x, y, z, full_brain=False):\n\n roi = mask_and_crop(sequence, mask, full_brain)\n\n (dim1, dim2, dim3) = float(roi.shape[0]), float(roi.shape[1]), float(roi.shape[2])\n \n scale_affine = np.array([[float(x) / dim1, 0, 0, 0], \n [0, float(y) / dim2, 0, 0], \n [0, 0, float(z) / dim3, 0], \n [0, 0, 0, 1]])\n\n resampled_roi = nb.Nifti1Image(\n roi.dataobj, \n affine=scale_affine)\n\n return nilearn.image.resample_img(\n resampled_roi, \n target_affine=np.eye(4),\n target_shape=(x, y, z), \n interpolation='nearest')\n\n\"\"\"\nEstablish ROI size around an axis (z-axis is 2)\n\"\"\"\ndef get_roi_size(roi, axis):\n \n roi_data = roi.get_fdata()\n\n # This is required by Brats since ROI is 0, 1, 2, 4 instead only 0, 1\n roi_data[roi_data>1] = 1\n\n other_axis = list(range(len(roi_data.shape)))\n \n other_axis.remove(axis)\n\n return np.sum(roi_data, axis=tuple(other_axis))\n\n\"\"\"\nGet index of image according and axis given the size of ROI\nalong that axis (z-index is 2)\n\"\"\"\ndef get_roi_index_percentile(roi, axis, percentile):\n \n # Sizes along z-axis\n roi_sizes = get_roi_size(roi, axis)\n \n non_empty_sizes = roi_sizes[np.where(roi_sizes > 0)]\n \n percentile_val = np.percentile(non_empty_sizes, percentile)\n\n return np.where(roi_sizes >= percentile_val)[0]\n\ndef index_percentile_of_sizes(sizes, percentile):\n \n non_empty_sizes = sizes[np.where(sizes > 0)]\n \n percentile_val = np.percentile(non_empty_sizes, percentile)\n\n return np.where(sizes >= percentile_val)\n\n\"\"\"\nReturn ordered index of sizes index with a given percentile\n\"\"\"\ndef ordered_index_percentile_of_sizes(sizes, percentile):\n \n non_empty_sizes = sizes[np.where(sizes > 0)]\n \n percentile_val = np.percentile(non_empty_sizes, percentile)\n\n w = np.where(sizes >= percentile_val)\n\n sort_index = np.argsort(sizes)\n\n r = sort_index[-w[0].shape[0]:]\n \n return r[::-1]\n\n\"\"\"\nSave the tumor crop with base shaped in square with given side.\n\"\"\"\ndef get_slices_for_subject(sequence_repo, sequence_name, subject, side, full_brain=False):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject)\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n \n sequence_resampled = mask_crop_resize(sequence, roi, side, side, z_height, full_brain)\n \n slices = sequence_resampled.get_fdata()\n \n return slices\n \n\"\"\"\nSave the tumor crop with base shaped in square with given side.\n\"\"\"\n\"\"\"\ndef save_slices_for_subject(sequence_repo, sequence_name, subject, side, output_dir, full_brain=False):\n \n slices = get_slices_for_subject(sequence_repo, sequence_name, 
subject, side, full_brain)\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\"\n\n\"\"\"\nSave the tumor crop with base shaped in square with given side.\n\"\"\"\n\"\"\"\ndef save_cube_for_subject(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject, \"T2ROI\")\n\n resampled_roi = mask_crop_resize(roi, roi, side, side, side)\n \n sequence_resampled = mask_crop_resize(sequence, roi, side, side, side)\n \n slices = sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/cube-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\"\n \n\"\"\"\nSave the slices of the whole brain reshaped with a squared size\n\"\"\"\n\"\"\"\ndef save_slices_for_subject_full_brain(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject, \"T2ROI\")\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n\n sequence_resampled = mask_full_brain_resize(sequence, roi, side, side, z_height)\n \n slices = sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\"\n\n\"\"\"\nThis method is able to normalize (like standard scaler) but with the possibility to specify axis\n\"\"\"\ndef normalize(images, max_value, axis):\n \n u, s = np.mean(images, axis=axis), np.std(images, axis=axis)\n \n u_extended = np.expand_dims(u, axis=axis)\n s_extended = np.expand_dims(s, axis=axis)\n \n images_centered = (images - u_extended) / s_extended\n \n max_ = np.max(images_centered, axis=axis)\n min_ = np.min(images_centered, axis=axis)\n max_extended = np.expand_dims(max_, axis=axis)\n min_extended = np.expand_dims(min_, axis=axis)\n \n delta_ = max_extended - min_extended\n \n return ((images_centered - min_extended) / delta_) * max_value\n\n\"\"\"\ndef save_slices_for_subject_brats19(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject)\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n \n sequence_resampled = mask_crop_resize(sequence, roi, side, side, z_height)\n \n slices = sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\" \n \ndef save_slices_for_subject_full_brain_brats19(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject)\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n\n sequence_resampled = mask_full_brain_resize(sequence, roi, side, side, z_height)\n \n slices = sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n", "sub_path": "gliomi/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 7438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.any", "line_number": 15, "usage_type": "call"}, {"api_name": 
"numpy.any", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 21, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 62, "usage_type": "call"}, {"api_name": "nilearn.image.resample_img", "line_number": 66, "usage_type": "call"}, {"api_name": "nilearn.image", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 216, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "333167026", "text": "import sys\nimport os\nimport shutil\nimport warnings\nimport time\nimport traceback\nimport glob\nimport pickle\nimport librosa.feature\n\nimport pandas as pd\nimport numpy as np\n\nfrom flask import Flask, request, jsonify\nfrom sklearn.ensemble import RandomForestClassifier as rf\nfrom sklearn.model_selection import train_test_split\n\nwarnings.filterwarnings('ignore')\n\napp = Flask(__name__)\n\n# inputs\nmodel_file_name = 'model.pkl'\nfolder_utterances = './dataset/wav/'\nmax_mfcc = 15480\n\n# These will be populated at training time\nmodel_columns = None\nclf = None\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if clf:\n try:\n json_ = str(request.json) #capture the json from POST\n wav_file = json_\n print(wav_file)\n data_librosa, freq_librosa = librosa.load(wav_file)\n mfcc_mean = np.mean((librosa.feature.mfcc(y=data_librosa, sr=freq_librosa, n_mfcc=40)).T, axis=0)\n row = []\n row.append({'mfcc_mean':mfcc_mean})\n df_pred = pd.DataFrame(row)\n df_pred['mfcc_mean'] = df_pred['mfcc_mean'].apply(lambda x : np.pad(x, (0,max_mfcc-len(x)),'constant'))\n query = np.array(df_pred.mfcc_mean.tolist())\n prediction = clf.predict(query)\n\n return jsonify({'prediction': str(prediction)})\n\n except Exception as e:\n\n return jsonify({'error': 
str(e), 'trace': traceback.format_exc()})\n else:\n print('train first')\n return 'no model here'\n\n\n@app.route('/train', methods=['GET']) # Create http://host:port/train GET end point\ndef train():\n\n wavs = glob.glob(folder_utterances + '*.wav')\n row_list = []\n for w in wavs :\n _, file = os.path.split(w)\n name, _ = os.path.splitext(file)\n label = name[5]\n data_librosa, freq_librosa = librosa.load(w)\n mfcc_mean = np.mean((librosa.feature.mfcc(y=data_librosa, sr=freq_librosa, n_mfcc=40)).T, axis=0)\n len_mfcc = len(mfcc_mean)\n dict_ = {'mfcc_mean':mfcc_mean, 'label':label}\n row_list.append(dict_)\n df = pd.DataFrame(row_list)\n df['mfcc_mean'] = df['mfcc_mean'].apply(lambda x : np.pad(x, (0,max_mfcc-len(x)),'constant'))\n\n x = np.array(df['mfcc_mean'].tolist())\n y = np.array(df['label'].tolist())\n\n X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\n\n global clf\n clf = rf(n_estimators=600, max_depth=20)\n start = time.time()\n clf.fit(X_train, y_train)\n print('Trained in %.1f seconds' % (time.time() - start))\n print('Model training score: %s' % clf.score(x, y))\n\n with open('./model.pkl', 'wb') as model_pkl:\n pickle.dump(clf, model_pkl)\n\n return 'Success'\n\n\nif __name__ == '__main__':\n try:\n port = int(sys.argv[1])\n except Exception as e:\n port = 80\n\n try:\n clf = pickle.load(open(model_file_name, 'rb'))\n print('model loaded')\n\n except Exception as e:\n print('No model here')\n print('Train first')\n print(str(e))\n clf = None\n\n app.run(host='0.0.0.0', port=port, debug=False)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "warnings.filterwarnings", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "librosa.feature.load", "line_number": 39, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "librosa.feature.feature.mfcc", "line_number": 40, "usage_type": "call"}, {"api_name": "librosa.feature.feature", "line_number": 40, "usage_type": "attribute"}, {"api_name": "librosa.feature", "line_number": 40, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 52, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 52, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "librosa.feature.load", "line_number": 67, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 67, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 68, 
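To exercise the service above end to end, a client first hits /train (which fits the forest and pickles it) and then posts the server-side path of a wav file to /predict. A minimal sketch using requests; the base URL and wav filename are assumptions about the deployment, and note that /predict reads request.json as a bare JSON string naming a file the server can read, not as a file upload:

import requests

BASE = "http://localhost:80"  # hypothetical address; matches the default port above

print(requests.get(f"{BASE}/train").text)  # fits the model, returns 'Success'

# the view does str(request.json), so the body is just a JSON string path;
# this filename is a placeholder for a wav present on the server
resp = requests.post(f"{BASE}/predict", json="./dataset/wav/0_demo_1.wav")
print(resp.json())  # e.g. {'prediction': "['0']"} once a model is loaded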
"usage_type": "call"}, {"api_name": "librosa.feature.feature.mfcc", "line_number": 68, "usage_type": "call"}, {"api_name": "librosa.feature.feature", "line_number": 68, "usage_type": "attribute"}, {"api_name": "librosa.feature", "line_number": 68, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 88, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "293878071", "text": "\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom lmfit import minimize, Parameters, Parameter, report_fit, Minimizer, conf_interval, printfuncs\nfrom scipy.integrate import odeint\nimport csv\nimport numdifftools\nfrom pylab import shape\nfrom tqdm import tqdm\nimport argparse\nimport io\n\n#parameter values, constants\nm = 5/3\ntstep = 5\nmod_start = 288 #to estimate from 4/8/2019 00:00\nbeta = 1.55 #estimated using TDG vel\n#reach characteristics\nL = [3130, 4660, 2990]\nb = [0.1554, 0.0047, 0.0489]\nc = [0.3967, 0.8699, 0.4352]\n\ndef calc_dQdt(y, t, p, Qi):\n\n try:\n Fr = p['Fr'].value \n except:\n Fr = p \n \n tf = int(mod_start + t)\n\n Ts1 = L[0] / (b[0] * (y**c[0])) #unit: seconds\n Ts2 = L[1] / (b[1] * (y**c[1])) \n Ts3 = L[2] / (b[2] * (y**c[2]))\n Ts = (Ts1 + Ts2 + Ts3) / 60 #unit:min \n\n Tflow = Ts / (m * (1 + beta)) #unit:min \n tau_fl = (1 - Fr) * Tflow #unit:min\n tlag = round(tf - int(tau_fl/tstep))\n\n dQdt = 5 * 60 * ((Qi[tlag] - y) / (Fr * Tflow * 60)) \n\n return dQdt\n\n\n#odeint, residual functions\ndef g(t, y0, p, Qi):\n soln = odeint(calc_dQdt, y0, t, args=(p,Qi))\n return soln[:,0]\n\n\ndef residual(p, ts, y0, data, Qi):\n model = g(ts, y0, p, Qi)\n return (model - data).ravel()\n\n\ndef unsteady_flow_routing(input_file):\n\n flow = pd.read_csv(input_file)\n flow['date_time'] = pd.to_datetime(flow['date_time'], format='%d/%m/%Y %H:%M')\n\n Qi = flow['input_flow'].to_list()\n \n #store results and simulations\n result = []\n store_results = []\n final = []\n sim = []\n \n mod_len = len(Qi) - 1 - mod_start\n data = flow['observed_flow'][mod_start:mod_start + mod_len]\n t = np.arange(0,mod_len)\n y0 = Qi[mod_start]\n\n params = Parameters()\n params.add('Fr', value = 0.6, min = 0, max = 1) \n \n solve1 = minimize(residual, params, args=(t, y0, data, Qi), method='nelder')\n solve2 = data + solve1.residual.reshape(data.shape)\n\n #for rows in solve1:\n result.append(solve1)\n store_results.append(solve1)\n #for rows in solve2:\n final.append(solve2)\n final_array = np.asarray(final)\n Cinarray = final_array.flatten()\n sim.append(Cinarray)\n\n final_df = pd.DataFrame(sim).T\n\n stdoutOrigin=sys.stdout \n sys.stdout = open(\"MUFT_flow_log.txt\", \"w\")\n report_fit(store_results[0])\n sys.stdout.close()\n sys.stdout=stdoutOrigin\n\n fig, ax = plt.subplots(1,figsize = (10,5))\n ts = np.linspace(0,mod_len - 1, 
mod_len)\n ax.plot(ts, final_df[0], c='blue')\n ax.scatter(ts,data,c='red',s=25)\n ax.set_xlabel('Time (h)')\n ax.set_ylabel('Flow ($m^3$ $s^{-1}$)') \n plt.savefig('MUFT_flow__fit.png')\n\n final_df.to_csv('MUFT_simflow_Hekni.csv')\n \n return final_df\n\n\ndef main():\n # Create the parser\n parser = argparse.ArgumentParser()\n parser.add_argument('filename', help='Name of the file you want to load')\n args = parser.parse_args()\n\n with io.open(args.filename, 'r', encoding='utf-8') as f:\n reader = csv.reader(f)\n unsteady_flow_routing(f)\n\n \nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n print('Something went wrong {0}'.format(e))", "sub_path": "MUFT_Otra/Otra_flow/MUFT_flow_param.py", "file_name": "MUFT_flow_param.py", "file_ext": "py", "file_size_in_byte": 3216, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "scipy.integrate.odeint", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 74, "usage_type": "call"}, {"api_name": "lmfit.Parameters", "line_number": 77, "usage_type": "call"}, {"api_name": "lmfit.minimize", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 95, "usage_type": "attribute"}, {"api_name": "lmfit.report_fit", "line_number": 96, "usage_type": "call"}, {"api_name": "sys.stdout.close", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 98, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 115, "usage_type": "call"}, {"api_name": "io.open", "line_number": 119, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "220493012", "text": "# @Time : 2020/11/14\n# @Author : Junyi Li\n# @Email : lijunyi@ruc.edu.cn\n\n# UPDATE:\n# @Time : 2020/12/27\n# @Author : Tianyi Tang\n# @Email : steventang@ruc.edu.cn\n\n\nr\"\"\"\nTransformerEncDec\n################################################\nReference:\n Vaswani et al. 
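The fitting pattern above (an odeint solve wrapped in a residual handed to lmfit's Nelder-Mead minimize) is easier to see on a toy problem. This self-contained sketch fits a single decay rate k to synthetic first-order-decay data; the decay model and the true k=0.3 are invented for illustration and are not part of the MUFT reach equations:

import numpy as np
from scipy.integrate import odeint
from lmfit import Parameters, minimize, report_fit

def rhs(y, t, p):
    return -p['k'].value * y  # dy/dt = -k*y, a stand-in for calc_dQdt

def residual(p, t, y0, data):
    return odeint(rhs, y0, t, args=(p,))[:, 0] - data

t = np.linspace(0, 10, 50)
data = 5.0 * np.exp(-0.3 * t) + np.random.normal(0, 0.05, t.size)  # synthetic observations

params = Parameters()
params.add('k', value=0.1, min=0, max=2)

out = minimize(residual, params, args=(t, 5.0, data), method='nelder')
report_fit(out)  # the recovered k should sit near 0.3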
\"Attention is All you Need\" in NIPS 2017.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom textbox.model.abstract_generator import ConditionalGenerator\nfrom textbox.module.Encoder.transformer_encoder import TransformerEncoder\nfrom textbox.module.Decoder.transformer_decoder import TransformerDecoder\nfrom textbox.module.Embedder.position_embedder import LearnedPositionalEmbedding, SinusoidalPositionalEmbedding\nfrom textbox.module.Attention.attention_mechanism import SelfAttentionMask\nfrom textbox.model.init import xavier_normal_initialization\nfrom textbox.module.strategy import topk_sampling, greedy_search, Beam_Search_Hypothesis\n\n\nclass TransformerEncDec(ConditionalGenerator):\n r\"\"\"Transformer-based Encoder-Decoder architecture is a powerful framework for conditional text generation.\n \"\"\"\n\n def __init__(self, config, dataset):\n super(TransformerEncDec, self).__init__(config, dataset)\n\n # load parameters info\n self.embedding_size = config['embedding_size']\n self.ffn_size = config['ffn_size']\n self.num_heads = config['num_heads']\n self.num_enc_layers = config['num_enc_layers']\n self.num_dec_layers = config['num_dec_layers']\n self.attn_dropout_ratio = config['attn_dropout_ratio']\n self.attn_weight_dropout_ratio = config['attn_weight_dropout_ratio']\n self.ffn_dropout_ratio = config['ffn_dropout_ratio']\n\n self.decoding_strategy = config['decoding_strategy']\n\n if (self.decoding_strategy not in ['topk_sampling', 'greedy_search', 'beam_search']):\n raise NotImplementedError(\"{} decoding strategy not implemented\".format(self.strategy))\n if (self.decoding_strategy == 'beam_search'):\n self.beam_size = config['beam_size']\n\n self.padding_token_idx = dataset.padding_token_idx\n self.sos_token_idx = dataset.sos_token_idx\n self.eos_token_idx = dataset.eos_token_idx\n\n # define layers and loss\n self.source_token_embedder = nn.Embedding(self.source_vocab_size, self.embedding_size,\n padding_idx=self.padding_token_idx)\n\n if config['share_vocab']:\n self.target_token_embedder = self.source_token_embedder\n else:\n self.target_token_embedder = nn.Embedding(self.target_vocab_size, self.embedding_size,\n padding_idx=self.padding_token_idx)\n\n if config['learned_position_embedder']:\n self.position_embedder = LearnedPositionalEmbedding(self.embedding_size)\n else:\n self.position_embedder = SinusoidalPositionalEmbedding(self.embedding_size)\n\n self.self_attn_mask = SelfAttentionMask()\n\n self.encoder = TransformerEncoder(self.embedding_size, self.ffn_size, self.num_enc_layers, self.num_heads,\n self.attn_dropout_ratio, self.attn_weight_dropout_ratio,\n self.ffn_dropout_ratio)\n\n self.decoder = TransformerDecoder(self.embedding_size, self.ffn_size, self.num_dec_layers, self.num_heads,\n self.attn_dropout_ratio, self.attn_weight_dropout_ratio,\n self.ffn_dropout_ratio, with_external=True)\n\n self.vocab_linear = nn.Linear(self.embedding_size, self.target_vocab_size)\n\n self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')\n self.max_target_length = config['target_max_seq_length']\n\n # parameters initialization\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.vocab_linear.weight, std=0.02)\n nn.init.constant_(self.vocab_linear.bias, 0.)\n\n def generate(self, eval_dataloader):\n generate_corpus = []\n idx2token = eval_dataloader.target_idx2token\n\n for batch_data in eval_dataloader:\n source_text = batch_data['source_idx']\n source_embeddings = 
self.source_token_embedder(source_text) + \\\n self.position_embedder(source_text).to(self.device)\n source_padding_mask = torch.eq(source_text, self.padding_token_idx).to(self.device)\n encoder_outputs = self.encoder(source_embeddings,\n self_padding_mask=source_padding_mask,\n output_all_encoded_layers=False)\n\n for bid in range(source_text.size(0)):\n encoder_output = encoder_outputs[bid, :, :].unsqueeze(0)\n encoder_mask = source_padding_mask[bid, :].unsqueeze(0)\n generate_tokens = []\n prev_token_ids = [self.sos_token_idx]\n input_seq = torch.LongTensor([prev_token_ids]).to(self.device)\n\n if (self.decoding_strategy == 'beam_search'):\n hypothesis = Beam_Search_Hypothesis(self.beam_size, self.sos_token_idx, self.eos_token_idx, self.device, idx2token)\n \n for gen_idx in range(self.max_target_length):\n self_attn_mask = self.self_attn_mask(input_seq.size(-1)).bool().to(self.device)\n decoder_input = self.target_token_embedder(input_seq) + \\\n self.position_embedder(input_seq).to(self.device)\n decoder_outputs = self.decoder(decoder_input, self_attn_mask=self_attn_mask,\n external_states=encoder_output, external_padding_mask=encoder_mask)\n\n token_logits = self.vocab_linear(decoder_outputs[:, -1, :].unsqueeze(1))\n\n if (self.decoding_strategy == 'topk_sampling'):\n token_idx = topk_sampling(token_logits).item()\n elif (self.decoding_strategy == 'greedy_search'):\n token_idx = greedy_search(token_logits).item()\n elif (self.decoding_strategy == 'beam_search'):\n input_seq, encoder_output, encoder_mask = \\\n hypothesis.step(gen_idx, token_logits, encoder_output=encoder_output, encoder_mask=encoder_mask, input_type='whole')\n \n if (self.decoding_strategy in ['topk_sampling', 'greedy_search']):\n if token_idx == self.eos_token_idx:\n break\n else:\n generate_tokens.append(idx2token[token_idx])\n prev_token_ids.append(token_idx)\n input_seq = torch.LongTensor([prev_token_ids]).to(self.device)\n elif (self.decoding_strategy == 'beam_search'):\n if (hypothesis.stop()):\n break\n\n if (self.decoding_strategy == 'beam_search'):\n generate_tokens = hypothesis.generate()\n\n generate_corpus.append(generate_tokens)\n \n return generate_corpus\n\n def calculate_loss(self, corpus, epoch_idx=0):\n source_text = corpus['source_idx']\n\n input_text = corpus['target_idx'][:, :-1]\n target_text = corpus['target_idx'][:, 1:]\n\n source_embeddings = self.source_token_embedder(source_text) + self.position_embedder(source_text).to(\n self.device)\n source_padding_mask = torch.eq(source_text, self.padding_token_idx).to(self.device)\n encoder_outputs = self.encoder(source_embeddings,\n self_padding_mask=source_padding_mask)\n\n input_embeddings = self.target_token_embedder(input_text) + self.position_embedder(input_text).to(self.device)\n self_padding_mask = torch.eq(input_text, self.padding_token_idx).to(self.device)\n self_attn_mask = self.self_attn_mask(input_text.size(-1)).bool().to(self.device)\n decoder_outputs = self.decoder(input_embeddings,\n self_padding_mask=self_padding_mask,\n self_attn_mask=self_attn_mask,\n external_states=encoder_outputs,\n external_padding_mask=source_padding_mask)\n\n token_logits = self.vocab_linear(decoder_outputs)\n loss = self.loss(token_logits.view(-1, token_logits.size(-1)), target_text.contiguous().view(-1))\n loss = loss.reshape_as(target_text)\n\n length = corpus['target_length'] - 1\n loss = loss.sum(dim=1) / length.float()\n\n return loss.mean()\n", "sub_path": "textbox/model/Seq2Seq/transformerencdec.py", "file_name": "transformerencdec.py", "file_ext": 
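The three branches of generate() differ only in how the next token id is drawn from token_logits. Below is a standalone sketch of the two sampling-style choices; these toy helpers merely mirror the names imported from textbox.module.strategy and are not the library implementations:

import torch

def greedy_search(token_logits):
    return token_logits.argmax(dim=-1).view(-1)  # deterministic argmax token

def topk_sampling(token_logits, k=5):
    vals, idx = torch.topk(token_logits.view(-1), k)  # keep the k best logits
    probs = torch.softmax(vals, dim=-1)               # renormalize over them
    return idx[torch.multinomial(probs, 1)]           # sample one of the k ids

logits = torch.randn(1, 1, 1000)  # (batch, step, vocab), as produced above
print(greedy_search(logits).item())  # always the single best token
print(topk_sampling(logits).item())  # stochastic, one of the 5 best tokens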
"py", "file_size_in_byte": 8922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "textbox.model.abstract_generator.ConditionalGenerator", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "textbox.module.Embedder.position_embedder.LearnedPositionalEmbedding", "line_number": 70, "usage_type": "call"}, {"api_name": "textbox.module.Embedder.position_embedder.SinusoidalPositionalEmbedding", "line_number": 72, "usage_type": "call"}, {"api_name": "textbox.module.Attention.attention_mechanism.SelfAttentionMask", "line_number": 74, "usage_type": "call"}, {"api_name": "textbox.module.Encoder.transformer_encoder.TransformerEncoder", "line_number": 76, "usage_type": "call"}, {"api_name": "textbox.module.Decoder.transformer_decoder.TransformerDecoder", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.eq", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 114, "usage_type": "call"}, {"api_name": "textbox.module.strategy.Beam_Search_Hypothesis", "line_number": 117, "usage_type": "call"}, {"api_name": "textbox.module.strategy.topk_sampling", "line_number": 129, "usage_type": "call"}, {"api_name": "textbox.module.strategy.greedy_search", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "492651095", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import loader\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.views.generic import View\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.core.serializers.json import DjangoJSONEncoder ## allow datetime format to serialize to json\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.contrib.auth import login as auth_login, authenticate #authenticates User & creates session ID\nfrom django.contrib import messages\nfrom .forms import userForm, UploadForm #Import user registration form\nfrom django import forms\nfrom .models import Modules, Groundtruth, Rooms, Timemodule, Wifilogdata, BinaryPredictions, PercentagePredictions, EstimatePredictions\n# API\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import 
status\nfrom .serializers import SerializerRooms, SerializerModules, SerializerGroundtruth, SerializerTimemodule, SerializerBinaryPredictions, SerializerPercentagePredictions, SerializerEstimatePredictions\n# wifi logs upload\nimport pandas as pd\nimport csv\nfrom io import TextIOWrapper\nimport json\nimport datetime\n\n# Reference: 'Django Tutorial for Beginners - 40 - REST API View Request and Response', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=QW_5xCCPWFk&index=40&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK [Accessed: 28/08/16]\nclass RoomList(APIView):\n def get(self, request):\n rooms = Rooms.objects.all()\n serializer = SerializerRooms(rooms, many = True)\n return Response(serializer.data)\n\nclass ModuleList(APIView):\n def get(self, request):\n modules = Modules.objects.all()\n serializer = SerializerModules(modules, many = True)\n return Response(serializer.data)\n\nclass GroundtruthList(APIView):\n def get(self, request):\n groundtruth = Groundtruth.objects.all()\n serializer = SerializerGroundtruth(groundtruth, many = True)\n return Response(serializer.data)\n\nclass TimemoduleList(APIView):\n def get(self, request):\n timemodule = Timemodule.objects.all()\n serializer = SerializerTimemodule(timemodule, many = True)\n return Response(serializer.data)\n\nclass BinaryPredictionsList(APIView):\n def get(self, request):\n binarypredictions = BinaryPredictions.objects.all()\n serializer = SerializerBinaryPredictions( binarypredictions, many = True)\n return Response(serializer.data)\n\nclass PercentagePredictionsList(APIView):\n def get(self, request):\n percentagepredictions= PercentagePredictions.objects.all()\n serializer = SerializerPercentagePredictions(percentagepredictions, many = True)\n return Response(serializer.data)\n\nclass EstimatePredictionsList(APIView):\n def get(self, request):\n estimatepredictions = EstimatePredictions.objects.all()\n serializer = SerializerEstimatePredictions(estimatepredictions, many = True)\n return Response(serializer.data)\n\ndef login(request):\n return render(request, 'occupants/login.html', {})\n\ndef results(request):\n roomList = Rooms.objects.all()\n dateTimeList = Timemodule.objects.filter(room=\"B-004\")\n dateList = sorted(list(set([d.datetime.date() for d in dateTimeList])))\n dateList = [date.strftime('%m/%d/%Y') for date in dateList]\n\n return render(request, 'occupants/results.html', {'roomList': roomList, 'dateList' : dateList })\n\ndef calendarGen(request):\n '''function to query data for graph generation'''\n if request.method == 'POST':\n\n selectedRoom = request.POST.get('roomForm', False)\n startTime = request.POST.get('dateForm', False)\n startMonth = int(startTime[:2])\n startDay = int(startTime[3:5])\n startYear = int(startTime[6:])\n start_time = datetime.date(startYear, startMonth, startDay)\n roomObj = Rooms.objects.get(room=selectedRoom)\n\n roomSchedule = Timemodule.objects.filter(room=selectedRoom,\n datetime__range=(start_time, start_time + datetime.timedelta(days=5)))\n timeList = Timemodule.objects.filter(room=selectedRoom, datetime__day=start_time.day)\n calendarInfo = {\"room\": {\"roomName\": roomObj.room, \"capacity\": roomObj.capacity, \"campus\": roomObj.campus,\n \"building\": roomObj.building}, \"times\": [], \"timeSlots\": []}\n\n for dt in timeList:\n calendarInfo[\"times\"].append({\"time\": dt.datetime.time()})\n\n for ts in roomSchedule:\n calendarInfo[\"timeSlots\"].append({\"date\": ts.datetime.date(), \"time\": ts.datetime.time(),\n \"moduleName\": ts.module.modulename, 
\"timeModuleId\": ts.timemoduleid})\n\n return HttpResponse(json.dumps(calendarInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef GenGraph(request):\n ''' function to query database for hourly graph data '''\n if request.is_ajax():\n\n timeModuleId = request.POST['timeModuleId']\n\n ## use POST data to query database and parse reutrn into required format\n timeModule = Timemodule.objects.get(timemoduleid = timeModuleId)\n startTime = timeModule.datetime\n selectedRoom = timeModule.room.room\n\n wifiData = Wifilogdata.objects.filter(room=selectedRoom,\n datetime__range=(startTime, startTime + datetime.timedelta(hours=1)))\n predictions = EstimatePredictions.objects.get(room=selectedRoom, datetime=startTime)\n groundTruthObj = Groundtruth.objects.get(room=selectedRoom, datetime=startTime)\n\n groundTruth = groundTruthObj.percentageestimate\n registered = timeModule.module.numreg\n capacity = timeModule.room.capacity\n predictionRange = predictions.predictions\n predictionUpper = int(predictionRange[predictionRange.index('-')+1:])\n predictionLower = int(predictionRange[:predictionRange.index('-')])\n\n binaryPred = BinaryPredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n percentagePred = PercentagePredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n estimatePred = EstimatePredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n\n jsonFile = {\"timeSlice\": [], \"groundTruth\": groundTruth, \"registered\": registered, \"capacity\": capacity,\n \"predictionLower\": predictionLower, \"predictionUpper\": predictionUpper, \"binaryPred\": binaryPred,\n \"percentagePred\":percentagePred, \"estimatePred\":estimatePred}\n\n for ts in wifiData:\n associated = ts.associated\n jsonFile[\"timeSlice\"].append({'associated': associated})\n\n return HttpResponse(json.dumps(jsonFile), content_type=\"application/json\")\n\n else:\n raise Http404\n\ndef RoomDayGraph(request):\n ''' function to query database for daily room graph data '''\n if request.is_ajax():\n\n selectedRoom = request.POST['selectedRoom']\n selectedDate = request.POST['selectedDate']\n selectedYear = int(selectedDate[:4])\n selectedMonth = int(selectedDate[5:7])\n selectedDay = int(selectedDate[8:])\n selectedDateTime = datetime.date(selectedYear, selectedMonth, selectedDay)\n timeModuleList = Timemodule.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n predictionList = PercentagePredictions.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n groundTruthList = Groundtruth.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n roomObj = Rooms.objects.get(room=selectedRoom)\n\n jsonFile = {\"timeSlice\": [], \"capacity\": roomObj.capacity}\n\n for i in range(0, len(timeModuleList)-1):\n time = timeModuleList[i].datetime.time()\n module = timeModuleList[i].module.modulename\n registered = timeModuleList[i].module.numreg\n prediction = predictionList[i].predictions\n groundTruth = groundTruthList[i].percentageestimate\n\n jsonFile[\"timeSlice\"].append({'time': time, 'module': module, 'registered': registered,\n 'prediction': prediction, 'groundTruth': groundTruth})\n\n return HttpResponse(json.dumps(jsonFile, cls=DjangoJSONEncoder), content_type = \"application/json\")\n else:\n raise Http404\n\ndef homepage(request):\n 
hours_useb4 = Timemodule.objects.filter(room='B-004').exclude(module='None').count()\n hours_availb4 = Timemodule.objects.filter(room='B-004').count()\n capacityb4 = Rooms.objects.get(room='B-004').capacity\n room_occupiedb4 = BinaryPredictions.objects.filter(room='B-004').filter(predictions=1)\n range_peopleb4 = []\n num_peopleb4 = 0\n for i in range(0,len(room_occupiedb4)):\n range_peopleb4.append(EstimatePredictions.objects.filter(room='B-004').filter(datetime=room_occupiedb4[i].datetime))\n num_peopleb4 += int(range_peopleb4[i][0].predictions.split('-')[1])\n space_freqb4 = hours_useb4 / hours_availb4\n occ_rateb4 = num_peopleb4 / (capacityb4 * hours_useb4)\n\n hours_useb3 = Timemodule.objects.filter(room='B-003').exclude(module='None').count()\n hours_availb3 = Timemodule.objects.filter(room='B-003').count()\n capacityb3 = Rooms.objects.get(room='B-003').capacity\n room_occupiedb3 = BinaryPredictions.objects.filter(room='B-003').filter(predictions=1)\n range_peopleb3 = []\n num_peopleb3 = 0\n for i in range(0,len(room_occupiedb3)):\n range_peopleb3.append(EstimatePredictions.objects.filter(room='B-003').filter(datetime=room_occupiedb3[i].datetime))\n num_peopleb3 += int(range_peopleb3[i][0].predictions.split('-')[1])\n space_freqb3 = hours_useb3 / hours_availb3\n occ_rateb3 = num_peopleb3 / (capacityb3 * hours_useb3)\n\n hours_useb2 = Timemodule.objects.filter(room='B-002').exclude(module='None').count()\n hours_availb2 = Timemodule.objects.filter(room='B-002').count()\n capacityb2 = Rooms.objects.get(room='B-002').capacity\n room_occupiedb2 = BinaryPredictions.objects.filter(room='B-002').filter(predictions=1)\n range_peopleb2 = []\n num_peopleb2 = 0\n for i in range(0,len(room_occupiedb2)):\n range_peopleb2.append(EstimatePredictions.objects.filter(room='B-002').filter(datetime=room_occupiedb2[i].datetime))\n num_peopleb2 += int(range_peopleb2[i][0].predictions.split('-')[1])\n space_freqb2 = hours_useb2 / hours_availb2\n occ_rateb2 = num_peopleb2 / (capacityb2 * hours_useb2)\n\n return render(request, 'occupants/homepage.html', {'space_freqb4': space_freqb4, 'occ_rateb4': occ_rateb4,\n 'space_freqb3': space_freqb3, 'occ_rateb3': occ_rateb3,\n 'space_freqb2': space_freqb2, 'occ_rateb2': occ_rateb2, })\n\nfrom itertools import chain\n# Reference: 'Django Tutorial for Beginnners 30 Model Forms', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=eouZwgKuA5k&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK&index=30 [Accessed: 28/08/16]\ndef SelectInfo(request):\n rooms = Rooms.objects.all()\n modules = Modules.objects.all()\n timemodule = Timemodule.objects.all()\n groundtruth = Groundtruth.objects.all()\n wifi = Wifilogdata.objects.filter()\n dateTimeList = Timemodule.objects.filter(room=\"B-004\")\n GTdateTimeList = Groundtruth.objects.filter(room=\"B-004\")\n WiFidateList = Wifilogdata.objects.filter(room=\"B-004\")\n\n template = loader.get_template('occupants/forms.html')\n context = {\n 'rooms': rooms,\n 'modules': modules,\n 'timemodule': timemodule,\n 'groundtruth': groundtruth,\n 'wifi': wifi,\n 'ModuleDates': dateTimeList,\n 'GTDates': GTdateTimeList,\n 'WiFiDates': WiFidateList,\n }\n\n return HttpResponse(template.render(context, request))\n\ndef TMRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n module = Timemodule.objects.filter(room=selectedRoom, datetime=selectedDateTime).values()\n TMInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, 
\"module\": module[0]['module_id'], \"id\": module[0]['timemoduleid']}\n return HttpResponse(json.dumps(TMInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef GTRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n groundtruth = Groundtruth.objects.get(room=selectedRoom, datetime=selectedDateTime)\n gtInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"percentage\": groundtruth.percentageestimate,\"binary\": groundtruth.binaryestimate, \"id\": groundtruth.groundtruthid}\n return HttpResponse(json.dumps(gtInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef WFRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n log = Wifilogdata.objects.get(room=selectedRoom, datetime=selectedDateTime)\n WFInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"count\": log.associated, \"id\": log.wifilogdataid}\n return HttpResponse(json.dumps(WFInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n# Reference: 'Django Tutorial for Beginnners 30 Model Forms', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=eouZwgKuA5k&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK&index=30 [Accessed: 28/08/16]\nclass AddModule(CreateView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n\nclass AddRoom(CreateView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = reverse_lazy('SelectInfo')\n\nclass AddTimeModule(CreateView):\n model = Timemodule\n fields = ['datetime', 'room', 'module', 'timemoduleid']\n success_url = reverse_lazy('SelectInfo')\n \nclass AddGroundTruth(CreateView):\n model = Groundtruth\n fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n success_url = reverse_lazy('SelectInfo')\n\n# Reference: 'Django Tutorial for Beginners - 32 - UpdateView and DeleteView', thenewboston, https://www.youtube.com/watch?v=5Ez2NXOX9zY&index=32&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK YouTube [Video] [Accessed: 28/08/16] \nclass UpdateModule(UpdateView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n \nclass UpdateRoom(UpdateView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = reverse_lazy('SelectInfo')\n \nclass UpdateTimeModule(UpdateView):\n model = Timemodule\n fields = ['datetime', 'room', 'module', 'timemoduleid']\n success_url = reverse_lazy('SelectInfo')\n\nclass UpdateGroundTruth(UpdateView):\n model = Groundtruth\n fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n success_url = reverse_lazy('SelectInfo')\n\nclass UpdateWifi(UpdateView):\n model = Wifilogdata\n fields = ['datetime','room', 'associated', 'wifilogdataid']\n success_url = reverse_lazy('SelectInfo')\n\nclass DeleteModule(DeleteView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n \nclass DeleteRoom(DeleteView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = reverse_lazy('SelectInfo')\n\nclass DeleteTimeModule(DeleteView):\n model = Timemodule\n fields = ['datetime', 'room', 'module', 'timemoduleid']\n success_url = reverse_lazy('SelectInfo')\n\nclass 
DeleteGroundTruth(DeleteView):\n model = Groundtruth\n fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n success_url = reverse_lazy('SelectInfo')\n\nclass DeleteWifi(DeleteView):\n model = Wifilogdata\n fields = ['datetime','room', 'associated', 'wifilogdataid']\n success_url = reverse_lazy('SelectInfo')\n\n\nclass userFormView(View):\n form_class = userForm #blueprint for form\n template_name = 'occupants/registration_form.html' #name of template to redirect to\n\n def get(self, request): #If user request is GET (display empty form) call this function\n form = self.form_class(None) #Specify what form we use\n return render(request, self.template_name, { 'form' : form })\n\n def post(self, request): #If user request is POST (submitting form) call this function\n form = self.form_class(request.POST)\n \n if form.is_valid():\n user = form.save(commit=False) #Doesn't save user yet. Customsing form below\n # standardise form inputs so they are clean and generic for our DB\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n #Changing users password\n user.set_password(password)\n user.is_active = False #Change default. User is not active until admin grants permission\n user.save()\n messages.info(request, 'Registration successful. You will receive an email confirming registration once your request has been approved.')\n\n #returns user objects if credentials are correct\n user = authenticate(username = username, password= password)\n\n if user is not None: \n if user.is_active: #Checks if user hasnt been banned\n auth_login(request, user)\n return redirect('homepage')\n\n \n return render(request, self.template_name, { 'form' : form })\n\n\ndef wifilogs(request):\n # Handle file upload\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n print('here')\n if form.is_valid():\n f = TextIOWrapper(request.FILES['docfile'].file, encoding=request.encoding)\n print(f)\n file = csv.reader(f)\n\n check = False\n for line in file:\n if check == True:\n df.loc[len(df)]=line\n if line[0]=='Key':\n columns=line\n df = pd.DataFrame(columns=line)\n check = True\n\n if check == False:\n messages.error(request, \"Invalid file content. 
Please upload a CSV containing WiFi Log Data.\");\n return render(request, 'occupants/wifilogs.html', {'form' : form })\n\n for i in range(0, len(df)):\n # put time into sql format\n df['Event Time'][i] = df['Event Time'][i].replace('GMT+00:00','')\n df['Event Time'][i] = datetime.datetime.strptime(df['Event Time'][i], '%a %b %d %X %Y')\n # Split column Key (contains campus, building and room) into separate parts so they can be added to separate columns of database table\n df['Key'][i] = df['Key'][i].split(' > ')\n \n for i in range(0, len(df)):\n model = Wifilogdata()\n model.datetime = df['Event Time'][i]\n RoomName = Rooms.objects.get(room=df['Key'][i][2])\n model.room = RoomName\n model.associated = df['Associated Client Count'][i]\n model.authenticated = df['Authenticated Client Count'][i]\n model.save()\n\n # Redirect to the document list after POST\n messages.info(request, \"WiFi Log Data successfully imported.\");\n return HttpResponseRedirect(reverse('wifilogs'))\n else:\n form = UploadForm() # A empty, unbound form\n\n # Render list page with the documents and the form\n return render(request, 'occupants/wifilogs.html', {'form' : form })\n\n", "sub_path": "myproject/occupants/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 20732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 26, "usage_type": "name"}, {"api_name": "models.Rooms.objects.all", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 28, "usage_type": "name"}, {"api_name": "serializers.SerializerRooms", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Modules.objects.all", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Modules.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.Modules", "line_number": 34, "usage_type": "name"}, {"api_name": "serializers.SerializerModules", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 38, "usage_type": "name"}, {"api_name": "models.Groundtruth.objects.all", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Groundtruth.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Groundtruth", "line_number": 40, "usage_type": "name"}, {"api_name": "serializers.SerializerGroundtruth", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 44, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.all", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 46, "usage_type": "name"}, {"api_name": "serializers.SerializerTimemodule", "line_number": 47, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 48, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 50, "usage_type": "name"}, 
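The upload handling in wifilogs() hinges on wrapping the binary upload in TextIOWrapper and skipping preamble rows until the 'Key' header appears. A self-contained sketch of that scan follows; the sample bytes are invented to match the columns the view reads, and real exports will differ in detail:

import csv
from io import BytesIO, TextIOWrapper

# invented upload content: a preamble line, the 'Key' header, one data row
raw = BytesIO(
    b"Report generated 2016-08-01\n"
    b"Key,Event Time,Associated Client Count,Authenticated Client Count\n"
    b"UCD > Computer Science > B-004,Mon Aug 01 09:00:00 GMT+00:00 2016,23,21\n"
)

header, rows = None, []
for line in csv.reader(TextIOWrapper(raw, encoding="utf-8")):
    if header is not None:
        rows.append(dict(zip(header, line)))  # data rows after the header
    elif line and line[0] == "Key":
        header = line                         # found the real header row

print(rows[0]["Key"].split(" > ")[2])  # -> 'B-004', the room component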
{"api_name": "models.BinaryPredictions.objects.all", "line_number": 52, "usage_type": "call"}, {"api_name": "models.BinaryPredictions.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.BinaryPredictions", "line_number": 52, "usage_type": "name"}, {"api_name": "serializers.SerializerBinaryPredictions", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 56, "usage_type": "name"}, {"api_name": "models.PercentagePredictions.objects.all", "line_number": 58, "usage_type": "call"}, {"api_name": "models.PercentagePredictions.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.PercentagePredictions", "line_number": 58, "usage_type": "name"}, {"api_name": "serializers.SerializerPercentagePredictions", "line_number": 59, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 62, "usage_type": "name"}, {"api_name": "models.EstimatePredictions.objects.all", "line_number": 64, "usage_type": "call"}, {"api_name": "models.EstimatePredictions.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.EstimatePredictions", "line_number": 64, "usage_type": "name"}, {"api_name": "serializers.SerializerEstimatePredictions", "line_number": 65, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 66, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Rooms.objects.all", "line_number": 72, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 72, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 73, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Rooms.objects.get", "line_number": 89, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 89, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 91, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 91, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 93, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 104, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 104, "usage_type": "call"}, {"api_name": "django.core.serializers.json.DjangoJSONEncoder", "line_number": 104, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 106, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.get", "line_number": 115, 
"usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 115, "usage_type": "name"}, {"api_name": "models.Wifilogdata.objects.filter", "line_number": 119, "usage_type": "call"}, {"api_name": "models.Wifilogdata.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.Wifilogdata", "line_number": 119, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 120, "usage_type": "call"}, {"api_name": "models.EstimatePredictions.objects.get", "line_number": 121, "usage_type": "call"}, {"api_name": "models.EstimatePredictions.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.EstimatePredictions", "line_number": 121, "usage_type": "name"}, {"api_name": "models.Groundtruth.objects.get", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Groundtruth.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.Groundtruth", "line_number": 122, "usage_type": "name"}, {"api_name": "models.BinaryPredictions.objects.get", "line_number": 131, "usage_type": "call"}, {"api_name": "models.BinaryPredictions.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "models.BinaryPredictions", "line_number": 131, "usage_type": "name"}, {"api_name": "models.PercentagePredictions.objects.get", "line_number": 132, "usage_type": "call"}, {"api_name": "models.PercentagePredictions.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.PercentagePredictions", "line_number": 132, "usage_type": "name"}, {"api_name": "models.EstimatePredictions.objects.get", "line_number": 133, "usage_type": "call"}, {"api_name": "models.EstimatePredictions.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.EstimatePredictions", "line_number": 133, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 143, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 143, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 146, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 157, "usage_type": "call"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 158, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 158, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 158, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 160, "usage_type": "call"}, {"api_name": "models.PercentagePredictions.objects.filter", "line_number": 161, "usage_type": "call"}, {"api_name": "models.PercentagePredictions.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "models.PercentagePredictions", "line_number": 161, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 163, "usage_type": "call"}, {"api_name": "models.Groundtruth.objects.filter", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Groundtruth.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.Groundtruth", "line_number": 164, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 166, "usage_type": "call"}, {"api_name": "models.Rooms.objects.get", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 167, "usage_type": "name"}, {"api_name": 
"django.http.HttpResponse", "line_number": 181, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 181, "usage_type": "call"}, {"api_name": "django.core.serializers.json.DjangoJSONEncoder", "line_number": 181, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 183, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 186, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 186, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 186, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 187, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 187, "usage_type": "name"}, {"api_name": "models.Rooms.objects.get", "line_number": 188, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 188, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 188, "usage_type": "name"}, {"api_name": "models.BinaryPredictions.objects.filter", "line_number": 189, "usage_type": "call"}, {"api_name": "models.BinaryPredictions.objects", "line_number": 189, "usage_type": "attribute"}, {"api_name": "models.BinaryPredictions", "line_number": 189, "usage_type": "name"}, {"api_name": "models.EstimatePredictions.objects.filter", "line_number": 193, "usage_type": "call"}, {"api_name": "models.EstimatePredictions.objects", "line_number": 193, "usage_type": "attribute"}, {"api_name": "models.EstimatePredictions", "line_number": 193, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 198, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 198, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 199, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 199, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 199, "usage_type": "name"}, {"api_name": "models.Rooms.objects.get", "line_number": 200, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 200, "usage_type": "name"}, {"api_name": "models.BinaryPredictions.objects.filter", "line_number": 201, "usage_type": "call"}, {"api_name": "models.BinaryPredictions.objects", "line_number": 201, "usage_type": "attribute"}, {"api_name": "models.BinaryPredictions", "line_number": 201, "usage_type": "name"}, {"api_name": "models.EstimatePredictions.objects.filter", "line_number": 205, "usage_type": "call"}, {"api_name": "models.EstimatePredictions.objects", "line_number": 205, "usage_type": "attribute"}, {"api_name": "models.EstimatePredictions", "line_number": 205, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 210, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 210, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 210, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 211, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 211, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 211, "usage_type": "name"}, {"api_name": "models.Rooms.objects.get", "line_number": 212, 
"usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 212, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 212, "usage_type": "name"}, {"api_name": "models.BinaryPredictions.objects.filter", "line_number": 213, "usage_type": "call"}, {"api_name": "models.BinaryPredictions.objects", "line_number": 213, "usage_type": "attribute"}, {"api_name": "models.BinaryPredictions", "line_number": 213, "usage_type": "name"}, {"api_name": "models.EstimatePredictions.objects.filter", "line_number": 217, "usage_type": "call"}, {"api_name": "models.EstimatePredictions.objects", "line_number": 217, "usage_type": "attribute"}, {"api_name": "models.EstimatePredictions", "line_number": 217, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 222, "usage_type": "call"}, {"api_name": "models.Rooms.objects.all", "line_number": 229, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 229, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 229, "usage_type": "name"}, {"api_name": "models.Modules.objects.all", "line_number": 230, "usage_type": "call"}, {"api_name": "models.Modules.objects", "line_number": 230, "usage_type": "attribute"}, {"api_name": "models.Modules", "line_number": 230, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.all", "line_number": 231, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 231, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 231, "usage_type": "name"}, {"api_name": "models.Groundtruth.objects.all", "line_number": 232, "usage_type": "call"}, {"api_name": "models.Groundtruth.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "models.Groundtruth", "line_number": 232, "usage_type": "name"}, {"api_name": "models.Wifilogdata.objects.filter", "line_number": 233, "usage_type": "call"}, {"api_name": "models.Wifilogdata.objects", "line_number": 233, "usage_type": "attribute"}, {"api_name": "models.Wifilogdata", "line_number": 233, "usage_type": "name"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 234, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 234, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 234, "usage_type": "name"}, {"api_name": "models.Groundtruth.objects.filter", "line_number": 235, "usage_type": "call"}, {"api_name": "models.Groundtruth.objects", "line_number": 235, "usage_type": "attribute"}, {"api_name": "models.Groundtruth", "line_number": 235, "usage_type": "name"}, {"api_name": "models.Wifilogdata.objects.filter", "line_number": 236, "usage_type": "call"}, {"api_name": "models.Wifilogdata.objects", "line_number": 236, "usage_type": "attribute"}, {"api_name": "models.Wifilogdata", "line_number": 236, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 238, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 238, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 250, "usage_type": "call"}, {"api_name": "models.Timemodule.objects.filter", "line_number": 256, "usage_type": "call"}, {"api_name": "models.Timemodule.objects", "line_number": 256, "usage_type": "attribute"}, {"api_name": "models.Timemodule", "line_number": 256, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 258, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 258, "usage_type": "call"}, 
{"api_name": "django.core.serializers.json.DjangoJSONEncoder", "line_number": 258, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 260, "usage_type": "name"}, {"api_name": "models.Groundtruth.objects.get", "line_number": 266, "usage_type": "call"}, {"api_name": "models.Groundtruth.objects", "line_number": 266, "usage_type": "attribute"}, {"api_name": "models.Groundtruth", "line_number": 266, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 268, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 268, "usage_type": "call"}, {"api_name": "django.core.serializers.json.DjangoJSONEncoder", "line_number": 268, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 270, "usage_type": "name"}, {"api_name": "models.Wifilogdata.objects.get", "line_number": 276, "usage_type": "call"}, {"api_name": "models.Wifilogdata.objects", "line_number": 276, "usage_type": "attribute"}, {"api_name": "models.Wifilogdata", "line_number": 276, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 278, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 278, "usage_type": "call"}, {"api_name": "django.core.serializers.json.DjangoJSONEncoder", "line_number": 278, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 280, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 282, "usage_type": "name"}, {"api_name": "models.Modules", "line_number": 283, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 285, "usage_type": "call"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 287, "usage_type": "name"}, {"api_name": "models.Rooms", "line_number": 288, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 290, "usage_type": "call"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 292, "usage_type": "name"}, {"api_name": "models.Timemodule", "line_number": 293, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 295, "usage_type": "call"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 297, "usage_type": "name"}, {"api_name": "models.Groundtruth", "line_number": 298, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 300, "usage_type": "call"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 303, "usage_type": "name"}, {"api_name": "models.Modules", "line_number": 304, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 306, "usage_type": "call"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 308, "usage_type": "name"}, {"api_name": "models.Rooms", "line_number": 309, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 311, "usage_type": "call"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 313, "usage_type": "name"}, {"api_name": "models.Timemodule", "line_number": 314, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 316, "usage_type": "call"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 318, "usage_type": "name"}, {"api_name": "models.Groundtruth", "line_number": 319, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 321, "usage_type": "call"}, {"api_name": 
"django.views.generic.edit.UpdateView", "line_number": 323, "usage_type": "name"}, {"api_name": "models.Wifilogdata", "line_number": 324, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 326, "usage_type": "call"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 328, "usage_type": "name"}, {"api_name": "models.Modules", "line_number": 329, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 331, "usage_type": "call"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 333, "usage_type": "name"}, {"api_name": "models.Rooms", "line_number": 334, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 336, "usage_type": "call"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 338, "usage_type": "name"}, {"api_name": "models.Timemodule", "line_number": 339, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 341, "usage_type": "call"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 343, "usage_type": "name"}, {"api_name": "models.Groundtruth", "line_number": 344, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 346, "usage_type": "call"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 348, "usage_type": "name"}, {"api_name": "models.Wifilogdata", "line_number": 349, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 351, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 354, "usage_type": "name"}, {"api_name": "forms.userForm", "line_number": 355, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 360, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 375, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 375, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 378, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 382, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 383, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 386, "usage_type": "call"}, {"api_name": "forms.UploadForm", "line_number": 392, "usage_type": "call"}, {"api_name": "io.TextIOWrapper", "line_number": 395, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 397, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 405, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 409, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 409, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 410, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 415, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 415, "usage_type": "attribute"}, {"api_name": "models.Wifilogdata", "line_number": 420, "usage_type": "call"}, {"api_name": "models.Rooms.objects.get", "line_number": 422, "usage_type": "call"}, {"api_name": "models.Rooms.objects", "line_number": 422, "usage_type": "attribute"}, {"api_name": "models.Rooms", "line_number": 422, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 429, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 429, 
"usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 430, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 430, "usage_type": "call"}, {"api_name": "forms.UploadForm", "line_number": 432, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 435, "usage_type": "call"}]} +{"seq_id": "574391791", "text": "\"\"\"充值接口用例\"\"\"\nimport requests\nimport pytest\nimport json\n\nfrom decimal import Decimal\nfrom MiddleWare.helper import MiddleHandler\n\nexcel_charge_data = MiddleHandler.my_excel.read_data(\"charge\")\n\n\n@pytest.mark.parametrize(\"charge_data\", excel_charge_data)\ndef test_charge(charge_data, loan_user_login, db_access):\n \"\"\"测试充值接口\"\"\"\n request_method = charge_data[\"Request_method\"]\n url = charge_data[\"Url\"]\n headers = charge_data[\"Headers\"]\n json_data = charge_data[\"Data\"]\n expected_result = charge_data[\"Expect_result\"]\n if \"#loan_member_id#\" in json_data:\n json_data = json_data.replace(\"#loan_member_id#\", str(loan_user_login[\"member_id\"]))\n if \"#loan_token#\" in headers:\n headers = headers.replace(\"#loan_token#\", loan_user_login[\"token\"])\n if \"*wrong_member_id*\" in json_data:\n json_data = json_data.replace(\"*wrong_member_id*\", str(loan_user_login[\"member_id\"] + 1))\n sql = \"select leave_amount from member where id = {};\".format(loan_user_login[\"member_id\"])\n # 获取充值前db记录的余额\n money_charge_before = db_access.query_db(sql=sql)\n response = requests.request(method=request_method,\n url=MiddleHandler.yaml_data[\"host\"] + url,\n headers=json.loads(headers),\n json=json.loads(json_data))\n # 获取充值后db记录的余额\n money_charge_after = db_access.query_db(sql=sql)\n actual_result = response.json()[\"code\"]\n try:\n assert actual_result == expected_result\n if actual_result == 0:\n charge_money = Decimal(str(json.loads(json_data)[\"amount\"]))\n assert money_charge_after[\"leave_amount\"] == money_charge_before[\"leave_amount\"] + charge_money\n except AssertionError as e:\n MiddleHandler.my_logger.error(\"测试失败:{}{}\".format(headers, json_data))\n raise e\n", "sub_path": "tests/test_charge.py", "file_name": "test_charge.py", "file_ext": "py", "file_size_in_byte": 1917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "MiddleWare.helper.MiddleHandler.my_excel.read_data", "line_number": 9, "usage_type": "call"}, {"api_name": "MiddleWare.helper.MiddleHandler.my_excel", "line_number": 9, "usage_type": "attribute"}, {"api_name": "MiddleWare.helper.MiddleHandler", "line_number": 9, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 29, "usage_type": "call"}, {"api_name": "MiddleWare.helper.MiddleHandler.yaml_data", "line_number": 30, "usage_type": "attribute"}, {"api_name": "MiddleWare.helper.MiddleHandler", "line_number": 30, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "MiddleWare.helper.MiddleHandler.my_logger.error", "line_number": 42, "usage_type": "call"}, {"api_name": "MiddleWare.helper.MiddleHandler.my_logger", "line_number": 42, "usage_type": "attribute"}, {"api_name": "MiddleWare.helper.MiddleHandler", "line_number": 42, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", 
"line_number": 12, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "402517066", "text": "from openpyxl import Workbook\r\nfrom openpyxl.styles import PatternFill, Border, Side\r\nfrom netmiko import ConnectHandler\r\nfrom pprint import pprint\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nimport time\r\n\r\nt1 = time.perf_counter()\r\nconnection_fail = []\r\nhostname_list = []\r\nsn_list = []\r\nuptime_list = []\r\nmodel_list = []\r\nos_version_list = []\r\n\r\nwb = Workbook()\r\nws = wb.active\r\nws.title = 'Inventory'\r\nws['A1'] = 'Hostname'\r\nws['B1'] = 'IP Address'\r\nws['C1'] = 'Serial Number'\r\nws['D1'] = 'Uptime'\r\nws['E1'] = 'Model'\r\nws['F1'] = 'OS Version'\r\nyellowFill = PatternFill(start_color='FFFF00', end_color='FFFF00', fill_type='solid')\r\nthin_border = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin'))\r\nws['A1'].fill=yellowFill\r\nws['B1'].fill=yellowFill\r\nws['C1'].fill=yellowFill\r\nws['D1'].fill=yellowFill\r\nws['E1'].fill=yellowFill\r\nws['F1'].fill=yellowFill\r\n\r\ndef retrieve_data(ip):\r\n try:\r\n connection_info = {\r\n 'device_type': 'cisco_ios',\r\n 'ip': ip,\r\n 'username': 'parry',\r\n 'password': 'afmo9se8e!'\r\n }\r\n with ConnectHandler(**connection_info) as conn:\r\n print (f'已经成功登陆交换机{ip}')\r\n hostname = conn.find_prompt().replace('#','')\r\n hostname_list.append(hostname)\r\n output = conn.send_command('show version', use_textfsm=True)\r\n sn = output[0]['serial'][0]\r\n sn_list.append(sn)\r\n uptime = output[0]['uptime']\r\n uptime_list.append(uptime)\r\n model = output[0]['hardware'][0]\r\n model_list.append(model)\r\n os_version = output[0]['version']\r\n os_version_list.append(os_version)\r\n except Exception as e:\r\n connection_fail.append(ip)\r\n\r\nwith open('ip_list.txt') as f:\r\n with ThreadPoolExecutor(max_workers=5000) as exe:\r\n ip_addresses = f.read().splitlines()\r\n results = exe.map(retrieve_data, ip_addresses)\r\n\r\nwith open('ip_list.txt') as f:\r\n f.seek(0)\r\n ip_list = f.readlines()\r\n number_of_sw = len(ip_list) + 2\r\n for hostname, ip, sn, uptime, model, os_version, row in zip(hostname_list, ip_list, sn_list, uptime_list, model_list, os_version_list, range(2, number_of_sw)):\r\n ws.cell(row=row, column=1, value=hostname)\r\n ws.cell(row=row, column=2, value=ip)\r\n ws.cell(row=row, column=3, value=sn)\r\n ws.cell(row=row, column=4, value=uptime)\r\n ws.cell(row=row, column=5, value=model)\r\n ws.cell(row=row, column=6, value=os_version)\r\n\r\ndims = {}\r\nfor row in ws.rows:\r\n for cell in row:\r\n cell.border=thin_border\r\n if cell.value:\r\n dims[cell.column_letter] = max((dims.get(cell.column_letter, 0), len(str(cell.value))))\r\n\r\nfor col, value in dims.items():\r\n ws.column_dimensions[col].width = value + 1\r\n\r\nwb.save('inventory.xlsx')\r\nt2 = time.perf_counter()\r\nprint(f'Finished in {round(t2-t1,2)} seconds.')\r\n\r\nprint ('SSH connection to below switches failed: ')\r\nfor i in connection_fail:\r\n print (i)", "sub_path": "inventory (concurrent future version).py", "file_name": "inventory (concurrent future version).py", "file_ext": "py", "file_size_in_byte": 3103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "time.perf_counter", "line_number": 8, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 16, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", 
"line_number": 25, "usage_type": "call"}, {"api_name": "openpyxl.styles.Border", "line_number": 26, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 26, "usage_type": "call"}, {"api_name": "netmiko.ConnectHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 59, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "427938512", "text": "from django.conf.urls import url\n\nfrom . import views\n\n\napp_name = 'sodarcache'\n\nurlpatterns = [\n url(\n regex=r'^api/set/(?P[0-9a-f-]+)$',\n view=views.SodarCacheSetAPIView.as_view(),\n name='cache_set',\n ),\n url(\n regex=r'^api/get/(?P[0-9a-f-]+)$',\n view=views.SodarCacheGetAPIView.as_view(),\n name='cache_get',\n ),\n url(\n regex=r'^api/get/date/(?P[0-9a-f-]+)$',\n view=views.SodarCacheGetDateAPIView.as_view(),\n name='cache_get_date',\n ),\n]\n", "sub_path": "sodarcache/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "407195265", "text": "import logging\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n app.logger.debug('HELLO!!!!!!!!!!')\n return \"

Hello There!

\"\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\nelse:\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n\n", "sub_path": "obras/service/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 404, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "163820714", "text": "from PyTube import YouTubeHandler\nfrom pydub import AudioSegment\nfrom pygame import mixer as Mi\nfrom os import remove , rename\nimport pafy, glob as G\n\n\nMi.init(48000)\n\ndef to_mp3(search):\n Y = YouTubeHandler(search)\n Y.get_individual_video_link()\n try: result = pafy.new(Y.link)\n except: runmain()\n print(result.title + \" | \" + Y.link,\"\\nDownload...\")\n m4a = result.m4astreams[0]\n m4a.download()\n print(\"Completed!\")\n name = G.glob(\"*.m4a\")[0]\n Mp3 = AudioSegment.from_file(name, format=\"m4a\")\n print(\"Converting...\")\n Mp3.export(\"$music_temp.mp3\", format=\"mp3\" , bitrate='256')\n remove(name)\n print(\"Playing Music...\")\n Mi.music.load(\"$music_temp.mp3\")\n Mi.music.play()\n\ndef runmain():\n Select = input(\"input: \")\n if Select in \"Tt\": # Enter 'Tube' to Search.\n try:\n Mi.music.unload()\n remove(\"$music_temp.mp3\")\n except: pass\n finally:\n to_mp3(input(\"Input Music: \")) # Enter Title of song.\n runmain()\n elif Select in \"Ss\": # Enter 'Stop' to Stop Music and Delete File\n Mi.music.stop()\n Mi.music.unload()\n remove(\"$music_temp.mp3\")\n runmain()\n elif Select in \"Vv\": # Change Volume of Music\n Mi.music.set_volume(eval(input(\"Volume 0.0 - 1.0 : \")))\n print(\"Volume now is\",Mi.music.get_volume())\n runmain()\n elif Select in \"Xx\":\n exit(0)\n else: runmain()\n \nif __name__ == '__main__':\n runmain()\n", "sub_path": "MusicPlayer.py", "file_name": "MusicPlayer.py", "file_ext": "py", "file_size_in_byte": 1485, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "pygame.mixer.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 8, "usage_type": "name"}, {"api_name": "PyTube.YouTubeHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "pafy.new", "line_number": 13, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 19, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 20, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 20, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 25, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 26, "usage_type": "name"}, {"api_name": "pygame.mixer.music.unload", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 32, "usage_type": "name"}, {"api_name": "os.remove", 
"line_number": 33, "usage_type": "call"}, {"api_name": "pygame.mixer.music.stop", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 39, "usage_type": "name"}, {"api_name": "pygame.mixer.music.unload", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 40, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 44, "usage_type": "name"}, {"api_name": "pygame.mixer.music.get_volume", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "80809430", "text": "from django.shortcuts import render, HttpResponseRedirect\nimport base64\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib import messages\nfrom .models import Client\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom subcriper.models import Subcriper\nfrom clientgallery.models import Clientgallery\n\n# Create your views here.\n\n@login_required(login_url='login')\ndef addcli(request):\n if request.method == 'POST':\n if request.POST['pas'] == request.POST['cpas']:\n fnm = request.POST['fnm']\n lnm = request.POST['lnm']\n mob = request.POST['mob']\n em = request.POST['em'].lower()\n pas = request.POST['pas']\n # cpas = request.post['cpas']\n\n cimg = request.FILES['cimg']\n cnm = request.POST['cnm']\n cem = request.POST['cem'].lower()\n cmob = request.POST['cmob']\n cweb = request.POST['cweb']\n cadd = request.POST['cadd']\n\n sta = request.POST['sta']\n add = Client(cli_fname=fnm, cli_lname=lnm, cli_mob=mob, cli_email=em, cli_pass=pas, com_img=cimg, com_name=cnm, com_email=cem, com_mob=cmob, com_web=cweb, com_address=cadd, status=sta)\n \n add.save()\n else:\n messages.success(request, 'Client Password And Confirm Password Do Not Match. 
Please Try Again!', extra_tags='danger')\n\n return render(request, 'client/add_cli.html', {'name': request.user})\n\n\n messages.success(request, 'Client Added Successfully', extra_tags='success')\n\n return HttpResponseRedirect('/cli/manageclient/')\n \n # msg = {'serr':'Client Added Successfully'}\n # return render(request, 'client/add_cli.html', {'err':msg, 'name': request.user})\n else:\n # msg = {'ferr':'Please Fill All Field Either Client Do Not Add.'}\n return render(request, 'client/add_cli.html', {'name': request.user})\n\n@login_required(login_url='login')\ndef edtcli(request, eid):\n ecli = Client.objects.get(pk=eid)\n cli = Client.objects.all()\n return render(request, 'client/edit_cli.html', {'ecli':ecli, 'cli':cli, 'name': request.user})\n\n@login_required(login_url='login')\ndef updcli(request, ucliid):\n if request.method == 'POST':\n try:\n if request.FILES['cimg'] != 0:\n fnm = request.POST['fnm']\n lnm = request.POST['lnm']\n mob = request.POST['mob']\n em = request.POST['em'].lower()\n cimg = request.FILES['cimg']\n cnm = request.POST['cnm']\n cem = request.POST['cem'].lower()\n cmob = request.POST['cmob']\n cweb = request.POST['cweb']\n cadd = request.POST['cadd']\n sta = request.POST['sta']\n\n edt = Client.objects.get(cli_id = ucliid)\n edt.cli_fname = fnm\n edt.cli_lname = lnm\n edt.cli_mob = mob\n edt.cli_email = em\n edt.com_img = cimg\n edt.com_name = cnm\n edt.com_email = cem\n edt.com_mob = cmob\n edt.com_web = cweb\n edt.com_address = cadd\n edt.status = sta\n\n edt.save()\n except:\n fnm = request.POST['fnm']\n lnm = request.POST['lnm']\n mob = request.POST['mob']\n em = request.POST['em'].lower()\n cnm = request.POST['cnm']\n cem = request.POST['cem'].lower()\n cmob = request.POST['cmob']\n cweb = request.POST['cweb']\n cadd = request.POST['cadd']\n sta = request.POST['sta']\n\n edt = Client.objects.get(cli_id = ucliid)\n edt.cli_fname = fnm\n edt.cli_lname = lnm\n edt.cli_mob = mob\n edt.cli_email = em\n edt.com_img = edt.com_img\n edt.com_name = cnm\n edt.com_email = cem\n edt.com_mob = cmob\n edt.com_web = cweb\n edt.com_address = cadd\n edt.status = sta\n\n edt.save()\n\n messages.success(request, 'Client Updated Successfully', extra_tags='success')\n\n return HttpResponseRedirect('/cli/manageclient/')\n\n messages.success(request, 'Client Updated Successfully', extra_tags='success')\n\n return HttpResponseRedirect('/cli/manageclient/')\n\n@login_required(login_url='login')\ndef delcli(request, did):\n de = Client.objects.get(pk=did)\n de.delete()\n messages.success(request, 'Client Deleted Successfully', extra_tags='danger')\n return HttpResponseRedirect('/cli/manageclient/')\n\n@login_required(login_url='login')\ndef mancli(request):\n cli = Client.objects.filter().reverse()\n cli = reversed(list(cli))\n return render(request, 'client/man_cli.html', {'cli': cli, 'name': request.user})\n\n# @login_required(login_url='login')\n# def sub(request, subid):\n# try:\n# sub = Subcriper.objects.get(cli_id = subid)\n# return render(request, 'client/subcription.html', {'sub': sub, 'name': request.user})\n# except:\n# sub1 = 'Sorry, No Any Subscription Plan Active'\n# return render(request, 'client/subcription.html', {'sub1': sub1, 'name': request.user})\n\n# @login_required(login_url='login')\n# def clig(request, glyid):\n# try:\n# clig = Clientgallery.objects.filter(cli_id = glyid)\n# return render(request, 'client/cli_gallery.html', {'clig': clig, 'name': request.user})\n# except:\n# return render(request, 'client/cli_gallery.html', {'name': request.user})", 
"sub_path": "addpoint/client/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "models.Client", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 36, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Client.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 53, "usage_type": "name"}, {"api_name": "models.Client.objects.all", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 54, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Client.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 74, "usage_type": "name"}, {"api_name": "models.Client.objects.get", "line_number": 100, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 100, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 115, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 119, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 119, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 121, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Client.objects.get", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 125, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 127, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 127, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 128, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Client.objects.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "models.Client.objects", 
"line_number": 132, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 132, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 134, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "452845228", "text": "from Bio.PDB import *\nimport io\nimport glob\nimport json\n\n'''\nCant rely on BioPython\nparser = PDBParser()\nstructure = parser.get_structure('PHA-L', '4u0g.pdb')\nrcount=0\nacount=0\nfor model in structure:\n for i in model.get_residues():\n #print (i)\n rcount+=1\nppb = PPBuilder()\nfor atom in structure.get_atoms():\n acount+=1\nx=0\nfor pp in ppb.build_peptides(structure):\n #print(pp.get_sequence())\n x+=1\n'''\n#print(x,rcount,acount)\nacount=0\nrcount=0\n\ndef atom_counter(pdb_str):\n count=0\n file= pdb_str.splitlines()\n for n,line in enumerate(file):\n if line[:4] == \"ATOM\":\n count+=1\n return count\n\ndef residue_counter(pdb_str):\n count=0\n file= pdb_str.splitlines()\n for n,line in enumerate(file):\n if line[:6]==\"SEQRES\":\n temp=line[19:]\n for i in range(0,len(temp),4):\n #print(temp[i:i+3])\n if temp[i].isalnum():\n count+=1\n return count\n\ndef chain_extractor(pdb_str):\n mol_ids=[]\n chains={}\n names={}\n file= pdb_str.splitlines()\n for n,line in enumerate(file):\n if line[10:16]==\"MOL_ID\" :\n temp=line[17:]\n pos=temp.index(\";\")\n num=int(temp[:pos])\n if num not in mol_ids:\n mol_ids.append(num)\n temp=file[n+2][18:file[n+2].index(\";\")].split(\", \")\n chains[num]=temp\n temp=file[n+1][22:file[n+1].index(\";\")]\n names[num]=temp\n elif line[11:17]==\"MOL_ID\":\n temp=line[18:]\n pos=temp.index(\";\")\n num=int(temp[:pos])\n if num not in mol_ids:\n mol_ids.append(num) \n temp=file[n+2][18:file[n+2].index(\";\")].split(\", \")\n chains[num]=temp\n temp=file[n+1][21:file[n+1].index(\";\")]\n names[num]=temp\n return mol_ids,chains,names\n\ndef sequence_builder_residue(pdb_str,chains):\n file= pdb_str.splitlines()\n res_seq={}\n for i in chains:\n for j in chains[i]:\n res_seq[j]=[]\n for n,line in enumerate(file):\n if line[:6]==\"SEQRES\":\n chain_num=line[11]\n temp=line[19:].split(\" \")\n for i in temp:\n if i !=\"\":\n res_seq[chain_num].append(i)\n return res_seq\n\ndef sequence_builder_atom(pdb_str,chains):\n file= pdb_str.splitlines()\n atom_seq={}\n for i in chains:\n for j in chains[i]:\n atom_seq[j]=[]\n for n,line in enumerate(file):\n if line[:4]==\"ATOM\" or line[:6]==\"HETATM\":\n temp=[]\n temp.append(int(line[6:11]))\n temp.append(line[13:line[14:].index(\" \")+14])\n temp.append(line[17:20])\n c=line[27:56].split(\" \")\n coords=[]\n for i in c:\n if i!=\"\":\n coords.append(i)\n temp.append(coords)\n atom_seq[line[21]].append(temp)\n return atom_seq\n\ndef encoder(x):\n res_dict={\"ALA\":\"A\",\"ARG\":\"R\",\"ASN\":\"N\",\"ASP\":\"D\",\"CYS\":\"C\",\"GLN\":\"Q\",\"GLU\":\"G\",\"GLY\":\"G\",\"HIS\":\"H\",\"ILE\":\"I\",\"LEU\":\"L\",\"LYS\":\"K\",\"MET\":\"M\",\"PHE\":\"F\",\"PRO\":\"P\",\"SER\":\"S\",\"THR\":\"T\",\"TRP\":\"W\",\"TYR\":\"Y\",\"VAL\":\"V\",\"SEC\":\"U\",\"PYL\":\"O\"}\n temp=[]\n for i in x:\n if i in res_dict:\n temp.append(res_dict[i])\n else:\n temp.append(i.lower())\n return temp\nfor filename in glob.glob('pdb_files/*.pdb'):\n with open(filename) as file:\n print(filename)\n file = file.read()\n mol_ids,chains,names=chain_extractor(file)\n acount=atom_counter(file)\n res_seq=sequence_builder_residue(file,chains)\n #atom_seq=sequence_builder_atom(file,chains)\n 
rcount=residue_counter(file)\n for i in names:\n temp=\"sep_outputs/\"+filename[10:filename.index(\".\")]+\"_\"+names[i]+\".json\"\n with open(temp,\"w\") as fp:\n temp={}\n temp[\"Name\"]=names[i]\n temp[\"Chains\"]=chains[i]\n temp[\"Residues_List\"]={}\n temp[\"Atom_List\"]={}\n for j in chains[i]:\n temp[\"Residues_List\"][j]=res_seq[j]\n res_string=\"\".join(encoder(res_seq[j]))\n print(res_string)\n #temp[\"Atom_List\"][j]=atom_seq[j]\n \n #print(temp)\n json.dump(temp,fp)\n\n\n\nprint(mol_ids,chains,names)\nprint(acount,rcount)\n#print(res_seq)\n#print(atom_seq)\n", "sub_path": "start.py", "file_name": "start.py", "file_ext": "py", "file_size_in_byte": 4506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "glob.glob", "line_number": 121, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "332280456", "text": "\n# coding: utf-8\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n#creating data set\ndata = np.random.binomial(1, 0.25, (100000, 1000))\n\n\n\nepsilons = [0.5, 0.25, 0.1, 0.01, 0.001]\n\n\n\n# 23.a\nplt.figure(figsize=(10,10))\nfive_rows = data[:5]\nm_values = range(1, five_rows.shape[1] + 1)\n\n# This creates a table where i,j holds the mean of \n# first j+1 tosses in sequence i.\nfive_rows = five_rows.cumsum(axis=1)\nfive_rows = five_rows / (1 + np.indices(five_rows.shape)[1])\n\nfor row in range(five_rows.shape[0]):\n plt.plot(m_values, five_rows[row], label=row)\n\nplt.legend()\nplt.show()\n\n\n# As m grows, the values(Mean) converge to 0.25.\n\n\n# Helper functions.\n\ndef tabulate(x, y, f):\n \"\"\"Return a table of f(x, y).\"\"\"\n return np.vectorize(f)(*np.meshgrid(x, y, sparse=True))\n\ndef cheb_bound(m, e):\n \"\"\"\n returns chebyshev bound as a function of m(num of samples)\n and e(epsilon)\n \"\"\"\n return min(1.0 / (4 * m * e * e), 1)\n\ndef hoef_bound(m, e):\n \"\"\"\n Return hoeffding bound as a function of m(num of samples)\n and e(epsilon)\n \"\"\"\n return min(2 * np.exp(-2 * m * e * e), 1)\n\n\n\n# 23.b + 23.c\ncheb_bound_v = np.vectorize(cheb_bound)\nhoef_bound_v = np.vectorize(hoef_bound)\n\np = 0.25 # p according to part (c)\n\n\nm_values = np.array(range(1,1001))\ncheb_results = tabulate(m_values, epsilons, cheb_bound_v)\nhoef_results = tabulate(m_values, epsilons, hoef_bound_v)\n\n# Processing data to calculate for part (c)\ndist_results = data.cumsum(axis=1)\ndist_results = dist_results / (1 + np.indices(dist_results.shape)[1])\ndist_results -= p\ndist_results = np.abs(dist_results)\n# now dist_results holds for i,j abs(X^i_bar_j-E[X])\n\nfor i in range(len(epsilons)):\n plt.figure(figsize=(10,10))\n plt.plot(m_values, cheb_results[i],label=\"chebyshev\")\n plt.plot(m_values, hoef_results[i],label=\"hoeffding\")\n per_of_seq = np.sum(dist_results >= epsilons[i], axis=0) / data.shape[0] # calculating actual percentage of sequences\n plt.plot(m_values, per_of_seq, \n label=\"sequences that satisfy condition\")\n plt.title(\"epsilon = \"+str(epsilons[i]))\n plt.legend()\n plt.show()\n\n", "sub_path": "IML/ex1/concetration_inequalities.py", "file_name": "concetration_inequalities.py", "file_ext": "py", "file_size_in_byte": 2137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "numpy.random.binomial", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot.figure", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.indices", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.vectorize", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.indices", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "323248476", "text": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom checkout.webhook_handler import StripeWebhookHandler\n\nimport stripe\n\n\n@require_POST\n@csrf_exempt\ndef webhook(request):\n \"\"\" Listen for Stripe Webhooks \"\"\"\n \"\"\" Setup \"\"\"\n webhook_secret = settings.STRIPE_WEBHOOK_SECRET\n stripe.api_key = settings.STRIPE_CLIENT_SECRET\n \"\"\" Get Webhook Data and Verify Signature \"\"\"\n payload = request.body\n sig_header = request.META['HTTP_STRIPE_SIGNATURE']\n event = None\n\n try:\n event = stripe.Webhook.construct_event(\n payload, sig_header, webhook_secret\n )\n except ValueError as e:\n \"\"\" Invalid Payload \"\"\"\n return HttpResponse(content=e, status=400)\n except stripe.error.SignatureVerificationError as e:\n \"\"\" Invalid Signature \"\"\"\n return HttpResponse(content=e, status=400)\n except Exception as e:\n return HttpResponse(content=e, status=400)\n\n \"\"\" Set Up Webhook Handler \"\"\"\n handler = 
StripeWebhookHandler(request)\n\n \"\"\" Map Webhooks To Relevant Handler Functions \"\"\"\n event_map = {\n 'payment_intent.succeeded': handler.handle_payment_succeeded,\n 'payment_intent.payment_failed': handler.handle_payment_failed,\n }\n\n \"\"\" Get Webhook Type From Stripe \"\"\"\n event_type = event['type']\n\n \"\"\" If it Has Handler, Get It From Event Map \"\"\"\n \"\"\" Use Generic One By Default \"\"\"\n event_handler = event_map.get(event_type, handler.handle_event)\n\n \"\"\" Call Event Handler with Event \"\"\"\n response = event_handler(event)\n return response\n", "sub_path": "checkout/webhooks.py", "file_name": "webhooks.py", "file_ext": "py", "file_size_in_byte": 1693, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.conf.settings.STRIPE_WEBHOOK_SECRET", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "stripe.api_key", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings.STRIPE_CLIENT_SECRET", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "stripe.Webhook.construct_event", "line_number": 24, "usage_type": "call"}, {"api_name": "stripe.Webhook", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "stripe.error", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 34, "usage_type": "call"}, {"api_name": "checkout.webhook_handler.StripeWebhookHandler", "line_number": 37, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 11, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "70675889", "text": "\"\"\"\nTrain model and eval model helpers.\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport sklearn\nfrom models.support_vector_machine import SupportVectorMachine\nimport random\nfrom sklearn.utils import shuffle\n\ndef train_model(data, model, learning_rate=0.001, batch_size=100,\n num_steps=100, shuffle=True):\n \"\"\"Implements the training loop of stochastic gradient descent.\n\n Performs stochastic gradient descent with the indicated batch_size.\n If shuffle is true:\n Shuffle data at every epoch, including the 0th epoch.\n If the number of example is not divisible by batch_size, the last batch\n will simply be the remaining examples.\n\n Args:\n data(dict): Data loaded from io_tools\n model(LinearModel): Initialized linear model.\n learning_rate(float): Learning rate of your choice\n batch_size(int): Batch size of your choise.\n num_steps(int): Number of steps to run the updated.\n shuffle(bool): Whether to shuffle data at every epoch.\n Returns:\n model(LinearModel): Returns a trained model.\n \"\"\"\n # Perform gradient descent.\n\n batch_epoch_num = data['label'].shape[0] // batch_size\n epochs = 1\n\n for i in range(epochs):\n if shuffle:\n data['image'],data['label'] = sklearn.utils.shuffle(data['image'],data['label'], random_state=0)\n print(i)\n for j in range(0,data['label'].shape[0],batch_size):\n image_batch = data['image'][j:(j+batch_size)]\n label_batch = data['label'][j:(j+batch_size)]\n print(j)\n 
for k in range(num_steps):\n update_step(image_batch, label_batch, model, learning_rate)\n return model\n\n\ndef update_step(image_batch, label_batch, model, learning_rate):\n \"\"\"Performs on single update step, (i.e. forward then backward).\n Args:\n image_batch(numpy.ndarray): input data of dimension (N, ndims).\n label_batch(numpy.ndarray): label data of dimension (N,).\n model(LinearModel): Initialized linear model.\n \"\"\"\n f = model.forward(image_batch)\n gradient = model.backward(f,label_batch)\n model.w = model.w - learning_rate*gradient\n\n\ndef eval_model(data, model):\n \"\"\"Performs evaluation on a dataset.\n Args:\n data(dict): Data loaded from io_tools.\n model(LinearModel): Initialized linear model.\n Returns:\n loss(float): model loss on data.\n acc(float): model accuracy on data.\n \"\"\"\n f = model.forward(data['image'])\n loss = model.loss(f,data['label'])\n\n y_predict = model.predict(f)\n\n count = 0\n for i in range(len(data['label'])):\n if data['label'][i] == y_predict[i]:\n count = count + 1\n\n acc = (count/len(data['label']))*100\n\n return loss, acc\n", "sub_path": "mp1/train_eval_model_svm.py", "file_name": "train_eval_model_svm.py", "file_ext": "py", "file_size_in_byte": 2784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sklearn.utils.shuffle", "line_number": 38, "usage_type": "name"}, {"api_name": "sklearn.utils.shuffle", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.utils", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "160920615", "text": "import os\nimport sys\n\nimport pytest\n\nimport feedwork.utils.System as sysu\n\n\ndef test_env():\n PATH = sysu.env(\"PATH\", str)\n assert \"/bin\" in PATH\n assert \"/usr/bin\" in PATH\n assert \"/usr/sbin\" in PATH\n\n HRS_NUMS = sysu.env(\"HRS_NUMS_XXX\", int)\n assert HRS_NUMS is None\n HRS_NUMS = sysu.env(\"HRS_NUMS_XXX\", int, 0)\n assert HRS_NUMS == 0\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-q\", os.path.basename(sys.argv[0])])\n", "sub_path": "test_suite/utils/System_test.py", "file_name": "System_test.py", "file_ext": "py", "file_size_in_byte": 443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "feedwork.utils.System.env", "line_number": 10, "usage_type": "call"}, {"api_name": "feedwork.utils.System", "line_number": 10, "usage_type": "name"}, {"api_name": "feedwork.utils.System.env", "line_number": 15, "usage_type": "call"}, {"api_name": "feedwork.utils.System", "line_number": 15, "usage_type": "name"}, {"api_name": "feedwork.utils.System.env", "line_number": 17, "usage_type": "call"}, {"api_name": "feedwork.utils.System", "line_number": 17, "usage_type": "name"}, {"api_name": "pytest.main", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "528370236", "text": "import pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Plotting')\nparser.add_argument('-bn', '--batch_norm', action='store_true')\nparser.add_argument('--save', action='store_true')\nparser.add_argument('-m','--mixed', action='store',\n choices=['lenet', 'vgg8', None], default=None)\nargs = parser.parse_args()\n\n\ndef format_text(m, sd):\n text = 
\"\"\n if str(m)[0] == '0':\n text += str(np.round(m, 2))[1:]\n else:\n text += str(np.round(m, 1))\n text += \"±\"\n if str(sd)[0] == '0':\n text += str(np.round(sd, 1))[1:]\n else:\n text += str(np.round(sd, 0))\n return text\n\neval_meths = ['train/accuracy@1', 'test/accuracy@1']\ndatasets = [\"cifar10\", \"cifar100\"]\n\nif not args.batch_norm and args.mixed is None:\n architectures = [\"lenet\", \"vgg8\"]\n net_types = [\"lrelu\", \"rn\", \"rrn\"]\n scores = dict(keys=net_types)\n rows = []\n for dataset in datasets:\n for eval_meth in eval_meths:\n row = []\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth][:60])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n col_ind = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n row_ind = pd.MultiIndex.from_product([datasets, eval_meths],\n names=[\"dataset\", \"eval\"])\n df = pd.DataFrame(rows, index=row_ind, columns=col_ind)\n if args.save:\n df.to_csv('cifar_all_nets.csv')\n print(df)\n\nif args.batch_norm:\n architectures = [\"lenet\", \"vgg8\", \"vgg11\"]\n net_types = [\"lrelu\", \"rn\", \"rrn\"]\n scores = dict(keys=net_types)\n rows = []\n indexes = []\n eval_meth = \"train/accuracy@1\"\n for dataset in [\"cifar10\", \"cifar100\"]:\n for suffix in ['', '_bn']:\n row = []\n indexes.append(dataset + suffix)\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier{suffix}.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n index = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n df = pd.DataFrame(rows, index=indexes, columns=index)\n\n print(df)\n exit()\n# import ipdb; ipdb.set_trace()\n\nif args.mixed is not None:\n rows = []\n indexes = []\n architectures = [args.mixed]\n if args.mixed == \"lenet\":\n net_types = [\"rn\", \"rrn\", \"r2rr\", \"rr2r\", \"rrr2\"]\n elif args.mixed == \"vgg8\":\n net_types = [\"rn\", \"rrn\", \"r2rrr\", \"rr2rr\", \"rrr2r\", \"rrrr2\",\n \"r2r2r\", \"rr2r2\", \"r2rr2\",\n \"r3rr\", \"rr3r\", \"rrr3\", \"r3r2\", \"r2r3\", \"r4r\", \"rr4\"]\n scores = dict(keys=net_types)\n for dataset in datasets:\n for eval_meth in eval_meths:\n row = []\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n index = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n row_ind = 
pd.MultiIndex.from_product([datasets, eval_meths],\n names=[\"dataset\", \"eval\"])\n df = pd.DataFrame(rows, index=row_ind, columns=index)\n if args.save:\n df.to_csv(f'cifar_{args.mixed}_selected_r.csv')\n print(f\"Saved in cifar_{args.mixed}_selected_r.csv\")\n print(df)\n", "sub_path": "cifar/scores_table.py", "file_name": "scores_table.py", "file_ext": "py", "file_size_in_byte": 5348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 24, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_product", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pandas.MultiIndex.from_product", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_product", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_product", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pandas.MultiIndex.from_product", "line_number": 130, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "516674206", "text": "from PyQt5 import uic\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtWidgets import *\nfrom PaymentWindow import PaymentWindow\nfrom DBHelper import DBHelper\n\nuserInterface = uic.loadUiType(\"gtk/ticketWindow.ui\")[0]\n\nclass TicketWindow(QDialog, userInterface):\n def __init__(self,userID,FirstName, LastName, parent=None):\n # Initialization help interface from QT to Python\n QWidget.__init__(self, parent)\n self.setupUi(self)\n self.UID = userID\n self.username = LastName+' '+FirstName\n \n self.label_username.setText(format(self.username))\n self.setFixedWidth(471)\n self.setFixedHeight(400)\n self.pushButton_saveTicketInfo.clicked.connect(self.paymentSaveWindow)\n self.pushButton_nextWindow.clicked.connect(self.nextWindow)\n\n def paymentSaveWindow(self):\n _age = self.comboBox_3.itemText(self.comboBox_3.currentIndex())\n _option = self.comboBox_4.currentIndex()\n 
self.label_totalTickets.setText(format(self.count_LE.text()))\n self._totaltickets = self.label_totalTickets.text()\n self._price = \"\"\n if (_option == 0):\n self._price = 50\n elif (_option == 1):\n self._price = 125\n else:\n self._price = 150\n\n if (_age == \"Less than 15\"):\n self.totalCost = ((self._price*50)/100)\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n elif (_age == \"greater than 20 and less than 40\"):\n self.totalCost = self._price\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n else: \n self.totalCost = ((self._price*75)/100)\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n \n def nextWindow(self):\n _totaltickets = self.label_totalTickets.text()\n _totalCost = self.label_totalCost.text()\n self.PaymentWindow = PaymentWindow(self.UID,self.username, _totaltickets, _totalCost)\n self.PaymentWindow.show()\n self.accept()\n\n", "sub_path": "UTMS_mysqlDB/TicketWindow.py", "file_name": "TicketWindow.py", "file_ext": "py", "file_size_in_byte": 2254, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "PyQt5.uic.loadUiType", "line_number": 8, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 8, "usage_type": "name"}, {"api_name": "PaymentWindow.PaymentWindow", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "44780237", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport re\nfrom rapidfuzz import fuzz\n\ndef repeat_numbers(s, n):\n return re.sub(r'(\\d+)', lambda m: m.group(1) * n, s)\n\ndef enhanced_similarity(s1, s2, repeat_count=5):\n s1_enhanced = repeat_numbers(s1, repeat_count)\n s2_enhanced = repeat_numbers(s2, repeat_count)\n\n return fuzz.token_sort_ratio(s1_enhanced, s2_enhanced)\n\n# Usage\ns1 = \"united states jomo williams\"\ns2 = \"united states jerome williams\"\nscore = enhanced_similarity(s1, s2)\n\nprint(score)\n\n\n# In[2]:\n\n\nS1 = \" john doe subscriber assigned ip address 76126173191 strike 3 holdings\"\nS2 = \" strike 3 holdings lls john doe subscriber assigned ip address 76126173191\" \n\n\n# In[3]:\n\n\nenhanced_similarity(S1,S2)\n\n\n# In[4]:\n\n\ndef get_ngrams(s, n=3):\n return {s[i:i+n] for i in range(len(s) - n + 1)}\n\ndef jaccard_similarity(set1, set2):\n return len(set1 & set2) / len(set1 | set2)\n\ndef sort_words(s, repeat_count=5):\n words = s.split()\n words = [word * repeat_count if word.isdigit() else word for word in words]\n words.sort()\n return ' '.join(words)\n\n\ndef enhanced_similarity_1(s1, s2, n=3):\n s1_sorted = sort_words(s1)\n s2_sorted = sort_words(s2)\n\n s1_ngrams = get_ngrams(s1_sorted, n)\n s2_ngrams = get_ngrams(s2_sorted, n)\n\n return jaccard_similarity(s1_ngrams, s2_ngrams)\n\n\n# In[5]:\n\n\nenhanced_similarity_1(S1,S2)\n\n\n# In[6]:\n\n\nenhanced_similarity_1(s1,s2)\n\n\n# In[14]:\n\n\nS3 = \" commissioner of social security administration kelly hilton lang\"\nS4 = \" commissioner of social security administration kelly hilton\"\n\n\n# In[15]:\n\n\nenhanced_similarity(S3, S4)\n\n\n# In[16]:\n\n\nenhanced_similarity_1(S3, S4)\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "RapidFuzz Application.py", "file_name": "RapidFuzz Application.py", "file_ext": "py", "file_size_in_byte": 1664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": 
"re.sub", "line_number": 11, "usage_type": "call"}, {"api_name": "rapidfuzz.fuzz.token_sort_ratio", "line_number": 17, "usage_type": "call"}, {"api_name": "rapidfuzz.fuzz", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "313913673", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/pbr/util.py\n# Compiled at: 2017-12-04 07:19:32\n\"\"\"The code in this module is mostly copy/pasted out of the distutils2 source\ncode, as recommended by Tarek Ziade. As such, it may be subject to some change\nas distutils2 development continues, and will have to be kept up to date.\n\nI didn't want to use it directly from distutils2 itself, since I do not want it\nto be an installation dependency for our packages yet--it is still too unstable\n(the latest version on PyPI doesn't even install).\n\"\"\"\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\nimport logging, os, re, sys, traceback\nfrom collections import defaultdict\nimport distutils.ccompiler, pkg_resources\nfrom distutils import log\nfrom distutils import errors\nfrom setuptools.command.egg_info import manifest_maker\nfrom setuptools import dist as st_dist\nfrom setuptools import extension\ntry:\n import ConfigParser as configparser\nexcept ImportError:\n import configparser\n\nfrom pbr import extra_files\nimport pbr.hooks\n_VERSION_SPEC_RE = re.compile('\\\\s*(.*?)\\\\s*\\\\((.*)\\\\)\\\\s*$')\nD1_D2_SETUP_ARGS = {'name': ('metadata', ), \n 'version': ('metadata', ), \n 'author': ('metadata', ), \n 'author_email': ('metadata', ), \n 'maintainer': ('metadata', ), \n 'maintainer_email': ('metadata', ), \n 'url': ('metadata', 'home_page'), \n 'description': ('metadata', 'summary'), \n 'keywords': ('metadata', ), \n 'long_description': ('metadata', 'description'), \n 'download_url': ('metadata', ), \n 'classifiers': ('metadata', 'classifier'), \n 'platforms': ('metadata', 'platform'), \n 'license': ('metadata', ), \n 'install_requires': ('metadata', 'requires_dist'), \n 'setup_requires': ('metadata', 'setup_requires_dist'), \n 'provides': ('metadata', 'provides_dist'), \n 'obsoletes': ('metadata', 'obsoletes_dist'), \n 'package_dir': ('files', 'packages_root'), \n 'packages': ('files', ), \n 'package_data': ('files', ), \n 'namespace_packages': ('files', ), \n 'data_files': ('files', ), \n 'scripts': ('files', ), \n 'py_modules': ('files', 'modules'), \n 'cmdclass': ('global', 'commands'), \n 'use_2to3': ('backwards_compat', 'use_2to3'), \n 'zip_safe': ('backwards_compat', 'zip_safe'), \n 'tests_require': ('backwards_compat', 'tests_require'), \n 'dependency_links': ('backwards_compat', ), \n 'include_package_data': ('backwards_compat', )}\nMULTI_FIELDS = ('classifiers', 'platforms', 'install_requires', 'provides', 'obsoletes',\n 'namespace_packages', 'packages', 'package_data', 'data_files', 'scripts',\n 'py_modules', 'dependency_links', 'setup_requires', 'tests_require',\n 'cmdclass')\nBOOL_FIELDS = ('use_2to3', 'zip_safe', 'include_package_data')\nCSV_FIELDS = ('keywords', )\n\ndef resolve_name(name):\n \"\"\"Resolve a name like ``module.object`` to an object and return it.\n\n Raise ImportError if the module or name is not found.\n \"\"\"\n parts = name.split('.')\n cursor = len(parts) - 1\n module_name = parts[:cursor]\n attr_name = parts[(-1)]\n while cursor > 0:\n try:\n ret = __import__(('.').join(module_name), fromlist=[attr_name])\n break\n except ImportError:\n if 
cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n attr_name = parts[cursor]\n ret = ''\n\n for part in parts[cursor:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise ImportError(name)\n\n return ret\n\n\ndef cfg_to_args(path='setup.cfg', script_args=()):\n \"\"\"Distutils2 to distutils1 compatibility util.\n\n This method uses an existing setup.cfg to generate a dictionary of\n keywords that can be used by distutils.core.setup(kwargs**).\n\n :param path:\n The setup.cfg path.\n :param script_args:\n List of commands setup.py was called with.\n :raises DistutilsFileError:\n When the setup.cfg file is not found.\n \"\"\"\n if sys.version_info >= (3, 2):\n parser = configparser.ConfigParser()\n else:\n parser = configparser.SafeConfigParser()\n if not os.path.exists(path):\n raise errors.DistutilsFileError(\"file '%s' does not exist\" % os.path.abspath(path))\n parser.read(path)\n config = {}\n for section in parser.sections():\n config[section] = dict()\n for k, value in parser.items(section):\n config[section][k.replace('-', '_')] = value\n\n setup_hooks = has_get_option(config, 'global', 'setup_hooks')\n package_dir = has_get_option(config, 'files', 'packages_root')\n if package_dir:\n package_dir = os.path.abspath(package_dir)\n sys.path.insert(0, package_dir)\n try:\n if setup_hooks:\n setup_hooks = [ hook for hook in split_multiline(setup_hooks) if hook != 'pbr.hooks.setup_hook'\n ]\n for hook in setup_hooks:\n hook_fn = resolve_name(hook)\n try:\n hook_fn(config)\n except SystemExit:\n log.error('setup hook %s terminated the installation')\n except:\n e = sys.exc_info()[1]\n log.error('setup hook %s raised exception: %s\\n' % (\n hook, e))\n log.error(traceback.format_exc())\n sys.exit(1)\n\n pbr.hooks.setup_hook(config)\n kwargs = setup_cfg_to_setup_kwargs(config, script_args)\n kwargs['include_package_data'] = True\n kwargs['zip_safe'] = False\n register_custom_compilers(config)\n ext_modules = get_extension_modules(config)\n if ext_modules:\n kwargs['ext_modules'] = ext_modules\n entry_points = get_entry_points(config)\n if entry_points:\n kwargs['entry_points'] = entry_points\n files_extra_files = has_get_option(config, 'files', 'extra_files')\n if files_extra_files:\n extra_files.set_extra_files(split_multiline(files_extra_files))\n finally:\n if package_dir:\n sys.path.pop(0)\n\n return kwargs\n\n\ndef setup_cfg_to_setup_kwargs(config, script_args=()):\n \"\"\"Processes the setup.cfg options and converts them to arguments accepted\n by setuptools' setup() function.\n \"\"\"\n kwargs = {}\n all_requirements = {}\n for arg in D1_D2_SETUP_ARGS:\n if len(D1_D2_SETUP_ARGS[arg]) == 2:\n section, option = D1_D2_SETUP_ARGS[arg]\n elif len(D1_D2_SETUP_ARGS[arg]) == 1:\n section = D1_D2_SETUP_ARGS[arg][0]\n option = arg\n in_cfg_value = has_get_option(config, section, option)\n if not in_cfg_value:\n if arg == 'long_description':\n in_cfg_value = has_get_option(config, section, 'description_file')\n if in_cfg_value:\n in_cfg_value = split_multiline(in_cfg_value)\n value = ''\n for filename in in_cfg_value:\n description_file = open(filename)\n try:\n value += description_file.read().strip() + '\\n\\n'\n finally:\n description_file.close()\n\n in_cfg_value = value\n else:\n continue\n if arg in CSV_FIELDS:\n in_cfg_value = split_csv(in_cfg_value)\n if arg in MULTI_FIELDS:\n in_cfg_value = split_multiline(in_cfg_value)\n elif arg in BOOL_FIELDS:\n if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'):\n in_cfg_value = True\n else:\n in_cfg_value = 
False\n if in_cfg_value:\n if arg in ('install_requires', 'tests_require'):\n in_cfg_value = [ _VERSION_SPEC_RE.sub('\\1\\2', pred) for pred in in_cfg_value\n ]\n if arg == 'install_requires':\n install_requires = []\n requirement_pattern = '(?P<package>[^;]*);?(?P<env_marker>[^#]*?)(?:\\s*#.*)?$'\n for requirement in in_cfg_value:\n m = re.match(requirement_pattern, requirement)\n requirement_package = m.group('package').strip()\n env_marker = m.group('env_marker').strip()\n install_requires.append((requirement_package, env_marker))\n\n all_requirements[''] = install_requires\n elif arg == 'package_dir':\n in_cfg_value = {'': in_cfg_value}\n elif arg in ('package_data', 'data_files'):\n data_files = {}\n firstline = True\n prev = None\n for line in in_cfg_value:\n if '=' in line:\n key, value = line.split('=', 1)\n key, value = key.strip(), value.strip()\n if key in data_files:\n prev = data_files[key]\n prev.extend(value.split())\n else:\n prev = data_files[key.strip()] = value.split()\n elif firstline:\n raise errors.DistutilsOptionError('malformed package_data first line %r (misses "=")' % line)\n else:\n prev.extend(line.strip().split())\n firstline = False\n\n if arg == 'data_files':\n data_files = data_files.items()\n in_cfg_value = data_files\n elif arg == 'cmdclass':\n cmdclass = {}\n dist = st_dist.Distribution()\n for cls_name in in_cfg_value:\n cls = resolve_name(cls_name)\n cmd = cls(dist)\n cmdclass[cmd.get_command_name()] = cls\n\n in_cfg_value = cmdclass\n kwargs[arg] = in_cfg_value\n\n if 'extras' in config:\n requirement_pattern = '(?P<package>[^:]*):?(?P<env_marker>[^#]*?)(?:\\s*#.*)?$'\n extras = config['extras']\n for extra in extras:\n extra_requirements = []\n requirements = split_multiline(extras[extra])\n for requirement in requirements:\n m = re.match(requirement_pattern, requirement)\n extras_value = m.group('package').strip()\n env_marker = m.group('env_marker')\n extra_requirements.append((extras_value, env_marker))\n\n all_requirements[extra] = extra_requirements\n\n extras_require = {}\n for req_group in all_requirements:\n for requirement, env_marker in all_requirements[req_group]:\n if env_marker:\n extras_key = '%s:(%s)' % (req_group, env_marker)\n if 'bdist_wheel' not in script_args:\n try:\n if pkg_resources.evaluate_marker('(%s)' % env_marker):\n extras_key = req_group\n except SyntaxError:\n log.error('Marker evaluation failed, see the following error. For more information see: http://docs.openstack.org/developer/pbr/compatibility.html#evaluate-marker')\n raise\n\n else:\n extras_key = req_group\n extras_require.setdefault(extras_key, []).append(requirement)\n\n kwargs['install_requires'] = extras_require.pop('', [])\n kwargs['extras_require'] = extras_require\n return kwargs\n\n\ndef register_custom_compilers(config):\n \"\"\"Handle custom compilers; this has no real equivalent in distutils, where\n additional compilers could only be added programmatically, so we have to\n hack it in somehow.\n \"\"\"\n compilers = has_get_option(config, 'global', 'compilers')\n if compilers:\n compilers = split_multiline(compilers)\n for compiler in compilers:\n compiler = resolve_name(compiler)\n if hasattr(compiler, 'name'):\n name = compiler.name\n else:\n name = compiler.__name__\n if hasattr(compiler, 'description'):\n desc = compiler.description\n else:\n desc = 'custom compiler %s' % name\n module_name = compiler.__module__\n cc = distutils.ccompiler.compiler_class\n cc[name] = (module_name, compiler.__name__, desc)\n sys.modules['distutils.' 
+ module_name] = sys.modules[module_name]\n\n\ndef get_extension_modules(config):\n \"\"\"Handle extension modules\"\"\"\n EXTENSION_FIELDS = ('sources', 'include_dirs', 'define_macros', 'undef_macros',\n 'library_dirs', 'libraries', 'runtime_library_dirs', 'extra_objects',\n 'extra_compile_args', 'extra_link_args', 'export_symbols',\n 'swig_opts', 'depends')\n ext_modules = []\n for section in config:\n if ':' in section:\n labels = section.split(':', 1)\n else:\n labels = section.split('=', 1)\n labels = [ l.strip() for l in labels ]\n if len(labels) == 2 and labels[0] == 'extension':\n ext_args = {}\n for field in EXTENSION_FIELDS:\n value = has_get_option(config, section, field)\n if not value:\n continue\n value = split_multiline(value)\n if field == 'define_macros':\n macros = []\n for macro in value:\n macro = macro.split('=', 1)\n if len(macro) == 1:\n macro = (\n macro[0].strip(), None)\n else:\n macro = (\n macro[0].strip(), macro[1].strip())\n macros.append(macro)\n\n value = macros\n ext_args[field] = value\n\n if ext_args:\n if 'name' not in ext_args:\n ext_args['name'] = labels[1]\n ext_modules.append(extension.Extension(ext_args.pop('name'), **ext_args))\n\n return ext_modules\n\n\ndef get_entry_points(config):\n \"\"\"Process the [entry_points] section of setup.cfg to handle setuptools\n entry points. This is, of course, not a standard feature of\n distutils2/packaging, but as there is not currently a standard alternative\n in packaging, we provide support for them.\n \"\"\"\n if 'entry_points' not in config:\n return {}\n return dict((option, split_multiline(value)) for option, value in config['entry_points'].items())\n\n\ndef has_get_option(config, section, option):\n if section in config and option in config[section]:\n return config[section][option]\n else:\n return False\n\n\ndef split_multiline(value):\n \"\"\"Special behaviour when we have a multi line options\"\"\"\n value = [ element for element in (line.strip() for line in value.split('\\n')) if element\n ]\n return value\n\n\ndef split_csv(value):\n \"\"\"Special behaviour when we have a comma separated options\"\"\"\n value = [ element for element in (chunk.strip() for chunk in value.split(',')) if element\n ]\n return value\n\n\nclass DefaultGetDict(defaultdict):\n \"\"\"Like defaultdict, but the get() method also sets and returns the default\n value.\n \"\"\"\n\n def get(self, key, default=None):\n if default is None:\n default = self.default_factory()\n return super(DefaultGetDict, self).setdefault(key, default)", "sub_path": "pycfiles/pbrlgs-3.1.4.linux-x86_64.tar/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 15732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "re.compile", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 117, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 118, "usage_type": "call"}, {"api_name": "configparser.SafeConfigParser", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "distutils.errors.DistutilsFileError", "line_number": 122, "usage_type": "call"}, {"api_name": "distutils.errors", "line_number": 122, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, 
"usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "distutils.log.error", "line_number": 144, "usage_type": "call"}, {"api_name": "distutils.log", "line_number": 144, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 146, "usage_type": "call"}, {"api_name": "distutils.log.error", "line_number": 147, "usage_type": "call"}, {"api_name": "distutils.log", "line_number": 147, "usage_type": "name"}, {"api_name": "distutils.log.error", "line_number": 149, "usage_type": "call"}, {"api_name": "distutils.log", "line_number": 149, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 149, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 150, "usage_type": "call"}, {"api_name": "pbr.hooks.setup_hook", "line_number": 152, "usage_type": "call"}, {"api_name": "pbr.hooks", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pbr.extra_files.set_extra_files", "line_number": 165, "usage_type": "call"}, {"api_name": "pbr.extra_files", "line_number": 165, "usage_type": "name"}, {"api_name": "sys.path.pop", "line_number": 168, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 219, "usage_type": "call"}, {"api_name": "distutils.errors.DistutilsOptionError", "line_number": 241, "usage_type": "call"}, {"api_name": "distutils.errors", "line_number": 241, "usage_type": "name"}, {"api_name": "setuptools.dist.Distribution", "line_number": 251, "usage_type": "call"}, {"api_name": "setuptools.dist", "line_number": 251, "usage_type": "name"}, {"api_name": "re.match", "line_number": 267, "usage_type": "call"}, {"api_name": "pkg_resources.evaluate_marker", "line_number": 281, "usage_type": "call"}, {"api_name": "distutils.log.error", "line_number": 284, "usage_type": "call"}, {"api_name": "distutils.log", "line_number": 284, "usage_type": "name"}, {"api_name": "distutils.ccompiler.ccompiler", "line_number": 315, "usage_type": "attribute"}, {"api_name": "distutils.ccompiler", "line_number": 315, "usage_type": "name"}, {"api_name": "sys.modules", "line_number": 317, "usage_type": "attribute"}, {"api_name": "setuptools.extension.Extension", "line_number": 358, "usage_type": "call"}, {"api_name": "setuptools.extension", "line_number": 358, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 395, "usage_type": "name"}]} +{"seq_id": "87395204", "text": "\"\"\"\nTools for hydrological regionalization\n\"\"\"\n\nimport logging\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport xarray as xr\nfrom ravenpy.models import get_model\n\nfrom . 
import coords\n\nLOGGER = logging.getLogger(\"PYWPS\")\n\n# Added directory for test data (smaller database with only 10 donor catchments)\nDATA_DIR = (\n Path(__file__).parent.parent.parent / \"tests\" / \"testdata\" / \"regionalisation_data\"\n)\n\n\ndef regionalize(\n method,\n model,\n nash,\n params=None,\n props=None,\n target_props=None,\n size=5,\n min_NSE=0.6,\n **kwds\n):\n \"\"\"Perform regionalization for catchment whose outlet is defined by coordinates.\n\n Parameters\n ----------\n method : {'MLR', 'SP', 'PS', 'SP_IDW', 'PS_IDW', 'SP_IDW_RA', 'PS_IDW_RA'}\n Name of the regionalization method to use.\n model : {'HMETS', 'GR4JCN', 'MOHYSE'}\n Model name.\n nash : pd.Series\n NSE values for the parameters of gauged catchments.\n params : pd.DataFrame\n Model parameters of gauged catchments. Needed for all but MLR method.\n props : pd.DataFrame\n Properties of gauged catchments to be analyzed for the regionalization. Needed for MLR and RA methods.\n target_props : pd.Series or dict\n Properties of ungauged catchment. Needed for MLR and RA methods.\n size : int\n Number of catchments to use in the regionalization.\n min_NSE : float\n Minimum calibration NSE value required to be considered as a donor.\n kwds : {}\n Model configuration parameters, including the forcing files (ts).\n\n Returns\n -------\n (qsim, ensemble)\n qsim : DataArray (time, )\n Multi-donor averaged predicted streamflow.\n ensemble : Dataset\n q_sim : DataArray (realization, time)\n Ensemble of members based on number of donors.\n parameter : DataArray (realization, param)\n Parameters used to run the model.\n \"\"\"\n # TODO: Include list of available properties in docstring.\n # TODO: Add error checking for source, target stuff wrt method chosen.\n\n # Select properties based on those available in the ungauged properties DataFrame.\n if isinstance(target_props, dict):\n ungauged_properties = pd.Series(target_props)\n elif isinstance(target_props, pd.Series):\n ungauged_properties = target_props\n elif isinstance(target_props, pd.DataFrame):\n ungauged_properties = target_props.to_series()\n else:\n raise ValueError\n\n cr = coords.realization(1 if method == \"MLR\" else size)\n cp = coords.param(model)\n\n # Filter on NSE\n valid = nash > min_NSE\n filtered_params = params.where(valid).dropna()\n filtered_prop = props.where(valid).dropna()\n\n # Check to see if we have enough data, otherwise raise error\n if len(filtered_prop) < size and method != \"MLR\":\n raise ValueError(\n \"Hydrological_model and minimum NSE threshold \\\n combination is too strict for the number of donor \\\n basins. 
Please reduce the number of donor basins OR \\\n reduce the minimum NSE threshold.\"\n )\n\n # Rank the matrix according to the similarity or distance.\n if method in [\"PS\", \"PS_IDW\", \"PS_IDW_RA\"]: # Physical similarity\n dist = similarity(filtered_prop, ungauged_properties)\n else: # Geographical distance.\n dist = distance(filtered_prop, ungauged_properties)\n\n # Series of distances for the first `size` best donors\n sdist = dist.sort_values().iloc[:size]\n\n # Pick the donors' model parameters and catchment properties\n sparams = filtered_params.loc[sdist.index]\n sprop = filtered_prop.loc[sdist.index]\n\n # Get the list of parameters to run\n reg_params = regionalization_params(\n method, sparams, sprop, ungauged_properties, filtered_params, filtered_prop\n )\n\n # Run the model over all parameters and create ensemble DataArray\n m = get_model(model)()\n qsims = []\n\n for params in reg_params:\n kwds[\"params\"] = params\n m(overwrite=True, **kwds)\n qsims.append(m.q_sim.copy(deep=True))\n\n qsims = xr.concat(qsims, dim=cr)\n\n # 3. Aggregate runs into a single result -> dataset\n if method in [\n \"MLR\",\n \"SP\",\n \"PS\",\n ]: # Average (one realization for MLR, so no effect).\n qsim = qsims.mean(dim=\"realization\", keep_attrs=True)\n elif (\n \"IDW\" in method\n ): # Here we are replacing the mean by the IDW average, keeping attributes and dimensions.\n qsim = IDW(qsims, sdist)\n else:\n raise ValueError(\"No matching algorithm for {}\".format(method))\n\n # Metadata handling\n # TODO: Store the basin_name\n\n # Create a DataArray for the parameters used in the regionalization\n param_da = xr.DataArray(\n reg_params,\n dims=(\"realization\", \"param\"),\n coords={\"param\": cp, \"realization\": cr},\n attrs={\"long_name\": \"Model parameters used in the regionalization.\"},\n )\n\n ens = xr.Dataset(\n data_vars={\"q_sim\": qsims, \"parameter\": param_da},\n attrs={\n \"title\": \"Regionalization ensemble\",\n \"institution\": \"\",\n \"source\": \"RAVEN V.{} - {}\".format(m.version, model),\n \"history\": \"Created by raven regionalize.\",\n \"references\": \"\",\n \"comment\": \"Regionalization method: {}\".format(method),\n },\n )\n\n # TODO: Add global attributes (model name, date, version, etc)\n return qsim, ens\n\n\ndef read_gauged_properties(properties):\n \"\"\"Return table of gauged catchment properties over North America.\n\n Returns\n -------\n pd.DataFrame\n Catchment properties keyed by catchment ID.\n \"\"\"\n proptable = pd.read_csv(\n DATA_DIR / \"gauged_catchment_properties.csv\", index_col=\"ID\"\n )\n\n return proptable[properties]\n\n\ndef read_gauged_params(model):\n \"\"\"Return table of Nash-Sutcliffe Efficiency values and model parameters for North American catchments.\n\n Returns\n -------\n pd.DataFrame\n Nash-Sutcliffe Efficiency keyed by catchment ID.\n pd.DataFrame\n Model parameters keyed by catchment ID.\n \"\"\"\n\n params = pd.read_csv(DATA_DIR / \"{}_parameters.csv\".format(model), index_col=\"ID\")\n\n return params[\"NASH\"], params.iloc[:, 1:]\n\n\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Return the great circle distance between two points on the earth.\n\n Parameters\n ----------\n lon1, lat1 : ndarray\n Longitude and latitude coordinates in decimal degrees.\n lon2, lat2 : ndarray\n Longitude and latitude coordinates in decimal degrees.\n\n Returns\n -------\n ndarray\n Distance between points 1 and 2 [km].\n\n \"\"\"\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - 
lat1\n\n a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * (\n np.sin(dlon / 2.0) ** 2\n )\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n return km\n\n\ndef distance(gauged, ungauged):\n \"\"\"Return geographic distance [km] between ungauged and database of gauged catchments.\n\n Parameters\n ----------\n gauged : pd.DataFrame\n Table containing columns for longitude and latitude of catchment's centroid.\n ungauged : pd.Series\n Coordinates of the ungauged catchment.\n\n \"\"\"\n lon, lat = ungauged.longitude, ungauged.latitude\n lons, lats = gauged.longitude, gauged.latitude\n\n return pd.Series(\n data=haversine(lons.values, lats.values, lon, lat), index=gauged.index\n )\n\n\ndef similarity(gauged, ungauged, kind=\"ptp\"):\n \"\"\"Return similarity measure between gauged and ungauged catchments.\n\n Parameters\n ----------\n gauged : DataFrame\n Gauged catchment properties.\n ungauged : DataFrame\n Ungauged catchment properties.\n kind : {'ptp', 'std', 'iqr'}\n Normalization method: peak to peak (maximum - minimum), standard deviation, interquartile range.\n\n \"\"\"\n\n stats = gauged.describe()\n\n if kind == \"ptp\":\n spread = stats.loc[\"max\"] - stats.loc[\"min\"]\n elif kind == \"std\":\n spread = stats.loc[\"std\"]\n elif kind == \"iqr\":\n spread = stats.loc[\"75%\"] - stats.loc[\"25%\"]\n\n d = ungauged.values - gauged.values\n n = np.abs(d) / spread.values\n return pd.Series(data=n.sum(axis=1), index=gauged.index)\n\n\ndef regionalization_params(\n method,\n gauged_params,\n gauged_properties,\n ungauged_properties,\n filtered_params,\n filtered_prop,\n):\n \"\"\"Return the model parameters to use for the regionalization.\n\n Parameters\n ----------\n method : {'MLR', 'SP', 'PS', 'SP_IDW', 'PS_IDW', 'SP_IDW_RA', 'PS_IDW_RA'}\n Name of the regionalization method to use.\n gauged_params\n DataFrame of parameters for donor catchments (size = number of donors)\n gauged_properties\n DataFrame of properties of the donor catchments (size = number of donors)\n ungauged_properties\n DataFrame of properties of the ungauged catchment (size = 1)\n filtered_params\n DataFrame of parameters of all filtered catchments (size = all catchments with NSE > min_NSE)\n filtered_prop\n DataFrame of properties of all filtered catchments (size = all catchments with NSE > min_NSE)\n\n Returns\n -------\n list\n List of model parameters to be used for the regionalization.\n \"\"\"\n\n if method == \"MLR\" or \"RA\" in method:\n mlr_params, r2 = multiple_linear_regression(\n filtered_prop, filtered_params, ungauged_properties.to_frame().T\n )\n\n if method == \"MLR\": # Return the multiple linear regression parameters.\n out = [\n mlr_params,\n ]\n\n elif \"RA\" in method:\n gp = gauged_params.copy()\n\n for p, r, col in zip(mlr_params, r2, gauged_params):\n # If we have an R2 > 0.5 then we consider this to be a better estimator\n\n if r > 0.5:\n gp[col] = p\n\n out = gp.values\n\n else:\n out = gauged_params.values\n\n return out\n\n\ndef IDW(qsims, dist):\n \"\"\"\n Inverse distance weighting.\n\n Parameters\n ----------\n qsims : DataArray\n Ensemble of hydrographs stacked along the `realization` dimension.\n dist : pd.Series\n Distance from the catchment which generated each hydrograph to the target catchment.\n\n Returns\n -------\n DataArray\n Inverse distance weighted average of ensemble.\n \"\"\"\n\n # In IDW, weights are 1 / distance\n weights = xr.DataArray(\n 1.0 / dist, dims=\"realization\", coords={\"realization\": qsims.realization}\n )\n\n # Make weights sum to one\n weights /= 
weights.sum(axis=0)\n\n # Calculate weighted average.\n out = qsims.dot(weights)\n out.name = qsims.name\n out.attrs = qsims.attrs\n return out\n\n\ndef multiple_linear_regression(source, params, target):\n \"\"\"\n Multiple Linear Regression for model parameters over catchment properties.\n\n Uses known catchment properties and model parameters to estimate model parameter over an\n ungauged catchment using its properties.\n\n Parameters\n ----------\n source : DataFrame\n Properties of gauged catchments.\n params : DataFrame\n Model parameters of gauged catchments.\n target : DataFrame\n Properties of the ungauged catchment.\n\n\n Returns\n -------\n (mrl_params, r2)\n A named tuple of the estimated model parameters and the R2 of the linear regression.\n \"\"\"\n # Add constants to the gauged predictors\n x = sm.add_constant(source)\n\n # Add the constant 1 for the ungauged catchment predictors\n predictors = sm.add_constant(target, prepend=True, has_constant=\"add\")\n\n # Perform regression for each parameter\n regression = [sm.OLS(params[param].values, x).fit() for param in params]\n\n # Perform prediction on each parameter based on the predictors\n mlr_parameters = [r.predict(exog=predictors)[0] for r in regression]\n\n # Extract the adjusted r_squared value for each parameter\n r2 = [r.rsquared_adj for r in regression]\n\n return mlr_parameters, r2\n", "sub_path": "ravenpy/utilities/regionalization.py", "file_name": "regionalization.py", "file_ext": "py", "file_size_in_byte": 12101, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "attribute"}, {"api_name": "ravenpy.models.get_model", "line_number": 118, "usage_type": "call"}, {"api_name": "xarray.concat", "line_number": 126, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 146, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 153, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 177, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.arcsin", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 226, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 274, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 275, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 354, "usage_type": "call"}, {"api_name": "statsmodels.api.add_constant", "line_number": 391, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 391, "usage_type": "name"}, {"api_name": "statsmodels.api.add_constant", "line_number": 394, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 394, "usage_type": "name"}, {"api_name": "statsmodels.api.OLS", "line_number": 
397, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 397, "usage_type": "name"}]} +{"seq_id": "90896885", "text": "# version: 0.7\n# description: may be buggy\n\n#USAGE in JAVASCRIPT && HTML:\n#let pr$my_parameter=12\n#let fn$my_function=()=>{}\n#(__pt$number__, __pt$name__, __pt$id__)=>{}\n\nimport sys\nimport re #REGEX\nimport io\nimport uuid\nfrom random import randint\n\ncounter=0\ndata={} # should be global to use the same MAPPED_NAME for all variables\n\nfor f in sys.argv[1:]:\n name=f.split(\".\")[0]\n ext=f.split(\".\")[1]\n\n r = io.open(f, 'r', encoding='utf8')\n Lines = r.readlines()\n r.close()\n\n w = io.open(name+\".obfuscated.\"+ext, 'w', encoding='utf8')\n\n X=[]\n for line in Lines:\n x=str(line)\n #x = re.sub(r'=( )*\\(\\)( )*=( )*>',r'=_=>', x.rstrip())#=()=> to =_=>\n\n try:\n found = re.search(r'((var|let|const)[ ]+)((fn\\$)\\w+?)( )*=( )*(async)?( )*\\((\\w*,*[ ]*_*\\$*)*\\)( )*=( )*>', x).group(3) #just Functions (USAGE) => fn$FUNC_NAME\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n try:\n found = re.search(r'((var|let|const)[ ]+)((pr\\$)\\w+?)( )*(=|;)', x).group(3) #just variables (USAGE) => pr$VAR_NAME\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n\n try:\n found = re.search(r'((__)(pr\\$)\\w+?(__))', x).group(1) #all other variables => __pr$NAME__\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n try:\n found = re.finditer(r'((___)\\w+?(___))', x) #all other variables => ___NAME___\n except AttributeError:\n found = None\n for fo in found:\n fo=fo.group(1)\n if fo!=None:\n print(fo)\n if fo not in data:\n data[fo]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n X.append(x)\n\n print(\"__________________________________________\")\n\n for k in sorted(data, key=len, reverse=True):\n print(k+\" --- \"+data[k])\n\n for x in X:\n for k in sorted(data, key=len, reverse=True):\n x=x.replace(k,data[k])\n if x!=None and x.strip()!=\"\":\n chf=\"ضصثقفغعهخحشسیبلاتنمکگظطزرذدئوپچج\"\n chu=\"ضصثقفغعهخحشسیبلاتنمکگظطزرذدئوپچج\"\n idx=0\n for c in chf:\n x=x.replace(c,chu.split(\";\")[idx])\n idx+=1\n w.writelines(x)\n\n w.close()\n", "sub_path": "nsg-tools/nsg-js-obfuscator.py", "file_name": "nsg-js-obfuscator.py", "file_ext": "py", "file_size_in_byte": 3237, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 22, "usage_type": "call"}, {"api_name": "io.open", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "re.search", "line_number": 45, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "re.search", "line_number": 57, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": 
"call"}, {"api_name": "re.finditer", "line_number": 68, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "613020716", "text": "# coding: utf-8\n\nimport tempfile\nimport os\nimport shutil\nimport time\nfrom nose.tools import assert_equals, assert_not_equals, with_setup\nimport common\nfrom waiting import wait\nfrom swagger_client.rest import ApiException\n\nfrom swagger_client.models.tx import Tx\nfrom swagger_client.models.spend_tx import SpendTx\nfrom swagger_client.models.contract_create_data import ContractCreateData\nfrom swagger_client.models.contract_call_data import ContractCallData\nfrom swagger_client.models.contract_call_input import ContractCallInput\n\nsettings = common.test_settings(__name__.split(\".\")[-1])\n\ndef test_contract_create():\n test_settings = settings[\"test_contract_create\"]\n (root_dir, node, external_api, top) = setup_node_with_tokens(test_settings, \"node\") \n internal_api = common.internal_api(node)\n\n send_tokens_to_user(\"alice\", test_settings, internal_api, external_api)\n\n encoded_tx = get_unsigned_contract_create(test_settings[\"alice\"][\"pubkey\"], test_settings[\"create_contract\"], external_api)\n\n print(\"Unsigned encoded transaction: \" + encoded_tx)\n unsigned_tx = common.base58_decode(encoded_tx)\n unpacked_tx = common.unpack_tx(unsigned_tx)\n tx = common.parse_tx(unpacked_tx)\n print(\"Unsigned decoded transaction: \" + str(tx))\n\n # make sure same tx\n assert_equals(tx['type'], 'contract_create')\n assert_equals(tx['owner'], common.base58_decode(test_settings[\"alice\"][\"pubkey\"]))\n assert_equals(tx['vm_version'], test_settings[\"create_contract\"][\"vm_version\"])\n assert_equals(tx['deposit'], test_settings[\"create_contract\"][\"deposit\"])\n assert_equals(tx['amount'], test_settings[\"create_contract\"][\"amount\"])\n assert_equals(tx['gas'], test_settings[\"create_contract\"][\"gas\"])\n assert_equals(tx['gas_price'], test_settings[\"create_contract\"][\"gas_price\"])\n assert_equals(tx['fee'], test_settings[\"create_contract\"][\"fee\"])\n\n code = bytearray.fromhex(test_settings[\"create_contract\"][\"code\"][2:]) # without 0x\n assert_equals(tx['code'], code)\n\n call_data = bytearray.fromhex(test_settings[\"create_contract\"][\"call_data\"][2:]) # without 0x\n assert_equals(tx['call_data'], call_data)\n\n signature = bytearray(list(map(int, test_settings[\"create_contract\"][\"signature\"].split(\",\"))))\n signed = common.encode_signed_tx(unpacked_tx, [signature]) \n print(\"Signed transaction \" + signed)\n\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n assert_equals(alice_balance0, alice_balance + test_settings[\"create_contract\"][\"fee\"])\n\n cleanup(node, root_dir)\n\ndef test_contract_call():\n test_settings = settings[\"test_contract_call\"]\n create_settings = settings[\"test_contract_create\"]\n (root_dir, node, external_api, top) = setup_node_with_tokens(test_settings, \"node\") \n internal_api = common.internal_api(node)\n\n send_tokens_to_user(\"alice\", test_settings, internal_api, external_api)\n\n ## create contract\n encoded_tx = get_unsigned_contract_create(test_settings[\"alice\"][\"pubkey\"], 
create_settings[\"create_contract\"], external_api)\n unsigned_tx = common.base58_decode(encoded_tx)\n unpacked_tx = common.unpack_tx(unsigned_tx)\n signature = bytearray(list(map(int, create_settings[\"create_contract\"][\"signature\"].split(\",\"))))\n signed = common.encode_signed_tx(unpacked_tx,[signature]) \n\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n # assert contract created:\n call_contract = test_settings[\"contract_call\"]\n assert_equals(alice_balance0, alice_balance + create_settings[\"create_contract\"][\"fee\"])\n\n call_input = ContractCallInput(\"ring\", create_settings[\"create_contract\"][\"code\"],\\\n call_contract[\"data\"][\"function\"],\\\n call_contract[\"data\"][\"argument\"])\n result = external_api.call_contract(call_input)\n contract_call_obj = ContractCallData(\n caller=test_settings[\"alice\"][\"pubkey\"],\n contract=call_contract[\"contract\"],\n vm_version=call_contract[\"vm_version\"],\n fee=call_contract[\"fee\"],\n amount=call_contract[\"amount\"],\n gas=call_contract[\"gas\"],\n gas_price=call_contract[\"gas_price\"],\n call_data=result.out)\n\n\n call_tx_obj = external_api.post_contract_call(contract_call_obj)\n encoded_call_tx = call_tx_obj.tx\n\n print(\"Unsigned encoded transaction: \" + encoded_call_tx)\n unsigned_call_tx = common.base58_decode(encoded_call_tx)\n unpacked_call_tx = common.unpack_tx(unsigned_call_tx)\n tx = common.parse_tx(unpacked_call_tx)\n print(\"Unsigned decoded transaction: \" + str(tx))\n\n signature = bytearray(list(map(int, test_settings[\"contract_call\"][\"signature\"].split(\",\"))))\n\n signed = common.encode_signed_tx(unpacked_call_tx,[signature]) \n\n print(\"Signed transaction: \" + signed)\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n print(\"BALANCE0 \" + str(alice_balance0))\n print(\"BALANCE \" + str(alice_balance))\n # assert contract created:\n assert_equals(alice_balance0, alice_balance + test_settings[\"contract_call\"][\"fee\"])\n\n\n\n cleanup(node, root_dir)\n\n\ndef cleanup(node, root_dir):\n common.stop_node(node)\n shutil.rmtree(root_dir)\n\ndef make_mining_config(root_dir, file_name):\n sys_config = os.path.join(root_dir, file_name)\n f = open(sys_config, \"w\")\n # if autostart is not true - there will be no miner\n conf ='[{aecore, [{autostart, true},' + \\\n ' {expected_mine_rate, 100},' + \\\n ' {aec_pow_cuckoo, {\"mean16s-generic\", \"-t 5\", 16}}]}].'\n f.write(conf)\n f.close()\n return sys_config\n\n\ndef setup_node_with_tokens(test_settings, node_name):\n # prepare a dir to hold the configs and the keys\n root_dir = tempfile.mkdtemp()\n\n # setup the dir with Alice's node mining\n node = test_settings[\"nodes\"][node_name]\n sys_config = make_mining_config(root_dir, \"sys.config\")\n common.start_node(node, sys_config)\n api = common.external_api(node)\n\n # populate the chain so Alice had mined some blocks and has tokens\n # to 
spend\n blocks_to_mine = test_settings[\"blocks_to_mine\"]\n common.wait_until_height(api, blocks_to_mine)\n top = api.get_top()\n assert_equals(top.height >= blocks_to_mine, True)\n # Now the node has at least blocks_to_mine blocks mined by Alice \n\n return (root_dir, node, api, top)\n\n\ndef send_tokens_to_user(user, test_settings, internal_api, external_api):\n spend_tx_obj = SpendTx(\n recipient_pubkey=test_settings[user][\"pubkey\"],\n amount=test_settings[user][\"amount\"],\n fee=test_settings[user][\"amount\"])\n\n # populate Alice's account\n internal_api.post_spend_tx(spend_tx_obj)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n\n balance_obj = common.get_account_balance(internal_api, pub_key=test_settings[user][\"pubkey\"])\n print(user.capitalize() + \"'s balance is now \" + str(balance_obj.balance))\n\ndef get_unsigned_contract_create(owner, contract, external_api):\n contract_create_data_obj = ContractCreateData(\n owner=owner,\n code=contract[\"code\"],\n vm_version=contract[\"vm_version\"],\n deposit=contract[\"deposit\"],\n amount=contract[\"amount\"],\n gas=contract[\"gas\"],\n gas_price=contract[\"gas_price\"],\n fee=contract[\"fee\"],\n call_data=contract[\"call_data\"])\n\n tx_obj = external_api.post_contract_create(contract_create_data_obj)\n return tx_obj.tx\n", "sub_path": "py/tests/integration/test_unsigned_tx.py", "file_name": "test_unsigned_tx.py", "file_ext": "py", "file_size_in_byte": 8430, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "common.test_settings", "line_number": 18, "usage_type": "call"}, {"api_name": "common.internal_api", "line_number": 23, "usage_type": "call"}, {"api_name": "common.base58_decode", "line_number": 30, "usage_type": "call"}, {"api_name": "common.unpack_tx", "line_number": 31, "usage_type": "call"}, {"api_name": "common.parse_tx", "line_number": 32, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 36, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 37, "usage_type": "call"}, {"api_name": "common.base58_decode", "line_number": 37, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 38, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 39, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 40, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 41, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 42, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 43, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 46, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 49, "usage_type": "call"}, {"api_name": "common.encode_signed_tx", "line_number": 52, "usage_type": "call"}, {"api_name": "common.get_account_balance", "line_number": 55, "usage_type": "call"}, {"api_name": "swagger_client.models.tx.Tx", "line_number": 56, "usage_type": "call"}, {"api_name": "common.wait_until_height", "line_number": 60, "usage_type": "call"}, {"api_name": "common.get_account_balance", "line_number": 61, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 63, "usage_type": "call"}, {"api_name": "common.internal_api", "line_number": 71, "usage_type": "call"}, {"api_name": "common.base58_decode", "line_number": 77, "usage_type": "call"}, 
{"api_name": "common.unpack_tx", "line_number": 78, "usage_type": "call"}, {"api_name": "common.encode_signed_tx", "line_number": 80, "usage_type": "call"}, {"api_name": "common.get_account_balance", "line_number": 82, "usage_type": "call"}, {"api_name": "swagger_client.models.tx.Tx", "line_number": 83, "usage_type": "call"}, {"api_name": "common.wait_until_height", "line_number": 87, "usage_type": "call"}, {"api_name": "common.get_account_balance", "line_number": 88, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 92, "usage_type": "call"}, {"api_name": "swagger_client.models.contract_call_input.ContractCallInput", "line_number": 94, "usage_type": "call"}, {"api_name": "swagger_client.models.contract_call_data.ContractCallData", "line_number": 98, "usage_type": "call"}, {"api_name": "common.base58_decode", "line_number": 113, "usage_type": "call"}, {"api_name": "common.unpack_tx", "line_number": 114, "usage_type": "call"}, {"api_name": "common.parse_tx", "line_number": 115, "usage_type": "call"}, {"api_name": "common.encode_signed_tx", "line_number": 120, "usage_type": "call"}, {"api_name": "common.get_account_balance", "line_number": 123, "usage_type": "call"}, {"api_name": "swagger_client.models.tx.Tx", "line_number": 124, "usage_type": "call"}, {"api_name": "common.wait_until_height", "line_number": 128, "usage_type": "call"}, {"api_name": "common.get_account_balance", "line_number": 129, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 134, "usage_type": "call"}, {"api_name": "common.stop_node", "line_number": 142, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 159, "usage_type": "call"}, {"api_name": "common.start_node", "line_number": 164, "usage_type": "call"}, {"api_name": "common.external_api", "line_number": 165, "usage_type": "call"}, {"api_name": "common.wait_until_height", "line_number": 170, "usage_type": "call"}, {"api_name": "nose.tools.assert_equals", "line_number": 172, "usage_type": "call"}, {"api_name": "swagger_client.models.spend_tx.SpendTx", "line_number": 179, "usage_type": "call"}, {"api_name": "common.wait_until_height", "line_number": 188, "usage_type": "call"}, {"api_name": "common.get_account_balance", "line_number": 190, "usage_type": "call"}, {"api_name": "swagger_client.models.contract_create_data.ContractCreateData", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "293817247", "text": "import ast\nimport base64\nimport os\nimport shutil\nimport sys\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nimport requests\nimport configs.config as config\n\nid_user = -1\nepochs = 100\n\n\nclass CustomCallback(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n if (epoch + 1) % 5 == 0:\n _ = requests.post(config.api_set_percent + str(100 * (epoch + 1) / epochs) + \"/\" + str(id_user))\n\n\ndef train(user_id):\n global id_user\n id_user = user_id\n main_dir = \"/Users/lashchenov/university/ТРКПО Маслаков/app_access_with_Face_Recognition/neural_network\"\n train_data_dir = os.path.join(main_dir, \"images\", f'{user_id}', 
\"train\")\n validation_data_dir = os.path.join(main_dir, \"images\", f'{user_id}', \"test\")\n nb_train_samples = 160\n nb_validation_samples = 40\n img_width, img_height = 128, 128\n\n batch_size = 160\n num_classes = 1 # username and not_username\n\n if K.image_data_format() == \"channels_first\":\n input_shape = (1, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 1)\n\n model = Sequential()\n\n model.add(Conv2D(32, (3, 3), activation='relu', padding='same',\n input_shape=input_shape))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(4, 4)))\n model.add(Dropout(0.5))\n\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(4, 4)))\n model.add(Dropout(0.5))\n\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(3, 3)))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(num_classes, activation='sigmoid'))\n\n history = model.compile(loss=\"mse\",\n optimizer=\"adam\",\n metrics=[\"acc\"])\n\n model.summary()\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=False)\n\n # this is the augmentation configuration we will use for testing:\n # Rescale\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size, color_mode='grayscale',\n class_mode='categorical')\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size, color_mode='grayscale',\n class_mode='categorical')\n\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_train_samples // batch_size,\n callbacks=[CustomCallback()])\n\n model.save(os.path.join(main_dir, \"models\", user_id, \"model_face.h5\"))\n", "sub_path": "neural_network/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "keras.callbacks", "line_number": 19, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 22, "usage_type": "call"}, {"api_name": "configs.config.api_set_percent", "line_number": 22, "usage_type": "attribute"}, {"api_name": "configs.config", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "keras.backend.image_data_format", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 38, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": 
"keras.layers.Conv2D", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 77, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}]} +{"seq_id": "539114518", "text": "__author__ = 'pradyumnad'\n\nimport cv2\nimport numpy as np\nimport itertools\n\nimg = cv2.imread(\"Flat1.jpg\")\n\ndetector = cv2.FeatureDetector_create(\"SIFT\")\ndescriptor = cv2.DescriptorExtractor_create(\"SIFT\")\n\nskp = detector.detect(img)\nskp, sd = descriptor.compute(img, skp)\n\nprint(skp.count)\n\nprint(sd.size)\n", "sub_path": "iHear-Py/ocv.py", "file_name": "ocv.py", "file_ext": "py", "file_size_in_byte": 305, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.FeatureDetector_create", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.DescriptorExtractor_create", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "622798425", "text": "# Original Author: Keenan\n# Author: Habib Sabiu\n# Date: August 24, 2017\n#\n# Description: A Spark application to register drone images. Images should be in\n# in a group of 5 chennels. 
For example, IMG_OOO1 group should have\n# 5 images representing various channels, e.g. IMG_OOO1_1.png to IMG_OOO1_5.png.\n# The output is a set of 5 registered images for each input group, and RGB of the\n# location, cropped version of the RGB, and an NDVI.\n#\n# Usage: spark-submit --master [spark master] [file name] [input path] [output_path] [job name]\n# [spark master] = Can be Spark's Standalone, Mesos, or YARN\n# To run on:-\n# Standalone: spark://discus-p2irc-master:7077\n# Mesos: mesos://discus-p2irc-master:5050\n# YARN: yarn\n# [file name] = Full path to the python script (../imageRegistration.py)\n# [input_path] = Full HDFS path to input images\n# [output_path] = A network directory such as NFS3 that is accessible on all the worker nodes\n# [job_name] = A nice name for the job. This will be displayed on the web UI\n#\n# Example usage: spark-submit --master spark://discus-p2irc-master:7077 imageRegistration.py \\\n# hdfs://discus-p2irc-master:54310/user/hduser/habib/drone_images_png/ \\\n# /data/mounted_hdfs_path/user/hduser/habib/registered_images_output/ imageRegistration\n\n\nimport os\nimport cv2\nimport sys\nimport math\nimport string\nimport random\nimport pyspark\nimport os.path\nimport warnings\nimport argparse\nimport numpy as np\nimport skimage.io as io\n\nfrom time import time\nfrom operator import add\nfrom io import StringIO, BytesIO\nfrom skimage import img_as_ubyte\nfrom pyspark import SparkContext\nfrom PIL import Image, ImageFile\nfrom matplotlib import pyplot as plt\n\n\n# Set numpy array to print all its values instead of 3 dots in the middle\n#np.set_printoptions(threshold=np.nan)\n\n# Ignore all user warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Ignore divide by zero warning\nnp.seterr(divide='ignore', invalid='ignore')\n\n\ndef find_keypoints_and_features(image):\n\n # Check that image is not invalid\n if image is None:\n raise TypeError(\"Invalid image in find_keypoints_and_features\")\n\n descriptor = cv2.xfeatures2d.SIFT_create(nfeatures=100000)\n\n # if this fails, it means no similarities could be found between the two images\n (key_points, features) = descriptor.detectAndCompute(image, None)\n\n # IF YOU HAVE CV2 VERSION 2 USE THIS STUFF, INSTEAD OF THE ABOVE TWO LINES\n # turn the image into greyscale to work with\n\n #grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n #detector = cv2.FeatureDetector_create(\"SURF\")\n #key_points = detector.detect(grey)\n #extractor = cv2.DescriptorExtractor_create(\"SURF\")\n #(key_points, features) = extractor.compute(grey, key_points)\n\n # Convert key_points from KeyPoint objects to numpy arrays\n key_points = np.float32([key_point.pt for key_point in key_points])\n return (key_points, features)\n\ndef match_key_points(right_key_points, left_key_points, right_features, left_features, ratio, reproj_thresh):\n\n # A cv2 class that matches keypoint descriptors\n # FLANN is a much faster method for large datasets, so it may be a good\n # idea to switch to that. However it is a very different code set up\n # 
However it is a very different code set up\n # that uses a couple dictionaries, so there's a bit that'll have to\n # change\n matcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n # knnMatch makes a whole bunch of matches (as a DMatch class)\n # The k stands for how large the tuple will be (because that's\n # basically what DMatches are)\n # i picked two because straight lines\n raw_matches = matcher.knnMatch(right_features, left_features, 2)\n\n # Turns the raw_matches into tuples we can work with, while also\n # filtering out matches that occurred on the outside edges of the\n # pictures where matches really shouldn't have occurred\n # Is equivalent to the following for loop\n # matches = []\n # for m in raw_matches:\n # if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n # matches.append((m[0].trainIdx, m[0].queryIdx))\n matches = [(m[0].trainIdx, m[0].queryIdx) for m in raw_matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]\n\n # Converts the tuples into a numpy array (for working with the\n # homograph), while also splitting up the right and left points\n # We are making a homograph of the matches to apply a ratio test, and\n # determine which of the matches are of a high quality. Typical ratio\n # values are between 0.7 and 0.8\n # Computing a homography requires at least 4 matches\n if len(matches) > 4:\n # Split right and left into numphy arrays\n src_pts = np.float32([right_key_points[i] for (_, i) in matches])\n dst_pts = np.float32([left_key_points[i] for (i, _) in matches])\n\n # Use the cv2 to actually connect the dots between the two pictures\n (H, status) = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, reproj_thresh)\n\n src_t = np.transpose(src_pts)\n dst_t = np.transpose(dst_pts)\n back_proj_error = 0\n inlier_count = 0\n\n for i in range(0, src_t.shape[1]):\n x_i = src_t[0][i]\n y_i = src_t[1][i]\n x_p = dst_t[0][i]\n y_p = dst_t[1][i]\n num1 = (H[0][0] * x_i + H[0][1] * y_i + H[0][2])\n num2 = (H[1][0] * x_i + H[1][1] * y_i + H[1][2])\n dnm = (H[2][0] * x_i + H[2][1] * y_i + H[2][2])\n\n tmp = (x_p - (num1 / dnm))**2 + (y_p - (num2 / dnm))**2\n if status[i] == 1:\n back_proj_error += tmp\n inlier_count += 1\n\n return (matches, H, status, back_proj_error, inlier_count)\n else:\n return None\n\ndef register_channels(C, idx=0, ratio=.75, reproj_thresh=4):\n\n # Check that the images in C are good images and not empty\n if C is None:\n raise TypeError(\"Invalid image set in register_channels\")\n for i in C:\n if len(i.shape) > 2:\n raise TypeError(\"Images have greater depth than 1!\")\n\n # Compute SIFT features for each channel.\n # Channel images are converted to unsigned byte. 
All proper scaling\n # is done by image_as_ubyte regardless of dtype of the input images.\n keypoints_and_features = [find_keypoints_and_features(img_as_ubyte(chan)) for chan in C]\n\n # Generate list of indices excluding the target channel index.\n channels_to_register = list(range(len(C)))\n del channels_to_register[idx]\n\n # Generate keypoint matches between each channel to be registered\n # and the target image.\n matched_key_points = [match_key_points(keypoints_and_features[i][0], keypoints_and_features[idx][0], keypoints_and_features[i][1],\n keypoints_and_features[idx][1], ratio=ratio, reproj_thresh=reproj_thresh) for i in channels_to_register]\n\n # extract the homography matrices from 'matched_key_points'.\n H = [x[1] for x in matched_key_points]\n BPError = [x[3] for x in matched_key_points]\n Inliers = [x[4] for x in matched_key_points]\n # Add the identity matrix for the target channel.\n H.insert(idx, np.identity(3))\n return H, BPError, Inliers\n\ndef warp_image(I, H):\n return cv2.warpPerspective(I, H, (I.shape[1], I.shape[0]))\n\ndef transform_channels(C, H):\n return [warp_image(C[i], H[i]) for i in range(len(C))]\n\ndef decompose_homography(H):\n\n if H is None:\n raise TypeError(\"Invalid homogrpahy input in decompose_homogrphy\")\n if H.shape != (3, 3):\n raise TypeError(\"Invalid homogrpahy shape in decompose_homogrphy\")\n\n a = H[0, 0]\n b = H[0, 1]\n c = H[0, 2]\n d = H[1, 0]\n e = H[1, 1]\n f = H[1, 2]\n\n p = math.sqrt(a * a + b * b)\n r = (a * e - b * d) / (p)\n q = (a * d + b * e) / (a * e - b * d)\n\n translation = (c, f)\n scale = (p, r)\n shear = q\n theta = math.atan2(b, a)\n\n return (translation, theta, scale, shear)\n\ndef register_group(images_group):\n\n images_key = images_group[0]\n images_values = images_group[1]\n images_values = sorted(zip(images_values[0::2], images_values[1::2]))\n\n keys = [x[0] for x in images_values]\n values = [x[1] for x in images_values]\n\n # Get the images and store them in an array, then calculate their homographies and transform the images.\n # H, Back-proj-error and the inlier points are all calculated\n C = np.array(values, dtype=float) / 65535\n\n H, BPError, Inliers = register_channels(C)\n # Add a 0 to the start of the list of back projection errors, since the\n # first image always has a BPError of 0 (This is for later where we need to print the BPErrors)\n\n BPError.insert(0, 0)\n T = transform_channels(C, H)\n\n # Decompose the homogrpahy and calculate the bounding box of the good data, where all 5 channels are present\n max_x = []\n max_y = []\n max_theta = []\n\n for j in H:\n max_x.append(abs(decompose_homography(j)[0][0]))\n max_y.append(abs(decompose_homography(j)[0][1]))\n max_theta.append(abs(decompose_homography(j)[1]))\n\n rot = math.ceil(math.sin(max(max_theta)) * C[0].shape[1])\n crop_x = math.ceil(max(max_x))\n crop_y = math.ceil(max(max_y))\n\n border_x = (crop_x + rot, C[0].shape[1] - crop_x - rot)\n border_y = (crop_y + rot, C[0].shape[0] - crop_y - rot)\n\n # Loop through each subset of images and re-save them now that they are registered\n for j in range(len(T)):\n\n output_image_path = os.path.abspath(os.path.join(OUTPUT_FILE_PATH, \"IMG_\" + images_key + \"_\" + str(j + 1) + OUTPUT_FILE_TYPE))\n\n # Different ways to save the numpy array as image\n #io.imsave(output_image_path, T[j])\n\n # Here the array is first converted into a cv2 image and then saved\n cv_image = np.array(T[j]*255)\n cv2.imwrite(output_image_path, cv_image)\n\n # Here the array is first converted into a PIL image and then 
saved\n #im = Image.fromarray(T[j])\n #im.save(output_image_path)\n\n # Create and save the RGB image\n rgb = np.dstack([T[2], T[1], T[0]])\n output_rgb_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_RGB\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_rgb_path, rgb)\n\n cv_image = np.array(rgb*255)\n cv2.imwrite(output_rgb_path, cv_image)\n\n #im = Image.fromarray(rgb)\n #im.save(output_rgb_path)\n\n # Crop images\n crop_img = np.dstack([T[2], T[1], T[0]])\n crop_img = crop_img[int(border_y[0]):int(border_y[1]), int(border_x[0]):int(border_x[1])]\n output_crop_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_RGB_CROPPED\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_crop_path, crop_img)\n\n cv_image = np.array(crop_img*255)\n cv2.imwrite(output_crop_path, cv_image)\n\n #im = Image.fromarray(crop_img)\n #im.save(output_crop_path)\n\n # Create and save the NDVI image\n num = np.subtract(T[3], T[2])\n dnm = np.add(T[3], T[2])\n\n ndvi_img = np.divide(num, dnm)\n\n original_ndvi = ndvi_img\n\n output_ndvi_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_NDVI\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_ndvi_path, original_ndvi)\n\n cv_image = np.array(original_ndvi*255)\n cv2.imwrite(output_ndvi_path, cv_image)\n\n #im = Image.fromarray(original_ndvi)\n #im.save(output_ndvi_path)\n\ndef read_images(image_rawdata):\n #return image_rawdata[0], np.array(io.imread((StringIO(image_rawdata[1])), as_grey=True) / 65535)\n return image_rawdata[0], np.array(io.imread(BytesIO(image_rawdata[1]), as_grey=True))\n\n\nif __name__ == \"__main__\":\n\n application_start_time = time()\n\n input_path = sys.argv[1]\n output_root_path = sys.argv[2]\n job_name = sys.argv[3]\n \n OUTPUT_FILE_TYPE = \".png\"\n # Directory to store registered images\n OUTPUT_FILE_PATH = output_root_path\n # Directory to store processed registered images\n OUTPUT_PROCESSED_PATH = output_root_path + \"/processed/\"\n \n # Set spark configurations\n sc = SparkContext(appName = job_name)\n\n reading_start_time = time()\n\n # When reading from local file system\n #images_rdd = sc.binaryFiles('file:///sparkdata/registration_images')\n \n # When reading from HDFS\n images_rdd = sc.binaryFiles(input_path)\n \n # Calculate the index to use for getting images group\n index = images_rdd.first()[0].find(\"IMG_\")+4\n\n images_group_rdd = images_rdd.map(read_images) \\\n .map(lambda rawdata: (rawdata[0][index:rawdata[0].rfind('_')], (rawdata[0][index:], rawdata[1]))) \\\n .reduceByKey(lambda first_image, second_image: (first_image + second_image))\n\n reading_end_time = time() - reading_start_time\n\n processing_start_time = time()\n\n images_group_rdd.foreach(register_group)\n\n processing_end_time = time() - processing_start_time\n\n application_end_time = time() - application_start_time\n \n sc.stop()\n \n print(\"------------------------------------------------\")\n print(\"SUCCESS: Images read from HDFS in {} seconds\".format(round(reading_end_time, 3)))\n print(\"SUCCESS: Images processed in {} seconds\".format(round(processing_end_time, 3)))\n print(\"SUCCESS: Total time spent = {} seconds\".format(round(application_end_time, 3)))\n print(\"------------------------------------------------\")\n", "sub_path": "imageRegistration.py", "file_name": "imageRegistration.py", "file_ext": "py", "file_size_in_byte": 13419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": 
"warnings.filterwarnings", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.seterr", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d.SIFT_create", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.DescriptorMatcher_create", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.findHomography", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.RANSAC", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 122, "usage_type": "call"}, {"api_name": "skimage.img_as_ubyte", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 176, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 195, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 217, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 236, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 236, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 237, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path", "line_number": 261, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path", "line_number": 274, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 278, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 296, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 304, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 
304, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 304, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 304, "usage_type": "call"}, {"api_name": "time.time", "line_number": 309, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 311, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 312, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 313, "usage_type": "attribute"}, {"api_name": "pyspark.SparkContext", "line_number": 322, "usage_type": "call"}, {"api_name": "time.time", "line_number": 324, "usage_type": "call"}, {"api_name": "time.time", "line_number": 339, "usage_type": "call"}, {"api_name": "time.time", "line_number": 341, "usage_type": "call"}, {"api_name": "time.time", "line_number": 345, "usage_type": "call"}, {"api_name": "time.time", "line_number": 347, "usage_type": "call"}]} +{"seq_id": "38366293", "text": "import argparse\nimport os\nimport subprocess\nimport sys\nimport logging\nimport tempfile\n\n\ndef get_device_space_kb(tmp_path):\n space = subprocess.Popen(\n \"/bin/df --output=avail %s | tail -1\" % tmp_path, shell=True,\n stdout=subprocess.PIPE)\n return float(space.communicate()[0])\n\n\ndef kb_size(file_path):\n return float(os.path.getsize(file_path)) / 1024\n\n\ndef shrink(input_path, tmp_path=tempfile.gettempdir(), new_output_size=0):\n\n logging.basicConfig(stream=sys.stdout)\n log = logging.getLogger()\n log.setLevel(logging.INFO)\n\n def fmt_size(size_kb):\n MB = 1024.0\n GB = MB * MB\n if size_kb > GB:\n return \"%.2f GB\" % (float(size_kb) / GB)\n elif size_kb > MB:\n return \"%.2f MB\" % (float(size_kb) / MB)\n elif size_kb > 1:\n return \"%.2f kb\" % (size_kb)\n\n def createhd(output_path, intermediate, output_size):\n command = \"vboxmanage createhd --filename %s --format %s --size %s\" % (\n output_path, intermediate, output_size)\n log.info(command)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def clonehd(source_file, dest_file, fmt):\n command = \"vboxmanage clonehd %s %s --existing\" % (\n source_file, dest_file)\n log.info(\"Executing %s\" % command)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def compact(target):\n command = \"vboxmanage modifyhd --compact %s\" % (output_path)\n log.info(\"Executing %s\" % command)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def delete_hdd(target):\n command = \"vboxmanage closemedium disk \\\"%s\\\" --delete\" % (target)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def get_hdinfo(target):\n command = \"vboxmanage showhdinfo \\\"%s\\\"\" % (target)\n proc_out = subprocess.check_output(command, shell=True)\n\n lines = proc_out.splitlines()\n hd_info_vals = dict()\n for line in lines:\n (term, val) = line.split(\":\", 1)\n hd_info_vals[term] = str.strip(val)\n return hd_info_vals\n\n if not os.access(input_path, os.F_OK):\n raise IOError(\"Unable to read file %s \" % input_path)\n\n input_filename = os.path.basename(input_path)\n intermediate = \"VDI\"\n output_filename = input_filename[\n :input_filename.rindex(\".\") + 1] + intermediate.lower()\n\n required = kb_size(input_path)\n if not tmp_path.endswith(os.path.sep):\n output_path = os.path.join(tmp_path + os.path.sep, output_filename)\n else:\n output_path = os.path.join(tmp_path, output_filename)\n\n input_info = get_hdinfo(input_path)\n if not new_output_size:\n new_output_size = input_info[\"Capacity\"].split(\" \")[0]\n\n log.info(\"Information about source: %s\" % 
input_info)\n\n def check_overwrite(output_path):\n\n if os.path.isfile(output_path):\n log.warn(\"Output file exists %s\" % output_path)\n avail = kb_size(output_path)\n val = raw_input(\n \"The file exists do you wish to overwrite? [yes/no]\")\n if val.lower() == \"yes\":\n log.warn(\"Deleting file %s\" % output_path)\n os.delete_hdd(output_path)\n return not os.path.exists(output_path)\n elif val.lower() == \"no\":\n return\n else:\n print (\"invalid input\")\n\n log.info(\"Intermediate Output Path: %s\" % output_path)\n\n def print_space(source, dest):\n required = kb_size(source)\n avail = get_device_space_kb(os.path.dirname(dest))\n if os.path.isfile(dest):\n avail = avail + kb_size(dest)\n\n msg = \"Uncompressed image is %s, directory (%s) has %s available\" % (\n fmt_size(required), tmp_path, fmt_size(avail))\n\n if avail < required:\n log.error(msg)\n else:\n log.warn(msg)\n\n if not os.path.exists(output_path):\n if new_output_size:\n createhd(output_path, intermediate, new_output_size)\n\n log.info(\"Cloning disk\")\n clonehd(input_path, output_path, intermediate)\n log.info(\"Intermediate file %s is newer, will compact this\", output_path)\n\n if os.path.exists(output_path):\n compact(output_path)\n\n output_info = get_hdinfo(output_path)\n log.info(\"information about dest after to compact: %s\", output_info)\n\n\ndef main():\n main_parser = argparse.ArgumentParser()\n #main_parser.add_argument(\"virtual_disk_path\", help=\"The path of the virtual disk\")\n # main_parser.add_argument(\"-t\", \"--temp\", dest=\"tmp_path\",\n # help=\"The path of the temporary directory to use for intermediate files\", default=\"/tmp/\")\n #main_parser.add_argument(\"--disksize\", dest=\"disksize\", default=\"\")\n main_parser.add_argument(\"input_path\")\n main_parser.add_argument(\n \"-d\", dest=\"tmp_path\", default=tempfile.gettempdir())\n main_parser.add_argument(\"-s\", dest=\"new_output_size\")\n args = (main_parser.parse_args())\n print (args)\n shrink(input_path=args.input_path, tmp_path=args.tmp_path,\n new_output_size=args.new_output_size)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "shrink.py", "file_name": "shrink.py", "file_ext": "py", "file_size_in_byte": 5263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "subprocess.Popen", "line_number": 10, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 24, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 40, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 47, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 53, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 58, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 63, "usage_type": "call"}, {"api_name": "os.access", "line_number": 72, "usage_type": "call"}, {"api_name": "os.F_OK", "line_number": 72, "usage_type": 
"attribute"}, {"api_name": "os.path.basename", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.delete_hdd", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 140, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "429277978", "text": "import elasticsearch\nimport configparser\n\n# Configuration\nconfig = configparser.RawConfigParser()\nconfig.read(\"config.conf\")\n\n# Setup Elasticsearch\nes_host = config.get(\"Elastic\", \"es_host\")\nelastic = elasticsearch.Elasticsearch([es_host])\n\n\ndef get_params():\n index_string = 'social_profile'\n\n params = {\n 'index': index_string,\n 'type': 'tweet',\n 'timeout': 300,\n }\n\n return params\n\n\ndef run_query(query):\n # This is an example query\n # query = {\n # \"from\": 0,\n # \"size\": 1000,\n # \"sort\": [\n # {\"@timestamp\": {\"order\": \"desc\"}}\n #\n # ],\n # \"query\": {\n # \"bool\": {\n # \"must\": [\n # {\"range\": {\"@timestamp\": {\"gte\": \"now-{}\".format(time_range), \"lte\": \"now\"}}},\n # {\"term\": {\"jsonEvent\": \"IDS\"}},\n # {\"term\": {\"event.event_type\": \"alert\"}}\n # ]\n # },\n # }\n # }\n\n params = get_params()\n result_json = elastic.search(index=params['index'], doc_type=params['type'], request_timeout=params['timeout'],\n body=query)\n return result_json\n\n\ndef get_document(doc_id, time_range=\"6h\"):\n query = {\n \"query\": {\n \"ids\": {\n \"values\": [doc_id]\n }\n }\n }\n\n params = get_params()\n result_json = elastic.search(index=params['index'], doc_type=params['type'], request_timeout=params['timeout'],\n body=query)\n return result_json\n\n", "sub_path": "mimic/libs/elastic_libs.py", "file_name": "elastic_libs.py", "file_ext": "py", "file_size_in_byte": 1554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "configparser.RawConfigParser", "line_number": 5, "usage_type": "call"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "569067669", "text": "\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url\nfrom django.conf import settings\nfrom .yasg import urlpatterns as doc_url\nfrom django.conf.urls.static import static\nimport main_app.views as views\nfrom django.urls import path, include\n\n# Посетитель портала\nurlpatterns = [\n\n # Главная страница - Получить последние опубликованные новости (поиск доступен)\n path('api/recent_messages', views.ShowRecentMessagesView.as_view()),\n\n # Главная страница - Получить самые популярные новости (поиск отсутствует)\n path('api/popular_news', views.ShowMostPopularMessagesView.as_view()),\n\n # Страница раздела - Получить новости выбранного раздела\n path('api/news_of_current_category', views.ShowMessagesOfCurrentCategoryView.as_view()),\n\n # Страница новости - Получить выбранную новость\n path('api/current_message', views.ShowCurrentMessageView.as_view()),\n\n # Увеличить счетчик просмотров выбранной новости\n path('api/update_view_counter/', views.UpdateViewCounterView.as_view()),\n\n # Получить самые популярные и закрепленные новости\n path('api/get_most_popular_and_pinned_messages', views.GetMostPopularAndPinnedMessages.as_view()),\n\n]\n\n# Администратор портала\nurlpatterns += [\n # Страница авторизации - Авторизация\n url(r'^auth/', include('djoser.urls')),\n url(r'^auth/', include('djoser.urls.jwt')),\n\n # Страница создания/редактирования/удаления новости - Добавить/изменить/удалить новость\n path('api/add_or_change_message', views.AddOrChangeMessageView.as_view()),\n]\n\n# Супер-администратор портала\nurlpatterns += [\n # Страница супер-администратора\n path('admin/', admin.site.urls),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n# urlpatterns += doc_url\n", "sub_path": "backend/project/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "main_app.views.ShowRecentMessagesView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "main_app.views.ShowRecentMessagesView", "line_number": 28, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "main_app.views.ShowMostPopularMessagesView.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "main_app.views.ShowMostPopularMessagesView", "line_number": 31, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "main_app.views.ShowMessagesOfCurrentCategoryView.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "main_app.views.ShowMessagesOfCurrentCategoryView", "line_number": 34, "usage_type": "attribute"}, {"api_name": 
"main_app.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "main_app.views.ShowCurrentMessageView.as_view", "line_number": 37, "usage_type": "call"}, {"api_name": "main_app.views.ShowCurrentMessageView", "line_number": 37, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "main_app.views.UpdateViewCounterView.as_view", "line_number": 40, "usage_type": "call"}, {"api_name": "main_app.views.UpdateViewCounterView", "line_number": 40, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "main_app.views.GetMostPopularAndPinnedMessages.as_view", "line_number": 43, "usage_type": "call"}, {"api_name": "main_app.views.GetMostPopularAndPinnedMessages", "line_number": 43, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 43, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 50, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "main_app.views.AddOrChangeMessageView.as_view", "line_number": 54, "usage_type": "call"}, {"api_name": "main_app.views.AddOrChangeMessageView", "line_number": 54, "usage_type": "attribute"}, {"api_name": "main_app.views", "line_number": 54, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 60, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 63, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 63, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "523252355", "text": "from setuptools import find_packages, setup\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nwith open(\"LICENSE\") as f:\n license = f.read()\n\nsetup(\n name=\"py-blue-pedal\",\n version=\"0.1.0\",\n description=(\n \"Python library to interact with Bluetooth Lower Energy (BLE) \"\n \"cycling smart trainers and heart rate monitors\"),\n long_description=readme,\n author=\"Renato Torres\",\n author_email=\"renato@willful.pt\",\n url=\"https://github.com/willful-it/pybluepedal\",\n license=license,\n packages=find_packages(exclude=(\"tests\", \"docs\"))\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "403996815", "text": "# coding: utf-8\n\n\"\"\"\n Cloudera Manager API\n\n
Cloudera Manager API v33\n\n    Introduced in Cloudera Manager 6.3.0\n\n    Cloudera Product Documentation
\n\n OpenAPI spec version: 6.3.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass ApiCdhUpgradeArgs(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'cdh_parcel_version': 'str',\n 'cdh_package_version': 'str',\n 'rolling_restart_args': 'ApiRollingUpgradeClusterArgs',\n 'deploy_client_config': 'bool',\n 'start_all_services': 'bool'\n }\n\n attribute_map = {\n 'cdh_parcel_version': 'cdhParcelVersion',\n 'cdh_package_version': 'cdhPackageVersion',\n 'rolling_restart_args': 'rollingRestartArgs',\n 'deploy_client_config': 'deployClientConfig',\n 'start_all_services': 'startAllServices'\n }\n\n def __init__(self, cdh_parcel_version=None, cdh_package_version=None, rolling_restart_args=None, deploy_client_config=None, start_all_services=None):\n \"\"\"\n ApiCdhUpgradeArgs - a model defined in Swagger\n \"\"\"\n\n self._cdh_parcel_version = None\n self._cdh_package_version = None\n self._rolling_restart_args = None\n self._deploy_client_config = None\n self._start_all_services = None\n\n if cdh_parcel_version is not None:\n self.cdh_parcel_version = cdh_parcel_version\n if cdh_package_version is not None:\n self.cdh_package_version = cdh_package_version\n if rolling_restart_args is not None:\n self.rolling_restart_args = rolling_restart_args\n if deploy_client_config is not None:\n self.deploy_client_config = deploy_client_config\n if start_all_services is not None:\n self.start_all_services = start_all_services\n\n @property\n def cdh_parcel_version(self):\n \"\"\"\n Gets the cdh_parcel_version of this ApiCdhUpgradeArgs.\n If using parcels, the full version of an already distributed parcel for the next major CDH version. Default is null, which indicates this is a package upgrade. Example versions are: '5.0.0-1.cdh5.0.0.p0.11' or '5.0.2-1.cdh5.0.2.p0.32'\n\n :return: The cdh_parcel_version of this ApiCdhUpgradeArgs.\n :rtype: str\n \"\"\"\n return self._cdh_parcel_version\n\n @cdh_parcel_version.setter\n def cdh_parcel_version(self, cdh_parcel_version):\n \"\"\"\n Sets the cdh_parcel_version of this ApiCdhUpgradeArgs.\n If using parcels, the full version of an already distributed parcel for the next major CDH version. Default is null, which indicates this is a package upgrade. Example versions are: '5.0.0-1.cdh5.0.0.p0.11' or '5.0.2-1.cdh5.0.2.p0.32'\n\n :param cdh_parcel_version: The cdh_parcel_version of this ApiCdhUpgradeArgs.\n :type: str\n \"\"\"\n\n self._cdh_parcel_version = cdh_parcel_version\n\n @property\n def cdh_package_version(self):\n \"\"\"\n Gets the cdh_package_version of this ApiCdhUpgradeArgs.\n If using packages, the full version of the CDH packages being upgraded to, such as \\\"5.1.2\\\". These packages must already be installed on the cluster before running the upgrade command. For backwards compatibility, if \\\"5.0.0\\\" is specified here, then the upgrade command will relax validation of installed packages to match v6 behavior, only checking major version.
Introduced in v9. Has no effect in older API versions, which assume \\\"5.0.0\\\"\n\n :return: The cdh_package_version of this ApiCdhUpgradeArgs.\n :rtype: str\n \"\"\"\n return self._cdh_package_version\n\n @cdh_package_version.setter\n def cdh_package_version(self, cdh_package_version):\n \"\"\"\n Sets the cdh_package_version of this ApiCdhUpgradeArgs.\n If using packages, the full version of the CDH packages being upgraded to, such as \\\"5.1.2\\\". These packages must already be installed on the cluster before running the upgrade command. For backwards compatibility, if \\\"5.0.0\\\" is specified here, then the upgrade command will relax validation of installed packages to match v6 behavior, only checking major version.
Introduced in v9. Has no effect in older API versions, which assume \\\"5.0.0\\\"\n\n :param cdh_package_version: The cdh_package_version of this ApiCdhUpgradeArgs.\n :type: str\n \"\"\"\n\n self._cdh_package_version = cdh_package_version\n\n @property\n def rolling_restart_args(self):\n \"\"\"\n Gets the rolling_restart_args of this ApiCdhUpgradeArgs.\n If provided and rolling restart is available, will perform rolling restart with the requested arguments. If provided and rolling restart is not available, errors. If omitted, will do a regular restart.
Introduced in v9. Has no effect in older API versions, which must always do a hard restart.\n\n :return: The rolling_restart_args of this ApiCdhUpgradeArgs.\n :rtype: ApiRollingUpgradeClusterArgs\n \"\"\"\n return self._rolling_restart_args\n\n @rolling_restart_args.setter\n def rolling_restart_args(self, rolling_restart_args):\n \"\"\"\n Sets the rolling_restart_args of this ApiCdhUpgradeArgs.\n If provided and rolling restart is available, will perform rolling restart with the requested arguments. If provided and rolling restart is not available, errors. If omitted, will do a regular restart.
Introduced in v9. Has no effect in older API versions, which must always do a hard restart.\n\n :param rolling_restart_args: The rolling_restart_args of this ApiCdhUpgradeArgs.\n :type: ApiRollingUpgradeClusterArgs\n \"\"\"\n\n self._rolling_restart_args = rolling_restart_args\n\n @property\n def deploy_client_config(self):\n \"\"\"\n Gets the deploy_client_config of this ApiCdhUpgradeArgs.\n Not used starting in v9 - Client config is always deployed as part of upgrade. For older versions, determines whether client configuration should be deployed as part of upgrade. Default is true.\n\n :return: The deploy_client_config of this ApiCdhUpgradeArgs.\n :rtype: bool\n \"\"\"\n return self._deploy_client_config\n\n @deploy_client_config.setter\n def deploy_client_config(self, deploy_client_config):\n \"\"\"\n Sets the deploy_client_config of this ApiCdhUpgradeArgs.\n Not used starting in v9 - Client config is always deployed as part of upgrade. For older versions, determines whether client configuration should be deployed as part of upgrade. Default is true.\n\n :param deploy_client_config: The deploy_client_config of this ApiCdhUpgradeArgs.\n :type: bool\n \"\"\"\n\n self._deploy_client_config = deploy_client_config\n\n @property\n def start_all_services(self):\n \"\"\"\n Gets the start_all_services of this ApiCdhUpgradeArgs.\n Not used starting in v9 - All servies are always started as part of upgrade. For older versions, determines whether all services should be started should be deployed as part of upgrade. Default is true.\n\n :return: The start_all_services of this ApiCdhUpgradeArgs.\n :rtype: bool\n \"\"\"\n return self._start_all_services\n\n @start_all_services.setter\n def start_all_services(self, start_all_services):\n \"\"\"\n Sets the start_all_services of this ApiCdhUpgradeArgs.\n Not used starting in v9 - All servies are always started as part of upgrade. For older versions, determines whether all services should be started should be deployed as part of upgrade. 
Default is true.\n\n :param start_all_services: The start_all_services of this ApiCdhUpgradeArgs.\n :type: bool\n \"\"\"\n\n self._start_all_services = start_all_services\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, ApiCdhUpgradeArgs):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n", "sub_path": "venv/lib/python3.7/site-packages/cm_client/models/api_cdh_upgrade_args.py", "file_name": "api_cdh_upgrade_args.py", "file_ext": "py", "file_size_in_byte": 9725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "six.iteritems", "line_number": 192, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "570211085", "text": "from GUI.mainwindow import MainWindow\nimport sys\nimport argparse\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom PyQt5.QtGui import QIcon\nsys.path.append('./GUI/')\nsys.path.append('./gsi_classification/')\n\n\ndef build_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input',\n help='Path to an input image',\n required=False,\n type=str)\n return parser\n\n\nargs = build_argparser().parse_args()\n\n\napp = QApplication(sys.argv)\napp.setWindowIcon(QIcon('GUI/icon.ico'))\n\nwindow = MainWindow(inputFile=args.input)\nwindow.show()\n\n# Start the event loop.\napp.exec_()\n", "sub_path": "sources/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "17", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 23, "usage_type": "call"}, {"api_name": "GUI.mainwindow.MainWindow", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "464415236", "text": "from datetime import datetime\nf1=open('fisier_100k_aleator.txt','r')\nf2=open('fisier_100k_crescator_an.txt','w')\n\nx=[]\nstart=datetime.now()\n\ndef insertion(x):\n for i in range(1,len(x)):\n aux=x[i]\n j=i-1\n while j>=0 and aux
Title: %s
' % entry.title.text)\n playerURL = entry.GetSwfUrl()\n self.response.out.write('